From 4b0369217be29d5c423f220fb5349ef3e276038d Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 21 Feb 2017 21:54:19 -0600 Subject: [PATCH 001/266] partition_mesh creates facial_adjacency_groups --- meshmode/mesh/processing.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 37e4ac26..f49ccfbe 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -139,11 +139,21 @@ def partition_mesh(mesh, part_per_element, part_nr): new_nodes[group_nr], unit_nodes=mesh_group.unit_nodes)) from meshmode.mesh import Mesh - part_mesh = Mesh(new_vertices, new_mesh_groups) + part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None) return (part_mesh, queried_elems) +def set_rank_boundaries(part_mesh, mesh, part_to_global): + """ + Looks through facial_adjacency_groups in part_mesh. + If a boundary is found, then it is possible that it + used to be connected to other faces from mesh. + If this is the case, then part_mesh will have special + boundary_tags where faces used to be connected. 
+ """ + + # {{{ orientations def find_volume_mesh_element_group_orientation(vertices, grp): -- GitLab From 38f0c741112f3b18e2312b500b632e42569250e6 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 23 Feb 2017 23:49:08 -0600 Subject: [PATCH 002/266] boundary tags set in partition_mesh --- meshmode/mesh/processing.py | 33 +++++++++++++++++++++++---------- test/test_meshmode.py | 27 ++++++++++++++++++++++----- 2 files changed, 45 insertions(+), 15 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index f49ccfbe..49d27253 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -141,17 +141,30 @@ def partition_mesh(mesh, part_per_element, part_nr): from meshmode.mesh import Mesh part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None) - return (part_mesh, queried_elems) - + from meshmode.mesh import BTAG_ALL + + for igrp in range(num_groups): + f_group = part_mesh.facial_adjacency_groups[igrp][None] + grp_elems = f_group.elements + grp_faces = f_group.element_faces + for elem_idx in range(len(grp_elems)): + elem = grp_elems[elem_idx] + face = grp_faces[elem_idx] + tag = -f_group.neighbors[elem_idx] + parent_elem = queried_elems[elem] + parent_group = 0 + while parent_elem >= mesh.groups[parent_group].nelements: + parent_elem -= mesh.groups[parent_group].nelements + parent_group += 1 + assert parent_group < num_groups, "oops..." + parent_facial_group = mesh.facial_adjacency_groups[parent_group][None] + idxs = np.where(parent_facial_group.elements == parent_elem)[0] + for parent_face in parent_facial_group.element_faces[idxs]: + if face == parent_face: + f_group.neighbors[elem_idx] = -(tag ^ part_mesh.boundary_tag_bit(BTAG_ALL)) + #print("Boundary face", face, "of element", elem, "should be connected to", parent_elem, "in parent mesh.") -def set_rank_boundaries(part_mesh, mesh, part_to_global): - """ - Looks through facial_adjacency_groups in part_mesh. 
- If a boundary is found, then it is possible that it - used to be connected to other faces from mesh. - If this is the case, then part_mesh will have special - boundary_tags where faces used to be connected. - """ + return (part_mesh, queried_elems) # {{{ orientations diff --git a/test/test_meshmode.py b/test/test_meshmode.py index a793743c..ca413910 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -70,11 +70,11 @@ def test_partition_boxes_mesh(): n = 5 num_parts = 7 from meshmode.mesh.generation import generate_regular_rect_mesh - mesh1 = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) - mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) + mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) + #mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes([mesh1, mesh2]) + #from meshmode.mesh.processing import merge_disjoint_meshes + #mesh = merge_disjoint_meshes([mesh1, mesh2]) adjacency_list = np.zeros((mesh.nelements,), dtype=set) for elem in range(mesh.nelements): @@ -92,7 +92,24 @@ def test_partition_boxes_mesh(): partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] assert mesh.nelements == np.sum( - [new_meshes[i].nelements for i in range(num_parts)]) + [new_meshes[i].nelements for i in range(num_parts)]), \ + "part_mesh has the wrong number of elements" + + print(count_BTAG_ALL(mesh)) + print(np.sum([count_BTAG_ALL(new_meshes[i]) for i in range(num_parts)])) + assert count_BTAG_ALL(mesh) == np.sum( + [count_BTAG_ALL(new_meshes[i]) for i in range(num_parts)]), \ + "part_mesh has the wrong number of BTAG_ALL boundaries" + + +def count_BTAG_ALL(mesh): + num_bnds = 0 + for adj_groups in mesh.facial_adjacency_groups: + bdry_group = adj_groups[None] + for mesh_tag in -bdry_group.neighbors: + if mesh_tag & mesh.boundary_tag_bit(BTAG_ALL) != 0: + num_bnds += 1 + 
return num_bnds # }}} -- GitLab From be9146e4574c99b8c5010fc19f4f6f63db57c5c6 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 27 Feb 2017 21:15:49 -0600 Subject: [PATCH 003/266] Work on boundary tags within partition_mesh --- meshmode/mesh/processing.py | 13 +++++++------ test/test_meshmode.py | 19 +++++++++---------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 49d27253..93943e1d 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -157,12 +157,13 @@ def partition_mesh(mesh, part_per_element, part_nr): parent_elem -= mesh.groups[parent_group].nelements parent_group += 1 assert parent_group < num_groups, "oops..." - parent_facial_group = mesh.facial_adjacency_groups[parent_group][None] - idxs = np.where(parent_facial_group.elements == parent_elem)[0] - for parent_face in parent_facial_group.element_faces[idxs]: - if face == parent_face: - f_group.neighbors[elem_idx] = -(tag ^ part_mesh.boundary_tag_bit(BTAG_ALL)) - #print("Boundary face", face, "of element", elem, "should be connected to", parent_elem, "in parent mesh.") + parent_f_group = mesh.facial_adjacency_groups[parent_group] + for _, parent_facial_group in parent_f_group.items(): + for idx in np.where(parent_facial_group.elements == parent_elem)[0]: + if parent_facial_group.neighbors[idx] >= 0: + if face == parent_facial_group.element_faces[idx]: + f_group.neighbors[elem_idx] = -(tag & ~part_mesh.boundary_tag_bit(BTAG_ALL)) + #print("Boundary face", face, "of element", elem, "should be connected to element", parent_elem, "in parent group", parent_group) return (part_mesh, queried_elems) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index ca413910..97fc59de 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -95,20 +95,19 @@ def test_partition_boxes_mesh(): [new_meshes[i].nelements for i in range(num_parts)]), \ "part_mesh has the wrong number of elements" - 
print(count_BTAG_ALL(mesh)) - print(np.sum([count_BTAG_ALL(new_meshes[i]) for i in range(num_parts)])) - assert count_BTAG_ALL(mesh) == np.sum( - [count_BTAG_ALL(new_meshes[i]) for i in range(num_parts)]), \ + assert count_btag_all(mesh) == np.sum( + [count_btag_all(new_meshes[i]) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" -def count_BTAG_ALL(mesh): +def count_btag_all(mesh): num_bnds = 0 - for adj_groups in mesh.facial_adjacency_groups: - bdry_group = adj_groups[None] - for mesh_tag in -bdry_group.neighbors: - if mesh_tag & mesh.boundary_tag_bit(BTAG_ALL) != 0: - num_bnds += 1 + for adj_dict in mesh.facial_adjacency_groups: + for _, bdry_group in adj_dict.items(): + for neighbors in bdry_group.neighbors: + if neighbors < 0: + if -neighbors & mesh.boundary_tag_bit(BTAG_ALL) != 0: + num_bnds += 1 return num_bnds # }}} -- GitLab From 143da67731cf62f5fa96ef5d929bb5ea3a5ebab6 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 28 Feb 2017 00:20:10 -0600 Subject: [PATCH 004/266] Rank boundary tags --- meshmode/mesh/processing.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 93943e1d..762cf41c 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -138,8 +138,12 @@ def partition_mesh(mesh, part_per_element, part_nr): type(mesh_group)(mesh_group.order, new_indices[group_nr], new_nodes[group_nr], unit_nodes=mesh_group.unit_nodes)) + num_parts = np.max(part_per_element) + boundary_tags = list(range(num_parts)) + from meshmode.mesh import Mesh - part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None) + part_mesh = Mesh(new_vertices, new_mesh_groups, \ + facial_adjacency_groups=None, boundary_tags=boundary_tags) from meshmode.mesh import BTAG_ALL @@ -162,7 +166,12 @@ def partition_mesh(mesh, part_per_element, part_nr): for idx in np.where(parent_facial_group.elements == parent_elem)[0]: if 
parent_facial_group.neighbors[idx] >= 0: if face == parent_facial_group.element_faces[idx]: - f_group.neighbors[elem_idx] = -(tag & ~part_mesh.boundary_tag_bit(BTAG_ALL)) + rank_neighbor = parent_facial_group.neighbors[idx] + # TODO: With mulitple groups, rank_neighbors will be wrong. + neighbor_part_num = part_per_element[rank_neighbor] + tag = tag & ~part_mesh.boundary_tag_bit(BTAG_ALL) + tag = tag | part_mesh.boundary_tag_bit(neighbor_part_num) + f_group.neighbors[elem_idx] = -tag #print("Boundary face", face, "of element", elem, "should be connected to element", parent_elem, "in parent group", parent_group) return (part_mesh, queried_elems) -- GitLab From e902311138043d7de81aae0767ea5c3819e610f8 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 28 Feb 2017 10:15:15 -0600 Subject: [PATCH 005/266] Small fixes. --- meshmode/mesh/processing.py | 14 ++++++++++---- test/test_meshmode.py | 8 +++++--- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 762cf41c..404f0d84 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -167,12 +167,18 @@ def partition_mesh(mesh, part_per_element, part_nr): if parent_facial_group.neighbors[idx] >= 0: if face == parent_facial_group.element_faces[idx]: rank_neighbor = parent_facial_group.neighbors[idx] - # TODO: With mulitple groups, rank_neighbors will be wrong. 
- neighbor_part_num = part_per_element[rank_neighbor] + grp_start_elem = 0 + for grp in range(parent_group): + grp_start_elem += mesh.groups[grp].nelements + neighbor_part_num = part_per_element[ + rank_neighbor + grp_start_elem] tag = tag & ~part_mesh.boundary_tag_bit(BTAG_ALL) - tag = tag | part_mesh.boundary_tag_bit(neighbor_part_num) + tag = tag | part_mesh.boundary_tag_bit( + neighbor_part_num) f_group.neighbors[elem_idx] = -tag - #print("Boundary face", face, "of element", elem, "should be connected to element", parent_elem, "in parent group", parent_group) + #print("Boundary face", face, "of element", elem, + # "should be connected to element", rank_neighbor, + # "in partition", neighbor_part_num) return (part_mesh, queried_elems) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 97fc59de..f89a3e28 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -49,7 +49,8 @@ logger = logging.getLogger(__name__) # {{{ partition_mesh - +''' +#TODO facial_adjacency_groups is not available in torus. def test_partition_torus_mesh(): from meshmode.mesh.generation import generate_torus my_mesh = generate_torus(2, 1, n_outer=2, n_inner=2) @@ -64,13 +65,14 @@ def test_partition_torus_mesh(): assert part_mesh0.nelements == 2 assert part_mesh1.nelements == 4 assert part_mesh2.nelements == 2 - +''' def test_partition_boxes_mesh(): n = 5 - num_parts = 7 + num_parts = 3 from meshmode.mesh.generation import generate_regular_rect_mesh mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) + #TODO facial_adjacency_groups is not available from merge_disjoint_meshes. 
#mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) #from meshmode.mesh.processing import merge_disjoint_meshes -- GitLab From 4bd62c70ae7bc876b0a73b6339d8bd38276e2dcc Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 1 Mar 2017 22:59:37 -0600 Subject: [PATCH 006/266] Added InterPartitionAdjacency class --- meshmode/mesh/__init__.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 413b79df..237bc2c4 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -381,6 +381,27 @@ class NodalAdjacency(Record): # }}} +# {{{ partition adjacency + +class InterPartitionAdjacency(): + """ + Describes adjacency information of elements between partitions. + """ + + def __init__(self): + self.elements = [] + self.element_faces = [] + self.neighbors = [] + self.neighbor_faces = [] + + def add_connection(self, elem, face, neighbor, neighbor_face): + self.elements.append(elem) + self.element_faces.append(face) + self.neighbors.append(neighbor) + self.neighbor_faces.append(neighbor_face) + +# }}} + # {{{ facial adjacency -- GitLab From 5df8067497109d04a641b42c1b4ab7c8f0b1e26e Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 1 Mar 2017 23:00:15 -0600 Subject: [PATCH 007/266] partition_mesh implements InterPartitionAdjacency --- meshmode/mesh/processing.py | 20 ++++++++++++++++---- test/test_meshmode.py | 3 ++- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 404f0d84..6d7e792c 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -142,11 +142,16 @@ def partition_mesh(mesh, part_per_element, part_nr): boundary_tags = list(range(num_parts)) from meshmode.mesh import Mesh - part_mesh = Mesh(new_vertices, new_mesh_groups, \ + part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None, boundary_tags=boundary_tags) from meshmode.mesh import BTAG_ALL + 
from meshmode.mesh import InterPartitionAdjacency + tags_to_part_adj = dict() + for tag in range(np.max(part_per_element) + 1): + tags_to_part_adj[tag] = InterPartitionAdjacency() + for igrp in range(num_groups): f_group = part_mesh.facial_adjacency_groups[igrp][None] grp_elems = f_group.elements @@ -167,11 +172,15 @@ def partition_mesh(mesh, part_per_element, part_nr): if parent_facial_group.neighbors[idx] >= 0: if face == parent_facial_group.element_faces[idx]: rank_neighbor = parent_facial_group.neighbors[idx] - grp_start_elem = 0 + rank_neighbor_face = parent_facial_group.neighbor_faces[idx] + mgrp_start_elem = 0 + pgrp_start_elem = 0 for grp in range(parent_group): - grp_start_elem += mesh.groups[grp].nelements + mgrp_start_elem += mesh.groups[grp].nelements + for grp in range(num_groups): + pgrp_start_elem += part_mesh.groups[grp].nelements neighbor_part_num = part_per_element[ - rank_neighbor + grp_start_elem] + rank_neighbor + mgrp_start_elem] tag = tag & ~part_mesh.boundary_tag_bit(BTAG_ALL) tag = tag | part_mesh.boundary_tag_bit( neighbor_part_num) @@ -179,6 +188,9 @@ def partition_mesh(mesh, part_per_element, part_nr): #print("Boundary face", face, "of element", elem, # "should be connected to element", rank_neighbor, # "in partition", neighbor_part_num) + tags_to_part_adj[neighbor_part_num].add_connection( + elem + pgrp_start_elem, face, + rank_neighbor + mgrp_start_elem, rank_neighbor_face) return (part_mesh, queried_elems) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index f89a3e28..f4926134 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -50,7 +50,7 @@ logger = logging.getLogger(__name__) # {{{ partition_mesh ''' -#TODO facial_adjacency_groups is not available in torus. +#TODO facial_adjacency_groups is not available in generate_torus. 
def test_partition_torus_mesh(): from meshmode.mesh.generation import generate_torus my_mesh = generate_torus(2, 1, n_outer=2, n_inner=2) @@ -67,6 +67,7 @@ def test_partition_torus_mesh(): assert part_mesh2.nelements == 2 ''' + def test_partition_boxes_mesh(): n = 5 num_parts = 3 -- GitLab From dba58f680f0e75a5a38fcfafacbf0abf15c68254 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 2 Mar 2017 10:50:49 -0600 Subject: [PATCH 008/266] Fix whitespace --- meshmode/mesh/__init__.py | 1 + meshmode/mesh/processing.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 237bc2c4..c7520a62 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -381,6 +381,7 @@ class NodalAdjacency(Record): # }}} + # {{{ partition adjacency class InterPartitionAdjacency(): diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 6d7e792c..7258d856 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -172,7 +172,8 @@ def partition_mesh(mesh, part_per_element, part_nr): if parent_facial_group.neighbors[idx] >= 0: if face == parent_facial_group.element_faces[idx]: rank_neighbor = parent_facial_group.neighbors[idx] - rank_neighbor_face = parent_facial_group.neighbor_faces[idx] + rank_neighbor_face = \ + parent_facial_group.neighbor_faces[idx] mgrp_start_elem = 0 pgrp_start_elem = 0 for grp in range(parent_group): -- GitLab From 4dd7acc3f8f9cbf7c5dd578625cf81b32958d6e7 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 2 Mar 2017 22:02:22 -0600 Subject: [PATCH 009/266] Add possible InterPartitionAdjacency class --- meshmode/mesh/__init__.py | 33 +++++++++++-- meshmode/mesh/processing.py | 94 +++++++++++++++++++++---------------- 2 files changed, 81 insertions(+), 46 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index c7520a62..0c9ed893 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -392,15 +392,38 @@ 
class InterPartitionAdjacency(): def __init__(self): self.elements = [] self.element_faces = [] - self.neighbors = [] + self.neighbor_elems = [] self.neighbor_faces = [] + self.neighbor_groups = [] - def add_connection(self, elem, face, neighbor, neighbor_face): - self.elements.append(elem) - self.element_faces.append(face) - self.neighbors.append(neighbor) + def add_connection(self, elem, face, neighbor_group, neighbor_elem, neighbor_face): + self.elems.append(elem) + self.elem_faces.append(face) + self.neighbor_groups.append(neighbor_group) + self.neighbor_elems.append(neighbor_elem) self.neighbor_faces.append(neighbor_face) + def get_neighbor(self, elem, face): + for idx in range(len(self.elements)): + if elem == self.elements[idx] and face == self.element_faces[idx]: + return (self.neighbor_groups[idx], + self.neighbor_elem[idx], + self.neighbor_faces[idx]) + + +class OtherPossibility(): + """ + """ + + def __init__(self): + self.adjacent = dict() + + def add_connection(self, tag, elem, face, neighbor_group, neighbor_elem, neighbor_face): + self.adjacent[(tag, elem, face)] = (neighbor_group, neighbor_elem, neighbor_face) + + def get_neighbor(self, tag, elem, face): + return self.adjacent((tag, elem, face)) + # }}} diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 7258d856..06a96cbb 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -147,51 +147,63 @@ def partition_mesh(mesh, part_per_element, part_nr): from meshmode.mesh import BTAG_ALL - from meshmode.mesh import InterPartitionAdjacency - tags_to_part_adj = dict() - for tag in range(np.max(part_per_element) + 1): - tags_to_part_adj[tag] = InterPartitionAdjacency() + #from meshmode.mesh import InterPartitionAdjacency + #num_connection_tags = np.max(part_per_element) + 1 + #tags_to_part_adj = [] + #for _ in range(num_connection_tags): + # tags_to_part_adj.append(InterPartitionAdjacency()) + + from meshmode.mesh import OtherPossibility + part_adjacency = 
OtherPossibility() for igrp in range(num_groups): - f_group = part_mesh.facial_adjacency_groups[igrp][None] - grp_elems = f_group.elements - grp_faces = f_group.element_faces - for elem_idx in range(len(grp_elems)): - elem = grp_elems[elem_idx] - face = grp_faces[elem_idx] - tag = -f_group.neighbors[elem_idx] + part_group = part_mesh.groups[igrp] + boundary_adj = part_mesh.facial_adjacency_groups[igrp][None] + boundary_elems = boundary_adj.elements + boundary_faces = boundary_adj.element_faces + for elem_idx in range(len(boundary_elems)): + elem = boundary_elems[elem_idx] + face = boundary_faces[elem_idx] + tags = -boundary_adj.neighbors[elem_idx] + assert tags >= 0, "Expected boundary tag in adjacency group." parent_elem = queried_elems[elem] - parent_group = 0 - while parent_elem >= mesh.groups[parent_group].nelements: - parent_elem -= mesh.groups[parent_group].nelements - parent_group += 1 - assert parent_group < num_groups, "oops..." - parent_f_group = mesh.facial_adjacency_groups[parent_group] - for _, parent_facial_group in parent_f_group.items(): + parent_group_num = 0 + while parent_elem >= mesh.groups[parent_group_num].nelements: + parent_elem -= mesh.groups[parent_group_num].nelements + parent_group_num += 1 + assert parent_group_num < num_groups, "Unable to find neighbor." 
+ parent_grp_elem_base = mesh.groups[parent_group_num].element_nr_base + parent_boundary_adj = mesh.facial_adjacency_groups[parent_group_num] + for _, parent_facial_group in parent_boundary_adj.items(): for idx in np.where(parent_facial_group.elements == parent_elem)[0]: - if parent_facial_group.neighbors[idx] >= 0: - if face == parent_facial_group.element_faces[idx]: - rank_neighbor = parent_facial_group.neighbors[idx] - rank_neighbor_face = \ - parent_facial_group.neighbor_faces[idx] - mgrp_start_elem = 0 - pgrp_start_elem = 0 - for grp in range(parent_group): - mgrp_start_elem += mesh.groups[grp].nelements - for grp in range(num_groups): - pgrp_start_elem += part_mesh.groups[grp].nelements - neighbor_part_num = part_per_element[ - rank_neighbor + mgrp_start_elem] - tag = tag & ~part_mesh.boundary_tag_bit(BTAG_ALL) - tag = tag | part_mesh.boundary_tag_bit( - neighbor_part_num) - f_group.neighbors[elem_idx] = -tag - #print("Boundary face", face, "of element", elem, - # "should be connected to element", rank_neighbor, - # "in partition", neighbor_part_num) - tags_to_part_adj[neighbor_part_num].add_connection( - elem + pgrp_start_elem, face, - rank_neighbor + mgrp_start_elem, rank_neighbor_face) + if parent_facial_group.neighbors[idx] >= 0 and \ + parent_facial_group.element_faces[idx] == face: + rank_neighbor = parent_facial_group.neighbors[idx] + rank_neighbor_face = parent_facial_group.neighbor_faces[idx] + + new_tag = part_per_element[rank_neighbor + + parent_grp_elem_base] + tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) + tags = tags | part_mesh.boundary_tag_bit(new_tag) + boundary_adj.neighbors[elem_idx] = -tags + + #print("Boundary face", face, "of element", elem, + # "should be connected to element", rank_neighbor, + # "in partition", neighbor_part_num) + + #tags_to_part_adj[new_tag].add_connection( + # elem + part_group.element_nr_base, + # face, + # rank_neighbor + parent_grp_elem_base, + # rank_neighbor_face, + # parent_group_num) + + 
part_adjacency.add_connection(new_tag, + elem + part_group.element_nr_base, + face, + rank_neighbor + parent_grp_elem_base, + rank_neighbor_face, + parent_group_num) return (part_mesh, queried_elems) -- GitLab From d0d9212d4a627c22a7ab2c6b3bf6d028827df2e0 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 2 Mar 2017 22:04:08 -0600 Subject: [PATCH 010/266] Whitespace fix --- meshmode/mesh/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 0c9ed893..85b081c1 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -396,7 +396,8 @@ class InterPartitionAdjacency(): self.neighbor_faces = [] self.neighbor_groups = [] - def add_connection(self, elem, face, neighbor_group, neighbor_elem, neighbor_face): + def add_connection(self, elem, face, + neighbor_group, neighbor_elem, neighbor_face): self.elems.append(elem) self.elem_faces.append(face) self.neighbor_groups.append(neighbor_group) -- GitLab From 4c23d5c8cf36559a98dfe2f05b25804b894835f1 Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 4 Mar 2017 16:46:38 -0600 Subject: [PATCH 011/266] Fix whitespace --- meshmode/mesh/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 85b081c1..c2e108ae 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -419,8 +419,10 @@ class OtherPossibility(): def __init__(self): self.adjacent = dict() - def add_connection(self, tag, elem, face, neighbor_group, neighbor_elem, neighbor_face): - self.adjacent[(tag, elem, face)] = (neighbor_group, neighbor_elem, neighbor_face) + def add_connection(self, tag, elem, face, + neighbor_group, neighbor_elem, neighbor_face): + self.adjacent[(tag, elem, face)] = \ + (neighbor_group, neighbor_elem, neighbor_face) def get_neighbor(self, tag, elem, face): return self.adjacent((tag, elem, face)) -- GitLab From 725d78ec1bd21cbed774367e1c2e046e2243807a 
Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 4 Mar 2017 17:20:20 -0600 Subject: [PATCH 012/266] Add test for partition tags --- meshmode/mesh/__init__.py | 2 +- test/test_meshmode.py | 14 +++++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index c2e108ae..dcefeb28 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -425,7 +425,7 @@ class OtherPossibility(): (neighbor_group, neighbor_elem, neighbor_face) def get_neighbor(self, tag, elem, face): - return self.adjacent((tag, elem, face)) + return self.adjacent[(tag, elem, face)] # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index f4926134..57775dd0 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -101,6 +101,18 @@ def test_partition_boxes_mesh(): assert count_btag_all(mesh) == np.sum( [count_btag_all(new_meshes[i]) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" + + for part_num in range(num_parts): + for f_groups in new_meshes[part_num].facial_adjacency_groups: + f_grp = f_groups[None] + for idx in range(len(f_grp.elements)): + tag = -f_grp.neighbors[idx] + if tag >= 0: + elem = f_grp.elements[idx] + face = f_grp.element_faces[idx] + (n_part, n_elem, n_face) = ...get_neighbor(tag, elem, face) + assert (part_num, elem, face) = ...get_neighbor(n_part, n_elem, n_face) + def count_btag_all(mesh): @@ -111,7 +123,7 @@ def count_btag_all(mesh): if neighbors < 0: if -neighbors & mesh.boundary_tag_bit(BTAG_ALL) != 0: num_bnds += 1 - return num_bnds + return num_bnds # }}} -- GitLab From 84d49c6b1c9ee61399ec357f436d56eea2fceed5 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 5 Mar 2017 20:36:26 -0600 Subject: [PATCH 013/266] InterpartitionAdj is consistent --- meshmode/mesh/__init__.py | 44 +++++++--------------------------- meshmode/mesh/processing.py | 48 +++++++++++++++---------------------- test/test_meshmode.py | 26 +++++++++++++------- 3 files 
changed, 44 insertions(+), 74 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index dcefeb28..e78f54fb 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -382,50 +382,21 @@ class NodalAdjacency(Record): # }}} -# {{{ partition adjacency - -class InterPartitionAdjacency(): - """ - Describes adjacency information of elements between partitions. - """ - - def __init__(self): - self.elements = [] - self.element_faces = [] - self.neighbor_elems = [] - self.neighbor_faces = [] - self.neighbor_groups = [] - - def add_connection(self, elem, face, - neighbor_group, neighbor_elem, neighbor_face): - self.elems.append(elem) - self.elem_faces.append(face) - self.neighbor_groups.append(neighbor_group) - self.neighbor_elems.append(neighbor_elem) - self.neighbor_faces.append(neighbor_face) - - def get_neighbor(self, elem, face): - for idx in range(len(self.elements)): - if elem == self.elements[idx] and face == self.element_faces[idx]: - return (self.neighbor_groups[idx], - self.neighbor_elem[idx], - self.neighbor_faces[idx]) - +# {{{ partition adjacency -class OtherPossibility(): +class InterPartitionAdj(): """ + Interface is not final. """ def __init__(self): self.adjacent = dict() - def add_connection(self, tag, elem, face, - neighbor_group, neighbor_elem, neighbor_face): - self.adjacent[(tag, elem, face)] = \ - (neighbor_group, neighbor_elem, neighbor_face) + def add_connection(self, elem, face, neighbor_elem, neighbor_face): + self.adjacent[(elem, face)] = (neighbor_elem, neighbor_face) - def get_neighbor(self, tag, elem, face): - return self.adjacent[(tag, elem, face)] + def get_neighbor(self, elem, face): + return self.adjacent[(elem, face)] # }}} @@ -585,6 +556,7 @@ class Mesh(Record): will result in exceptions. Lastly, a data structure as described in :attr:`facial_adjacency_groups` may be passed. 
""" + el_nr = 0 node_nr = 0 diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 06a96cbb..43b656ad 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -147,14 +147,9 @@ def partition_mesh(mesh, part_per_element, part_nr): from meshmode.mesh import BTAG_ALL - #from meshmode.mesh import InterPartitionAdjacency - #num_connection_tags = np.max(part_per_element) + 1 - #tags_to_part_adj = [] - #for _ in range(num_connection_tags): - # tags_to_part_adj.append(InterPartitionAdjacency()) - - from meshmode.mesh import OtherPossibility - part_adjacency = OtherPossibility() + #TODO This should probably be in the Mesh class. + from meshmode.mesh import InterPartitionAdj + part_mesh.interpartition_adj = InterPartitionAdj() for igrp in range(num_groups): part_group = part_mesh.groups[igrp] @@ -165,6 +160,7 @@ def partition_mesh(mesh, part_per_element, part_nr): elem = boundary_elems[elem_idx] face = boundary_faces[elem_idx] tags = -boundary_adj.neighbors[elem_idx] + # Is is reasonable to expect this assertation? assert tags >= 0, "Expected boundary tag in adjacency group." 
parent_elem = queried_elems[elem] parent_group_num = 0 @@ -178,32 +174,26 @@ def partition_mesh(mesh, part_per_element, part_nr): for idx in np.where(parent_facial_group.elements == parent_elem)[0]: if parent_facial_group.neighbors[idx] >= 0 and \ parent_facial_group.element_faces[idx] == face: - rank_neighbor = parent_facial_group.neighbors[idx] + rank_neighbor = (parent_facial_group.neighbors[idx] + + parent_grp_elem_base) rank_neighbor_face = parent_facial_group.neighbor_faces[idx] - - new_tag = part_per_element[rank_neighbor - + parent_grp_elem_base] + + n_part_nr = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) - tags = tags | part_mesh.boundary_tag_bit(new_tag) + tags = tags | part_mesh.boundary_tag_bit(n_part_nr) boundary_adj.neighbors[elem_idx] = -tags - - #print("Boundary face", face, "of element", elem, - # "should be connected to element", rank_neighbor, - # "in partition", neighbor_part_num) - - #tags_to_part_adj[new_tag].add_connection( - # elem + part_group.element_nr_base, - # face, - # rank_neighbor + parent_grp_elem_base, - # rank_neighbor_face, - # parent_group_num) - - part_adjacency.add_connection(new_tag, + + # Find the neighbor element from the other partition + n_elem = np.count_nonzero( + part_per_element[:rank_neighbor] == n_part_nr) + + # TODO Test if this works with multiple groups + # Do I need to add the element number base? 
+ part_mesh.interpartition_adj.add_connection( elem + part_group.element_nr_base, face, - rank_neighbor + parent_grp_elem_base, - rank_neighbor_face, - parent_group_num) + n_elem, + rank_neighbor_face) return (part_mesh, queried_elems) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 57775dd0..e51856d7 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -70,7 +70,7 @@ def test_partition_torus_mesh(): def test_partition_boxes_mesh(): n = 5 - num_parts = 3 + num_parts = 7 from meshmode.mesh.generation import generate_regular_rect_mesh mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) #TODO facial_adjacency_groups is not available from merge_disjoint_meshes. @@ -102,19 +102,27 @@ def test_partition_boxes_mesh(): [count_btag_all(new_meshes[i]) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" - for part_num in range(num_parts): - for f_groups in new_meshes[part_num].facial_adjacency_groups: + for part_nr in range(num_parts): + for f_groups in new_meshes[part_nr].facial_adjacency_groups: f_grp = f_groups[None] for idx in range(len(f_grp.elements)): + # Are all f_grp.neighbors guaranteed to be negative + # since I'm taking the boundary facial group? tag = -f_grp.neighbors[idx] - if tag >= 0: - elem = f_grp.elements[idx] - face = f_grp.element_faces[idx] - (n_part, n_elem, n_face) = ...get_neighbor(tag, elem, face) - assert (part_num, elem, face) = ...get_neighbor(n_part, n_elem, n_face) + elem = f_grp.elements[idx] + face = f_grp.element_faces[idx] + for n_part_nr in range(num_parts): + if tag >= 0 and \ + tag & new_meshes[part_nr].boundary_tag_bit(n_part_nr) != 0: + # Is this the best way to probe the tag? + # Can one tag have multiple partition neighbors? 
+ (n_elem, n_face) = new_meshes[part_nr].\ + interpartition_adj.get_neighbor(elem, face) + assert (elem, face) == new_meshes[n_part_nr].\ + interpartition_adj.get_neighbor(n_elem, n_face),\ + "InterpartitionAdj is not consistent" - def count_btag_all(mesh): num_bnds = 0 for adj_dict in mesh.facial_adjacency_groups: -- GitLab From aa9c56317fba85e4d0f0693036a73be0ad5119f1 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 5 Mar 2017 20:45:56 -0600 Subject: [PATCH 014/266] Fix whitespace --- meshmode/mesh/__init__.py | 12 ++++++------ meshmode/mesh/processing.py | 4 ++-- test/test_meshmode.py | 10 +++++----- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index e78f54fb..32efe9b9 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -382,22 +382,22 @@ class NodalAdjacency(Record): # }}} -# {{{ partition adjacency - +# {{{ partition adjacency + class InterPartitionAdj(): """ Interface is not final. """ - + def __init__(self): self.adjacent = dict() - + def add_connection(self, elem, face, neighbor_elem, neighbor_face): self.adjacent[(elem, face)] = (neighbor_elem, neighbor_face) def get_neighbor(self, elem, face): return self.adjacent[(elem, face)] - + # }}} @@ -556,7 +556,7 @@ class Mesh(Record): will result in exceptions. Lastly, a data structure as described in :attr:`facial_adjacency_groups` may be passed. 
""" - + el_nr = 0 node_nr = 0 diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 43b656ad..b1fc9e25 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -177,12 +177,12 @@ def partition_mesh(mesh, part_per_element, part_nr): rank_neighbor = (parent_facial_group.neighbors[idx] + parent_grp_elem_base) rank_neighbor_face = parent_facial_group.neighbor_faces[idx] - + n_part_nr = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) tags = tags | part_mesh.boundary_tag_bit(n_part_nr) boundary_adj.neighbors[elem_idx] = -tags - + # Find the neighbor element from the other partition n_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_nr) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index e51856d7..44ca2d08 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -101,19 +101,19 @@ def test_partition_boxes_mesh(): assert count_btag_all(mesh) == np.sum( [count_btag_all(new_meshes[i]) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" - + for part_nr in range(num_parts): for f_groups in new_meshes[part_nr].facial_adjacency_groups: f_grp = f_groups[None] for idx in range(len(f_grp.elements)): - # Are all f_grp.neighbors guaranteed to be negative + # Are all f_grp.neighbors guaranteed to be negative # since I'm taking the boundary facial group? tag = -f_grp.neighbors[idx] elem = f_grp.elements[idx] face = f_grp.element_faces[idx] for n_part_nr in range(num_parts): if tag >= 0 and \ - tag & new_meshes[part_nr].boundary_tag_bit(n_part_nr) != 0: + tag & new_meshes[part_nr].boundary_tag_bit(n_part_nr) != 0: # Is this the best way to probe the tag? # Can one tag have multiple partition neighbors? 
(n_elem, n_face) = new_meshes[part_nr].\ @@ -121,7 +121,7 @@ def test_partition_boxes_mesh(): assert (elem, face) == new_meshes[n_part_nr].\ interpartition_adj.get_neighbor(n_elem, n_face),\ "InterpartitionAdj is not consistent" - + def count_btag_all(mesh): num_bnds = 0 @@ -131,7 +131,7 @@ def count_btag_all(mesh): if neighbors < 0: if -neighbors & mesh.boundary_tag_bit(BTAG_ALL) != 0: num_bnds += 1 - return num_bnds + return num_bnds # }}} -- GitLab From 59713a93d6ccd66a925eb92e02b58e463f819948 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 6 Mar 2017 12:32:43 -0600 Subject: [PATCH 015/266] Fix whitespace --- meshmode/mesh/__init__.py | 2 +- test/test_meshmode.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 32efe9b9..3673158a 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -391,7 +391,7 @@ class InterPartitionAdj(): def __init__(self): self.adjacent = dict() - + def add_connection(self, elem, face, neighbor_elem, neighbor_face): self.adjacent[(elem, face)] = (neighbor_elem, neighbor_face) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 44ca2d08..e94a3512 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -112,8 +112,8 @@ def test_partition_boxes_mesh(): elem = f_grp.elements[idx] face = f_grp.element_faces[idx] for n_part_nr in range(num_parts): - if tag >= 0 and \ - tag & new_meshes[part_nr].boundary_tag_bit(n_part_nr) != 0: + # Is tag >= 0 always true? + if tag & new_meshes[part_nr].boundary_tag_bit(n_part_nr) != 0: # Is this the best way to probe the tag? # Can one tag have multiple partition neighbors? (n_elem, n_face) = new_meshes[part_nr].\ -- GitLab From 5c1eb19a2ec9f8c1328cd23b30e4716e481cf64d Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 9 Mar 2017 00:15:10 -0600 Subject: [PATCH 016/266] Add documentation and fix bugs. 
--- meshmode/mesh/__init__.py | 91 +++++++++++++++++++++++++++++++++++-- meshmode/mesh/processing.py | 28 +++++++----- test/test_meshmode.py | 65 ++++++++++++++++++-------- 3 files changed, 148 insertions(+), 36 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 3673158a..0a01529d 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -88,6 +88,35 @@ class BTAG_NO_BOUNDARY(object): # noqa pass +class BTAG_PARTITION(object): + """ + A boundary tag indicating that this edge is adjacent to an element of + another :class:`Mesh`. The partition number of the adjacent mesh + is given by ``part_nr``. + + .. attribute:: part_nr + + .. versionadded:: 2017.1 + """ + def __init__(self, part_nr): + self.part_nr = int(part_nr) + + # TODO is this acceptable? + # __eq__ is also defined so maybe the hash value isn't too important + # for dictionaries. + def __hash__(self): + return self.part_nr + + def __eq__(self, other): + if isinstance(other, BTAG_PARTITION): + return self.part_nr == other.part_nr + else: + return False + + def __nq__(self, other): + return not self.__eq__(other) + + SYSTEM_TAGS = set([BTAG_NONE, BTAG_ALL, BTAG_REALLY_ALL, BTAG_NO_BOUNDARY]) # }}} @@ -386,17 +415,66 @@ class NodalAdjacency(Record): class InterPartitionAdj(): """ - Interface is not final. + Describes facial adjacency information of elements in one :class:`Mesh` to + elements in another :class:`Mesh`. The element's boundary tag gives the + partition that it is connected to. + + .. attribute:: elements + + `:class:Mesh`-local element numbers that have neighbors. + + .. attribute:: element_faces + + ``element_faces[i]`` is the face of ``elements[i]`` that has a neighbor. + + .. attribute:: neighbors + + ``neighbors[i]`` gives the element number within the neighboring partiton + of the element connected to ``elements[i]``. + + .. 
attribute:: neighbor_faces + + ``neighbor_faces[i]`` gives face index within the neighboring partition + of the face connected to ``elements[i]`` + + .. automethod:: add_connection + .. automethod:: get_neighbor + + .. versionadded:: 2017.1 """ def __init__(self): - self.adjacent = dict() + self.elements = [] + self.element_faces = [] + self.neighbors = [] + self.neighbor_faces = [] def add_connection(self, elem, face, neighbor_elem, neighbor_face): - self.adjacent[(elem, face)] = (neighbor_elem, neighbor_face) + """ + Adds a connection from ``elem`` and ``face`` within :class:`Mesh` to + ``neighbor_elem`` and ``neighbor_face`` of another neighboring partion + of type :class:`Mesh`. + :arg elem + :arg face + :arg neighbor_elem + :arg neighbor_face + """ + self.elements.append(elem) + self.element_faces.append(face) + self.neighbors.append(neighbor_elem) + self.neighbor_faces.append(neighbor_face) def get_neighbor(self, elem, face): - return self.adjacent[(elem, face)] + """ + :arg elem + :arg face + :returns: A tuple ``(neighbor_elem, neighbor_face)`` of neighboring + elements within another :class:`Mesh`. 
+ """ + for idx in range(len(self.elements)): + if elem == self.elements[idx] and face == self.element_faces[idx]: + return (self.neighbors[idx], self.neighbor_faces[idx]) + raise RuntimeError("This face does not have a neighbor") # }}} @@ -526,6 +604,7 @@ class Mesh(Record): node_vertex_consistency_tolerance=None, nodal_adjacency=False, facial_adjacency_groups=False, + interpartition_adj=False, boundary_tags=None, vertex_id_dtype=np.int32, element_id_dtype=np.int32): @@ -607,6 +686,7 @@ class Mesh(Record): self, vertices=vertices, groups=new_groups, _nodal_adjacency=nodal_adjacency, _facial_adjacency_groups=facial_adjacency_groups, + interpartition_adj=interpartition_adj, boundary_tags=boundary_tags, btag_to_index=btag_to_index, vertex_id_dtype=np.dtype(vertex_id_dtype), @@ -716,6 +796,7 @@ class Mesh(Record): == other._nodal_adjacency) and (self._facial_adjacency_groups == other._facial_adjacency_groups) + and self.interpartition_adj == other.interpartition_adj and self.boundary_tags == other.boundary_tags) def __ne__(self, other): @@ -885,6 +966,7 @@ def _compute_facial_adjacency_from_vertices(mesh): for ineighbor_group in range(len(mesh.groups)): nb_count = group_count.get((igroup, ineighbor_group)) + # FIXME nb_count is None sometimes when it maybe shouldn't be. if nb_count is not None: elements = np.empty(nb_count, dtype=mesh.element_id_dtype) element_faces = np.empty(nb_count, dtype=mesh.face_id_dtype) @@ -912,6 +994,7 @@ def _compute_facial_adjacency_from_vertices(mesh): idx = fill_count.get((igrp, inb_grp), 0) fill_count[igrp, inb_grp] = idx + 1 + # FIXME KeyError with inb_grp sometimes. 
fagrp = facial_adjacency_groups[igroup][inb_grp] fagrp.elements[idx] = iel fagrp.element_faces[idx] = iface diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index b1fc9e25..dba72d9d 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -138,21 +138,19 @@ def partition_mesh(mesh, part_per_element, part_nr): type(mesh_group)(mesh_group.order, new_indices[group_nr], new_nodes[group_nr], unit_nodes=mesh_group.unit_nodes)) - num_parts = np.max(part_per_element) - boundary_tags = list(range(num_parts)) + from meshmode.mesh import BTAG_ALL, BTAG_PARTITION + boundary_tags = [BTAG_PARTITION(n) for n in range(np.max(part_per_element))] from meshmode.mesh import Mesh part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None, boundary_tags=boundary_tags) - from meshmode.mesh import BTAG_ALL - - #TODO This should probably be in the Mesh class. + # FIXME I get errors when I try to copy part_mesh. from meshmode.mesh import InterPartitionAdj part_mesh.interpartition_adj = InterPartitionAdj() for igrp in range(num_groups): - part_group = part_mesh.groups[igrp] + elem_base = part_mesh.groups[igrp].element_nr_base boundary_adj = part_mesh.facial_adjacency_groups[igrp][None] boundary_elems = boundary_adj.elements boundary_faces = boundary_adj.element_faces @@ -160,7 +158,6 @@ def partition_mesh(mesh, part_per_element, part_nr): elem = boundary_elems[elem_idx] face = boundary_faces[elem_idx] tags = -boundary_adj.neighbors[elem_idx] - # Is is reasonable to expect this assertation? assert tags >= 0, "Expected boundary tag in adjacency group." parent_elem = queried_elems[elem] parent_group_num = 0 @@ -169,8 +166,8 @@ def partition_mesh(mesh, part_per_element, part_nr): parent_group_num += 1 assert parent_group_num < num_groups, "Unable to find neighbor." 
parent_grp_elem_base = mesh.groups[parent_group_num].element_nr_base - parent_boundary_adj = mesh.facial_adjacency_groups[parent_group_num] - for _, parent_facial_group in parent_boundary_adj.items(): + parent_adj = mesh.facial_adjacency_groups[parent_group_num] + for _, parent_facial_group in parent_adj.items(): for idx in np.where(parent_facial_group.elements == parent_elem)[0]: if parent_facial_group.neighbors[idx] >= 0 and \ parent_facial_group.element_faces[idx] == face: @@ -180,7 +177,7 @@ def partition_mesh(mesh, part_per_element, part_nr): n_part_nr = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) - tags = tags | part_mesh.boundary_tag_bit(n_part_nr) + tags = tags | part_mesh.boundary_tag_bit(BTAG_PARTITION(n_part_nr)) boundary_adj.neighbors[elem_idx] = -tags # Find the neighbor element from the other partition @@ -190,7 +187,7 @@ def partition_mesh(mesh, part_per_element, part_nr): # TODO Test if this works with multiple groups # Do I need to add the element number base? 
part_mesh.interpartition_adj.add_connection( - elem + part_group.element_nr_base, + elem + elem_base, face, n_elem, rank_neighbor_face) @@ -425,10 +422,13 @@ def merge_disjoint_meshes(meshes, skip_tests=False, single_group=False): order = None unit_nodes = None nodal_adjacency = None + facial_adjacency_groups = None for mesh in meshes: if mesh._nodal_adjacency is not None: nodal_adjacency = False + if mesh._facial_adjacency_groups is not None: + facial_adjacency_groups = False for group in mesh.groups: if grp_cls is None: @@ -455,10 +455,13 @@ def merge_disjoint_meshes(meshes, skip_tests=False, single_group=False): else: new_groups = [] nodal_adjacency = None + facial_adjacency_groups = None for mesh, vert_base in zip(meshes, vert_bases): if mesh._nodal_adjacency is not None: nodal_adjacency = False + if mesh._facial_adjacency_groups is not None: + facial_adjacency_groups = False for group in mesh.groups: new_vertex_indices = group.vertex_indices + vert_base @@ -469,7 +472,8 @@ def merge_disjoint_meshes(meshes, skip_tests=False, single_group=False): from meshmode.mesh import Mesh return Mesh(vertices, new_groups, skip_tests=skip_tests, - nodal_adjacency=nodal_adjacency) + nodal_adjacency=nodal_adjacency, + facial_adjacency_groups=facial_adjacency_groups) # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index e94a3512..c0a697b9 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -92,44 +92,69 @@ def test_partition_boxes_mesh(): from meshmode.mesh.processing import partition_mesh new_meshes = [ - partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] + partition_mesh(mesh, part_per_element, i) for i in range(num_parts)] assert mesh.nelements == np.sum( - [new_meshes[i].nelements for i in range(num_parts)]), \ + [new_meshes[i][0].nelements for i in range(num_parts)]), \ "part_mesh has the wrong number of elements" - assert count_btag_all(mesh) == np.sum( - [count_btag_all(new_meshes[i]) for i in range(num_parts)]), \ + 
assert count_tags(mesh, BTAG_ALL) == np.sum( + [count_tags(new_meshes[i][0], BTAG_ALL) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" + from meshmode.mesh import BTAG_PARTITION + num_tags = np.zeros((num_parts,)) + for part_nr in range(num_parts): - for f_groups in new_meshes[part_nr].facial_adjacency_groups: + (part, part_to_global) = new_meshes[part_nr] + for f_groups in part.facial_adjacency_groups: f_grp = f_groups[None] for idx in range(len(f_grp.elements)): - # Are all f_grp.neighbors guaranteed to be negative - # since I'm taking the boundary facial group? tag = -f_grp.neighbors[idx] + assert tag >= 0 elem = f_grp.elements[idx] face = f_grp.element_faces[idx] for n_part_nr in range(num_parts): - # Is tag >= 0 always true? - if tag & new_meshes[part_nr].boundary_tag_bit(n_part_nr) != 0: - # Is this the best way to probe the tag? - # Can one tag have multiple partition neighbors? - (n_elem, n_face) = new_meshes[part_nr].\ - interpartition_adj.get_neighbor(elem, face) - assert (elem, face) == new_meshes[n_part_nr].\ - interpartition_adj.get_neighbor(n_elem, n_face),\ - "InterpartitionAdj is not consistent" - - -def count_btag_all(mesh): + (n_part, n_part_to_global) = new_meshes[n_part_nr] + if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_nr)) != 0: + num_tags[n_part_nr] += 1 + (n_elem, n_face) = part.interpartition_adj.\ + get_neighbor(elem, face) + assert (elem, face) == n_part.interpartition_adj.\ + get_neighbor(n_elem, n_face),\ + "InterpartitionAdj is not consistent" + p_elem = part_to_global[elem] + n_part_to_global = new_meshes[n_part_nr][1] + p_n_elem = n_part_to_global[n_elem] + p_grp_nr = 0 + while p_elem >= mesh.groups[p_grp_nr].nelements: + p_elem -= mesh.groups[p_grp_nr].nelements + p_grp_nr += 1 + p_elem_base = mesh.groups[p_grp_nr].element_nr_base + f_groups = mesh.facial_adjacency_groups[p_grp_nr] + for _, p_bnd_adj in f_groups.items(): + for idx in range(len(p_bnd_adj.elements)): + if (p_elem == 
p_bnd_adj.elements[idx] and + face == p_bnd_adj.element_faces[idx]): + assert p_n_elem == p_bnd_adj.neighbors[idx],\ + "Tag does not give correct neighbor" + assert n_face == p_bnd_adj.neighbor_faces[idx],\ + "Tag does not give correct neighbor" + + for tag_nr in range(num_parts): + tag_sum = 0 + for mesh, _ in new_meshes: + tag_sum += count_tags(mesh, BTAG_PARTITION(tag_nr)) + assert num_tags[tag_nr] == tag_sum,\ + "part_mesh has the wrong number of BTAG_PARTITION boundaries" + +def count_tags(mesh, tag): num_bnds = 0 for adj_dict in mesh.facial_adjacency_groups: for _, bdry_group in adj_dict.items(): for neighbors in bdry_group.neighbors: if neighbors < 0: - if -neighbors & mesh.boundary_tag_bit(BTAG_ALL) != 0: + if -neighbors & mesh.boundary_tag_bit(tag) != 0: num_bnds += 1 return num_bnds -- GitLab From 717466ba62d1da0757a9462f91449b0c25fe5eba Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 9 Mar 2017 00:22:21 -0600 Subject: [PATCH 017/266] Whitespace fixes --- meshmode/mesh/__init__.py | 15 ++++++++------- meshmode/mesh/processing.py | 3 ++- test/test_meshmode.py | 3 ++- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 0a01529d..aeeec359 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -54,6 +54,7 @@ Predefined Boundary tags .. autoclass:: BTAG_ALL .. autoclass:: BTAG_REALLY_ALL .. autoclass:: BTAG_NO_BOUNDARY +.. autoclass:: BTAG_PARTITION """ @@ -88,12 +89,12 @@ class BTAG_NO_BOUNDARY(object): # noqa pass -class BTAG_PARTITION(object): +class BTAG_PARTITION(object): # noqa """ A boundary tag indicating that this edge is adjacent to an element of another :class:`Mesh`. The partition number of the adjacent mesh is given by ``part_nr``. - + .. attribute:: part_nr .. 
versionadded:: 2017.1 @@ -416,27 +417,27 @@ class NodalAdjacency(Record): class InterPartitionAdj(): """ Describes facial adjacency information of elements in one :class:`Mesh` to - elements in another :class:`Mesh`. The element's boundary tag gives the + elements in another :class:`Mesh`. The element's boundary tag gives the partition that it is connected to. .. attribute:: elements `:class:Mesh`-local element numbers that have neighbors. - + .. attribute:: element_faces ``element_faces[i]`` is the face of ``elements[i]`` that has a neighbor. .. attribute:: neighbors - ``neighbors[i]`` gives the element number within the neighboring partiton + ``neighbors[i]`` gives the element number within the neighboring partiton of the element connected to ``elements[i]``. .. attribute:: neighbor_faces ``neighbor_faces[i]`` gives face index within the neighboring partition of the face connected to ``elements[i]`` - + .. automethod:: add_connection .. automethod:: get_neighbor @@ -451,7 +452,7 @@ class InterPartitionAdj(): def add_connection(self, elem, face, neighbor_elem, neighbor_face): """ - Adds a connection from ``elem`` and ``face`` within :class:`Mesh` to + Adds a connection from ``elem`` and ``face`` within :class:`Mesh` to ``neighbor_elem`` and ``neighbor_face`` of another neighboring partion of type :class:`Mesh`. 
:arg elem diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index dba72d9d..bb097e17 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -177,7 +177,8 @@ def partition_mesh(mesh, part_per_element, part_nr): n_part_nr = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) - tags = tags | part_mesh.boundary_tag_bit(BTAG_PARTITION(n_part_nr)) + tags = tags | part_mesh.boundary_tag_bit( + BTAG_PARTITION(n_part_nr)) boundary_adj.neighbors[elem_idx] = -tags # Find the neighbor element from the other partition diff --git a/test/test_meshmode.py b/test/test_meshmode.py index c0a697b9..c6859cc9 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -130,7 +130,7 @@ def test_partition_boxes_mesh(): while p_elem >= mesh.groups[p_grp_nr].nelements: p_elem -= mesh.groups[p_grp_nr].nelements p_grp_nr += 1 - p_elem_base = mesh.groups[p_grp_nr].element_nr_base + #p_elem_base = mesh.groups[p_grp_nr].element_nr_base f_groups = mesh.facial_adjacency_groups[p_grp_nr] for _, p_bnd_adj in f_groups.items(): for idx in range(len(p_bnd_adj.elements)): @@ -148,6 +148,7 @@ def test_partition_boxes_mesh(): assert num_tags[tag_nr] == tag_sum,\ "part_mesh has the wrong number of BTAG_PARTITION boundaries" + def count_tags(mesh, tag): num_bnds = 0 for adj_dict in mesh.facial_adjacency_groups: -- GitLab From a17f953eb12cb88f04bebb56b5e97331c619ae9f Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 15 Mar 2017 00:43:35 -0500 Subject: [PATCH 018/266] Started partition_interpolation test --- test/test_meshmode.py | 46 ++++++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index c6859cc9..e2251a36 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -48,26 +48,44 @@ import logging logger = logging.getLogger(__name__) -# {{{ partition_mesh -''' -#TODO facial_adjacency_groups is not available in 
generate_torus. -def test_partition_torus_mesh(): - from meshmode.mesh.generation import generate_torus - my_mesh = generate_torus(2, 1, n_outer=2, n_inner=2) +def test_partition_interpolation(ctx_getter): + cl_ctx = ctx_getter() + queue = cl.CommandQueue(cl_ctx) + order = 4 + group_factory = PolynomialWarpAndBlendGroupFactory(order) + n = 3 + dim = 2 + num_parts = 7 + from meshmode.mesh.generation import generate_warped_rect_mesh + mesh = generate_warped_rect_mesh(dim, order=order, n=n) + + adjacency_list = np.zeros((mesh.nelements,), dtype=set) + for elem in range(mesh.nelements): + adjacency_list[elem] = set() + starts = mesh.nodal_adjacency.neighbors_starts + for n in range(starts[elem], starts[elem + 1]): + adjacency_list[elem].add(mesh.nodal_adjacency.neighbors[n]) - part_per_element = np.array([0, 1, 2, 1, 1, 2, 1, 0]) + from pymetis import part_graph + (_, p) = part_graph(num_parts, adjacency=adjacency_list) + part_per_element = np.array(p) from meshmode.mesh.processing import partition_mesh - (part_mesh0, _) = partition_mesh(my_mesh, part_per_element, 0) - (part_mesh1, _) = partition_mesh(my_mesh, part_per_element, 1) - (part_mesh2, _) = partition_mesh(my_mesh, part_per_element, 2) + part_meshes = [ + partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] - assert part_mesh0.nelements == 2 - assert part_mesh1.nelements == 4 - assert part_mesh2.nelements == 2 -''' + from meshmode.discretization import Discretization + vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) + for i in range(num_parts)] + + from meshmode.discretization.connection import (make_face_restriction, + check_connection) + bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, + FRESTR_INTERIOR_FACES) for i in range(num_parts)] +# {{{ partition_mesh + def test_partition_boxes_mesh(): n = 5 num_parts = 7 -- GitLab From 79c5e623dcb81c9ac29c9be46961a27a9890be0d Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 16 Mar 2017 21:08:12 -0500 
Subject: [PATCH 019/266] Created make_opposite_partition_connection function --- .../discretization/connection/__init__.py | 2 +- .../connection/opposite_face.py | 36 +++++++++++++++++++ test/test_meshmode.py | 11 ++++-- 3 files changed, 46 insertions(+), 3 deletions(-) diff --git a/meshmode/discretization/connection/__init__.py b/meshmode/discretization/connection/__init__.py index bdb0f525..e2a45113 100644 --- a/meshmode/discretization/connection/__init__.py +++ b/meshmode/discretization/connection/__init__.py @@ -35,7 +35,7 @@ from meshmode.discretization.connection.face import ( FRESTR_INTERIOR_FACES, FRESTR_ALL_FACES, make_face_restriction, make_face_to_all_faces_embedding) from meshmode.discretization.connection.opposite_face import \ - make_opposite_face_connection + make_opposite_face_connection, make_opposite_partition_connection from meshmode.discretization.connection.refinement import \ make_refinement_connection diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 6ce70b2a..629782ff 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -392,4 +392,40 @@ def make_opposite_face_connection(volume_to_bdry_conn): # }}} + +def make_opposite_partition_connection(vol_to_bdry_conns): + """ + Given a list of boundary restriction connections *volume_to_bdry_conn*, + return a :class:`DirectDiscretizationConnection` that performs data + exchange across adjacent faces of different partitions. + + :arg :vol_to_bdry_conns A list of *volume_to_bdry_conn* corresponding to + a partition of a parent mesh. 
+ """ + + disc_conns = [] + return disc_conns + nparts = len(vol_to_bdry_conns) + from meshmode.discretization.connection import ( + DirectDiscretizationConnection, DiscretizationConnectionElementGroup) + for part_idx in range(nparts): + vol_discr = vol_to_bdry_conns[part_idx].from_discr + vol_mesh = vol_discr.mesh + bdry_discr = vol_to_bdry_conns[part_idx].to_discr + + with cl.CommandQueue(vol_discr.cl_context) as queue: + # Create a list of batches. Each batch contains interpolation + # data from one partition to another. + nop + + disc_conns.append(DirectDiscretizationConnection( + from_discr=bdry_discr, + to_discr=bdry_discr, + groups=[ + DiscretizationConnectionElementGroup(batches=batches) + for batches in groups], + is_surjective=True)) + + return disc_conns + # vim: foldmethod=marker diff --git a/test/test_meshmode.py b/test/test_meshmode.py index e2251a36..f3f9802b 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -78,11 +78,18 @@ def test_partition_interpolation(ctx_getter): vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) for i in range(num_parts)] - from meshmode.discretization.connection import (make_face_restriction, - check_connection) + from meshmode.discretization.connection import make_face_restriction bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, FRESTR_INTERIOR_FACES) for i in range(num_parts)] + from meshmode.discretization.connection import \ + make_opposite_partition_connection + opp_faces = make_opposite_partition_connection(bdry_connections) + + from meshmode.discretization.connection import check_connection + for opp_face in opp_faces: + check_connection(opp_face) + # {{{ partition_mesh -- GitLab From 62e1c9dc347c6e08d7c9e2928143d8d21997a092 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 16 Mar 2017 23:56:17 -0500 Subject: [PATCH 020/266] Added part_idx array to InterPartitionalAdj --- .../discretization/connection/__init__.py | 2 +- .../connection/opposite_face.py | 53 
++++++++++++------- meshmode/mesh/__init__.py | 12 +++-- meshmode/mesh/processing.py | 1 + test/test_meshmode.py | 16 +++--- 5 files changed, 51 insertions(+), 33 deletions(-) diff --git a/meshmode/discretization/connection/__init__.py b/meshmode/discretization/connection/__init__.py index e2a45113..4ac11bef 100644 --- a/meshmode/discretization/connection/__init__.py +++ b/meshmode/discretization/connection/__init__.py @@ -35,7 +35,7 @@ from meshmode.discretization.connection.face import ( FRESTR_INTERIOR_FACES, FRESTR_ALL_FACES, make_face_restriction, make_face_to_all_faces_embedding) from meshmode.discretization.connection.opposite_face import \ - make_opposite_face_connection, make_opposite_partition_connection + make_opposite_face_connection, make_partition_connection from meshmode.discretization.connection.refinement import \ make_refinement_connection diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 629782ff..c04a444a 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -393,7 +393,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): # }}} -def make_opposite_partition_connection(vol_to_bdry_conns): +def make_partition_connection(vol_to_bdry_conns): """ Given a list of boundary restriction connections *volume_to_bdry_conn*, return a :class:`DirectDiscretizationConnection` that performs data @@ -404,27 +404,40 @@ def make_opposite_partition_connection(vol_to_bdry_conns): """ disc_conns = [] - return disc_conns nparts = len(vol_to_bdry_conns) from meshmode.discretization.connection import ( - DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - for part_idx in range(nparts): - vol_discr = vol_to_bdry_conns[part_idx].from_discr - vol_mesh = vol_discr.mesh - bdry_discr = vol_to_bdry_conns[part_idx].to_discr - - with cl.CommandQueue(vol_discr.cl_context) as queue: - # Create a list of 
batches. Each batch contains interpolation - # data from one partition to another. - nop - - disc_conns.append(DirectDiscretizationConnection( - from_discr=bdry_discr, - to_discr=bdry_discr, - groups=[ - DiscretizationConnectionElementGroup(batches=batches) - for batches in groups], - is_surjective=True)) + DirectDiscretizationConnection, DiscretizationConnectionElementGroup) + + # My intuition tells me that this should not live inside a for loop. + # However, I need to grab a cl_context. I'll assume that each context from + # each partition is the same and I'll use the first one. + cl_context = vol_to_bdry_conns[0].from_discr.cl_context + with cl.CommandQueue(cl_context) as queue: + # Create a list of batches. Each batch contains interpolation + # data from one partition to another. + for src_part_idx in range(nparts): + part_batches = [[] for _ in range(nparts)] + src_vol_conn = vol_to_bdry_conns[src_part_idx] + src_from_discr = src_vol_conn.from_discr + src_to_discr = src_vol_conn.to_discr + src_mesh = src_from_discr.mesh + adj = src_mesh.interpartition_adj + for elem_idx, elem in enumerate(adj.elements): + face = adj.element_faces[elem_idx] + (part_idx, n_elem, n_face) = adj.get_neighbor(elem, face) + + # Using the neighboring face and element, we need to create batches + # I'm not sure how I would do this. My guess is that it would look + # something like _make_cross_face_batches + + # Make one Discr connection for each partition. 
+ disc_conns.append(DirectDiscretizationConnection( + from_discr=src_from_discr, + to_discr=src_to_discr, + groups=[ + DiscretizationConnectionElementGroup(batches=batches) + for batches in part_batches], + is_surjective=True)) return disc_conns diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index aeeec359..1236f561 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -449,19 +449,22 @@ class InterPartitionAdj(): self.element_faces = [] self.neighbors = [] self.neighbor_faces = [] + self.part_indices = [] - def add_connection(self, elem, face, neighbor_elem, neighbor_face): + def add_connection(self, elem, face, part_idx, neighbor_elem, neighbor_face): """ Adds a connection from ``elem`` and ``face`` within :class:`Mesh` to ``neighbor_elem`` and ``neighbor_face`` of another neighboring partion of type :class:`Mesh`. :arg elem :arg face + :arg part_idx :arg neighbor_elem :arg neighbor_face """ self.elements.append(elem) self.element_faces.append(face) + self.part_indices.append(part_idx) self.neighbors.append(neighbor_elem) self.neighbor_faces.append(neighbor_face) @@ -469,12 +472,13 @@ class InterPartitionAdj(): """ :arg elem :arg face - :returns: A tuple ``(neighbor_elem, neighbor_face)`` of neighboring - elements within another :class:`Mesh`. + :returns: A tuple ``(part_idx, neighbor_elem, neighbor_face)`` of + neighboring elements within another :class:`Mesh`. 
""" for idx in range(len(self.elements)): if elem == self.elements[idx] and face == self.element_faces[idx]: - return (self.neighbors[idx], self.neighbor_faces[idx]) + return (self.part_indices[idx], + self.neighbors[idx], self.neighbor_faces[idx]) raise RuntimeError("This face does not have a neighbor") # }}} diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index bb097e17..1b415fa7 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -190,6 +190,7 @@ def partition_mesh(mesh, part_per_element, part_nr): part_mesh.interpartition_adj.add_connection( elem + elem_base, face, + n_part_nr, n_elem, rank_neighbor_face) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index f3f9802b..48c7e2ed 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -82,13 +82,12 @@ def test_partition_interpolation(ctx_getter): bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, FRESTR_INTERIOR_FACES) for i in range(num_parts)] - from meshmode.discretization.connection import \ - make_opposite_partition_connection - opp_faces = make_opposite_partition_connection(bdry_connections) + from meshmode.discretization.connection import make_partition_connection + opp_partitions = make_partition_connection(bdry_connections) - from meshmode.discretization.connection import check_connection - for opp_face in opp_faces: - check_connection(opp_face) + #from meshmode.discretization.connection import check_connection + #for opp_face in opp_faces: + #check_connection(opp_face) # {{{ partition_mesh @@ -143,9 +142,10 @@ def test_partition_boxes_mesh(): (n_part, n_part_to_global) = new_meshes[n_part_nr] if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_nr)) != 0: num_tags[n_part_nr] += 1 - (n_elem, n_face) = part.interpartition_adj.\ + (n_part_idx, n_elem, n_face) = part.interpartition_adj.\ get_neighbor(elem, face) - assert (elem, face) == n_part.interpartition_adj.\ + assert n_part_idx == n_part_nr + assert (part_nr, 
elem, face) == n_part.interpartition_adj.\ get_neighbor(n_elem, n_face),\ "InterpartitionAdj is not consistent" p_elem = part_to_global[elem] -- GitLab From 2e44dd798d8af12310efe38a2475b3d76029d6ae Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 17 Mar 2017 01:14:59 -0500 Subject: [PATCH 021/266] Added neighbor_group to InterPartitionAdj --- .../connection/opposite_face.py | 26 ++++++--- meshmode/mesh/__init__.py | 14 +++-- meshmode/mesh/processing.py | 58 ++++++++++--------- test/test_meshmode.py | 53 ++++++++--------- 4 files changed, 82 insertions(+), 69 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index c04a444a..3546c963 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -393,6 +393,9 @@ def make_opposite_face_connection(volume_to_bdry_conn): # }}} +def _make_cross_partition_batches(): + return [42] + def make_partition_connection(vol_to_bdry_conns): """ Given a list of boundary restriction connections *volume_to_bdry_conn*, @@ -416,19 +419,24 @@ def make_partition_connection(vol_to_bdry_conns): # Create a list of batches. Each batch contains interpolation # data from one partition to another. for src_part_idx in range(nparts): - part_batches = [[] for _ in range(nparts)] src_vol_conn = vol_to_bdry_conns[src_part_idx] src_from_discr = src_vol_conn.from_discr src_to_discr = src_vol_conn.to_discr src_mesh = src_from_discr.mesh - adj = src_mesh.interpartition_adj - for elem_idx, elem in enumerate(adj.elements): - face = adj.element_faces[elem_idx] - (part_idx, n_elem, n_face) = adj.get_neighbor(elem, face) - - # Using the neighboring face and element, we need to create batches - # I'm not sure how I would do this. 
My guess is that it would look - # something like _make_cross_face_batches + ngroups = len(src_mesh.groups) + part_batches = [[] for _ in range(ngroups)] + for group_num, adj in enumerate(src_mesh.interpart_adj_groups): + for elem_idx, elem in enumerate(adj.elements): + face = adj.element_faces[elem_idx] + (part_idx, group_num, n_elem, n_face) =\ + adj.get_neighbor(elem, face) + + # We need to create batches using the + # neighboring face, element, and group + # I'm not sure how I would do this. + # My guess is that it would look + # something like _make_cross_face_batches + part_batches[group_num].extend(_make_cross_partition_batches()) # Make one Discr connection for each partition. disc_conns.append(DirectDiscretizationConnection( diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 1236f561..254bda81 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -449,9 +449,10 @@ class InterPartitionAdj(): self.element_faces = [] self.neighbors = [] self.neighbor_faces = [] + self.neighbor_groups = [] self.part_indices = [] - def add_connection(self, elem, face, part_idx, neighbor_elem, neighbor_face): + def add_connection(self, elem, face, part_idx, neighbor_group, neighbor_elem, neighbor_face): """ Adds a connection from ``elem`` and ``face`` within :class:`Mesh` to ``neighbor_elem`` and ``neighbor_face`` of another neighboring partion @@ -466,18 +467,19 @@ class InterPartitionAdj(): self.element_faces.append(face) self.part_indices.append(part_idx) self.neighbors.append(neighbor_elem) + self.neighbor_groups.append(neighbor_group) self.neighbor_faces.append(neighbor_face) def get_neighbor(self, elem, face): """ :arg elem :arg face - :returns: A tuple ``(part_idx, neighbor_elem, neighbor_face)`` of + :returns: A tuple ``(part_idx, neighbor_group, neighbor_elem, neighbor_face)`` of neighboring elements within another :class:`Mesh`. 
""" for idx in range(len(self.elements)): if elem == self.elements[idx] and face == self.element_faces[idx]: - return (self.part_indices[idx], + return (self.part_indices[idx], self.neighbor_groups[idx], self.neighbors[idx], self.neighbor_faces[idx]) raise RuntimeError("This face does not have a neighbor") @@ -609,7 +611,7 @@ class Mesh(Record): node_vertex_consistency_tolerance=None, nodal_adjacency=False, facial_adjacency_groups=False, - interpartition_adj=False, + interpart_adj_groups=False, boundary_tags=None, vertex_id_dtype=np.int32, element_id_dtype=np.int32): @@ -691,7 +693,7 @@ class Mesh(Record): self, vertices=vertices, groups=new_groups, _nodal_adjacency=nodal_adjacency, _facial_adjacency_groups=facial_adjacency_groups, - interpartition_adj=interpartition_adj, + interpart_adj_groups=interpart_adj_groups, boundary_tags=boundary_tags, btag_to_index=btag_to_index, vertex_id_dtype=np.dtype(vertex_id_dtype), @@ -801,7 +803,7 @@ class Mesh(Record): == other._nodal_adjacency) and (self._facial_adjacency_groups == other._facial_adjacency_groups) - and self.interpartition_adj == other.interpartition_adj + and self.interpart_adj_groups == other.interpart_adj_groups and self.boundary_tags == other.boundary_tags) def __ne__(self, other): diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 1b415fa7..e6c0d046 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -41,13 +41,13 @@ __doc__ = """ """ -def partition_mesh(mesh, part_per_element, part_nr): +def partition_mesh(mesh, part_per_element, part_num): """ :arg mesh: A :class:`meshmode.mesh.Mesh` to be partitioned. :arg part_per_element: A :class:`numpy.ndarray` containing one integer per element of *mesh* indicating which part of the partitioned mesh the element is to become a part of. - :arg part_nr: The part number of the mesh to return. + :arg part_num: The part number of the mesh to return. 
:returns: A tuple ``(part_mesh, part_to_global)``, where *part_mesh* is a :class:`meshmode.mesh.Mesh` that is a partition of mesh, and @@ -63,7 +63,7 @@ def partition_mesh(mesh, part_per_element, part_nr): "part_per_element must have shape (mesh.nelements,)") # Contains the indices of the elements requested. - queried_elems = np.where(np.array(part_per_element) == part_nr)[0] + queried_elems = np.where(np.array(part_per_element) == part_num)[0] num_groups = len(mesh.groups) new_indices = [] @@ -78,8 +78,8 @@ def partition_mesh(mesh, part_per_element, part_nr): skip_groups = [] num_prev_elems = 0 start_idx = 0 - for group_nr in range(num_groups): - mesh_group = mesh.groups[group_nr] + for group_num in range(num_groups): + mesh_group = mesh.groups[group_num] # Find the index of first element in the next group end_idx = len(queried_elems) @@ -89,7 +89,7 @@ def partition_mesh(mesh, part_per_element, part_nr): break if start_idx == end_idx: - skip_groups.append(group_nr) + skip_groups.append(group_num) new_indices.append(np.array([])) new_nodes.append(np.array([])) num_prev_elems += mesh_group.nelements @@ -105,10 +105,10 @@ def partition_mesh(mesh, part_per_element, part_nr): for j in range(start_idx, end_idx): elems = queried_elems[j] - num_prev_elems new_idx = j - start_idx - new_nodes[group_nr][i, new_idx, :] = mesh_group.nodes[i, elems, :] + new_nodes[group_num][i, new_idx, :] = mesh_group.nodes[i, elems, :] - #index_set = np.append(index_set, new_indices[group_nr].ravel()) - index_sets = np.append(index_sets, set(new_indices[group_nr].ravel())) + #index_set = np.append(index_set, new_indices[group_num].ravel()) + index_sets = np.append(index_sets, set(new_indices[group_num].ravel())) num_prev_elems += mesh_group.nelements start_idx = end_idx @@ -122,21 +122,21 @@ def partition_mesh(mesh, part_per_element, part_nr): new_vertices[dim] = mesh.vertices[dim][required_indices] # Our indices need to be in range [0, len(mesh.nelements)]. 
- for group_nr in range(num_groups): - if group_nr not in skip_groups: - for i in range(len(new_indices[group_nr])): - for j in range(len(new_indices[group_nr][0])): - original_index = new_indices[group_nr][i, j] - new_indices[group_nr][i, j] = np.where( - required_indices == original_index)[0] + for group_num in range(num_groups): + if group_num not in skip_groups: + for i in range(len(new_indices[group_num])): + for j in range(len(new_indices[group_num][0])): + original_index = new_indices[group_num][i, j] + new_indices[group_num][i, j] = np.where( + required_indices == original_index)[0] new_mesh_groups = [] - for group_nr in range(num_groups): - if group_nr not in skip_groups: - mesh_group = mesh.groups[group_nr] + for group_num in range(num_groups): + if group_num not in skip_groups: + mesh_group = mesh.groups[group_num] new_mesh_groups.append( - type(mesh_group)(mesh_group.order, new_indices[group_nr], - new_nodes[group_nr], unit_nodes=mesh_group.unit_nodes)) + type(mesh_group)(mesh_group.order, new_indices[group_num], + new_nodes[group_num], unit_nodes=mesh_group.unit_nodes)) from meshmode.mesh import BTAG_ALL, BTAG_PARTITION boundary_tags = [BTAG_PARTITION(n) for n in range(np.max(part_per_element))] @@ -147,7 +147,8 @@ def partition_mesh(mesh, part_per_element, part_nr): # FIXME I get errors when I try to copy part_mesh. from meshmode.mesh import InterPartitionAdj - part_mesh.interpartition_adj = InterPartitionAdj() + part_mesh.interpart_adj_groups = [ + InterPartitionAdj() for _ in range(num_groups)] for igrp in range(num_groups): elem_base = part_mesh.groups[igrp].element_nr_base @@ -167,7 +168,7 @@ def partition_mesh(mesh, part_per_element, part_nr): assert parent_group_num < num_groups, "Unable to find neighbor." 
parent_grp_elem_base = mesh.groups[parent_group_num].element_nr_base parent_adj = mesh.facial_adjacency_groups[parent_group_num] - for _, parent_facial_group in parent_adj.items(): + for n_grp_num, parent_facial_group in parent_adj.items(): for idx in np.where(parent_facial_group.elements == parent_elem)[0]: if parent_facial_group.neighbors[idx] >= 0 and \ parent_facial_group.element_faces[idx] == face: @@ -175,22 +176,23 @@ def partition_mesh(mesh, part_per_element, part_nr): + parent_grp_elem_base) rank_neighbor_face = parent_facial_group.neighbor_faces[idx] - n_part_nr = part_per_element[rank_neighbor] + n_part_num = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) tags = tags | part_mesh.boundary_tag_bit( - BTAG_PARTITION(n_part_nr)) + BTAG_PARTITION(n_part_num)) boundary_adj.neighbors[elem_idx] = -tags # Find the neighbor element from the other partition n_elem = np.count_nonzero( - part_per_element[:rank_neighbor] == n_part_nr) + part_per_element[:rank_neighbor] == n_part_num) # TODO Test if this works with multiple groups # Do I need to add the element number base? 
- part_mesh.interpartition_adj.add_connection( + part_mesh.interpart_adj_groups[igrp].add_connection( elem + elem_base, face, - n_part_nr, + n_part_num, + n_grp_num, n_elem, rank_neighbor_face) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 48c7e2ed..7e14f41e 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -83,16 +83,16 @@ def test_partition_interpolation(ctx_getter): FRESTR_INTERIOR_FACES) for i in range(num_parts)] from meshmode.discretization.connection import make_partition_connection - opp_partitions = make_partition_connection(bdry_connections) + connections = make_partition_connection(bdry_connections) - #from meshmode.discretization.connection import check_connection - #for opp_face in opp_faces: - #check_connection(opp_face) + from meshmode.discretization.connection import check_connection + for conn in connections: + check_connection(conn) # {{{ partition_mesh -def test_partition_boxes_mesh(): +def test_partition_mesh(): n = 5 num_parts = 7 from meshmode.mesh.generation import generate_regular_rect_mesh @@ -129,34 +129,35 @@ def test_partition_boxes_mesh(): from meshmode.mesh import BTAG_PARTITION num_tags = np.zeros((num_parts,)) - for part_nr in range(num_parts): - (part, part_to_global) = new_meshes[part_nr] - for f_groups in part.facial_adjacency_groups: + for part_num in range(num_parts): + (part, part_to_global) = new_meshes[part_num] + for grp_num, f_groups in enumerate(part.facial_adjacency_groups): f_grp = f_groups[None] for idx in range(len(f_grp.elements)): tag = -f_grp.neighbors[idx] assert tag >= 0 elem = f_grp.elements[idx] face = f_grp.element_faces[idx] - for n_part_nr in range(num_parts): - (n_part, n_part_to_global) = new_meshes[n_part_nr] - if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_nr)) != 0: - num_tags[n_part_nr] += 1 - (n_part_idx, n_elem, n_face) = part.interpartition_adj.\ - get_neighbor(elem, face) - assert n_part_idx == n_part_nr - assert (part_nr, elem, face) == 
n_part.interpartition_adj.\ + for n_part_num in range(num_parts): + (n_part, n_part_to_global) = new_meshes[n_part_num] + if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: + num_tags[n_part_num] += 1 + (n_part_idx, n_grp_num, n_elem, n_face) = part.\ + interpart_adj_groups[grp_num].get_neighbor(elem, face) + assert n_part_idx == n_part_num + assert (part_num, grp_num, elem, face) == n_part.\ + interpart_adj_groups[n_grp_num].\ get_neighbor(n_elem, n_face),\ "InterpartitionAdj is not consistent" p_elem = part_to_global[elem] - n_part_to_global = new_meshes[n_part_nr][1] + n_part_to_global = new_meshes[n_part_num][1] p_n_elem = n_part_to_global[n_elem] - p_grp_nr = 0 - while p_elem >= mesh.groups[p_grp_nr].nelements: - p_elem -= mesh.groups[p_grp_nr].nelements - p_grp_nr += 1 - #p_elem_base = mesh.groups[p_grp_nr].element_nr_base - f_groups = mesh.facial_adjacency_groups[p_grp_nr] + p_grp_num = 0 + while p_elem >= mesh.groups[p_grp_num].nelements: + p_elem -= mesh.groups[p_grp_num].nelements + p_grp_num += 1 + #p_elem_base = mesh.groups[p_grp_num].element_num_base + f_groups = mesh.facial_adjacency_groups[p_grp_num] for _, p_bnd_adj in f_groups.items(): for idx in range(len(p_bnd_adj.elements)): if (p_elem == p_bnd_adj.elements[idx] and @@ -166,11 +167,11 @@ def test_partition_boxes_mesh(): assert n_face == p_bnd_adj.neighbor_faces[idx],\ "Tag does not give correct neighbor" - for tag_nr in range(num_parts): + for tag_num in range(num_parts): tag_sum = 0 for mesh, _ in new_meshes: - tag_sum += count_tags(mesh, BTAG_PARTITION(tag_nr)) - assert num_tags[tag_nr] == tag_sum,\ + tag_sum += count_tags(mesh, BTAG_PARTITION(tag_num)) + assert num_tags[tag_num] == tag_sum,\ "part_mesh has the wrong number of BTAG_PARTITION boundaries" -- GitLab From fc0126ab245ebabb052bb75fc4f7401e9cf07928 Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 17 Mar 2017 02:38:02 -0500 Subject: [PATCH 022/266] Added _make_cross_partition_batch --- .../connection/opposite_face.py | 
168 ++++++++++++++++-- 1 file changed, 158 insertions(+), 10 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 3546c963..4558730c 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -393,8 +393,153 @@ def make_opposite_face_connection(volume_to_bdry_conn): # }}} -def _make_cross_partition_batches(): - return [42] +def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, i_src_part, i_src_grp, from_elem, from_face): + + (i_tgt_part, i_tgt_grp, bdry_elem, bdry_face) = adj.get_neighbor(from_elem, from_face) + + src_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr + tgt_bdry_discr = vol_to_bdry_conns[i_scr_part].to_discr + + to_bdry_nodes = ( + # FIXME: This should view-then-transfer (but PyOpenCL doesn't do + # non-contiguous transfers for now). + tgt_bdry_discr.groups[i_tgt_grp].view( + tgt_bdry_discr.nodes().get(queue=queue)) + [:, to_bdry_element_indices]) + + tol = 1e4 * np.finfo(to_bdry_nodes.dtype).eps + + # TODO: Should this use vol_discr? + from_mesh_grp = src_bdry_discr.mesh.groups[i_src_grp] + from_grp = src_bdry_discr.groups[i_src_grp] + + dim = from_grp.dim + ambient_dim, nelements, nto_unit_nodes = to_bdry_nodes.shape + + initial_guess = np.mean(from_mesh_grp.vertex_unit_coordinates(), axis=0) + + from_unit_nodes = np.empty((dim, nelements, nto_unit_nodes)) + from_unit_nodes[:] = initial_guess.reshape(-1, 1, 1) + + import modepy as mp + from_vdm = mp.vandermonde(from_grp.basis(), from_grp.unit_nodes) + from_inv_t_vdm = la.inv(from_vdm.T) + from_nfuncs = len(from_grp.basis()) + + # (ambient_dim, nelements, nfrom_unit_nodes) + from_bdry_nodes = ( + # FIXME: This should view-then-transfer (but PyOpenCL doesn't do + # non-contiguous transfers for now). + # TODO: Should this be vol_discr? 
+ bdry_discr.groups[i_src_grp].view( + src_bdry_discr.nodes().get(queue=queue)) + [:, from_bdry_element_indices]) + + def apply_map(unit_nodes): + # unit_nodes: (dim, nelements, nto_unit_nodes) + # basis_at_unit_nodes + basis_at_unit_nodes = np.empty((from_nfuncs, nelements, nto_unit_nodes)) + for i, f in enumerate(from_grp.basis()): + basis_at_unit_nodes[i] = ( + f(unit_nodes.reshape(dim, -1)) + .reshape(nelements, nto_unit_nodes)) + intp_coeffs = np.einsum("fj,jet->fet", from_inv_t_vdm, basis_at_unit_nodes) + # If we're interpolating 1, we had better get 1 back. + one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) + assert (one_deviation < tol).all(), np.max(one_deviation) + return np.einsum("fet,aef->aet", intp_coeffs, from_bdry_nodes) + + def get_map_jacobian(unit_nodes): + # unit_nodes: (dim, nelements, nto_unit_nodes) + # basis_at_unit_nodes + dbasis_at_unit_nodes = np.empty( + (dim, from_nfuncs, nelements, nto_unit_nodes)) + for i, df in enumerate(from_grp.grad_basis()): + df_result = df(unit_nodes.reshape(dim, -1)) + for rst_axis, df_r in enumerate(df_result): + dbasis_at_unit_nodes[rst_axis, i] = ( + df_r.reshape(nelements, nto_unit_nodes)) + dintp_coeffs = np.einsum( + "fj,rjet->rfet", from_inv_t_vdm, dbasis_at_unit_nodes) + return np.einsum("rfet,aef->raet", dintp_coeffs, from_bdry_nodes) + + logger.info("make_opposite_face_connection: begin gauss-newton") + niter = 0 + while True: + resid = apply_map(from_unit_nodes) - to_bdry_nodes + df = get_map_jacobian(from_unit_nodes) + df_inv_resid = np.empty_like(from_unit_nodes) + # For the 1D/2D accelerated versions, we'll use the normal + # equations and Cramer's rule. If you're looking for high-end + # numerics, look no further than meshmode. 
+ if dim == 1: + # A is df.T + ata = np.einsum("iket,jket->ijet", df, df) + atb = np.einsum("iket,ket->iet", df, resid) + df_inv_resid = atb / ata[0, 0] + elif dim == 2: + # A is df.T + ata = np.einsum("iket,jket->ijet", df, df) + atb = np.einsum("iket,ket->iet", df, resid) + det = ata[0, 0]*ata[1, 1] - ata[0, 1]*ata[1, 0] + df_inv_resid = np.empty_like(from_unit_nodes) + df_inv_resid[0] = 1/det * (ata[1, 1] * atb[0] - ata[1, 0]*atb[1]) + df_inv_resid[1] = 1/det * (-ata[0, 1] * atb[0] + ata[0, 0]*atb[1]) + else: + # The boundary of a 3D mesh is 2D, so that's the + # highest-dimensional case we genuinely care about. + # + # This stinks, performance-wise, because it's not vectorized. + # But we'll only hit it for boundaries of 4+D meshes, in which + # case... good luck. :) + for e in range(nelements): + for t in range(nto_unit_nodes): + df_inv_resid[:, e, t], _, _, _ = \ + la.lstsq(df[:, :, e, t].T, resid[:, e, t]) + from_unit_nodes = from_unit_nodes - df_inv_resid + max_resid = np.max(np.abs(resid)) + logger.debug("gauss-newton residual: %g" % max_resid) + if max_resid < tol: + logger.info("make_opposite_face_connection: gauss-newton: done, " + "final residual: %g" % max_resid) + break + niter += 1 + if niter > 10: + raise RuntimeError("Gauss-Newton (for finding opposite-face reference " + "coordinates) did not converge") + + def to_dev(ary): + return cl.array.to_device(queue, ary, array_queue=None) + + done_elements = np.zeros(nelements, dtype=np.bool) + + # TODO: Still need to figure out what's happening here. 
+ while True: + todo_elements, = np.where(~done_elements) + if not len(todo_elements): + return + template_unit_nodes = from_unit_nodes[:, todo_elements[0], :] + unit_node_dist = np.max(np.max(np.abs( + from_unit_nodes[:, todo_elements, :] + - + template_unit_nodes.reshape(dim, 1, -1)), + axis=2), axis=0) + close_els = todo_elements[unit_node_dist < tol] + done_elements[close_els] = True + unit_node_dist = np.max(np.max(np.abs( + from_unit_nodes[:, todo_elements, :] + - + template_unit_nodes.reshape(dim, 1, -1)), + axis=2), axis=0) + + from meshmode.discretization.connection import InterpolationBatch + yield InterpolationBatch( + from_group_index=i_src_grp, + from_element_indices=to_dev(from_bdry_element_indices[close_els]), + to_element_indices=to_dev(to_bdry_element_indices[close_els]), + result_unit_nodes=template_unit_nodes, + to_element_face=None) + def make_partition_connection(vol_to_bdry_conns): """ @@ -418,25 +563,28 @@ def make_partition_connection(vol_to_bdry_conns): with cl.CommandQueue(cl_context) as queue: # Create a list of batches. Each batch contains interpolation # data from one partition to another. - for src_part_idx in range(nparts): - src_vol_conn = vol_to_bdry_conns[src_part_idx] - src_from_discr = src_vol_conn.from_discr - src_to_discr = src_vol_conn.to_discr + for i_src_part, src_vol_conn in enumerate(vol_to_bdry_conns): src_mesh = src_from_discr.mesh ngroups = len(src_mesh.groups) part_batches = [[] for _ in range(ngroups)] for group_num, adj in enumerate(src_mesh.interpart_adj_groups): for elem_idx, elem in enumerate(adj.elements): face = adj.element_faces[elem_idx] - (part_idx, group_num, n_elem, n_face) =\ - adj.get_neighbor(elem, face) - # We need to create batches using the + # We need to create a batch using the # neighboring face, element, and group # I'm not sure how I would do this. 
# My guess is that it would look # something like _make_cross_face_batches - part_batches[group_num].extend(_make_cross_partition_batches()) + part_batches[group_num].append( + _make_cross_partition_batch( + queue, + vol_to_bdry_conns, + adj, + i_src_part, + group_num, + elem, + face)) # Make one Discr connection for each partition. disc_conns.append(DirectDiscretizationConnection( -- GitLab From fcccd764f76ce0e822e1df5c13f2357f355e2293 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 23 Mar 2017 11:12:54 -0500 Subject: [PATCH 023/266] Work on _make_cross_partition_batch --- .../discretization/connection/__init__.py | 2 + .../connection/opposite_face.py | 153 ++++++++++-------- test/test_meshmode.py | 5 +- 3 files changed, 89 insertions(+), 71 deletions(-) diff --git a/meshmode/discretization/connection/__init__.py b/meshmode/discretization/connection/__init__.py index 4ac11bef..ff19c350 100644 --- a/meshmode/discretization/connection/__init__.py +++ b/meshmode/discretization/connection/__init__.py @@ -51,6 +51,7 @@ __all__ = [ "make_face_restriction", "make_face_to_all_faces_embedding", "make_opposite_face_connection", + "make_partition_connection", "make_refinement_connection" ] @@ -66,6 +67,7 @@ __doc__ = """ .. autofunction:: make_face_to_all_faces_embedding .. autofunction:: make_opposite_face_connection +.. autofunction:: make_partition_connection .. 
autofunction:: make_refinement_connection diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 4558730c..bfed445c 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -393,82 +393,96 @@ def make_opposite_face_connection(volume_to_bdry_conn): # }}} -def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, i_src_part, i_src_grp, from_elem, from_face): +def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, + i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face): + """ + Creates a batch that transfers data to a face from a face of another partition. + + :arg queue: + :arg vol_to_bdry_conns: A list of :class:`Direct` for each partition. + :arg adj: :class:`InterPartitionAdj` of partition `i_tgt_part`. + :arg i_tgt_part: The target partition number. + :arg i_tgt_grp: + :arg i_tgt_elem: + :arg i_tgt_face: - (i_tgt_part, i_tgt_grp, bdry_elem, bdry_face) = adj.get_neighbor(from_elem, from_face) + :returns: ??? + """ - src_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr - tgt_bdry_discr = vol_to_bdry_conns[i_scr_part].to_discr + (i_src_part, i_src_grp, i_src_elem, i_src_face) =\ + adj.get_neighbor(i_tgt_elem, i_tgt_face) - to_bdry_nodes = ( + src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr + tgt_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr + + tgt_bdry_nodes = ( # FIXME: This should view-then-transfer (but PyOpenCL doesn't do # non-contiguous transfers for now). tgt_bdry_discr.groups[i_tgt_grp].view( tgt_bdry_discr.nodes().get(queue=queue)) - [:, to_bdry_element_indices]) + [:, i_tgt_elem]) - tol = 1e4 * np.finfo(to_bdry_nodes.dtype).eps + ambient_dim, nelements, n_tgt_unit_nodes = tgt_bdry_nodes.shape - # TODO: Should this use vol_discr? 
- from_mesh_grp = src_bdry_discr.mesh.groups[i_src_grp] - from_grp = src_bdry_discr.groups[i_src_grp] + # (ambient_dim, nelements, nfrom_unit_nodes) + src_bdry_nodes = ( + # FIXME: This should view-then-transfer (but PyOpenCL doesn't do + # non-contiguous transfers for now). + src_bdry_discr.groups[i_src_grp].view( + src_bdry_discr.nodes().get(queue=queue)) + [:, i_src_elem]) - dim = from_grp.dim - ambient_dim, nelements, nto_unit_nodes = to_bdry_nodes.shape + tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps - initial_guess = np.mean(from_mesh_grp.vertex_unit_coordinates(), axis=0) + src_mesh_grp = src_bdry_discr.mesh.groups[i_src_grp] + src_grp = src_bdry_discr.groups[i_src_grp] - from_unit_nodes = np.empty((dim, nelements, nto_unit_nodes)) - from_unit_nodes[:] = initial_guess.reshape(-1, 1, 1) + dim = src_grp.dim - import modepy as mp - from_vdm = mp.vandermonde(from_grp.basis(), from_grp.unit_nodes) - from_inv_t_vdm = la.inv(from_vdm.T) - from_nfuncs = len(from_grp.basis()) + initial_guess = np.mean(src_mesh_grp.vertex_unit_coordinates(), axis=0) - # (ambient_dim, nelements, nfrom_unit_nodes) - from_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). - # TODO: Should this be vol_discr? 
- bdry_discr.groups[i_src_grp].view( - src_bdry_discr.nodes().get(queue=queue)) - [:, from_bdry_element_indices]) + src_unit_nodes = np.empty((dim, nelements, n_tgt_unit_nodes)) + src_unit_nodes[:] = initial_guess.reshape(-1, 1, 1) + + import modepy as mp + src_vdm = mp.vandermonde(src_grp.basis(), src_grp.unit_nodes) + src_inv_t_vdm = la.inv(src_vdm.T) + src_nfuncs = len(src_grp.basis()) def apply_map(unit_nodes): # unit_nodes: (dim, nelements, nto_unit_nodes) # basis_at_unit_nodes - basis_at_unit_nodes = np.empty((from_nfuncs, nelements, nto_unit_nodes)) - for i, f in enumerate(from_grp.basis()): + basis_at_unit_nodes = np.empty((src_nfuncs, nelements, n_tgt_unit_nodes)) + for i, f in enumerate(src_grp.basis()): basis_at_unit_nodes[i] = ( f(unit_nodes.reshape(dim, -1)) - .reshape(nelements, nto_unit_nodes)) - intp_coeffs = np.einsum("fj,jet->fet", from_inv_t_vdm, basis_at_unit_nodes) + .reshape(nelements, n_tgt_unit_nodes)) + intp_coeffs = np.einsum("fj,jet->fet", src_inv_t_vdm, basis_at_unit_nodes) # If we're interpolating 1, we had better get 1 back. 
one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) assert (one_deviation < tol).all(), np.max(one_deviation) - return np.einsum("fet,aef->aet", intp_coeffs, from_bdry_nodes) + return np.einsum("fet,aef->aet", intp_coeffs, src_bdry_nodes) def get_map_jacobian(unit_nodes): # unit_nodes: (dim, nelements, nto_unit_nodes) # basis_at_unit_nodes dbasis_at_unit_nodes = np.empty( - (dim, from_nfuncs, nelements, nto_unit_nodes)) - for i, df in enumerate(from_grp.grad_basis()): + (dim, src_nfuncs, nelements, n_tgt_unit_nodes)) + for i, df in enumerate(src_grp.grad_basis()): df_result = df(unit_nodes.reshape(dim, -1)) for rst_axis, df_r in enumerate(df_result): dbasis_at_unit_nodes[rst_axis, i] = ( - df_r.reshape(nelements, nto_unit_nodes)) + df_r.reshape(nelements, n_tgt_unit_nodes)) dintp_coeffs = np.einsum( - "fj,rjet->rfet", from_inv_t_vdm, dbasis_at_unit_nodes) - return np.einsum("rfet,aef->raet", dintp_coeffs, from_bdry_nodes) + "fj,rjet->rfet", src_inv_t_vdm, dbasis_at_unit_nodes) + return np.einsum("rfet,aef->raet", dintp_coeffs, src_bdry_nodes) - logger.info("make_opposite_face_connection: begin gauss-newton") + logger.info("make_partition_connection: begin gauss-newton") niter = 0 while True: - resid = apply_map(from_unit_nodes) - to_bdry_nodes - df = get_map_jacobian(from_unit_nodes) - df_inv_resid = np.empty_like(from_unit_nodes) + resid = apply_map(src_unit_nodes) - tgt_bdry_nodes + df = get_map_jacobian(src_unit_nodes) + df_inv_resid = np.empty_like(src_unit_nodes) # For the 1D/2D accelerated versions, we'll use the normal # equations and Cramer's rule. If you're looking for high-end # numerics, look no further than meshmode. 
@@ -482,7 +496,7 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, i_src_part, i_src ata = np.einsum("iket,jket->ijet", df, df) atb = np.einsum("iket,ket->iet", df, resid) det = ata[0, 0]*ata[1, 1] - ata[0, 1]*ata[1, 0] - df_inv_resid = np.empty_like(from_unit_nodes) + df_inv_resid = np.empty_like(src_unit_nodes) df_inv_resid[0] = 1/det * (ata[1, 1] * atb[0] - ata[1, 0]*atb[1]) df_inv_resid[1] = 1/det * (-ata[0, 1] * atb[0] + ata[0, 0]*atb[1]) else: @@ -493,21 +507,21 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, i_src_part, i_src # But we'll only hit it for boundaries of 4+D meshes, in which # case... good luck. :) for e in range(nelements): - for t in range(nto_unit_nodes): + for t in range(n_tgt_unit_nodes): df_inv_resid[:, e, t], _, _, _ = \ la.lstsq(df[:, :, e, t].T, resid[:, e, t]) - from_unit_nodes = from_unit_nodes - df_inv_resid + src_unit_nodes = src_unit_nodes - df_inv_resid max_resid = np.max(np.abs(resid)) logger.debug("gauss-newton residual: %g" % max_resid) if max_resid < tol: - logger.info("make_opposite_face_connection: gauss-newton: done, " + logger.info("make_partition_connection: gauss-newton: done, " "final residual: %g" % max_resid) break niter += 1 if niter > 10: - raise RuntimeError("Gauss-Newton (for finding opposite-face reference " - "coordinates) did not converge") - + raise RuntimeError("Gauss-Newton (for finding partition_connection " + "reference coordinates) did not converge") + def to_dev(ary): return cl.array.to_device(queue, ary, array_queue=None) @@ -518,16 +532,16 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, i_src_part, i_src todo_elements, = np.where(~done_elements) if not len(todo_elements): return - template_unit_nodes = from_unit_nodes[:, todo_elements[0], :] + template_unit_nodes = src_unit_nodes[:, todo_elements[0], :] unit_node_dist = np.max(np.max(np.abs( - from_unit_nodes[:, todo_elements, :] + src_unit_nodes[:, todo_elements, :] - template_unit_nodes.reshape(dim, 
1, -1)), axis=2), axis=0) close_els = todo_elements[unit_node_dist < tol] done_elements[close_els] = True unit_node_dist = np.max(np.max(np.abs( - from_unit_nodes[:, todo_elements, :] + src_unit_nodes[:, todo_elements, :] - template_unit_nodes.reshape(dim, 1, -1)), axis=2), axis=0) @@ -547,15 +561,17 @@ def make_partition_connection(vol_to_bdry_conns): return a :class:`DirectDiscretizationConnection` that performs data exchange across adjacent faces of different partitions. - :arg :vol_to_bdry_conns A list of *volume_to_bdry_conn* corresponding to + :arg vol_to_bdry_conns: A list of *volume_to_bdry_conn* corresponding to a partition of a parent mesh. + + :returns: A list of :class:`DirectDiscretizationConnection` corresponding to + each partition. """ disc_conns = [] - nparts = len(vol_to_bdry_conns) from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - + # My intuition tells me that this should not live inside a for loop. # However, I need to grab a cl_context. I'll assume that each context from # each partition is the same and I'll use the first one. @@ -563,33 +579,34 @@ def make_partition_connection(vol_to_bdry_conns): with cl.CommandQueue(cl_context) as queue: # Create a list of batches. Each batch contains interpolation # data from one partition to another. 
- for i_src_part, src_vol_conn in enumerate(vol_to_bdry_conns): - src_mesh = src_from_discr.mesh - ngroups = len(src_mesh.groups) + for i_tgt_part, tgt_vol_conn in enumerate(vol_to_bdry_conns): + bdry_discr = tgt_vol_conn.to_discr + tgt_mesh = tgt_vol_conn.to_discr.mesh + ngroups = len(tgt_mesh.groups) part_batches = [[] for _ in range(ngroups)] - for group_num, adj in enumerate(src_mesh.interpart_adj_groups): - for elem_idx, elem in enumerate(adj.elements): - face = adj.element_faces[elem_idx] + for tgt_group_num, adj in enumerate(tgt_mesh.interpart_adj_groups): + for idx, tgt_elem in enumerate(adj.elements): + tgt_face = adj.element_faces[idx] - # We need to create a batch using the + # We need to create a batch using the # neighboring face, element, and group # I'm not sure how I would do this. # My guess is that it would look # something like _make_cross_face_batches - part_batches[group_num].append( + part_batches[tgt_group_num].append( _make_cross_partition_batch( queue, vol_to_bdry_conns, adj, - i_src_part, - group_num, - elem, - face)) + i_tgt_part, + tgt_group_num, + tgt_elem, + tgt_face)) # Make one Discr connection for each partition. 
disc_conns.append(DirectDiscretizationConnection( - from_discr=src_from_discr, - to_discr=src_to_discr, + from_discr=bdry_discr, + to_discr=bdry_discr, groups=[ DiscretizationConnectionElementGroup(batches=batches) for batches in part_batches], diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 7e14f41e..49d09bfb 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -50,7 +50,6 @@ logger = logging.getLogger(__name__) def test_partition_interpolation(ctx_getter): cl_ctx = ctx_getter() - queue = cl.CommandQueue(cl_ctx) order = 4 group_factory = PolynomialWarpAndBlendGroupFactory(order) n = 3 @@ -75,11 +74,11 @@ def test_partition_interpolation(ctx_getter): partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] from meshmode.discretization import Discretization - vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) + vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) for i in range(num_parts)] from meshmode.discretization.connection import make_face_restriction - bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, + bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, FRESTR_INTERIOR_FACES) for i in range(num_parts)] from meshmode.discretization.connection import make_partition_connection -- GitLab From 3732684137095a4402087b04338eddeb9bfacfbe Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 24 Mar 2017 00:57:52 -0500 Subject: [PATCH 024/266] partition_mesh passes tests involving multiple groups --- meshmode/mesh/__init__.py | 39 +++++++++++++++++++---------- meshmode/mesh/processing.py | 49 +++++++++++++++++-------------------- test/test_meshmode.py | 40 +++++++++++++++++------------- 3 files changed, 71 insertions(+), 57 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index eb4ee957..f3966e75 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -428,6 +428,9 @@ class InterPartitionAdj(): 
``element_faces[i]`` is the face of ``elements[i]`` that has a neighbor. + .. attribute:: part_indices + ``part_indices[i]`` gives the partition index of the neighboring face. + .. attribute:: neighbors ``neighbors[i]`` gives the element number within the neighboring partiton @@ -449,38 +452,37 @@ class InterPartitionAdj(): self.element_faces = [] self.neighbors = [] self.neighbor_faces = [] - self.neighbor_groups = [] self.part_indices = [] - def add_connection(self, elem, face, part_idx, neighbor_group, neighbor_elem, neighbor_face): + def add_connection(self, elem, face, part_idx, neighbor_elem, neighbor_face): """ Adds a connection from ``elem`` and ``face`` within :class:`Mesh` to - ``neighbor_elem`` and ``neighbor_face`` of another neighboring partion - of type :class:`Mesh`. - :arg elem - :arg face - :arg part_idx - :arg neighbor_elem - :arg neighbor_face + ``neighbor_elem`` and ``neighbor_face`` of the neighboring partion + of type :class:`Mesh` given by `part_idx`. + :arg elem: + :arg face: + :arg part_idx: + :arg neighbor_elem: + :arg neighbor_face: """ self.elements.append(elem) self.element_faces.append(face) self.part_indices.append(part_idx) self.neighbors.append(neighbor_elem) - self.neighbor_groups.append(neighbor_group) self.neighbor_faces.append(neighbor_face) def get_neighbor(self, elem, face): """ :arg elem :arg face - :returns: A tuple ``(part_idx, neighbor_group, neighbor_elem, neighbor_face)`` of + :returns: A tuple ``(part_idx, neighbor_elem, neighbor_face)`` of neighboring elements within another :class:`Mesh`. 
""" for idx in range(len(self.elements)): if elem == self.elements[idx] and face == self.element_faces[idx]: - return (self.part_indices[idx], self.neighbor_groups[idx], - self.neighbors[idx], self.neighbor_faces[idx]) + return (self.part_indices[idx], + self.neighbors[idx], + self.neighbor_faces[idx]) raise RuntimeError("This face does not have a neighbor") # }}} @@ -855,6 +857,17 @@ class Mesh(Record): def __ne__(self, other): return not self.__eq__(other) + def find_igrp(self, elem): + """ + :arg elem: An element of the mesh. + :returns: The index of the group that `elem` belongs to. + """ + for igrp, grp in enumerate(self.groups): + if elem < grp.nelements: + return igrp + elem -= grp.nelements + raise RuntimeError("Could not find group with element ", elem) + # Design experience: Try not to add too many global data structures to the # mesh. Let the element groups be responsible for that at the mesh level. # diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 48effb1d..d1d57fee 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -141,64 +141,59 @@ def partition_mesh(mesh, part_per_element, part_nr): new_nodes[group_num], unit_nodes=mesh_group.unit_nodes)) from meshmode.mesh import BTAG_ALL, BTAG_PARTITION - boundary_tags = [BTAG_PARTITION(n) for n in range(np.max(part_per_element))] + boundary_tags = [BTAG_PARTITION(n) for n in np.unique(part_per_element)] from meshmode.mesh import Mesh part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None, boundary_tags=boundary_tags) - # FIXME I get errors when I try to copy part_mesh. 
from meshmode.mesh import InterPartitionAdj - part_mesh.interpart_adj_groups = [ - InterPartitionAdj() for _ in range(num_groups)] + interpart_grps = [InterPartitionAdj() for _ in range(len(part_mesh.groups))] - for igrp in range(num_groups): - elem_base = part_mesh.groups[igrp].element_nr_base + for igrp, grp in enumerate(part_mesh.groups): + elem_base = grp.element_nr_base boundary_adj = part_mesh.facial_adjacency_groups[igrp][None] boundary_elems = boundary_adj.elements boundary_faces = boundary_adj.element_faces - for elem_idx in range(len(boundary_elems)): - elem = boundary_elems[elem_idx] - face = boundary_faces[elem_idx] - tags = -boundary_adj.neighbors[elem_idx] + for adj_idx, elem in enumerate(boundary_elems): + face = boundary_faces[adj_idx] + tags = -boundary_adj.neighbors[adj_idx] assert tags >= 0, "Expected boundary tag in adjacency group." - parent_elem = queried_elems[elem] - parent_group_num = 0 - while parent_elem >= mesh.groups[parent_group_num].nelements: - parent_elem -= mesh.groups[parent_group_num].nelements - parent_group_num += 1 - assert parent_group_num < num_groups, "Unable to find neighbor." 
- parent_grp_elem_base = mesh.groups[parent_group_num].element_nr_base - parent_adj = mesh.facial_adjacency_groups[parent_group_num] - for n_grp_num, parent_facial_group in parent_adj.items(): + + parent_igrp = mesh.find_igrp(queried_elems[elem + elem_base]) + parent_elem_base = mesh.groups[parent_igrp].element_nr_base + parent_elem = queried_elems[elem + elem_base] - parent_elem_base + + parent_adj = mesh.facial_adjacency_groups[parent_igrp] + + for parent_facial_group in parent_adj.values(): for idx in np.where(parent_facial_group.elements == parent_elem)[0]: if parent_facial_group.neighbors[idx] >= 0 and \ - parent_facial_group.element_faces[idx] == face: + parent_facial_group.element_faces[idx] == face: rank_neighbor = (parent_facial_group.neighbors[idx] - + parent_grp_elem_base) + + parent_elem_base) rank_neighbor_face = parent_facial_group.neighbor_faces[idx] n_part_num = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) tags = tags | part_mesh.boundary_tag_bit( BTAG_PARTITION(n_part_num)) - boundary_adj.neighbors[elem_idx] = -tags + boundary_adj.neighbors[adj_idx] = -tags # Find the neighbor element from the other partition n_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) - # TODO Test if this works with multiple groups - # Do I need to add the element number base? 
- part_mesh.interpart_adj_groups[igrp].add_connection( + interpart_grps[igrp].add_connection( elem + elem_base, face, n_part_num, - n_grp_num, n_elem, rank_neighbor_face) - return part_mesh, queried_elems + mesh = part_mesh.copy() + mesh.interpart_adj_groups = interpart_grps + return mesh, queried_elems # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index cb36e4da..0b5f99f6 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -95,12 +95,12 @@ def test_partition_mesh(): n = 5 num_parts = 7 from meshmode.mesh.generation import generate_regular_rect_mesh - mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) - #TODO facial_adjacency_groups is not available from merge_disjoint_meshes. - #mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) + mesh1 = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) + mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) + mesh3 = generate_regular_rect_mesh(a=(1, 2, 2), b=(2, 3, 3), n=(n, n, n)) - #from meshmode.mesh.processing import merge_disjoint_meshes - #mesh = merge_disjoint_meshes([mesh1, mesh2]) + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes([mesh1, mesh2, mesh3]) adjacency_list = np.zeros((mesh.nelements,), dtype=set) for elem in range(mesh.nelements): @@ -129,33 +129,39 @@ def test_partition_mesh(): num_tags = np.zeros((num_parts,)) for part_num in range(num_parts): - (part, part_to_global) = new_meshes[part_num] + part, part_to_global = new_meshes[part_num] for grp_num, f_groups in enumerate(part.facial_adjacency_groups): f_grp = f_groups[None] for idx in range(len(f_grp.elements)): tag = -f_grp.neighbors[idx] assert tag >= 0 - elem = f_grp.elements[idx] + elem = f_grp.elements[idx] + part.groups[grp_num].element_nr_base face = f_grp.element_faces[idx] for n_part_num in range(num_parts): - (n_part, n_part_to_global) = new_meshes[n_part_num] + n_part, n_part_to_global = 
new_meshes[n_part_num] if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: num_tags[n_part_num] += 1 - (n_part_idx, n_grp_num, n_elem, n_face) = part.\ + (i, n_elem, n_face) = part.\ interpart_adj_groups[grp_num].get_neighbor(elem, face) - assert n_part_idx == n_part_num - assert (part_num, grp_num, elem, face) == n_part.\ + assert i == n_part_num + n_grp_num = n_part.find_igrp(n_elem) + assert (part_num, elem, face) == n_part.\ interpart_adj_groups[n_grp_num].\ get_neighbor(n_elem, n_face),\ "InterpartitionAdj is not consistent" - p_elem = part_to_global[elem] + n_part_to_global = new_meshes[n_part_num][1] + p_elem = part_to_global[elem] p_n_elem = n_part_to_global[n_elem] - p_grp_num = 0 - while p_elem >= mesh.groups[p_grp_num].nelements: - p_elem -= mesh.groups[p_grp_num].nelements - p_grp_num += 1 - #p_elem_base = mesh.groups[p_grp_num].element_num_base + + p_grp_num = mesh.find_igrp(p_elem) + p_n_grp_num = mesh.find_igrp(p_n_elem) + + p_elem_base = mesh.groups[p_grp_num].element_nr_base + p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base + p_elem -= p_elem_base + p_n_elem -= p_n_elem_base + f_groups = mesh.facial_adjacency_groups[p_grp_num] for _, p_bnd_adj in f_groups.items(): for idx in range(len(p_bnd_adj.elements)): -- GitLab From 0fb2053e472a35024cb486394ec3a3677df6f273 Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 24 Mar 2017 09:55:53 -0500 Subject: [PATCH 025/266] Cleanup code --- meshmode/mesh/__init__.py | 9 +++++++-- meshmode/mesh/processing.py | 25 +++++++++++++------------ test/test_meshmode.py | 30 ++++++++++++++++++------------ 3 files changed, 38 insertions(+), 26 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index f3966e75..d400f9ef 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -434,7 +434,10 @@ class InterPartitionAdj(): .. 
attribute:: neighbors ``neighbors[i]`` gives the element number within the neighboring partiton - of the element connected to ``elements[i]``. + of the element connected to ``elements[i]``. This gives a mesh-wide element + numbering. Use ``Mesh.find_igrp()`` to find the group that the element + belongs to, then subtract ``element_nr_base`` to find the element of the + group. .. attribute:: neighbor_faces @@ -477,13 +480,15 @@ class InterPartitionAdj(): :arg face :returns: A tuple ``(part_idx, neighbor_elem, neighbor_face)`` of neighboring elements within another :class:`Mesh`. + Or (-1, -1, -1) if the face does not have a neighbor. """ for idx in range(len(self.elements)): if elem == self.elements[idx] and face == self.element_faces[idx]: return (self.part_indices[idx], self.neighbors[idx], self.neighbor_faces[idx]) - raise RuntimeError("This face does not have a neighbor") + #raise RuntimeError("This face does not have a neighbor") + return (-1, -1, -1) # }}} diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index d1d57fee..19d8e9f7 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -148,7 +148,7 @@ def partition_mesh(mesh, part_per_element, part_nr): facial_adjacency_groups=None, boundary_tags=boundary_tags) from meshmode.mesh import InterPartitionAdj - interpart_grps = [InterPartitionAdj() for _ in range(len(part_mesh.groups))] + adj_grps = [InterPartitionAdj() for _ in range(len(part_mesh.groups))] for igrp, grp in enumerate(part_mesh.groups): elem_base = grp.element_nr_base @@ -160,9 +160,10 @@ def partition_mesh(mesh, part_per_element, part_nr): tags = -boundary_adj.neighbors[adj_idx] assert tags >= 0, "Expected boundary tag in adjacency group." 
- parent_igrp = mesh.find_igrp(queried_elems[elem + elem_base]) + p_meshwide_elem = queried_elems[elem + elem_base] + parent_igrp = mesh.find_igrp(p_meshwide_elem) parent_elem_base = mesh.groups[parent_igrp].element_nr_base - parent_elem = queried_elems[elem + elem_base] - parent_elem_base + parent_elem = p_meshwide_elem - parent_elem_base parent_adj = mesh.facial_adjacency_groups[parent_igrp] @@ -184,16 +185,16 @@ def partition_mesh(mesh, part_per_element, part_nr): n_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) - interpart_grps[igrp].add_connection( - elem + elem_base, - face, - n_part_num, - n_elem, - rank_neighbor_face) + adj_grps[igrp].add_connection( + elem, + face, + n_part_num, + n_elem, + rank_neighbor_face) - mesh = part_mesh.copy() - mesh.interpart_adj_groups = interpart_grps - return mesh, queried_elems + connected_mesh = part_mesh.copy() + connected_mesh.interpart_adj_groups = adj_grps + return connected_mesh, queried_elems # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 0b5f99f6..c29f013a 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -94,10 +94,13 @@ def test_partition_interpolation(ctx_getter): def test_partition_mesh(): n = 5 num_parts = 7 - from meshmode.mesh.generation import generate_regular_rect_mesh + order = 4 + dim = 3 + from meshmode.mesh.generation import (generate_regular_rect_mesh, + generate_warped_rect_mesh) mesh1 = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) - mesh3 = generate_regular_rect_mesh(a=(1, 2, 2), b=(2, 3, 3), n=(n, n, n)) + mesh3 = generate_warped_rect_mesh(dim, order=order, n=n) from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes([mesh1, mesh2, mesh3]) @@ -131,28 +134,31 @@ def test_partition_mesh(): for part_num in range(num_parts): part, part_to_global = new_meshes[part_num] for grp_num, f_groups in 
enumerate(part.facial_adjacency_groups): + adj = part.interpart_adj_groups[grp_num] f_grp = f_groups[None] - for idx in range(len(f_grp.elements)): + elem_base = part.groups[grp_num].element_nr_base + for idx, elem in enumerate(f_grp.elements): tag = -f_grp.neighbors[idx] assert tag >= 0 - elem = f_grp.elements[idx] + part.groups[grp_num].element_nr_base face = f_grp.element_faces[idx] for n_part_num in range(num_parts): n_part, n_part_to_global = new_meshes[n_part_num] if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: num_tags[n_part_num] += 1 - (i, n_elem, n_face) = part.\ - interpart_adj_groups[grp_num].get_neighbor(elem, face) + + (i, n_elem, n_face) = adj.get_neighbor(elem, face) assert i == n_part_num n_grp_num = n_part.find_igrp(n_elem) - assert (part_num, elem, face) == n_part.\ - interpart_adj_groups[n_grp_num].\ - get_neighbor(n_elem, n_face),\ + n_adj = n_part.interpart_adj_groups[n_grp_num] + n_elem_base = n_part.groups[n_grp_num].element_nr_base + n_elem = n_elem - n_elem_base + assert (part_num, elem + elem_base, face) ==\ + n_adj.get_neighbor(n_elem, n_face),\ "InterpartitionAdj is not consistent" n_part_to_global = new_meshes[n_part_num][1] - p_elem = part_to_global[elem] - p_n_elem = n_part_to_global[n_elem] + p_elem = part_to_global[elem + elem_base] + p_n_elem = n_part_to_global[n_elem + n_elem_base] p_grp_num = mesh.find_igrp(p_elem) p_n_grp_num = mesh.find_igrp(p_n_elem) @@ -163,7 +169,7 @@ def test_partition_mesh(): p_n_elem -= p_n_elem_base f_groups = mesh.facial_adjacency_groups[p_grp_num] - for _, p_bnd_adj in f_groups.items(): + for p_bnd_adj in f_groups.values(): for idx in range(len(p_bnd_adj.elements)): if (p_elem == p_bnd_adj.elements[idx] and face == p_bnd_adj.element_faces[idx]): -- GitLab From e82769550ce4aaa5b8f7b00b6884b24105d4c5e1 Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 24 Mar 2017 09:59:25 -0500 Subject: [PATCH 026/266] Comments --- meshmode/mesh/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 
deletion(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index d400f9ef..cc32e962 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -636,6 +636,7 @@ class Mesh(Record): .. automethod:: __eq__ .. automethod:: __ne__ + .. automethod:: find_igrp """ face_id_dtype = np.int8 @@ -864,7 +865,7 @@ class Mesh(Record): def find_igrp(self, elem): """ - :arg elem: An element of the mesh. + :arg elem: A mesh-wise element. Think of it as ``elem + element_nr_base``. :returns: The index of the group that `elem` belongs to. """ for igrp, grp in enumerate(self.groups): -- GitLab From 0a559f3c20efd325e916076f7475f5371ec91acb Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 24 Mar 2017 10:30:23 -0500 Subject: [PATCH 027/266] Temporarily pass interpart_adj to make_partition_connection --- .../connection/opposite_face.py | 34 ++++++++++--------- test/test_meshmode.py | 15 ++++++-- 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index bfed445c..925bfe0d 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -393,6 +393,8 @@ def make_opposite_face_connection(volume_to_bdry_conn): # }}} +# {{{ partition_connection + def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face): """ @@ -555,7 +557,7 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, to_element_face=None) -def make_partition_connection(vol_to_bdry_conns): +def make_partition_connection(vol_to_bdry_conns, adj_parts): """ Given a list of boundary restriction connections *volume_to_bdry_conn*, return a :class:`DirectDiscretizationConnection` that performs data @@ -572,27 +574,24 @@ def make_partition_connection(vol_to_bdry_conns): from meshmode.discretization.connection import ( DirectDiscretizationConnection, 
DiscretizationConnectionElementGroup) - # My intuition tells me that this should not live inside a for loop. - # However, I need to grab a cl_context. I'll assume that each context from - # each partition is the same and I'll use the first one. - cl_context = vol_to_bdry_conns[0].from_discr.cl_context - with cl.CommandQueue(cl_context) as queue: - # Create a list of batches. Each batch contains interpolation - # data from one partition to another. - for i_tgt_part, tgt_vol_conn in enumerate(vol_to_bdry_conns): + # Create a list of batches. Each batch contains interpolation + # data from one partition to another. + for i_tgt_part, tgt_vol_conn in enumerate(vol_to_bdry_conns): + + # Is this ok in a for loop? + cl_context = tgt_vol_conn.from_discr.cl_context + with cl.CommandQueue(cl_context) as queue: + bdry_discr = tgt_vol_conn.to_discr - tgt_mesh = tgt_vol_conn.to_discr.mesh + tgt_mesh = bdry_discr.mesh ngroups = len(tgt_mesh.groups) part_batches = [[] for _ in range(ngroups)] - for tgt_group_num, adj in enumerate(tgt_mesh.interpart_adj_groups): + # Hack, I need to get InterPartitionAdj so I'll receive it directly + # as an argument. + for tgt_group_num, adj in enumerate(adj_parts[i_tgt_part]): for idx, tgt_elem in enumerate(adj.elements): tgt_face = adj.element_faces[idx] - # We need to create a batch using the - # neighboring face, element, and group - # I'm not sure how I would do this. 
- # My guess is that it would look - # something like _make_cross_face_batches part_batches[tgt_group_num].append( _make_cross_partition_batch( queue, @@ -614,4 +613,7 @@ def make_partition_connection(vol_to_bdry_conns): return disc_conns +# }}} + + # vim: foldmethod=marker diff --git a/test/test_meshmode.py b/test/test_meshmode.py index c29f013a..9fdf1757 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -48,6 +48,8 @@ import logging logger = logging.getLogger(__name__) +# {{{ partition_interpolation + def test_partition_interpolation(ctx_getter): cl_ctx = ctx_getter() order = 4 @@ -56,7 +58,11 @@ def test_partition_interpolation(ctx_getter): dim = 2 num_parts = 7 from meshmode.mesh.generation import generate_warped_rect_mesh - mesh = generate_warped_rect_mesh(dim, order=order, n=n) + mesh1 = generate_warped_rect_mesh(dim, order=order, n=n) + mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes([mesh1, mesh2]) adjacency_list = np.zeros((mesh.nelements,), dtype=set) for elem in range(mesh.nelements): @@ -73,6 +79,9 @@ def test_partition_interpolation(ctx_getter): part_meshes = [ partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] + # Hack, I get InterPartitionAdj here instead of from vol_discrs. 
+ adj_parts = [part_meshes[i].interpart_adj_groups for i in range(num_parts)] + from meshmode.discretization import Discretization vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) for i in range(num_parts)] @@ -82,12 +91,14 @@ def test_partition_interpolation(ctx_getter): FRESTR_INTERIOR_FACES) for i in range(num_parts)] from meshmode.discretization.connection import make_partition_connection - connections = make_partition_connection(bdry_connections) + connections = make_partition_connection(bdry_connections, adj_parts) from meshmode.discretization.connection import check_connection for conn in connections: check_connection(conn) +# }}} + # {{{ partition_mesh -- GitLab From e8e3b63f786eb21afe8b97a131f908b0e5db5fd2 Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 25 Mar 2017 13:00:04 -0500 Subject: [PATCH 028/266] Comments and better errors --- meshmode/mesh/__init__.py | 2 +- meshmode/mesh/processing.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index cc32e962..472e6c4f 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -872,7 +872,7 @@ class Mesh(Record): if elem < grp.nelements: return igrp elem -= grp.nelements - raise RuntimeError("Could not find group with element ", elem) + raise RuntimeError("Could not find group with element %d" % elem) # Design experience: Try not to add too many global data structures to the # mesh. Let the element groups be responsible for that at the mesh level. diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 19d8e9f7..c028c63f 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -185,6 +185,8 @@ def partition_mesh(mesh, part_per_element, part_nr): n_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) + # I cannot compute the group because the other + # partitions have not been built yet. 
adj_grps[igrp].add_connection( elem, face, -- GitLab From d2109ce2c1432042fa6021ddc45123911fc8d28a Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 25 Mar 2017 13:00:51 -0500 Subject: [PATCH 029/266] Slight progress on make_partition_connection --- .../connection/opposite_face.py | 42 ++++++++++--------- test/test_meshmode.py | 8 ++-- 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 925bfe0d..85696ed0 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -395,8 +395,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, - i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face): +def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, i_src_elem, i_tgt_part, i_tgt_grp, i_tgt_elem): """ Creates a batch that transfers data to a face from a face of another partition. @@ -411,9 +410,6 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, :returns: ??? 
""" - (i_src_part, i_src_grp, i_src_elem, i_src_face) =\ - adj.get_neighbor(i_tgt_elem, i_tgt_face) - src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr tgt_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr @@ -424,7 +420,8 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, tgt_bdry_discr.nodes().get(queue=queue)) [:, i_tgt_elem]) - ambient_dim, nelements, n_tgt_unit_nodes = tgt_bdry_nodes.shape + ambient_dim, n_tgt_unit_nodes = tgt_bdry_nodes.shape + nelements = 1 # (ambient_dim, nelements, nfrom_unit_nodes) src_bdry_nodes = ( @@ -557,7 +554,7 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, to_element_face=None) -def make_partition_connection(vol_to_bdry_conns, adj_parts): +def make_partition_connection(vol_to_bdry_conns, part_meshes): """ Given a list of boundary restriction connections *volume_to_bdry_conn*, return a :class:`DirectDiscretizationConnection` that performs data @@ -583,24 +580,31 @@ def make_partition_connection(vol_to_bdry_conns, adj_parts): with cl.CommandQueue(cl_context) as queue: bdry_discr = tgt_vol_conn.to_discr - tgt_mesh = bdry_discr.mesh + #tgt_mesh = bdry_discr.mesh + tgt_mesh = part_meshes[i_tgt_part] ngroups = len(tgt_mesh.groups) part_batches = [[] for _ in range(ngroups)] - # Hack, I need to get InterPartitionAdj so I'll receive it directly - # as an argument. 
- for tgt_group_num, adj in enumerate(adj_parts[i_tgt_part]): - for idx, tgt_elem in enumerate(adj.elements): - tgt_face = adj.element_faces[idx] - - part_batches[tgt_group_num].append( + for i_tgt_grp, adj in enumerate(tgt_mesh.interpart_adj_groups): + for idx, i_tgt_elem in enumerate(adj.elements): + i_tgt_face = adj.element_faces[idx] + i_src_part = adj.part_indices[idx] + i_src_elem = adj.neighbors[idx] + i_src_face = adj.neighbor_faces[idx] + #src_mesh = vol_to_bdry_conns[i_src_part].to_discr.mesh + src_mesh = part_meshes[i_src_part] + i_src_grp = src_mesh.find_igrp(i_src_elem) + i_src_elem -= src_mesh.groups[i_src_grp].element_nr_base + + part_batches[i_tgt_grp].extend( _make_cross_partition_batch( queue, vol_to_bdry_conns, - adj, + i_src_part, + i_src_grp, + i_src_elem, i_tgt_part, - tgt_group_num, - tgt_elem, - tgt_face)) + i_tgt_grp, + i_tgt_elem)) # Make one Discr connection for each partition. disc_conns.append(DirectDiscretizationConnection( diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 9fdf1757..c8b54d31 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -79,9 +79,6 @@ def test_partition_interpolation(ctx_getter): part_meshes = [ partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] - # Hack, I get InterPartitionAdj here instead of from vol_discrs. - adj_parts = [part_meshes[i].interpart_adj_groups for i in range(num_parts)] - from meshmode.discretization import Discretization vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) for i in range(num_parts)] @@ -90,11 +87,14 @@ def test_partition_interpolation(ctx_getter): bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, FRESTR_INTERIOR_FACES) for i in range(num_parts)] + # Hack, I probably shouldn't pass part_meshes directly. This is probably + # temporary. 
from meshmode.discretization.connection import make_partition_connection - connections = make_partition_connection(bdry_connections, adj_parts) + connections = make_partition_connection(bdry_connections, part_meshes) from meshmode.discretization.connection import check_connection for conn in connections: + print(conn) check_connection(conn) # }}} -- GitLab From 89bc18518ad057d0b9c967b59843991fa1037821 Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 25 Mar 2017 16:00:57 -0500 Subject: [PATCH 030/266] No proggress, intermediate commit --- .../discretization/connection/opposite_face.py | 18 ++++++++++++++---- test/test_meshmode.py | 2 +- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 85696ed0..c7ea09c0 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -418,7 +418,7 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, # non-contiguous transfers for now). tgt_bdry_discr.groups[i_tgt_grp].view( tgt_bdry_discr.nodes().get(queue=queue)) - [:, i_tgt_elem]) + [:, i_tgt_elem, :]) ambient_dim, n_tgt_unit_nodes = tgt_bdry_nodes.shape nelements = 1 @@ -429,7 +429,7 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, # non-contiguous transfers for now). src_bdry_discr.groups[i_src_grp].view( src_bdry_discr.nodes().get(queue=queue)) - [:, i_src_elem]) + ) tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps @@ -575,7 +575,7 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): # data from one partition to another. for i_tgt_part, tgt_vol_conn in enumerate(vol_to_bdry_conns): - # Is this ok in a for loop? + # Is this ok in a loop? 
cl_context = tgt_vol_conn.from_discr.cl_context with cl.CommandQueue(cl_context) as queue: @@ -583,8 +583,16 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): #tgt_mesh = bdry_discr.mesh tgt_mesh = part_meshes[i_tgt_part] ngroups = len(tgt_mesh.groups) - part_batches = [[] for _ in range(ngroups)] + #part_batches = [[] for _ in range(ngroups)] + part_batches = [] for i_tgt_grp, adj in enumerate(tgt_mesh.interpart_adj_groups): + part_batches.append(_make_cross_partition_batches( + queue, + vol_to_bdry_conns, + adj, + tgt_mesh, + i_tgt_grp)) + ''' for idx, i_tgt_elem in enumerate(adj.elements): i_tgt_face = adj.element_faces[idx] i_src_part = adj.part_indices[idx] @@ -605,9 +613,11 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): i_tgt_part, i_tgt_grp, i_tgt_elem)) + ''' # Make one Discr connection for each partition. disc_conns.append(DirectDiscretizationConnection( + # Is this ok? from_discr=bdry_discr, to_discr=bdry_discr, groups=[ diff --git a/test/test_meshmode.py b/test/test_meshmode.py index c8b54d31..51fad45d 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -162,7 +162,7 @@ def test_partition_mesh(): n_grp_num = n_part.find_igrp(n_elem) n_adj = n_part.interpart_adj_groups[n_grp_num] n_elem_base = n_part.groups[n_grp_num].element_nr_base - n_elem = n_elem - n_elem_base + n_elem -= n_elem_base assert (part_num, elem + elem_base, face) ==\ n_adj.get_neighbor(n_elem, n_face),\ "InterpartitionAdj is not consistent" -- GitLab From 1ac6c876150331ab2376635d5377eeb59e4512ad Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 25 Mar 2017 16:27:55 -0500 Subject: [PATCH 031/266] interpart_adj_groups is not a list of maps from partition numbers to InterPartitionAdj --- meshmode/mesh/__init__.py | 42 +++++++++++++------------------------ meshmode/mesh/processing.py | 19 +++++++++-------- test/test_meshmode.py | 12 +++++------ 3 files changed, 29 insertions(+), 44 deletions(-) diff --git a/meshmode/mesh/__init__.py 
b/meshmode/mesh/__init__.py index 472e6c4f..7ebaf393 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -428,9 +428,6 @@ class InterPartitionAdj(): ``element_faces[i]`` is the face of ``elements[i]`` that has a neighbor. - .. attribute:: part_indices - ``part_indices[i]`` gives the partition index of the neighboring face. - .. attribute:: neighbors ``neighbors[i]`` gives the element number within the neighboring partiton @@ -444,7 +441,6 @@ class InterPartitionAdj(): ``neighbor_faces[i]`` gives face index within the neighboring partition of the face connected to ``elements[i]`` - .. automethod:: add_connection .. automethod:: get_neighbor .. versionadded:: 2017.1 @@ -455,40 +451,21 @@ class InterPartitionAdj(): self.element_faces = [] self.neighbors = [] self.neighbor_faces = [] - self.part_indices = [] - - def add_connection(self, elem, face, part_idx, neighbor_elem, neighbor_face): - """ - Adds a connection from ``elem`` and ``face`` within :class:`Mesh` to - ``neighbor_elem`` and ``neighbor_face`` of the neighboring partion - of type :class:`Mesh` given by `part_idx`. - :arg elem: - :arg face: - :arg part_idx: - :arg neighbor_elem: - :arg neighbor_face: - """ - self.elements.append(elem) - self.element_faces.append(face) - self.part_indices.append(part_idx) - self.neighbors.append(neighbor_elem) - self.neighbor_faces.append(neighbor_face) def get_neighbor(self, elem, face): """ :arg elem :arg face - :returns: A tuple ``(part_idx, neighbor_elem, neighbor_face)`` of + :returns: A tuple ``(neighbor_elem, neighbor_face)`` of neighboring elements within another :class:`Mesh`. - Or (-1, -1, -1) if the face does not have a neighbor. + Or (-1, -1) if the face does not have a neighbor. 
""" for idx in range(len(self.elements)): if elem == self.elements[idx] and face == self.element_faces[idx]: - return (self.part_indices[idx], - self.neighbors[idx], + return (self.neighbors[idx], self.neighbor_faces[idx]) #raise RuntimeError("This face does not have a neighbor") - return (-1, -1, -1) + return (-1, -1) # }}} @@ -620,6 +597,15 @@ class Mesh(Record): (Note that element groups are not necessarily contiguous like the figure may suggest.) + .. attribute:: interpart_adj_groups + + A list of mappings from neighbor partition numbers to instances of + :class:`InterPartitionAdj`. + + ``interpart_adj_gorups[igrp][ineighbor_part]`` gives + the set of facial adjacency relations between group *igrp* + and partition *ineighbor_part*. + .. attribute:: boundary_tags A tuple of boundary tag identifiers. :class:`BTAG_ALL` and @@ -872,7 +858,7 @@ class Mesh(Record): if elem < grp.nelements: return igrp elem -= grp.nelements - raise RuntimeError("Could not find group with element %d" % elem) + raise RuntimeError("Could not find group with element %d." % elem) # Design experience: Try not to add too many global data structures to the # mesh. Let the element groups be responsible for that at the mesh level. 
diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index c028c63f..27a6c56d 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -148,7 +148,7 @@ def partition_mesh(mesh, part_per_element, part_nr): facial_adjacency_groups=None, boundary_tags=boundary_tags) from meshmode.mesh import InterPartitionAdj - adj_grps = [InterPartitionAdj() for _ in range(len(part_mesh.groups))] + adj_grps = [{} for _ in range(len(part_mesh.groups))] for igrp, grp in enumerate(part_mesh.groups): elem_base = grp.element_nr_base @@ -173,7 +173,7 @@ def partition_mesh(mesh, part_per_element, part_nr): parent_facial_group.element_faces[idx] == face: rank_neighbor = (parent_facial_group.neighbors[idx] + parent_elem_base) - rank_neighbor_face = parent_facial_group.neighbor_faces[idx] + n_face = parent_facial_group.neighbor_faces[idx] n_part_num = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) @@ -185,14 +185,15 @@ def partition_mesh(mesh, part_per_element, part_nr): n_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) + if n_part_num not in adj_grps[igrp]: + adj_grps[igrp][n_part_num] = InterPartitionAdj() + # I cannot compute the group because the other - # partitions have not been built yet. - adj_grps[igrp].add_connection( - elem, - face, - n_part_num, - n_elem, - rank_neighbor_face) + # partitions may not have been built yet. 
+ adj_grps[igrp][n_part_num].elements.append(elem) + adj_grps[igrp][n_part_num].element_faces.append(face) + adj_grps[igrp][n_part_num].neighbors.append(n_elem) + adj_grps[igrp][n_part_num].neighbor_faces.append(n_face) connected_mesh = part_mesh.copy() connected_mesh.interpart_adj_groups = adj_grps diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 51fad45d..e2980d2d 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -145,27 +145,25 @@ def test_partition_mesh(): for part_num in range(num_parts): part, part_to_global = new_meshes[part_num] for grp_num, f_groups in enumerate(part.facial_adjacency_groups): - adj = part.interpart_adj_groups[grp_num] f_grp = f_groups[None] elem_base = part.groups[grp_num].element_nr_base for idx, elem in enumerate(f_grp.elements): tag = -f_grp.neighbors[idx] assert tag >= 0 face = f_grp.element_faces[idx] - for n_part_num in range(num_parts): + for n_part_num, adj in part.interpart_adj_groups[grp_num].items(): n_part, n_part_to_global = new_meshes[n_part_num] if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: num_tags[n_part_num] += 1 - (i, n_elem, n_face) = adj.get_neighbor(elem, face) - assert i == n_part_num + (n_elem, n_face) = adj.get_neighbor(elem, face) n_grp_num = n_part.find_igrp(n_elem) - n_adj = n_part.interpart_adj_groups[n_grp_num] + n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem -= n_elem_base - assert (part_num, elem + elem_base, face) ==\ + assert (elem + elem_base, face) ==\ n_adj.get_neighbor(n_elem, n_face),\ - "InterpartitionAdj is not consistent" + "InterPartitionAdj is not consistent" n_part_to_global = new_meshes[n_part_num][1] p_elem = part_to_global[elem + elem_base] -- GitLab From df070ccd064be9679ed1fe6a135d159868670318 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 28 Mar 2017 10:06:39 -0500 Subject: [PATCH 032/266] Cleaned up call to _make_cross_partiton_batch --- 
.../connection/opposite_face.py | 151 +++++++----------- 1 file changed, 57 insertions(+), 94 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index c7ea09c0..6779bbd9 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -395,41 +395,31 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, i_src_elem, i_tgt_part, i_tgt_grp, i_tgt_elem): +def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, + i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face, + i_src_part, i_src_grp, i_src_elem, i_src_face): """ Creates a batch that transfers data to a face from a face of another partition. :arg queue: - :arg vol_to_bdry_conns: A list of :class:`Direct` for each partition. - :arg adj: :class:`InterPartitionAdj` of partition `i_tgt_part`. - :arg i_tgt_part: The target partition number. - :arg i_tgt_grp: - :arg i_tgt_elem: - :arg i_tgt_face: + :arg vol_to_bdry_conns: A list of :class:`DirectDiscretizationConnection` + for each partition. :returns: ??? """ - src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr + src_mesh = part_meshes[i_src_part] + tgt_mesh = part_meshes[i_tgt_part] + + adj = tgt_mesh.interpart_adj_groups[i_tgt_grp][i_src_part] + tgt_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr + src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr - tgt_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). 
- tgt_bdry_discr.groups[i_tgt_grp].view( - tgt_bdry_discr.nodes().get(queue=queue)) - [:, i_tgt_elem, :]) + tgt_bdry_nodes = tgt_mesh.groups[i_tgt_grp].nodes[:, i_tgt_elem, :] + src_bdry_nodes = src_mesh.groups[i_src_grp].nodes[:, i_src_elem, :] ambient_dim, n_tgt_unit_nodes = tgt_bdry_nodes.shape - nelements = 1 - - # (ambient_dim, nelements, nfrom_unit_nodes) - src_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). - src_bdry_discr.groups[i_src_grp].view( - src_bdry_discr.nodes().get(queue=queue)) - ) tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps @@ -439,9 +429,8 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, dim = src_grp.dim initial_guess = np.mean(src_mesh_grp.vertex_unit_coordinates(), axis=0) - - src_unit_nodes = np.empty((dim, nelements, n_tgt_unit_nodes)) - src_unit_nodes[:] = initial_guess.reshape(-1, 1, 1) + src_unit_nodes = np.empty((dim, n_tgt_unit_nodes)) + src_unit_nodes[:] = initial_guess.reshape(-1, 1) import modepy as mp src_vdm = mp.vandermonde(src_grp.basis(), src_grp.unit_nodes) @@ -449,29 +438,29 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, src_nfuncs = len(src_grp.basis()) def apply_map(unit_nodes): - # unit_nodes: (dim, nelements, nto_unit_nodes) + # unit_nodes: (dim, nto_unit_nodes) # basis_at_unit_nodes - basis_at_unit_nodes = np.empty((src_nfuncs, nelements, n_tgt_unit_nodes)) + basis_at_unit_nodes = np.empty((src_nfuncs, n_tgt_unit_nodes)) for i, f in enumerate(src_grp.basis()): basis_at_unit_nodes[i] = ( f(unit_nodes.reshape(dim, -1)) - .reshape(nelements, n_tgt_unit_nodes)) - intp_coeffs = np.einsum("fj,jet->fet", src_inv_t_vdm, basis_at_unit_nodes) + .reshape(n_tgt_unit_nodes)) + intp_coeffs = np.einsum("fj,jt->ft", src_inv_t_vdm, basis_at_unit_nodes) # If we're interpolating 1, we had better get 1 back. 
- one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) - assert (one_deviation < tol).all(), np.max(one_deviation) - return np.einsum("fet,aef->aet", intp_coeffs, src_bdry_nodes) + #one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) + #assert (one_deviation < tol).all(), np.max(one_deviation) + return np.einsum("ft,af->at", intp_coeffs, src_bdry_nodes) def get_map_jacobian(unit_nodes): - # unit_nodes: (dim, nelements, nto_unit_nodes) + # unit_nodes: (dim, nto_unit_nodes) # basis_at_unit_nodes dbasis_at_unit_nodes = np.empty( - (dim, src_nfuncs, nelements, n_tgt_unit_nodes)) + (dim, src_nfuncs, n_tgt_unit_nodes)) for i, df in enumerate(src_grp.grad_basis()): df_result = df(unit_nodes.reshape(dim, -1)) for rst_axis, df_r in enumerate(df_result): dbasis_at_unit_nodes[rst_axis, i] = ( - df_r.reshape(nelements, n_tgt_unit_nodes)) + df_r.reshape(n_tgt_unit_nodes)) dintp_coeffs = np.einsum( "fj,rjet->rfet", src_inv_t_vdm, dbasis_at_unit_nodes) return np.einsum("rfet,aef->raet", dintp_coeffs, src_bdry_nodes) @@ -486,6 +475,7 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, # equations and Cramer's rule. If you're looking for high-end # numerics, look no further than meshmode. if dim == 1: + # TODO: Needs testing. # A is df.T ata = np.einsum("iket,jket->ijet", df, df) atb = np.einsum("iket,ket->iet", df, resid) @@ -505,10 +495,10 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, # This stinks, performance-wise, because it's not vectorized. # But we'll only hit it for boundaries of 4+D meshes, in which # case... good luck. :) - for e in range(nelements): - for t in range(n_tgt_unit_nodes): - df_inv_resid[:, e, t], _, _, _ = \ - la.lstsq(df[:, :, e, t].T, resid[:, e, t]) + # TODO: Needs testing. 
+ for t in range(n_tgt_unit_nodes): + df_inv_resid[:, t], _, _, _ = \ + la.lstsq(df[:, :, t].T, resid[:, t]) src_unit_nodes = src_unit_nodes - df_inv_resid max_resid = np.max(np.abs(resid)) logger.debug("gauss-newton residual: %g" % max_resid) @@ -524,33 +514,11 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, def to_dev(ary): return cl.array.to_device(queue, ary, array_queue=None) - done_elements = np.zeros(nelements, dtype=np.bool) - - # TODO: Still need to figure out what's happening here. - while True: - todo_elements, = np.where(~done_elements) - if not len(todo_elements): - return - template_unit_nodes = src_unit_nodes[:, todo_elements[0], :] - unit_node_dist = np.max(np.max(np.abs( - src_unit_nodes[:, todo_elements, :] - - - template_unit_nodes.reshape(dim, 1, -1)), - axis=2), axis=0) - close_els = todo_elements[unit_node_dist < tol] - done_elements[close_els] = True - unit_node_dist = np.max(np.max(np.abs( - src_unit_nodes[:, todo_elements, :] - - - template_unit_nodes.reshape(dim, 1, -1)), - axis=2), axis=0) - - from meshmode.discretization.connection import InterpolationBatch - yield InterpolationBatch( + return InterpolationBatch( from_group_index=i_src_grp, from_element_indices=to_dev(from_bdry_element_indices[close_els]), to_element_indices=to_dev(to_bdry_element_indices[close_els]), - result_unit_nodes=template_unit_nodes, + result_unit_nodes=src_unit_nodes, to_element_face=None) @@ -583,37 +551,32 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): #tgt_mesh = bdry_discr.mesh tgt_mesh = part_meshes[i_tgt_part] ngroups = len(tgt_mesh.groups) - #part_batches = [[] for _ in range(ngroups)] - part_batches = [] - for i_tgt_grp, adj in enumerate(tgt_mesh.interpart_adj_groups): - part_batches.append(_make_cross_partition_batches( - queue, - vol_to_bdry_conns, - adj, - tgt_mesh, - i_tgt_grp)) - ''' - for idx, i_tgt_elem in enumerate(adj.elements): - i_tgt_face = adj.element_faces[idx] - i_src_part = 
adj.part_indices[idx] - i_src_elem = adj.neighbors[idx] - i_src_face = adj.neighbor_faces[idx] - #src_mesh = vol_to_bdry_conns[i_src_part].to_discr.mesh + part_batches = [[] for _ in range(ngroups)] + for i_tgt_grp, adj_parts in enumerate(tgt_mesh.interpart_adj_groups): + for i_src_part, adj in adj_parts.items(): + src_mesh = part_meshes[i_src_part] - i_src_grp = src_mesh.find_igrp(i_src_elem) - i_src_elem -= src_mesh.groups[i_src_grp].element_nr_base - - part_batches[i_tgt_grp].extend( - _make_cross_partition_batch( - queue, - vol_to_bdry_conns, - i_src_part, - i_src_grp, - i_src_elem, - i_tgt_part, - i_tgt_grp, - i_tgt_elem)) - ''' + + i_src_elems = adj.neighbors + i_src_faces = adj.neighbor_faces + i_src_grps = [src_mesh.find_igrp(e) for e in i_src_elems] + for i in range(len(i_src_elems)): + i_src_elems[i] -= src_mesh.groups[i_src_grps[i]].element_nr_base + + i_tgt_elems = adj.elements + i_tgt_faces = adj.element_faces + + for idx, i_tgt_elem in enumerate(i_tgt_elems): + i_tgt_face = i_tgt_faces[idx] + i_src_elem = i_src_elems[idx] + i_src_face = i_src_faces[idx] + i_src_grp = i_src_grps[idx] + + part_batches[i_tgt_grp].append( + _make_cross_partition_batch(queue, + vol_to_bdry_conns, part_meshes, + i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face, + i_src_part, i_src_grp, i_src_elem, i_src_face)) # Make one Discr connection for each partition. 
disc_conns.append(DirectDiscretizationConnection( -- GitLab From 89e95da3d224df93cee790f426fab57c07baec0a Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 29 Mar 2017 23:55:23 -0500 Subject: [PATCH 033/266] make_partition_connection works with no testing --- .../connection/opposite_face.py | 58 +++++++++++-------- test/test_meshmode.py | 17 +++--- 2 files changed, 43 insertions(+), 32 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 6779bbd9..8c127bba 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -395,7 +395,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, +def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face, i_src_part, i_src_grp, i_src_elem, i_src_face): """ @@ -408,16 +408,22 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, :returns: ??? """ - src_mesh = part_meshes[i_src_part] - tgt_mesh = part_meshes[i_tgt_part] - - adj = tgt_mesh.interpart_adj_groups[i_tgt_grp][i_src_part] - tgt_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr - tgt_bdry_nodes = tgt_mesh.groups[i_tgt_grp].nodes[:, i_tgt_elem, :] - src_bdry_nodes = src_mesh.groups[i_src_grp].nodes[:, i_src_elem, :] + tgt_bdry_nodes = ( + # FIXME: This should view-then-transfer (but PyOpenCL doesn't do + # non-contiguous transfers for now). + tgt_bdry_discr.groups[i_tgt_grp].view( + tgt_bdry_discr.nodes().get(queue=queue)) + [:, i_tgt_elem]) + + src_bdry_nodes = ( + # FIXME: This should view-then-transfer (but PyOpenCL doesn't do + # non-contiguous transfers for now). 
+ src_bdry_discr.groups[i_src_grp].view( + src_bdry_discr.nodes().get(queue=queue)) + [:, i_src_elem]) ambient_dim, n_tgt_unit_nodes = tgt_bdry_nodes.shape @@ -445,11 +451,13 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, basis_at_unit_nodes[i] = ( f(unit_nodes.reshape(dim, -1)) .reshape(n_tgt_unit_nodes)) - intp_coeffs = np.einsum("fj,jt->ft", src_inv_t_vdm, basis_at_unit_nodes) + #intp_coeffs = src_inv_t_vdm @ basis_at_unit_nodes + intp_coeffs = np.einsum("ij,jk->ik", src_inv_t_vdm, basis_at_unit_nodes) # If we're interpolating 1, we had better get 1 back. - #one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) - #assert (one_deviation < tol).all(), np.max(one_deviation) - return np.einsum("ft,af->at", intp_coeffs, src_bdry_nodes) + one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) + assert (one_deviation < tol).all(), np.max(one_deviation) + return np.einsum("ij,jk->ik", src_bdry_nodes, intp_coeffs) + #return src_bdry_nodes @ intp_coeffs.T def get_map_jacobian(unit_nodes): # unit_nodes: (dim, nto_unit_nodes) @@ -462,8 +470,8 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, dbasis_at_unit_nodes[rst_axis, i] = ( df_r.reshape(n_tgt_unit_nodes)) dintp_coeffs = np.einsum( - "fj,rjet->rfet", src_inv_t_vdm, dbasis_at_unit_nodes) - return np.einsum("rfet,aef->raet", dintp_coeffs, src_bdry_nodes) + "ij,rjk->rik", src_inv_t_vdm, dbasis_at_unit_nodes) + return np.einsum("ij,rjk->rik", src_bdry_nodes, dintp_coeffs) logger.info("make_partition_connection: begin gauss-newton") niter = 0 @@ -477,13 +485,13 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, if dim == 1: # TODO: Needs testing. 
# A is df.T - ata = np.einsum("iket,jket->ijet", df, df) - atb = np.einsum("iket,ket->iet", df, resid) + ata = np.einsum("ikt,jkt->ijt", df, df) + atb = np.einsum("ikt,kt->it", df, resid) df_inv_resid = atb / ata[0, 0] elif dim == 2: # A is df.T - ata = np.einsum("iket,jket->ijet", df, df) - atb = np.einsum("iket,ket->iet", df, resid) + ata = np.einsum("ikt,jkt->ijt", df, df) + atb = np.einsum("ikt,kt->it", df, resid) det = ata[0, 0]*ata[1, 1] - ata[0, 1]*ata[1, 0] df_inv_resid = np.empty_like(src_unit_nodes) df_inv_resid[0] = 1/det * (ata[1, 1] * atb[0] - ata[1, 0]*atb[1]) @@ -514,10 +522,12 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, def to_dev(ary): return cl.array.to_device(queue, ary, array_queue=None) + from meshmode.discretization.connection import InterpolationBatch return InterpolationBatch( + # This is not right. Need partition number information. from_group_index=i_src_grp, - from_element_indices=to_dev(from_bdry_element_indices[close_els]), - to_element_indices=to_dev(to_bdry_element_indices[close_els]), + from_element_indices=to_dev(np.array([i_src_elem])), + to_element_indices=to_dev(np.array([i_tgt_elem])), result_unit_nodes=src_unit_nodes, to_element_face=None) @@ -557,15 +567,15 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): src_mesh = part_meshes[i_src_part] + i_tgt_elems = adj.elements + i_tgt_faces = adj.element_faces + i_src_elems = adj.neighbors i_src_faces = adj.neighbor_faces i_src_grps = [src_mesh.find_igrp(e) for e in i_src_elems] for i in range(len(i_src_elems)): i_src_elems[i] -= src_mesh.groups[i_src_grps[i]].element_nr_base - i_tgt_elems = adj.elements - i_tgt_faces = adj.element_faces - for idx, i_tgt_elem in enumerate(i_tgt_elems): i_tgt_face = i_tgt_faces[idx] i_src_elem = i_src_elems[idx] @@ -574,7 +584,7 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): part_batches[i_tgt_grp].append( _make_cross_partition_batch(queue, - vol_to_bdry_conns, part_meshes, + 
vol_to_bdry_conns, i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face, i_src_part, i_src_grp, i_src_elem, i_src_face)) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index e2980d2d..f286177a 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -57,12 +57,14 @@ def test_partition_interpolation(ctx_getter): n = 3 dim = 2 num_parts = 7 - from meshmode.mesh.generation import generate_warped_rect_mesh - mesh1 = generate_warped_rect_mesh(dim, order=order, n=n) - mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) + from meshmode.mesh.generation import generate_regular_rect_mesh + mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) + #from meshmode.mesh.generation import generate_warped_rect_mesh + #mesh = generate_warped_rect_mesh(dim, order=order, n=n) + #mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes([mesh1, mesh2]) + #from meshmode.mesh.processing import merge_disjoint_meshes + #mesh = merge_disjoint_meshes([mesh1, mesh2]) adjacency_list = np.zeros((mesh.nelements,), dtype=set) for elem in range(mesh.nelements): @@ -93,9 +95,8 @@ def test_partition_interpolation(ctx_getter): connections = make_partition_connection(bdry_connections, part_meshes) from meshmode.discretization.connection import check_connection - for conn in connections: - print(conn) - check_connection(conn) + #for conn in connections: + #check_connection(conn) # }}} -- GitLab From 9d472bf171d75fb2790b5bc63505319949265b68 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 3 Apr 2017 13:09:27 -0500 Subject: [PATCH 034/266] Small changes --- meshmode/discretization/connection/opposite_face.py | 10 ++++------ test/test_meshmode.py | 6 +++--- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 8c127bba..98bee5c4 100644 --- 
a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -451,13 +451,13 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, basis_at_unit_nodes[i] = ( f(unit_nodes.reshape(dim, -1)) .reshape(n_tgt_unit_nodes)) - #intp_coeffs = src_inv_t_vdm @ basis_at_unit_nodes - intp_coeffs = np.einsum("ij,jk->ik", src_inv_t_vdm, basis_at_unit_nodes) + intp_coeffs = src_inv_t_vdm @ basis_at_unit_nodes + #intp_coeffs = np.einsum("ij,jk->ik", src_inv_t_vdm, basis_at_unit_nodes) # If we're interpolating 1, we had better get 1 back. one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) assert (one_deviation < tol).all(), np.max(one_deviation) - return np.einsum("ij,jk->ik", src_bdry_nodes, intp_coeffs) - #return src_bdry_nodes @ intp_coeffs.T + #return np.einsum("ij,jk,ik", src_bdry_nodes, intp_coeffs) + return src_bdry_nodes @ intp_coeffs def get_map_jacobian(unit_nodes): # unit_nodes: (dim, nto_unit_nodes) @@ -549,8 +549,6 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - # Create a list of batches. Each batch contains interpolation - # data from one partition to another. for i_tgt_part, tgt_vol_conn in enumerate(vol_to_bdry_conns): # Is this ok in a loop? 
diff --git a/test/test_meshmode.py b/test/test_meshmode.py index f286177a..613797ed 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -60,7 +60,7 @@ def test_partition_interpolation(ctx_getter): from meshmode.mesh.generation import generate_regular_rect_mesh mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) #from meshmode.mesh.generation import generate_warped_rect_mesh - #mesh = generate_warped_rect_mesh(dim, order=order, n=n) + #mesh1 = generate_warped_rect_mesh(dim, order=order, n=n) #mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) #from meshmode.mesh.processing import merge_disjoint_meshes @@ -95,8 +95,8 @@ def test_partition_interpolation(ctx_getter): connections = make_partition_connection(bdry_connections, part_meshes) from meshmode.discretization.connection import check_connection - #for conn in connections: - #check_connection(conn) + for conn in connections: + check_connection(conn) # }}} -- GitLab From cfe1af7065f97ae15e2f0343976c587458de7588 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 3 Apr 2017 17:09:31 -0500 Subject: [PATCH 035/266] Add adjacency_list method. 
--- .../connection/opposite_face.py | 85 ++++++++++++++----- meshmode/mesh/__init__.py | 16 ++++ test/test_meshmode.py | 29 ++----- 3 files changed, 87 insertions(+), 43 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 98bee5c4..1a69f624 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -439,40 +439,63 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, src_unit_nodes[:] = initial_guess.reshape(-1, 1) import modepy as mp - src_vdm = mp.vandermonde(src_grp.basis(), src_grp.unit_nodes) - src_inv_t_vdm = la.inv(src_vdm.T) - src_nfuncs = len(src_grp.basis()) + vdm = mp.vandermonde(src_grp.basis(), src_grp.unit_nodes) + inv_t_vdm = la.inv(vdm.T) + n_src_funcs = len(src_grp.basis()) def apply_map(unit_nodes): - # unit_nodes: (dim, nto_unit_nodes) - # basis_at_unit_nodes - basis_at_unit_nodes = np.empty((src_nfuncs, n_tgt_unit_nodes)) - for i, f in enumerate(src_grp.basis()): - basis_at_unit_nodes[i] = ( - f(unit_nodes.reshape(dim, -1)) - .reshape(n_tgt_unit_nodes)) - intp_coeffs = src_inv_t_vdm @ basis_at_unit_nodes - #intp_coeffs = np.einsum("ij,jk->ik", src_inv_t_vdm, basis_at_unit_nodes) - # If we're interpolating 1, we had better get 1 back. 
- one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) - assert (one_deviation < tol).all(), np.max(one_deviation) - #return np.einsum("ij,jk,ik", src_bdry_nodes, intp_coeffs) - return src_bdry_nodes @ intp_coeffs + basis_at_unit_nodes = np.array([f(unit_nodes) for f in src_grp.basis()]) + + return src_bdry_nodes @ inv_t_vdm @ basis_at_unit_nodes def get_map_jacobian(unit_nodes): - # unit_nodes: (dim, nto_unit_nodes) - # basis_at_unit_nodes - dbasis_at_unit_nodes = np.empty( - (dim, src_nfuncs, n_tgt_unit_nodes)) + dbasis_at_unit_nodes = np.empty((dim, n_src_funcs, n_tgt_unit_nodes)) + for i, df in enumerate(src_grp.grad_basis()): df_result = df(unit_nodes.reshape(dim, -1)) for rst_axis, df_r in enumerate(df_result): dbasis_at_unit_nodes[rst_axis, i] = ( df_r.reshape(n_tgt_unit_nodes)) + #dbasis_at_unit_nodes = np.array([df(unit_nodes) for df in src_grp.grad_basis()]) dintp_coeffs = np.einsum( - "ij,rjk->rik", src_inv_t_vdm, dbasis_at_unit_nodes) + "ij,rjk->rik", inv_t_vdm, dbasis_at_unit_nodes) return np.einsum("ij,rjk->rik", src_bdry_nodes, dintp_coeffs) + # {{{ test map applier and jacobian + if 0: + u = src_unit_nodes + f = apply_map(u) + for h in [1e-1, 1e-2]: + du = h*np.random.randn(*u.shape) + + f_2 = apply_map(u+du) + + jf = get_map_jacobian(u) + + f2_2 = f + np.einsum("rat,rt->at", jf, du) + + print(h, la.norm((f_2-f2_2).ravel())) + # }}} + + # {{{ visualize initial guess + + if 0: + import matplotlib.pyplot as pt + guess = apply_map(src_unit_nodes) + goals = tgt_bdry_nodes + + from meshmode.discretization.visualization import draw_curve + draw_curve(src_bdry_discr) + + pt.plot(guess[0].reshape(-1), guess[1].reshape(-1), "or") + pt.plot(goals[0].reshape(-1), goals[1].reshape(-1), "og") + pt.plot(src_bdry_nodes[0].reshape(-1), src_bdry_nodes[1].reshape(-1), "o", + color="purple") + pt.show() + + # }}} + + logger.info("make_partition_connection: begin gauss-newton") niter = 0 while True: @@ -507,8 +530,26 @@ def _make_cross_partition_batch(queue, 
vol_to_bdry_conns, for t in range(n_tgt_unit_nodes): df_inv_resid[:, t], _, _, _ = \ la.lstsq(df[:, :, t].T, resid[:, t]) + + # {{{ visualize next guess + if 0: + import matplotlib.pyplot as pt + guess = apply_map(src_unit_nodes) + goals = tgt_bdry_nodes + + from meshmode.discretization.visualization import draw_curve + + pt.plot(guess[0].reshape(-1), guess[2].reshape(-1), "r^") + pt.plot(goals[0].reshape(-1), goals[2].reshape(-1), "xg") + pt.plot(src_bdry_nodes[0].reshape(-1), src_bdry_nodes[2].reshape(-1), "o", + color="purple") + #pt.plot(src_unit_nodes[0].reshape(-1), src_unit_nodes[1].reshape(-1), "ob") + pt.show() + # }}} + src_unit_nodes = src_unit_nodes - df_inv_resid max_resid = np.max(np.abs(resid)) + #print(resid[0, :]) logger.debug("gauss-newton residual: %g" % max_resid) if max_resid < tol: logger.info("make_partition_connection: gauss-newton: done, " diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 7ebaf393..62174f4c 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -623,6 +623,7 @@ class Mesh(Record): .. automethod:: __eq__ .. automethod:: __ne__ .. automethod:: find_igrp + .. automethos:: adjacency_list """ face_id_dtype = np.int8 @@ -860,6 +861,21 @@ class Mesh(Record): elem -= grp.nelements raise RuntimeError("Could not find group with element %d." % elem) + def adjacency_list(self): + """ + :returns: An :class:`np.array` with dtype `set`. `adjacency[i]` is the set + of all elements that are adjacent to element `i`. + Useful for `pymetis.part_graph`. + """ + adjacency_list = np.zeros((self.nelements,), dtype=set) + nodal_adj = self.nodal_adjacency + for elem in range(self.nelements): + adjacency_list[elem] = set() + starts = nodal_adj.neighbors_starts + for n in range(starts[elem], starts[elem + 1]): + adjacency_list[elem].add(nodal_adj.neighbors[n]) + return adjacency_list + # Design experience: Try not to add too many global data structures to the # mesh. 
Let the element groups be responsible for that at the mesh level. # diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 613797ed..e6be6d15 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,25 +56,18 @@ def test_partition_interpolation(ctx_getter): group_factory = PolynomialWarpAndBlendGroupFactory(order) n = 3 dim = 2 - num_parts = 7 + num_parts = 3 from meshmode.mesh.generation import generate_regular_rect_mesh mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) #from meshmode.mesh.generation import generate_warped_rect_mesh - #mesh1 = generate_warped_rect_mesh(dim, order=order, n=n) + #mesh = generate_warped_rect_mesh(dim, order=order, n=n) #mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) #from meshmode.mesh.processing import merge_disjoint_meshes #mesh = merge_disjoint_meshes([mesh1, mesh2]) - adjacency_list = np.zeros((mesh.nelements,), dtype=set) - for elem in range(mesh.nelements): - adjacency_list[elem] = set() - starts = mesh.nodal_adjacency.neighbors_starts - for n in range(starts[elem], starts[elem + 1]): - adjacency_list[elem].add(mesh.nodal_adjacency.neighbors[n]) - from pymetis import part_graph - (_, p) = part_graph(num_parts, adjacency=adjacency_list) + (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) part_per_element = np.array(p) from meshmode.mesh.processing import partition_mesh @@ -94,9 +87,10 @@ def test_partition_interpolation(ctx_getter): from meshmode.discretization.connection import make_partition_connection connections = make_partition_connection(bdry_connections, part_meshes) - from meshmode.discretization.connection import check_connection - for conn in connections: - check_connection(conn) + # We can't use check_connection because I don't think it works with partitions. 
+ #from meshmode.discretization.connection import check_connection + #for conn in connections: + # check_connection(conn) # }}} @@ -117,15 +111,8 @@ def test_partition_mesh(): from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes([mesh1, mesh2, mesh3]) - adjacency_list = np.zeros((mesh.nelements,), dtype=set) - for elem in range(mesh.nelements): - adjacency_list[elem] = set() - starts = mesh.nodal_adjacency.neighbors_starts - for n in range(starts[elem], starts[elem + 1]): - adjacency_list[elem].add(mesh.nodal_adjacency.neighbors[n]) - from pymetis import part_graph - (_, p) = part_graph(num_parts, adjacency=adjacency_list) + (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) part_per_element = np.array(p) from meshmode.mesh.processing import partition_mesh -- GitLab From 570051fde0be74a782be24bfe157572b515bca59 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 3 Apr 2017 17:18:04 -0500 Subject: [PATCH 036/266] working --- .../discretization/connection/opposite_face.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 1a69f624..b224cb32 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -456,7 +456,8 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, for rst_axis, df_r in enumerate(df_result): dbasis_at_unit_nodes[rst_axis, i] = ( df_r.reshape(n_tgt_unit_nodes)) - #dbasis_at_unit_nodes = np.array([df(unit_nodes) for df in src_grp.grad_basis()]) + #dbasis_at_unit_nodes = np.array( + # [df(unit_nodes) for df in src_grp.grad_basis()]) dintp_coeffs = np.einsum( "ij,rjk->rik", inv_t_vdm, dbasis_at_unit_nodes) return np.einsum("ij,rjk->rik", src_bdry_nodes, dintp_coeffs) @@ -495,7 +496,6 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, # }}} - logger.info("make_partition_connection: 
begin gauss-newton") niter = 0 while True: @@ -532,18 +532,17 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, la.lstsq(df[:, :, t].T, resid[:, t]) # {{{ visualize next guess - if 0: + if 1: import matplotlib.pyplot as pt guess = apply_map(src_unit_nodes) goals = tgt_bdry_nodes from meshmode.discretization.visualization import draw_curve - pt.plot(guess[0].reshape(-1), guess[2].reshape(-1), "r^") - pt.plot(goals[0].reshape(-1), goals[2].reshape(-1), "xg") - pt.plot(src_bdry_nodes[0].reshape(-1), src_bdry_nodes[2].reshape(-1), "o", - color="purple") - #pt.plot(src_unit_nodes[0].reshape(-1), src_unit_nodes[1].reshape(-1), "ob") + pt.plot(guess[0], guess[1], "r^") + pt.plot(goals[0], goals[1], "xg") + pt.plot(src_bdry_nodes[0], src_bdry_nodes[1], "o", color="purple") + pt.plot(src_unit_nodes[0], src_unit_nodes[1], "ob") pt.show() # }}} @@ -613,7 +612,8 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): i_src_faces = adj.neighbor_faces i_src_grps = [src_mesh.find_igrp(e) for e in i_src_elems] for i in range(len(i_src_elems)): - i_src_elems[i] -= src_mesh.groups[i_src_grps[i]].element_nr_base + elem_base = src_mesh.groups[i_src_grps[i]].element_nr_base + i_src_elems[i] -= elem_base for idx, i_tgt_elem in enumerate(i_tgt_elems): i_tgt_face = i_tgt_faces[idx] -- GitLab From db3a826e44c59168a22b7d4ef93941b30098938e Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 6 Apr 2017 10:51:21 -0500 Subject: [PATCH 037/266] Changed _make_cross_face_batches to handle two different meshes. 
--- .../connection/opposite_face.py | 460 ++++++------------ meshmode/mesh/__init__.py | 8 +- meshmode/mesh/processing.py | 11 +- test/test_meshmode.py | 23 +- 4 files changed, 168 insertions(+), 334 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index b224cb32..5b5c03dd 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -35,143 +35,82 @@ logger = logging.getLogger(__name__) # {{{ opposite-face connection -def _make_cross_face_batches( - queue, vol_discr, bdry_discr, - i_tgt_grp, i_src_grp, - i_face_tgt, - adj_grp, - vbc_tgt_grp_face_batch, src_grp_el_lookup): +def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, + i_tgt_grp, i_src_grp, + tgt_bdry_element_indices, + src_bdry_element_indices): - # {{{ index wrangling + # FIXME: This should view-then-transfer + # (but PyOpenCL doesn't do non-contiguous transfers for now). + tgt_bdry_nodes = (tgt_bdry_discr.groups[i_tgt_grp].view(tgt_bdry_discr.nodes(). + get(queue=queue))[:, tgt_bdry_element_indices]) - # Assert that the adjacency group and the restriction - # interpolation batch and the adjacency group have the same - # element ordering. + # FIXME: This should view-then-transfer + # (but PyOpenCL doesn't do non-contiguous transfers for now). + src_bdry_nodes = (tgt_bdry_discr.groups[i_src_grp].view(tgt_bdry_discr.nodes(). 
+ get(queue=queue))[:, src_bdry_element_indices]) - adj_grp_tgt_flags = adj_grp.element_faces == i_face_tgt - - assert ( - np.array_equal( - adj_grp.elements[adj_grp_tgt_flags], - vbc_tgt_grp_face_batch.from_element_indices - .get(queue=queue))) - - # find to_element_indices - - to_bdry_element_indices = ( - vbc_tgt_grp_face_batch.to_element_indices - .get(queue=queue)) - - # find from_element_indices - - from_vol_element_indices = adj_grp.neighbors[adj_grp_tgt_flags] - from_element_faces = adj_grp.neighbor_faces[adj_grp_tgt_flags] - - from_bdry_element_indices = src_grp_el_lookup[ - from_vol_element_indices, from_element_faces] - - # }}} - - # {{{ visualization (for debugging) + tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps - if 0: - print("TVE", adj_grp.elements[adj_grp_tgt_flags]) - print("TBE", to_bdry_element_indices) - print("FVE", from_vol_element_indices) - from meshmode.mesh.visualization import draw_2d_mesh - import matplotlib.pyplot as pt - draw_2d_mesh(vol_discr.mesh, draw_element_numbers=True, - set_bounding_box=True, - draw_vertex_numbers=False, - draw_face_numbers=True, - fill=None) - pt.figure() - - draw_2d_mesh(bdry_discr.mesh, draw_element_numbers=True, - set_bounding_box=True, - draw_vertex_numbers=False, - draw_face_numbers=True, - fill=None) + src_mesh_grp = src_bdry_discr.mesh.groups[i_src_grp] + src_grp = src_bdry_discr.groups[i_src_grp] - pt.show() - # }}} + dim = src_grp.dim + ambient_dim, nelements, ntgt_unit_nodes = tgt_bdry_nodes.shape # {{{ invert face map (using Gauss-Newton) - to_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). 
- bdry_discr.groups[i_tgt_grp].view( - bdry_discr.nodes().get(queue=queue)) - [:, to_bdry_element_indices]) - - tol = 1e4 * np.finfo(to_bdry_nodes.dtype).eps - - from_mesh_grp = bdry_discr.mesh.groups[i_src_grp] - from_grp = bdry_discr.groups[i_src_grp] - - dim = from_grp.dim - ambient_dim, nelements, nto_unit_nodes = to_bdry_nodes.shape - - initial_guess = np.mean(from_mesh_grp.vertex_unit_coordinates(), axis=0) - from_unit_nodes = np.empty((dim, nelements, nto_unit_nodes)) - from_unit_nodes[:] = initial_guess.reshape(-1, 1, 1) + initial_guess = np.mean(src_mesh_grp.vertex_unit_coordinates(), axis=0) + src_unit_nodes = np.empty((dim, nelements, ntgt_unit_nodes)) + src_unit_nodes[:] = initial_guess.reshape(-1, 1, 1) import modepy as mp - from_vdm = mp.vandermonde(from_grp.basis(), from_grp.unit_nodes) - from_inv_t_vdm = la.inv(from_vdm.T) - from_nfuncs = len(from_grp.basis()) - - # (ambient_dim, nelements, nfrom_unit_nodes) - from_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). 
- bdry_discr.groups[i_src_grp].view( - bdry_discr.nodes().get(queue=queue)) - [:, from_bdry_element_indices]) + vdm = mp.vandermonde(src_grp.basis(), src_grp.unit_nodes) + inv_t_vdm = la.inv(vdm.T) + nsrc_funcs = len(src_grp.basis()) def apply_map(unit_nodes): - # unit_nodes: (dim, nelements, nto_unit_nodes) + # unit_nodes: (dim, nelements, ntgt_unit_nodes) # basis_at_unit_nodes - basis_at_unit_nodes = np.empty((from_nfuncs, nelements, nto_unit_nodes)) + basis_at_unit_nodes = np.empty((nsrc_funcs, nelements, ntgt_unit_nodes)) - for i, f in enumerate(from_grp.basis()): + for i, f in enumerate(src_grp.basis()): basis_at_unit_nodes[i] = ( f(unit_nodes.reshape(dim, -1)) - .reshape(nelements, nto_unit_nodes)) + .reshape(nelements, ntgt_unit_nodes)) - intp_coeffs = np.einsum("fj,jet->fet", from_inv_t_vdm, basis_at_unit_nodes) + intp_coeffs = np.einsum("fj,jet->fet", inv_t_vdm, basis_at_unit_nodes) # If we're interpolating 1, we had better get 1 back. one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) assert (one_deviation < tol).all(), np.max(one_deviation) - return np.einsum("fet,aef->aet", intp_coeffs, from_bdry_nodes) + return np.einsum("fet,aef->aet", intp_coeffs, src_bdry_nodes) def get_map_jacobian(unit_nodes): - # unit_nodes: (dim, nelements, nto_unit_nodes) + # unit_nodes: (dim, nelements, ntgt_unit_nodes) # basis_at_unit_nodes dbasis_at_unit_nodes = np.empty( - (dim, from_nfuncs, nelements, nto_unit_nodes)) + (dim, nsrc_funcs, nelements, ntgt_unit_nodes)) - for i, df in enumerate(from_grp.grad_basis()): + for i, df in enumerate(src_grp.grad_basis()): df_result = df(unit_nodes.reshape(dim, -1)) for rst_axis, df_r in enumerate(df_result): dbasis_at_unit_nodes[rst_axis, i] = ( - df_r.reshape(nelements, nto_unit_nodes)) + df_r.reshape(nelements, ntgt_unit_nodes)) dintp_coeffs = np.einsum( - "fj,rjet->rfet", from_inv_t_vdm, dbasis_at_unit_nodes) + "fj,rjet->rfet", inv_t_vdm, dbasis_at_unit_nodes) - return np.einsum("rfet,aef->raet", dintp_coeffs, 
from_bdry_nodes) + return np.einsum("rfet,aef->raet", dintp_coeffs, src_bdry_nodes) # {{{ test map applier and jacobian if 0: - u = from_unit_nodes + u = src_unit_nodes f = apply_map(u) for h in [1e-1, 1e-2]: du = h*np.random.randn(*u.shape) @@ -190,16 +129,16 @@ def _make_cross_face_batches( if 0: import matplotlib.pyplot as pt - guess = apply_map(from_unit_nodes) - goals = to_bdry_nodes + guess = apply_map(src_unit_nodes) + goals = tgt_bdry_nodes from meshmode.discretization.visualization import draw_curve - draw_curve(bdry_discr) + draw_curve(tgt_bdry_discr) + draw_curve(src_bdry_discr) pt.plot(guess[0].reshape(-1), guess[1].reshape(-1), "or") pt.plot(goals[0].reshape(-1), goals[1].reshape(-1), "og") - pt.plot(from_bdry_nodes[0].reshape(-1), from_bdry_nodes[1].reshape(-1), "o", - color="purple") + pt.plot(src_bdry_nodes[0].reshape(-1), src_bdry_nodes[1].reshape(-1), "xb") pt.show() # }}} @@ -208,10 +147,10 @@ def _make_cross_face_batches( niter = 0 while True: - resid = apply_map(from_unit_nodes) - to_bdry_nodes + resid = apply_map(src_unit_nodes) - tgt_bdry_nodes - df = get_map_jacobian(from_unit_nodes) - df_inv_resid = np.empty_like(from_unit_nodes) + df = get_map_jacobian(src_unit_nodes) + df_inv_resid = np.empty_like(src_unit_nodes) # For the 1D/2D accelerated versions, we'll use the normal # equations and Cramer's rule. If you're looking for high-end @@ -231,7 +170,7 @@ def _make_cross_face_batches( det = ata[0, 0]*ata[1, 1] - ata[0, 1]*ata[1, 0] - df_inv_resid = np.empty_like(from_unit_nodes) + df_inv_resid = np.empty_like(src_unit_nodes) df_inv_resid[0] = 1/det * (ata[1, 1] * atb[0] - ata[1, 0]*atb[1]) df_inv_resid[1] = 1/det * (-ata[0, 1] * atb[0] + ata[0, 0]*atb[1]) @@ -243,11 +182,11 @@ def _make_cross_face_batches( # But we'll only hit it for boundaries of 4+D meshes, in which # case... good luck. 
:) for e in range(nelements): - for t in range(nto_unit_nodes): + for t in range(ntgt_unit_nodes): df_inv_resid[:, e, t], _, _, _ = \ la.lstsq(df[:, :, e, t].T, resid[:, e, t]) - from_unit_nodes = from_unit_nodes - df_inv_resid + src_unit_nodes = src_unit_nodes - df_inv_resid max_resid = np.max(np.abs(resid)) logger.debug("gauss-newton residual: %g" % max_resid) @@ -264,7 +203,7 @@ def _make_cross_face_batches( # }}} - # {{{ find groups of from_unit_nodes + # {{{ find groups of src_unit_nodes def to_dev(ary): return cl.array.to_device(queue, ary, array_queue=None) @@ -275,10 +214,10 @@ def _make_cross_face_batches( if not len(todo_elements): return - template_unit_nodes = from_unit_nodes[:, todo_elements[0], :] + template_unit_nodes = src_unit_nodes[:, todo_elements[0], :] unit_node_dist = np.max(np.max(np.abs( - from_unit_nodes[:, todo_elements, :] + src_unit_nodes[:, todo_elements, :] - template_unit_nodes.reshape(dim, 1, -1)), axis=2), axis=0) @@ -287,7 +226,7 @@ def _make_cross_face_batches( done_elements[close_els] = True unit_node_dist = np.max(np.max(np.abs( - from_unit_nodes[:, todo_elements, :] + src_unit_nodes[:, todo_elements, :] - template_unit_nodes.reshape(dim, 1, -1)), axis=2), axis=0) @@ -295,8 +234,8 @@ def _make_cross_face_batches( from meshmode.discretization.connection import InterpolationBatch yield InterpolationBatch( from_group_index=i_src_grp, - from_element_indices=to_dev(from_bdry_element_indices[close_els]), - to_element_indices=to_dev(to_bdry_element_indices[close_els]), + from_element_indices=to_dev(src_bdry_element_indices[close_els]), + to_element_indices=to_dev(tgt_bdry_element_indices[close_els]), result_unit_nodes=template_unit_nodes, to_element_face=None) @@ -366,211 +305,86 @@ def make_opposite_face_connection(volume_to_bdry_conn): for i_tgt_grp in range(ngrps): vbc_tgt_grp_batches = volume_to_bdry_conn.groups[i_tgt_grp].batches - adj_grp = vol_mesh.facial_adjacency_groups[i_tgt_grp][i_src_grp] + adj = 
vol_mesh.facial_adjacency_groups[i_tgt_grp][i_src_grp] for i_face_tgt in range(vol_mesh.groups[i_tgt_grp].nfaces): vbc_tgt_grp_face_batch = _find_ibatch_for_face( vbc_tgt_grp_batches, i_face_tgt) - groups[i_tgt_grp].extend( - _make_cross_face_batches( - queue, vol_discr, bdry_discr, - i_tgt_grp, i_src_grp, - i_face_tgt, - adj_grp, - vbc_tgt_grp_face_batch, src_grp_el_lookup)) - - from meshmode.discretization.connection import ( - DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - return DirectDiscretizationConnection( - from_discr=bdry_discr, - to_discr=bdry_discr, - groups=[ - DiscretizationConnectionElementGroup(batches=batches) - for batches in groups], - is_surjective=True) - -# }}} + # {{{ index wrangling + # Assert that the adjacency group and the restriction + # interpolation batch and the adjacency group have the same + # element ordering. -# {{{ partition_connection - -def _make_cross_partition_batch(queue, vol_to_bdry_conns, - i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face, - i_src_part, i_src_grp, i_src_elem, i_src_face): - """ - Creates a batch that transfers data to a face from a face of another partition. + adj_tgt_flags = adj.element_faces == i_face_tgt - :arg queue: - :arg vol_to_bdry_conns: A list of :class:`DirectDiscretizationConnection` - for each partition. + assert (np.array_equal( + adj.elements[adj_tgt_flags], + vbc_tgt_grp_face_batch.from_element_indices + .get(queue=queue))) - :returns: ??? - """ + # find to_element_indices - tgt_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr - src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr + tgt_bdry_element_indices = ( + vbc_tgt_grp_face_batch.to_element_indices + .get(queue=queue)) - tgt_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). 
- tgt_bdry_discr.groups[i_tgt_grp].view( - tgt_bdry_discr.nodes().get(queue=queue)) - [:, i_tgt_elem]) + # find from_element_indices - src_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). - src_bdry_discr.groups[i_src_grp].view( - src_bdry_discr.nodes().get(queue=queue)) - [:, i_src_elem]) + src_vol_element_indices = adj.neighbors[adj_tgt_flags] + src_element_faces = adj.neighbor_faces[adj_tgt_flags] - ambient_dim, n_tgt_unit_nodes = tgt_bdry_nodes.shape + src_bdry_element_indices = src_grp_el_lookup[ + src_vol_element_indices, src_element_faces] - tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps + # }}} - src_mesh_grp = src_bdry_discr.mesh.groups[i_src_grp] - src_grp = src_bdry_discr.groups[i_src_grp] + # {{{ visualization (for debugging) - dim = src_grp.dim + if 0: + print("TVE", adj.elements[adj_tgt_flags]) + print("TBE", tgt_bdry_element_indices) + print("FVE", src_vol_element_indices) + from meshmode.mesh.visualization import draw_2d_mesh + import matplotlib.pyplot as pt + draw_2d_mesh(vol_discr.mesh, draw_element_numbers=True, + set_bounding_box=True, + draw_vertex_numbers=False, + draw_face_numbers=True, + fill=None) + pt.figure() - initial_guess = np.mean(src_mesh_grp.vertex_unit_coordinates(), axis=0) - src_unit_nodes = np.empty((dim, n_tgt_unit_nodes)) - src_unit_nodes[:] = initial_guess.reshape(-1, 1) + draw_2d_mesh(bdry_discr.mesh, draw_element_numbers=True, + set_bounding_box=True, + draw_vertex_numbers=False, + draw_face_numbers=True, + fill=None) - import modepy as mp - vdm = mp.vandermonde(src_grp.basis(), src_grp.unit_nodes) - inv_t_vdm = la.inv(vdm.T) - n_src_funcs = len(src_grp.basis()) + pt.show() - def apply_map(unit_nodes): - basis_at_unit_nodes = np.array([f(unit_nodes) for f in src_grp.basis()]) + # }}} - return src_bdry_nodes @ inv_t_vdm @ basis_at_unit_nodes - - def get_map_jacobian(unit_nodes): - dbasis_at_unit_nodes = np.empty((dim, n_src_funcs, n_tgt_unit_nodes)) - - 
for i, df in enumerate(src_grp.grad_basis()): - df_result = df(unit_nodes.reshape(dim, -1)) - for rst_axis, df_r in enumerate(df_result): - dbasis_at_unit_nodes[rst_axis, i] = ( - df_r.reshape(n_tgt_unit_nodes)) - #dbasis_at_unit_nodes = np.array( - # [df(unit_nodes) for df in src_grp.grad_basis()]) - dintp_coeffs = np.einsum( - "ij,rjk->rik", inv_t_vdm, dbasis_at_unit_nodes) - return np.einsum("ij,rjk->rik", src_bdry_nodes, dintp_coeffs) - - # {{{ test map applier and jacobian - if 0: - u = src_unit_nodes - f = apply_map(u) - for h in [1e-1, 1e-2]: - du = h*np.random.randn(*u.shape) - - f_2 = apply_map(u+du) - - jf = get_map_jacobian(u) - - f2_2 = f + np.einsum("rat,rt->at", jf, du) - - print(h, la.norm((f_2-f2_2).ravel())) - # }}} - - # {{{ visualize initial guess - - if 0: - import matplotlib.pyplot as pt - guess = apply_map(src_unit_nodes) - goals = tgt_bdry_nodes - - from meshmode.discretization.visualization import draw_curve - draw_curve(src_bdry_discr) - - pt.plot(guess[0].reshape(-1), guess[1].reshape(-1), "or") - pt.plot(goals[0].reshape(-1), goals[1].reshape(-1), "og") - pt.plot(src_bdry_nodes[0].reshape(-1), src_bdry_nodes[1].reshape(-1), "o", - color="purple") - pt.show() - - # }}} - - logger.info("make_partition_connection: begin gauss-newton") - niter = 0 - while True: - resid = apply_map(src_unit_nodes) - tgt_bdry_nodes - df = get_map_jacobian(src_unit_nodes) - df_inv_resid = np.empty_like(src_unit_nodes) - # For the 1D/2D accelerated versions, we'll use the normal - # equations and Cramer's rule. If you're looking for high-end - # numerics, look no further than meshmode. - if dim == 1: - # TODO: Needs testing. 
- # A is df.T - ata = np.einsum("ikt,jkt->ijt", df, df) - atb = np.einsum("ikt,kt->it", df, resid) - df_inv_resid = atb / ata[0, 0] - elif dim == 2: - # A is df.T - ata = np.einsum("ikt,jkt->ijt", df, df) - atb = np.einsum("ikt,kt->it", df, resid) - det = ata[0, 0]*ata[1, 1] - ata[0, 1]*ata[1, 0] - df_inv_resid = np.empty_like(src_unit_nodes) - df_inv_resid[0] = 1/det * (ata[1, 1] * atb[0] - ata[1, 0]*atb[1]) - df_inv_resid[1] = 1/det * (-ata[0, 1] * atb[0] + ata[0, 0]*atb[1]) - else: - # The boundary of a 3D mesh is 2D, so that's the - # highest-dimensional case we genuinely care about. - # - # This stinks, performance-wise, because it's not vectorized. - # But we'll only hit it for boundaries of 4+D meshes, in which - # case... good luck. :) - # TODO: Needs testing. - for t in range(n_tgt_unit_nodes): - df_inv_resid[:, t], _, _, _ = \ - la.lstsq(df[:, :, t].T, resid[:, t]) - - # {{{ visualize next guess - if 1: - import matplotlib.pyplot as pt - guess = apply_map(src_unit_nodes) - goals = tgt_bdry_nodes - - from meshmode.discretization.visualization import draw_curve - - pt.plot(guess[0], guess[1], "r^") - pt.plot(goals[0], goals[1], "xg") - pt.plot(src_bdry_nodes[0], src_bdry_nodes[1], "o", color="purple") - pt.plot(src_unit_nodes[0], src_unit_nodes[1], "ob") - pt.show() - # }}} + groups[i_tgt_grp].extend(_make_cross_face_batches(queue, + bdry_discr, bdry_discr, + i_tgt_grp, i_src_grp, + tgt_bdry_element_indices, + src_bdry_element_indices)) - src_unit_nodes = src_unit_nodes - df_inv_resid - max_resid = np.max(np.abs(resid)) - #print(resid[0, :]) - logger.debug("gauss-newton residual: %g" % max_resid) - if max_resid < tol: - logger.info("make_partition_connection: gauss-newton: done, " - "final residual: %g" % max_resid) - break - niter += 1 - if niter > 10: - raise RuntimeError("Gauss-Newton (for finding partition_connection " - "reference coordinates) did not converge") + from meshmode.discretization.connection import ( + DirectDiscretizationConnection, 
DiscretizationConnectionElementGroup) + return DirectDiscretizationConnection( + from_discr=bdry_discr, + to_discr=bdry_discr, + groups=[ + DiscretizationConnectionElementGroup(batches=batches) + for batches in groups], + is_surjective=True) - def to_dev(ary): - return cl.array.to_device(queue, ary, array_queue=None) +# }}} - from meshmode.discretization.connection import InterpolationBatch - return InterpolationBatch( - # This is not right. Need partition number information. - from_group_index=i_src_grp, - from_element_indices=to_dev(np.array([i_src_elem])), - to_element_indices=to_dev(np.array([i_tgt_elem])), - result_unit_nodes=src_unit_nodes, - to_element_face=None) +# {{{ partition_connection def make_partition_connection(vol_to_bdry_conns, part_meshes): """ @@ -595,7 +409,7 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): cl_context = tgt_vol_conn.from_discr.cl_context with cl.CommandQueue(cl_context) as queue: - bdry_discr = tgt_vol_conn.to_discr + tgt_bdry_discr = tgt_vol_conn.to_discr #tgt_mesh = bdry_discr.mesh tgt_mesh = part_meshes[i_tgt_part] ngroups = len(tgt_mesh.groups) @@ -603,35 +417,55 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): for i_tgt_grp, adj_parts in enumerate(tgt_mesh.interpart_adj_groups): for i_src_part, adj in adj_parts.items(): - src_mesh = part_meshes[i_src_part] + src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr - i_tgt_elems = adj.elements - i_tgt_faces = adj.element_faces + src_mesh = part_meshes[i_src_part] i_src_elems = adj.neighbors - i_src_faces = adj.neighbor_faces - i_src_grps = [src_mesh.find_igrp(e) for e in i_src_elems] + i_src_grps = np.array([src_mesh.find_igrp(e) + for e in i_src_elems]) for i in range(len(i_src_elems)): elem_base = src_mesh.groups[i_src_grps[i]].element_nr_base i_src_elems[i] -= elem_base - for idx, i_tgt_elem in enumerate(i_tgt_elems): - i_tgt_face = i_tgt_faces[idx] - i_src_elem = i_src_elems[idx] - i_src_face = i_src_faces[idx] - i_src_grp = 
i_src_grps[idx] + for i_src_grp in range(ngroups): + + src_grp_el_lookup = _make_el_lookup_table(queue, + vol_to_bdry_conns[i_src_part], i_src_grp) + + for i_tgt_face in adj.element_faces: + + index_flags = np.logical_and((i_src_grps == i_src_grp), + (adj.element_faces == i_tgt_face)) + + vbc_tgt_grp_face_batch = _find_ibatch_for_face( + tgt_vol_conn.groups[i_tgt_grp].batches, i_tgt_face) + + tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ + to_element_indices.get(queue=queue) + + i_src_elems = adj.neighbors[index_flags] + i_src_faces = adj.neighbor_faces[index_flags] + src_bdry_element_indices =\ + src_grp_el_lookup[i_src_elems, i_src_faces] + src_bdry_element_indices = i_src_elems + + print(index_flags) + print(tgt_bdry_element_indices) + print(src_bdry_element_indices) - part_batches[i_tgt_grp].append( - _make_cross_partition_batch(queue, - vol_to_bdry_conns, - i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face, - i_src_part, i_src_grp, i_src_elem, i_src_face)) + part_batches[i_tgt_grp].extend( + _make_cross_face_batches(queue, + tgt_bdry_discr, src_bdry_discr, + i_tgt_grp, i_src_grp, + tgt_bdry_element_indices, + src_bdry_element_indices)) # Make one Discr connection for each partition. disc_conns.append(DirectDiscretizationConnection( # Is this ok? 
- from_discr=bdry_discr, - to_discr=bdry_discr, + from_discr=src_bdry_discr, + to_discr=tgt_bdry_discr, groups=[ DiscretizationConnectionElementGroup(batches=batches) for batches in part_batches], diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 62174f4c..d6bea656 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -447,10 +447,10 @@ class InterPartitionAdj(): """ def __init__(self): - self.elements = [] - self.element_faces = [] - self.neighbors = [] - self.neighbor_faces = [] + self.elements = np.array([], dtype=int) + self.element_faces = np.array([], dtype=int) + self.neighbors = np.array([], dtype=int) + self.neighbor_faces = np.array([], dtype=int) def get_neighbor(self, elem, face): """ diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 27a6c56d..cb95d008 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -188,12 +188,13 @@ def partition_mesh(mesh, part_per_element, part_nr): if n_part_num not in adj_grps[igrp]: adj_grps[igrp][n_part_num] = InterPartitionAdj() - # I cannot compute the group because the other + # I cannot compute the neighbor group because the other # partitions may not have been built yet. 
- adj_grps[igrp][n_part_num].elements.append(elem) - adj_grps[igrp][n_part_num].element_faces.append(face) - adj_grps[igrp][n_part_num].neighbors.append(n_elem) - adj_grps[igrp][n_part_num].neighbor_faces.append(n_face) + adj = adj_grps[igrp][n_part_num] + adj.elements = np.append(adj.elements, elem) + adj.element_faces = np.append(adj.element_faces, face) + adj.neighbors = np.append(adj.neighbors, n_elem) + adj.neighbor_faces = np.append(adj.neighbor_faces, n_face) connected_mesh = part_mesh.copy() connected_mesh.interpart_adj_groups = adj_grps diff --git a/test/test_meshmode.py b/test/test_meshmode.py index e6be6d15..da4dfac1 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -52,15 +52,14 @@ logger = logging.getLogger(__name__) def test_partition_interpolation(ctx_getter): cl_ctx = ctx_getter() - order = 4 + order = 2 group_factory = PolynomialWarpAndBlendGroupFactory(order) - n = 3 + #group_factory = InterpolatoryQuadratureSimplexGroupFactory(order) + n = 5 dim = 2 - num_parts = 3 - from meshmode.mesh.generation import generate_regular_rect_mesh - mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) - #from meshmode.mesh.generation import generate_warped_rect_mesh - #mesh = generate_warped_rect_mesh(dim, order=order, n=n) + num_parts = 2 + from meshmode.mesh.generation import generate_warped_rect_mesh + mesh = generate_warped_rect_mesh(dim, order=order, n=n) #mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) #from meshmode.mesh.processing import merge_disjoint_meshes @@ -97,15 +96,15 @@ def test_partition_interpolation(ctx_getter): # {{{ partition_mesh -def test_partition_mesh(): +@pytest.mark.parametrize("dim", [2, 3]) +@pytest.mark.parametrize("num_parts", [1, 2, 7]) +def test_partition_mesh(num_parts, dim): n = 5 - num_parts = 7 order = 4 - dim = 3 from meshmode.mesh.generation import (generate_regular_rect_mesh, generate_warped_rect_mesh) - mesh1 = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) - 
mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) + mesh1 = generate_regular_rect_mesh(a=(0,) * dim, b=(1,) * dim, n=(n,) * dim) + mesh2 = generate_regular_rect_mesh(a=(2,) * dim, b=(3,) * dim, n=(n,) * dim) mesh3 = generate_warped_rect_mesh(dim, order=order, n=n) from meshmode.mesh.processing import merge_disjoint_meshes -- GitLab From 04fb0771f0c0ce7243f4ab5136390f5d4b898f1a Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 7 Apr 2017 00:04:38 -0500 Subject: [PATCH 038/266] Partition Interpolation works for dim=2 --- .../connection/opposite_face.py | 114 ++++++++++++------ test/test_meshmode.py | 41 ++++--- 2 files changed, 98 insertions(+), 57 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 5b5c03dd..797b36ac 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -47,7 +47,7 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, # FIXME: This should view-then-transfer # (but PyOpenCL doesn't do non-contiguous transfers for now). - src_bdry_nodes = (tgt_bdry_discr.groups[i_src_grp].view(tgt_bdry_discr.nodes(). + src_bdry_nodes = (src_bdry_discr.groups[i_src_grp].view(tgt_bdry_discr.nodes(). 
get(queue=queue))[:, src_bdry_element_indices]) tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps @@ -57,6 +57,7 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, dim = src_grp.dim ambient_dim, nelements, ntgt_unit_nodes = tgt_bdry_nodes.shape + #assert tgt_bdry_nodes.shape == src_bdry_nodes.shape # {{{ invert face map (using Gauss-Newton) @@ -132,9 +133,9 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, guess = apply_map(src_unit_nodes) goals = tgt_bdry_nodes - from meshmode.discretization.visualization import draw_curve - draw_curve(tgt_bdry_discr) - draw_curve(src_bdry_discr) + #from meshmode.discretization.visualization import draw_curve + #draw_curve(tgt_bdry_discr) + #draw_curve(src_bdry_discr) pt.plot(guess[0].reshape(-1), guess[1].reshape(-1), "or") pt.plot(goals[0].reshape(-1), goals[1].reshape(-1), "og") @@ -188,6 +189,19 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, src_unit_nodes = src_unit_nodes - df_inv_resid + # {{{ visualize next guess + + if 0: + import matplotlib.pyplot as pt + guess = apply_map(src_unit_nodes) + goals = tgt_bdry_nodes + + pt.plot(guess[0].reshape(-1), guess[1].reshape(-1), "rx") + pt.plot(goals[0].reshape(-1), goals[1].reshape(-1), "go") + pt.show() + + # }}} + max_resid = np.max(np.abs(resid)) logger.debug("gauss-newton residual: %g" % max_resid) @@ -386,7 +400,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def make_partition_connection(vol_to_bdry_conns, part_meshes): +def make_partition_connection(bdry_conns, part_meshes): """ Given a list of boundary restriction connections *volume_to_bdry_conn*, return a :class:`DirectDiscretizationConnection` that performs data @@ -403,60 +417,80 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - for i_tgt_part, tgt_vol_conn in 
enumerate(vol_to_bdry_conns): - - # Is this ok in a loop? - cl_context = tgt_vol_conn.from_discr.cl_context - with cl.CommandQueue(cl_context) as queue: - - tgt_bdry_discr = tgt_vol_conn.to_discr - #tgt_mesh = bdry_discr.mesh - tgt_mesh = part_meshes[i_tgt_part] - ngroups = len(tgt_mesh.groups) - part_batches = [[] for _ in range(ngroups)] - for i_tgt_grp, adj_parts in enumerate(tgt_mesh.interpart_adj_groups): - for i_src_part, adj in adj_parts.items(): - - src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr - - src_mesh = part_meshes[i_src_part] - + nparts = len(bdry_conns) + for i_tgt_part in range(nparts): + for i_src_part in range(nparts): + + tgt_conn = bdry_conns[i_tgt_part][i_src_part] + src_conn = bdry_conns[i_src_part][i_tgt_part] + tgt_vol = tgt_conn.from_discr + src_vol = src_conn.from_discr + tgt_bdry = tgt_conn.to_discr + src_bdry = src_conn.to_discr + tgt_mesh = tgt_vol.mesh + src_mesh = src_vol.mesh + #tgt_mesh = part_meshes[i_tgt_part] + #src_mesh = part_meshes[i_src_part] + + # Is this ok in a loop? 
+ cl_context = tgt_vol.cl_context + with cl.CommandQueue(cl_context) as queue: + + adj_grps = part_meshes[i_tgt_part].interpart_adj_groups + + ntgt_groups = len(tgt_mesh.groups) + nsrc_groups = len(src_mesh.groups) + part_batches = ntgt_groups * [[]] + for i_tgt_grp, adj_parts in enumerate(adj_grps): + if i_src_part not in adj_parts: + continue + + adj = adj_parts[i_src_part] + + i_tgt_faces = adj.element_faces i_src_elems = adj.neighbors + i_src_faces = adj.neighbor_faces i_src_grps = np.array([src_mesh.find_igrp(e) for e in i_src_elems]) for i in range(len(i_src_elems)): + #elem_base = part_meshes[i_src_part].groups[i_src_grps[i]].element_nr_base elem_base = src_mesh.groups[i_src_grps[i]].element_nr_base i_src_elems[i] -= elem_base - for i_src_grp in range(ngroups): + for i_src_grp in range(nsrc_groups): - src_grp_el_lookup = _make_el_lookup_table(queue, - vol_to_bdry_conns[i_src_part], i_src_grp) + src_el_lookup = _make_el_lookup_table(queue, + src_conn, i_src_grp) - for i_tgt_face in adj.element_faces: + for i_tgt_face in i_tgt_faces: - index_flags = np.logical_and((i_src_grps == i_src_grp), - (adj.element_faces == i_tgt_face)) + index_flags = np.logical_and(i_src_grps == i_src_grp, + i_tgt_faces == i_tgt_face) + + if True not in index_flags: + continue vbc_tgt_grp_face_batch = _find_ibatch_for_face( - tgt_vol_conn.groups[i_tgt_grp].batches, i_tgt_face) + tgt_conn.groups[i_tgt_grp].batches, i_tgt_face) tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ to_element_indices.get(queue=queue) - i_src_elems = adj.neighbors[index_flags] - i_src_faces = adj.neighbor_faces[index_flags] + src_bdry_element_indices = src_el_lookup[ + i_src_elems[index_flags], + i_src_faces[index_flags]] + + # FIXME: I honestly have no idea why this helps. 
src_bdry_element_indices =\ - src_grp_el_lookup[i_src_elems, i_src_faces] - src_bdry_element_indices = i_src_elems + np.sort(src_bdry_element_indices) - print(index_flags) - print(tgt_bdry_element_indices) - print(src_bdry_element_indices) + print("tgt", i_tgt_part, tgt_bdry_element_indices) + print("src", i_src_part, src_bdry_element_indices) + print("-------------------") part_batches[i_tgt_grp].extend( _make_cross_face_batches(queue, - tgt_bdry_discr, src_bdry_discr, + tgt_bdry, src_bdry, i_tgt_grp, i_src_grp, tgt_bdry_element_indices, src_bdry_element_indices)) @@ -464,8 +498,8 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): # Make one Discr connection for each partition. disc_conns.append(DirectDiscretizationConnection( # Is this ok? - from_discr=src_bdry_discr, - to_discr=tgt_bdry_discr, + from_discr=src_bdry, + to_discr=tgt_bdry, groups=[ DiscretizationConnectionElementGroup(batches=batches) for batches in part_batches], diff --git a/test/test_meshmode.py b/test/test_meshmode.py index da4dfac1..2bb20c6f 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -50,20 +50,24 @@ logger = logging.getLogger(__name__) # {{{ partition_interpolation -def test_partition_interpolation(ctx_getter): +@pytest.mark.parametrize("group_factory", [ + PolynomialWarpAndBlendGroupFactory, + InterpolatoryQuadratureSimplexGroupFactory + ]) +@pytest.mark.parametrize(("num_parts"), [2, 4]) +#@pytest.mark.parametrize("dim", [2, 3, 4]) +def test_partition_interpolation(ctx_getter, group_factory, num_parts): cl_ctx = ctx_getter() - order = 2 - group_factory = PolynomialWarpAndBlendGroupFactory(order) - #group_factory = InterpolatoryQuadratureSimplexGroupFactory(order) - n = 5 + order = 4 dim = 2 - num_parts = 2 + n = 5 + from meshmode.mesh.generation import generate_warped_rect_mesh - mesh = generate_warped_rect_mesh(dim, order=order, n=n) - #mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) + mesh1 = generate_warped_rect_mesh(dim, order=order, n=n) + 
mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) - #from meshmode.mesh.processing import merge_disjoint_meshes - #mesh = merge_disjoint_meshes([mesh1, mesh2]) + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes([mesh1, mesh2]) from pymetis import part_graph (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) @@ -74,22 +78,25 @@ def test_partition_interpolation(ctx_getter): partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] from meshmode.discretization import Discretization - vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) + vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) for i in range(num_parts)] from meshmode.discretization.connection import make_face_restriction - bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, - FRESTR_INTERIOR_FACES) for i in range(num_parts)] + from meshmode.mesh import BTAG_PARTITION + bdry_conns = [[make_face_restriction(vol_discrs[tgt], group_factory(order), + BTAG_PARTITION(src)) + for src in range(num_parts)] + for tgt in range(num_parts)] # Hack, I probably shouldn't pass part_meshes directly. This is probably # temporary. from meshmode.discretization.connection import make_partition_connection - connections = make_partition_connection(bdry_connections, part_meshes) + connections = make_partition_connection(bdry_conns, part_meshes) # We can't use check_connection because I don't think it works with partitions. 
- #from meshmode.discretization.connection import check_connection - #for conn in connections: - # check_connection(conn) + from meshmode.discretization.connection import check_connection + for conn in connections: + check_connection(conn) # }}} -- GitLab From 3fbc90f5109221ae80f81b9ec1b182306e33795c Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 7 Apr 2017 23:10:25 -0500 Subject: [PATCH 039/266] Almost finished make_partition_connection --- .../connection/opposite_face.py | 152 ++++++++---------- test/test_meshmode.py | 71 ++++---- 2 files changed, 110 insertions(+), 113 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 797b36ac..fbb0643d 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -57,6 +57,7 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, dim = src_grp.dim ambient_dim, nelements, ntgt_unit_nodes = tgt_bdry_nodes.shape + # FIXME: Not sure if this is a valid assertion. #assert tgt_bdry_nodes.shape == src_bdry_nodes.shape # {{{ invert face map (using Gauss-Newton) @@ -400,112 +401,99 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def make_partition_connection(bdry_conns, part_meshes): +def make_partition_connection(tgt_conn, src_conn, i_src_part): """ - Given a list of boundary restriction connections *volume_to_bdry_conn*, + Given a two boundary restriction connections *tgt_conn* and *src_conn*, return a :class:`DirectDiscretizationConnection` that performs data exchange across adjacent faces of different partitions. - :arg vol_to_bdry_conns: A list of *volume_to_bdry_conn* corresponding to - a partition of a parent mesh. + :arg tgt_conn: A :class:`Discretization` for the target partition. + :arg src_conn: A :class:`Discretization` for the source partition. + :arg i_src_part: The partition number corresponding to *src_conn*. 
- :returns: A list of :class:`DirectDiscretizationConnection` corresponding to - each partition. + :returns: A :class:`DirectDiscretizationConnection` that performs data + exchange across faces in different partitions. + + .. versionadded:: 2017.1 + + .. warning:: Interface is not final. Doesn't even work yet...........:( """ - disc_conns = [] from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - nparts = len(bdry_conns) - for i_tgt_part in range(nparts): - for i_src_part in range(nparts): - - tgt_conn = bdry_conns[i_tgt_part][i_src_part] - src_conn = bdry_conns[i_src_part][i_tgt_part] - tgt_vol = tgt_conn.from_discr - src_vol = src_conn.from_discr - tgt_bdry = tgt_conn.to_discr - src_bdry = src_conn.to_discr - tgt_mesh = tgt_vol.mesh - src_mesh = src_vol.mesh - #tgt_mesh = part_meshes[i_tgt_part] - #src_mesh = part_meshes[i_src_part] - - # Is this ok in a loop? - cl_context = tgt_vol.cl_context - with cl.CommandQueue(cl_context) as queue: - - adj_grps = part_meshes[i_tgt_part].interpart_adj_groups - - ntgt_groups = len(tgt_mesh.groups) - nsrc_groups = len(src_mesh.groups) - part_batches = ntgt_groups * [[]] - for i_tgt_grp, adj_parts in enumerate(adj_grps): - if i_src_part not in adj_parts: - continue + tgt_vol = tgt_conn.from_discr + src_vol = src_conn.from_discr + tgt_bdry = tgt_conn.to_discr + src_bdry = src_conn.to_discr + tgt_mesh = tgt_vol.mesh + src_mesh = src_vol.mesh + + adj_grps = tgt_mesh.interpart_adj_groups - adj = adj_parts[i_src_part] + ntgt_groups = len(tgt_mesh.groups) + nsrc_groups = len(src_mesh.groups) + part_batches = ntgt_groups * [[]] - i_tgt_faces = adj.element_faces - i_src_elems = adj.neighbors - i_src_faces = adj.neighbor_faces - i_src_grps = np.array([src_mesh.find_igrp(e) - for e in i_src_elems]) - for i in range(len(i_src_elems)): - #elem_base = part_meshes[i_src_part].groups[i_src_grps[i]].element_nr_base - elem_base = src_mesh.groups[i_src_grps[i]].element_nr_base - 
i_src_elems[i] -= elem_base + with cl.CommandQueue(tgt_vol.cl_context) as queue: - for i_src_grp in range(nsrc_groups): + for i_tgt_grp, adj_parts in enumerate(adj_grps): + if i_src_part not in adj_parts: + # Skip because i_tgt_grp is not connected to i_src_part. + continue - src_el_lookup = _make_el_lookup_table(queue, - src_conn, i_src_grp) + adj = adj_parts[i_src_part] - for i_tgt_face in i_tgt_faces: + i_tgt_faces = adj.element_faces + i_src_elems = adj.neighbors + i_src_faces = adj.neighbor_faces + i_src_grps = np.array([src_mesh.find_igrp(e) + for e in i_src_elems]) + for i in range(len(i_src_elems)): + elem_base = src_mesh.groups[i_src_grps[i]].element_nr_base + i_src_elems[i] -= elem_base - index_flags = np.logical_and(i_src_grps == i_src_grp, - i_tgt_faces == i_tgt_face) + for i_src_grp in range(nsrc_groups): - if True not in index_flags: - continue + src_el_lookup = _make_el_lookup_table(queue, src_conn, i_src_grp) - vbc_tgt_grp_face_batch = _find_ibatch_for_face( - tgt_conn.groups[i_tgt_grp].batches, i_tgt_face) + for i_tgt_face in i_tgt_faces: - tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ - to_element_indices.get(queue=queue) + index_flags = np.logical_and(i_src_grps == i_src_grp, + i_tgt_faces == i_tgt_face) + + if True not in index_flags: + continue + + vbc_tgt_grp_face_batch = _find_ibatch_for_face( + tgt_conn.groups[i_tgt_grp].batches, i_tgt_face) - src_bdry_element_indices = src_el_lookup[ + tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ + to_element_indices.get(queue=queue) + + src_bdry_element_indices = src_el_lookup[ i_src_elems[index_flags], i_src_faces[index_flags]] - # FIXME: I honestly have no idea why this helps. 
- src_bdry_element_indices =\ - np.sort(src_bdry_element_indices) - - print("tgt", i_tgt_part, tgt_bdry_element_indices) - print("src", i_src_part, src_bdry_element_indices) - print("-------------------") - - part_batches[i_tgt_grp].extend( - _make_cross_face_batches(queue, - tgt_bdry, src_bdry, - i_tgt_grp, i_src_grp, - tgt_bdry_element_indices, - src_bdry_element_indices)) - - # Make one Discr connection for each partition. - disc_conns.append(DirectDiscretizationConnection( - # Is this ok? - from_discr=src_bdry, - to_discr=tgt_bdry, - groups=[ - DiscretizationConnectionElementGroup(batches=batches) - for batches in part_batches], - is_surjective=True)) + # FIXME: I honestly have no idea why this helps. + src_bdry_element_indices = np.sort(src_bdry_element_indices) + + print("Attempting to connect elements") + print(tgt_bdry_element_indices) + print(src_bdry_element_indices) + + part_batches[i_tgt_grp].extend(_make_cross_face_batches(queue, + tgt_bdry, src_bdry, + i_tgt_grp, i_src_grp, + tgt_bdry_element_indices, + src_bdry_element_indices)) - return disc_conns + return DirectDiscretizationConnection( + from_discr=src_bdry, + to_discr=tgt_bdry, + groups=[DiscretizationConnectionElementGroup(batches=batches) + for batches in part_batches], + is_surjective=True) # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 2bb20c6f..d149707a 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -54,20 +54,26 @@ logger = logging.getLogger(__name__) PolynomialWarpAndBlendGroupFactory, InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize(("num_parts"), [2, 4]) -#@pytest.mark.parametrize("dim", [2, 3, 4]) -def test_partition_interpolation(ctx_getter, group_factory, num_parts): +@pytest.mark.parametrize(("num_parts"), [2, 3, 7]) +# FIXME: Mostly fails for dim = 3. +@pytest.mark.parametrize("dim", [2]) +# FIXME: Mostly fails for multiple groups. 
+@pytest.mark.parametrize("num_meshes", [1]) +def test_partition_interpolation(ctx_getter, group_factory, dim, + num_parts, num_meshes): cl_ctx = ctx_getter() order = 4 - dim = 2 n = 5 from meshmode.mesh.generation import generate_warped_rect_mesh - mesh1 = generate_warped_rect_mesh(dim, order=order, n=n) - mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) + meshes = [generate_warped_rect_mesh(dim, order=order, n=n) + for _ in range(num_meshes)] - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes([mesh1, mesh2]) + if num_meshes > 1: + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + else: + mesh = meshes[0] from pymetis import part_graph (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) @@ -81,22 +87,27 @@ def test_partition_interpolation(ctx_getter, group_factory, num_parts): vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) for i in range(num_parts)] - from meshmode.discretization.connection import make_face_restriction from meshmode.mesh import BTAG_PARTITION - bdry_conns = [[make_face_restriction(vol_discrs[tgt], group_factory(order), - BTAG_PARTITION(src)) - for src in range(num_parts)] - for tgt in range(num_parts)] + from meshmode.discretization.connection import (make_face_restriction, + make_partition_connection, + check_connection) + + for i_tgt_part in range(num_parts): + for i_src_part in range(num_parts): + if i_tgt_part == i_src_part: + continue + + tgt_conn = make_face_restriction(vol_discrs[i_tgt_part], + group_factory(order), + BTAG_PARTITION(i_src_part)) - # Hack, I probably shouldn't pass part_meshes directly. This is probably - # temporary. 
- from meshmode.discretization.connection import make_partition_connection - connections = make_partition_connection(bdry_conns, part_meshes) + src_conn = make_face_restriction(vol_discrs[i_src_part], + group_factory(order), + BTAG_PARTITION(i_tgt_part)) - # We can't use check_connection because I don't think it works with partitions. - from meshmode.discretization.connection import check_connection - for conn in connections: - check_connection(conn) + connection = make_partition_connection(tgt_conn, src_conn, i_src_part) + + check_connection(connection) # }}} @@ -104,18 +115,16 @@ def test_partition_interpolation(ctx_getter, group_factory, num_parts): # {{{ partition_mesh @pytest.mark.parametrize("dim", [2, 3]) -@pytest.mark.parametrize("num_parts", [1, 2, 7]) -def test_partition_mesh(num_parts, dim): - n = 5 - order = 4 - from meshmode.mesh.generation import (generate_regular_rect_mesh, - generate_warped_rect_mesh) - mesh1 = generate_regular_rect_mesh(a=(0,) * dim, b=(1,) * dim, n=(n,) * dim) - mesh2 = generate_regular_rect_mesh(a=(2,) * dim, b=(3,) * dim, n=(n,) * dim) - mesh3 = generate_warped_rect_mesh(dim, order=order, n=n) +@pytest.mark.parametrize("num_parts", [4, 5, 7]) +@pytest.mark.parametrize("num_meshes", [2, 3]) +def test_partition_mesh(num_parts, num_meshes, dim): + n = (5,) * dim + from meshmode.mesh.generation import generate_regular_rect_mesh + meshes = [generate_regular_rect_mesh(a=(0 + i,) * dim, b=(1 + i,) * dim, n=n) + for i in range(num_meshes)] from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes([mesh1, mesh2, mesh3]) + mesh = merge_disjoint_meshes(meshes) from pymetis import part_graph (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) -- GitLab From a8f6122f2f8a63ce44d465d26f81b65eb113635d Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 10 Apr 2017 16:48:36 -0500 Subject: [PATCH 040/266] More tests in test_partition_interpolation --- test/test_meshmode.py | 21 +++++++++++++++++---- 1 file 
changed, 17 insertions(+), 4 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index d149707a..2355ea55 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -58,18 +58,22 @@ logger = logging.getLogger(__name__) # FIXME: Mostly fails for dim = 3. @pytest.mark.parametrize("dim", [2]) # FIXME: Mostly fails for multiple groups. -@pytest.mark.parametrize("num_meshes", [1]) +@pytest.mark.parametrize("num_groups", [1]) def test_partition_interpolation(ctx_getter, group_factory, dim, - num_parts, num_meshes): + num_parts, num_groups): cl_ctx = ctx_getter() + queue = cl.CommandQueue(cl_ctx) order = 4 n = 5 + def f(x): + return 0.1*cl.clmath.sin(30*x) + from meshmode.mesh.generation import generate_warped_rect_mesh meshes = [generate_warped_rect_mesh(dim, order=order, n=n) - for _ in range(num_meshes)] + for _ in range(num_groups)] - if num_meshes > 1: + if num_groups > 1: from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes(meshes) else: @@ -109,6 +113,15 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, check_connection(connection) + bdry_x = src_conn.to_discr.nodes()[0].with_queue(queue) + if bdry_x.size != 0: + bdry_f = f(bdry_x) + + bdry_f_2 = connection(queue, bdry_f) + + err = la.norm((bdry_f-bdry_f_2).get(), np.inf) + print(err) + assert err < 1e-13 # }}} -- GitLab From a2aab35b71a4bf8d17f61336b627d9bb9b70302c Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 11 Apr 2017 19:06:47 -0500 Subject: [PATCH 041/266] More testing --- .../connection/opposite_face.py | 10 +++--- test/test_meshmode.py | 31 +++++++++++++------ 2 files changed, 26 insertions(+), 15 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index fbb0643d..90f88c88 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -416,7 +416,7 @@ def 
make_partition_connection(tgt_conn, src_conn, i_src_part): .. versionadded:: 2017.1 - .. warning:: Interface is not final. Doesn't even work yet...........:( + .. warning:: Interface is not final. Doesn't even work yet...:( """ from meshmode.discretization.connection import ( @@ -478,9 +478,9 @@ def make_partition_connection(tgt_conn, src_conn, i_src_part): # FIXME: I honestly have no idea why this helps. src_bdry_element_indices = np.sort(src_bdry_element_indices) - print("Attempting to connect elements") - print(tgt_bdry_element_indices) - print(src_bdry_element_indices) + #print("Attempting to connect elements") + #print(tgt_bdry_element_indices) + #print(src_bdry_element_indices) part_batches[i_tgt_grp].extend(_make_cross_face_batches(queue, tgt_bdry, src_bdry, @@ -489,7 +489,7 @@ def make_partition_connection(tgt_conn, src_conn, i_src_part): src_bdry_element_indices)) return DirectDiscretizationConnection( - from_discr=src_bdry, + from_discr=src_bdry, # Is this right? to_discr=tgt_bdry, groups=[DiscretizationConnectionElementGroup(batches=batches) for batches in part_batches], diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 2355ea55..cc0f52b9 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -64,13 +64,15 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) order = 4 - n = 5 + + from pytools.convergence import EOCRecorder + eoc_rec = EOCRecorder() def f(x): return 0.1*cl.clmath.sin(30*x) from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(dim, order=order, n=n) + meshes = [generate_warped_rect_mesh(dim, order=order, n=5) for _ in range(num_groups)] if num_groups > 1: @@ -101,27 +103,36 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, if i_tgt_part == i_src_part: continue + # Connections within i_tgt_part to i_src_part tgt_conn = make_face_restriction(vol_discrs[i_tgt_part], - 
group_factory(order), - BTAG_PARTITION(i_src_part)) + group_factory(order), + BTAG_PARTITION(i_src_part)) + # Connections within i_src_part to i_tgt_part src_conn = make_face_restriction(vol_discrs[i_src_part], - group_factory(order), - BTAG_PARTITION(i_tgt_part)) + group_factory(order), + BTAG_PARTITION(i_tgt_part)) + # Connect tgt_conn to src_conn connection = make_partition_connection(tgt_conn, src_conn, i_src_part) check_connection(connection) - bdry_x = src_conn.to_discr.nodes()[0].with_queue(queue) + # Should this be src_conn? + bdry_x = tgt_conn.to_discr.nodes()[0].with_queue(queue) if bdry_x.size != 0: bdry_f = f(bdry_x) bdry_f_2 = connection(queue, bdry_f) err = la.norm((bdry_f-bdry_f_2).get(), np.inf) - print(err) - assert err < 1e-13 + abscissa = i_tgt_part + num_parts * i_src_part + eoc_rec.add_data_point(abscissa, err) + + print(eoc_rec) + assert (eoc_rec.order_estimate() >= order-0.5 + or eoc_rec.max_error() < 1e-13) + # }}} @@ -129,7 +140,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, @pytest.mark.parametrize("dim", [2, 3]) @pytest.mark.parametrize("num_parts", [4, 5, 7]) -@pytest.mark.parametrize("num_meshes", [2, 3]) +@pytest.mark.parametrize("num_meshes", [2, 7]) def test_partition_mesh(num_parts, num_meshes, dim): n = (5,) * dim from meshmode.mesh.generation import generate_regular_rect_mesh -- GitLab From 7ee201a9c659b518b7ba2df8f7a2a186ab6e58ed Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 12 Apr 2017 15:00:43 -0500 Subject: [PATCH 042/266] Working --- .../connection/opposite_face.py | 22 +++++++++------- test/test_meshmode.py | 25 ++++++++++--------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 90f88c88..5dd5c335 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -58,7 +58,7 @@ def _make_cross_face_batches(queue, 
tgt_bdry_discr, src_bdry_discr, dim = src_grp.dim ambient_dim, nelements, ntgt_unit_nodes = tgt_bdry_nodes.shape # FIXME: Not sure if this is a valid assertion. - #assert tgt_bdry_nodes.shape == src_bdry_nodes.shape + assert tgt_bdry_nodes.shape == src_bdry_nodes.shape # {{{ invert face map (using Gauss-Newton) @@ -130,6 +130,9 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, # {{{ visualize initial guess if 0: + # FIXME: When dim=3 it looks like sometimes src_bdry_nodes + # have the wrong coordinate system. They need to + # be reflected about some plane. import matplotlib.pyplot as pt guess = apply_map(src_unit_nodes) goals = tgt_bdry_nodes @@ -401,7 +404,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def make_partition_connection(tgt_conn, src_conn, i_src_part): +def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): """ Given a two boundary restriction connections *tgt_conn* and *src_conn*, return a :class:`DirectDiscretizationConnection` that performs data @@ -416,16 +419,16 @@ def make_partition_connection(tgt_conn, src_conn, i_src_part): .. versionadded:: 2017.1 - .. warning:: Interface is not final. Doesn't even work yet...:( + .. warning:: Interface is not final. 
It doesn't even work yet...:( """ from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - tgt_vol = tgt_conn.from_discr - src_vol = src_conn.from_discr - tgt_bdry = tgt_conn.to_discr - src_bdry = src_conn.to_discr + tgt_vol = tgt_to_src_conn.from_discr + src_vol = src_to_tgt_conn.from_discr + tgt_bdry = tgt_to_src_conn.to_discr + src_bdry = src_to_tgt_conn.to_discr tgt_mesh = tgt_vol.mesh src_mesh = src_vol.mesh @@ -455,7 +458,8 @@ def make_partition_connection(tgt_conn, src_conn, i_src_part): for i_src_grp in range(nsrc_groups): - src_el_lookup = _make_el_lookup_table(queue, src_conn, i_src_grp) + src_el_lookup =\ + _make_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) for i_tgt_face in i_tgt_faces: @@ -466,7 +470,7 @@ def make_partition_connection(tgt_conn, src_conn, i_src_part): continue vbc_tgt_grp_face_batch = _find_ibatch_for_face( - tgt_conn.groups[i_tgt_grp].batches, i_tgt_face) + tgt_to_src_conn.groups[i_tgt_grp].batches, i_tgt_face) tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ to_element_indices.get(queue=queue) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index cc0f52b9..683a3508 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -103,23 +103,24 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, if i_tgt_part == i_src_part: continue - # Connections within i_tgt_part to i_src_part - tgt_conn = make_face_restriction(vol_discrs[i_tgt_part], - group_factory(order), - BTAG_PARTITION(i_src_part)) + # Connections within tgt_mesh to src_mesh + tgt_to_src_conn = make_face_restriction(vol_discrs[i_tgt_part], + group_factory(order), + BTAG_PARTITION(i_src_part)) - # Connections within i_src_part to i_tgt_part - src_conn = make_face_restriction(vol_discrs[i_src_part], - group_factory(order), - BTAG_PARTITION(i_tgt_part)) + # Connections within src_mesh to tgt_mesh + src_to_tgt_conn = make_face_restriction(vol_discrs[i_src_part], + 
group_factory(order), + BTAG_PARTITION(i_tgt_part)) - # Connect tgt_conn to src_conn - connection = make_partition_connection(tgt_conn, src_conn, i_src_part) + # Connect tgt_mesh to src_mesh + connection = make_partition_connection(tgt_to_src_conn, + src_to_tgt_conn, i_src_part) check_connection(connection) - # Should this be src_conn? - bdry_x = tgt_conn.to_discr.nodes()[0].with_queue(queue) + # Should this be src_to_tgt_conn? + bdry_x = tgt_to_src_conn.to_discr.nodes()[0].with_queue(queue) if bdry_x.size != 0: bdry_f = f(bdry_x) -- GitLab From 79eecabfc192ccd29e6375dcf0dd552b647c8525 Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 14 Apr 2017 13:13:50 -0500 Subject: [PATCH 043/266] partition interpolation works with dim=3 --- .../connection/opposite_face.py | 32 ++--- test/test_meshmode.py | 112 +++++++++--------- 2 files changed, 70 insertions(+), 74 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 5dd5c335..c8bf2a93 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -47,7 +47,7 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, # FIXME: This should view-then-transfer # (but PyOpenCL doesn't do non-contiguous transfers for now). - src_bdry_nodes = (src_bdry_discr.groups[i_src_grp].view(tgt_bdry_discr.nodes(). + src_bdry_nodes = (src_bdry_discr.groups[i_src_grp].view(src_bdry_discr.nodes(). get(queue=queue))[:, src_bdry_element_indices]) tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps @@ -130,16 +130,16 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, # {{{ visualize initial guess if 0: - # FIXME: When dim=3 it looks like sometimes src_bdry_nodes - # have the wrong coordinate system. They need to - # be reflected about some plane. 
import matplotlib.pyplot as pt guess = apply_map(src_unit_nodes) goals = tgt_bdry_nodes - #from meshmode.discretization.visualization import draw_curve - #draw_curve(tgt_bdry_discr) - #draw_curve(src_bdry_discr) + from meshmode.discretization.visualization import draw_curve + pt.figure(0) + draw_curve(tgt_bdry_discr) + pt.figure(1) + draw_curve(src_bdry_discr) + pt.figure(2) pt.plot(guess[0].reshape(-1), guess[1].reshape(-1), "or") pt.plot(goals[0].reshape(-1), goals[1].reshape(-1), "og") @@ -435,7 +435,6 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): adj_grps = tgt_mesh.interpart_adj_groups ntgt_groups = len(tgt_mesh.groups) - nsrc_groups = len(src_mesh.groups) part_batches = ntgt_groups * [[]] with cl.CommandQueue(tgt_vol.cl_context) as queue: @@ -450,13 +449,13 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): i_tgt_faces = adj.element_faces i_src_elems = adj.neighbors i_src_faces = adj.neighbor_faces - i_src_grps = np.array([src_mesh.find_igrp(e) - for e in i_src_elems]) - for i in range(len(i_src_elems)): - elem_base = src_mesh.groups[i_src_grps[i]].element_nr_base + i_src_grps = np.array([src_mesh.find_igrp(e) for e in i_src_elems]) + + for i, i_grp in enumerate(i_src_grps): + elem_base = src_mesh.groups[i_grp].element_nr_base i_src_elems[i] -= elem_base - for i_src_grp in range(nsrc_groups): + for i_src_grp in np.unique(i_src_grps): src_el_lookup =\ _make_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) @@ -479,13 +478,6 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): i_src_elems[index_flags], i_src_faces[index_flags]] - # FIXME: I honestly have no idea why this helps. 
- src_bdry_element_indices = np.sort(src_bdry_element_indices) - - #print("Attempting to connect elements") - #print(tgt_bdry_element_indices) - #print(src_bdry_element_indices) - part_batches[i_tgt_grp].extend(_make_cross_face_batches(queue, tgt_bdry, src_bdry, i_tgt_grp, i_src_grp, diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 683a3508..4ec586f1 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -54,16 +54,18 @@ logger = logging.getLogger(__name__) PolynomialWarpAndBlendGroupFactory, InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize(("num_parts"), [2, 3, 7]) -# FIXME: Mostly fails for dim = 3. -@pytest.mark.parametrize("dim", [2]) +@pytest.mark.parametrize("num_parts", [2, 3]) # FIXME: Mostly fails for multiple groups. @pytest.mark.parametrize("num_groups", [1]) -def test_partition_interpolation(ctx_getter, group_factory, dim, +@pytest.mark.parametrize(("dim", "mesh_pars"), [ + (2, [3, 5, 7]), + (3, [3, 5]) + ]) +def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) - order = 4 + order = 3 from pytools.convergence import EOCRecorder eoc_rec = EOCRecorder() @@ -71,64 +73,64 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, def f(x): return 0.1*cl.clmath.sin(30*x) - from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(dim, order=order, n=5) - for _ in range(num_groups)] + for n in mesh_pars: + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(dim, order=order, n=n) + for _ in range(num_groups)] - if num_groups > 1: - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - else: - mesh = meshes[0] + if num_groups > 1: + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + else: + mesh = meshes[0] - from 
pymetis import part_graph - (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) - part_per_element = np.array(p) + from pymetis import part_graph + (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) + part_per_element = np.array(p) - from meshmode.mesh.processing import partition_mesh - part_meshes = [ - partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] + from meshmode.mesh.processing import partition_mesh + part_meshes = [ + partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] - from meshmode.discretization import Discretization - vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) - for i in range(num_parts)] + from meshmode.discretization import Discretization + vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) + for i in range(num_parts)] - from meshmode.mesh import BTAG_PARTITION - from meshmode.discretization.connection import (make_face_restriction, - make_partition_connection, - check_connection) + from meshmode.mesh import BTAG_PARTITION + from meshmode.discretization.connection import (make_face_restriction, + make_partition_connection, + check_connection) - for i_tgt_part in range(num_parts): - for i_src_part in range(num_parts): - if i_tgt_part == i_src_part: - continue + for i_tgt_part in range(num_parts): + for i_src_part in range(num_parts): + if i_tgt_part == i_src_part: + continue - # Connections within tgt_mesh to src_mesh - tgt_to_src_conn = make_face_restriction(vol_discrs[i_tgt_part], - group_factory(order), - BTAG_PARTITION(i_src_part)) + # Connections within tgt_mesh to src_mesh + tgt_to_src_conn = make_face_restriction(vol_discrs[i_tgt_part], + group_factory(order), + BTAG_PARTITION(i_src_part)) - # Connections within src_mesh to tgt_mesh - src_to_tgt_conn = make_face_restriction(vol_discrs[i_src_part], - group_factory(order), - BTAG_PARTITION(i_tgt_part)) + # Connections within src_mesh to tgt_mesh + src_to_tgt_conn = 
make_face_restriction(vol_discrs[i_src_part], + group_factory(order), + BTAG_PARTITION(i_tgt_part)) - # Connect tgt_mesh to src_mesh - connection = make_partition_connection(tgt_to_src_conn, - src_to_tgt_conn, i_src_part) + # Connect tgt_mesh to src_mesh + connection = make_partition_connection(tgt_to_src_conn, + src_to_tgt_conn, i_src_part) - check_connection(connection) + check_connection(connection) - # Should this be src_to_tgt_conn? - bdry_x = tgt_to_src_conn.to_discr.nodes()[0].with_queue(queue) - if bdry_x.size != 0: - bdry_f = f(bdry_x) + # Should this be src_to_tgt_conn? + bdry_x = tgt_to_src_conn.to_discr.nodes()[0].with_queue(queue) + if bdry_x.size != 0: + bdry_f = f(bdry_x) - bdry_f_2 = connection(queue, bdry_f) + bdry_f_2 = connection(queue, bdry_f) - err = la.norm((bdry_f-bdry_f_2).get(), np.inf) - abscissa = i_tgt_part + num_parts * i_src_part - eoc_rec.add_data_point(abscissa, err) + err = la.norm((bdry_f-bdry_f_2).get(), np.inf) + eoc_rec.add_data_point(1./n, err) print(eoc_rec) assert (eoc_rec.order_estimate() >= order-0.5 @@ -141,7 +143,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, @pytest.mark.parametrize("dim", [2, 3]) @pytest.mark.parametrize("num_parts", [4, 5, 7]) -@pytest.mark.parametrize("num_meshes", [2, 7]) +@pytest.mark.parametrize("num_meshes", [1, 2, 7]) def test_partition_mesh(num_parts, num_meshes, dim): n = (5,) * dim from meshmode.mesh.generation import generate_regular_rect_mesh @@ -156,6 +158,8 @@ def test_partition_mesh(num_parts, num_meshes, dim): part_per_element = np.array(p) from meshmode.mesh.processing import partition_mesh + # TODO: The same part_per_element array must be used to partition each mesh. + # Maybe the interface should be changed to guarantee this. 
new_meshes = [ partition_mesh(mesh, part_per_element, i) for i in range(num_parts)] @@ -473,12 +477,12 @@ def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars, @pytest.mark.parametrize("group_factory", [ InterpolatoryQuadratureSimplexGroupFactory, - PolynomialWarpAndBlendGroupFactory + #PolynomialWarpAndBlendGroupFactory ]) @pytest.mark.parametrize(("mesh_name", "dim", "mesh_pars"), [ - ("blob", 2, [1e-1, 8e-2, 5e-2]), + #("blob", 2, [1e-1, 8e-2, 5e-2]), ("warp", 2, [3, 5, 7]), - ("warp", 3, [3, 5]), + #("warp", 3, [3, 5]), ]) def test_opposite_face_interpolation(ctx_getter, group_factory, mesh_name, dim, mesh_pars): -- GitLab From c495416d702b49b789c0e6ff32d3015853c54408 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 17 Apr 2017 21:12:31 -0500 Subject: [PATCH 044/266] Working --- test/test_meshmode.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 4ec586f1..484c12a0 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -65,7 +65,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) - order = 3 + order = 5 from pytools.convergence import EOCRecorder eoc_rec = EOCRecorder() @@ -477,12 +477,12 @@ def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars, @pytest.mark.parametrize("group_factory", [ InterpolatoryQuadratureSimplexGroupFactory, - #PolynomialWarpAndBlendGroupFactory + PolynomialWarpAndBlendGroupFactory ]) @pytest.mark.parametrize(("mesh_name", "dim", "mesh_pars"), [ - #("blob", 2, [1e-1, 8e-2, 5e-2]), + ("blob", 2, [1e-1, 8e-2, 5e-2]), ("warp", 2, [3, 5, 7]), - #("warp", 3, [3, 5]), + ("warp", 3, [3, 5]), ]) def test_opposite_face_interpolation(ctx_getter, group_factory, mesh_name, dim, mesh_pars): -- GitLab From e7a5b140651382d103ba3055c42e4f4ddca1d507 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 19 Apr 2017 00:13:04 -0500 
Subject: [PATCH 045/266] Small changes --- meshmode/discretization/connection/opposite_face.py | 1 + test/test_meshmode.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index c8bf2a93..12dcb0dc 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -437,6 +437,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): ntgt_groups = len(tgt_mesh.groups) part_batches = ntgt_groups * [[]] + # FIXME: Is this an ok way to grab a queue? with cl.CommandQueue(tgt_vol.cl_context) as queue: for i_tgt_grp, adj_parts in enumerate(adj_grps): diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 484c12a0..1c27795b 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -52,13 +52,13 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("group_factory", [ PolynomialWarpAndBlendGroupFactory, - InterpolatoryQuadratureSimplexGroupFactory + #InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize("num_parts", [2, 3]) +@pytest.mark.parametrize("num_parts", [3])#, 3]) # FIXME: Mostly fails for multiple groups. 
@pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ - (2, [3, 5, 7]), + #(2, [3, 5, 7]), (3, [3, 5]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, -- GitLab From cd86dabc2e850bff2169a00bfd2f3e47e90bbdca Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 20 May 2017 12:11:25 -0500 Subject: [PATCH 046/266] Working --- meshmode/mesh/__init__.py | 18 +++++++------ meshmode/mesh/processing.py | 20 +++++++-------- test/test_meshmode.py | 51 +++++++++++++++++++++---------------- 3 files changed, 48 insertions(+), 41 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index d6bea656..bae6a7f7 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -457,8 +457,10 @@ class InterPartitionAdj(): :arg elem :arg face :returns: A tuple ``(neighbor_elem, neighbor_face)`` of - neighboring elements within another :class:`Mesh`. - Or (-1, -1) if the face does not have a neighbor. + neighboring elements within another :class:`Mesh` + or (-1, -1) if the face does not have a neighbor. + Note that ``neighbor_elem`` is mesh-wide and includes + its ``element_nr_base``. """ for idx in range(len(self.elements)): if elem == self.elements[idx] and face == self.element_faces[idx]: @@ -850,16 +852,16 @@ class Mesh(Record): def __ne__(self, other): return not self.__eq__(other) - def find_igrp(self, elem): + def find_igrp(self, meshwide_elem): """ - :arg elem: A mesh-wise element. Think of it as ``elem + element_nr_base``. - :returns: The index of the group that `elem` belongs to. + :arg meshwide_elem: Think of it as ``elem + element_nr_base``. + :returns: The index of the group that `meshwide_elem` belongs to. """ for igrp, grp in enumerate(self.groups): - if elem < grp.nelements: + if meshwide_elem < grp.nelements: return igrp - elem -= grp.nelements - raise RuntimeError("Could not find group with element %d." 
% elem) + meshwide_elem -= grp.nelements + raise RuntimeError("Could not find group with element %d." % meshwide_elem) def adjacency_list(self): """ diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index cb95d008..cdac92f1 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -57,9 +57,6 @@ def partition_mesh(mesh, part_per_element, part_nr): numbers on *part_mesh* to ones in *mesh*. .. versionadded:: 2017.1 - - .. warning:: Interface is not final. Connectivity between elements - across groups needs to be added. """ assert len(part_per_element) == mesh.nelements, ( "part_per_element must have shape (mesh.nelements,)") @@ -83,7 +80,7 @@ def partition_mesh(mesh, part_per_element, part_nr): for group_num in range(num_groups): mesh_group = mesh.groups[group_num] - # Find the index of first element in the next group + # Find the index of first element in the next group. end_idx = len(queried_elems) for idx in range(start_idx, len(queried_elems)): if queried_elems[idx] - num_prev_elems >= mesh_group.nelements: @@ -168,9 +165,10 @@ def partition_mesh(mesh, part_per_element, part_nr): parent_adj = mesh.facial_adjacency_groups[parent_igrp] for parent_facial_group in parent_adj.values(): - for idx in np.where(parent_facial_group.elements == parent_elem)[0]: - if parent_facial_group.neighbors[idx] >= 0 and \ - parent_facial_group.element_faces[idx] == face: + indices, = np.nonzero(parent_facial_group.elements == parent_elem) + for idx in indices: + if (parent_facial_group.neighbors[idx] >= 0 and + parent_facial_group.element_faces[idx] == face): rank_neighbor = (parent_facial_group.neighbors[idx] + parent_elem_base) n_face = parent_facial_group.neighbor_faces[idx] @@ -181,19 +179,19 @@ def partition_mesh(mesh, part_per_element, part_nr): BTAG_PARTITION(n_part_num)) boundary_adj.neighbors[adj_idx] = -tags - # Find the neighbor element from the other partition - n_elem = np.count_nonzero( + # Find the neighbor element from the other 
partition. + n_meshwide_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) if n_part_num not in adj_grps[igrp]: adj_grps[igrp][n_part_num] = InterPartitionAdj() - # I cannot compute the neighbor group because the other + # We cannot compute the neighbor group because the other # partitions may not have been built yet. adj = adj_grps[igrp][n_part_num] adj.elements = np.append(adj.elements, elem) adj.element_faces = np.append(adj.element_faces, face) - adj.neighbors = np.append(adj.neighbors, n_elem) + adj.neighbors = np.append(adj.neighbors, n_meshwide_elem) adj.neighbor_faces = np.append(adj.neighbor_faces, n_face) connected_mesh = part_mesh.copy() diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 1c27795b..ccab714b 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -54,24 +54,30 @@ logger = logging.getLogger(__name__) PolynomialWarpAndBlendGroupFactory, #InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize("num_parts", [3])#, 3]) +@pytest.mark.parametrize("num_parts", [2])#, 3]) # FIXME: Mostly fails for multiple groups. 
@pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ - #(2, [3, 5, 7]), - (3, [3, 5]) + (2, [10, 20, 30]), + #(3, [3, 5]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) - order = 5 + order = 3 from pytools.convergence import EOCRecorder - eoc_rec = EOCRecorder() + eoc_rec = dict() + for i in range(num_parts): + for j in range(num_parts): + if i == j: + continue + eoc_rec[(i, j)] = EOCRecorder() def f(x): - return 0.1*cl.clmath.sin(30*x) + return x + #return 0.1*cl.clmath.sin(30*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -130,11 +136,12 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, bdry_f_2 = connection(queue, bdry_f) err = la.norm((bdry_f-bdry_f_2).get(), np.inf) - eoc_rec.add_data_point(1./n, err) + eoc_rec[(i_tgt_part, i_src_part)].add_data_point(1./n, err) - print(eoc_rec) - assert (eoc_rec.order_estimate() >= order-0.5 - or eoc_rec.max_error() < 1e-13) + print(eoc_rec[(0, 1)]) + + assert (eoc_rec[(0, 1)].order_estimate() >= order-0.5 + or eoc_rec[(0, 1)].max_error() < 1e-13) # }}} @@ -188,26 +195,26 @@ def test_partition_mesh(num_parts, num_meshes, dim): if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: num_tags[n_part_num] += 1 - (n_elem, n_face) = adj.get_neighbor(elem, face) - n_grp_num = n_part.find_igrp(n_elem) + (n_meshwide_elem, n_face) = adj.get_neighbor(elem, face) + n_grp_num = n_part.find_igrp(n_meshwide_elem) n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] n_elem_base = n_part.groups[n_grp_num].element_nr_base - n_elem -= n_elem_base + n_elem = n_meshwide_elem - n_elem_base assert (elem + elem_base, face) ==\ n_adj.get_neighbor(n_elem, n_face),\ "InterPartitionAdj is not consistent" n_part_to_global = new_meshes[n_part_num][1] - p_elem = part_to_global[elem + elem_base] - p_n_elem = 
n_part_to_global[n_elem + n_elem_base] + p_meshwide_elem = part_to_global[elem + elem_base] + p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] - p_grp_num = mesh.find_igrp(p_elem) - p_n_grp_num = mesh.find_igrp(p_n_elem) + p_grp_num = mesh.find_igrp(p_meshwide_elem) + p_n_grp_num = mesh.find_igrp(p_meshwide_n_elem) p_elem_base = mesh.groups[p_grp_num].element_nr_base p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base - p_elem -= p_elem_base - p_n_elem -= p_n_elem_base + p_elem = p_meshwide_elem - p_elem_base + p_n_elem = p_meshwide_n_elem - p_n_elem_base f_groups = mesh.facial_adjacency_groups[p_grp_num] for p_bnd_adj in f_groups.values(): @@ -219,11 +226,11 @@ def test_partition_mesh(num_parts, num_meshes, dim): assert n_face == p_bnd_adj.neighbor_faces[idx],\ "Tag does not give correct neighbor" - for tag_num in range(num_parts): + for i_tag in range(num_parts): tag_sum = 0 for mesh, _ in new_meshes: - tag_sum += count_tags(mesh, BTAG_PARTITION(tag_num)) - assert num_tags[tag_num] == tag_sum,\ + tag_sum += count_tags(mesh, BTAG_PARTITION(i_tag)) + assert num_tags[i_tag] == tag_sum,\ "part_mesh has the wrong number of BTAG_PARTITION boundaries" -- GitLab From f67233b00449931af1f1e566dda8c90765aac731 Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 20 May 2017 12:45:37 -0500 Subject: [PATCH 047/266] Working# Please enter the commit message for your changes. 
Lines starting --- meshmode/discretization/connection/opposite_face.py | 8 +++++--- test/test_meshmode.py | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 12dcb0dc..76c2b404 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -448,13 +448,15 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): adj = adj_parts[i_src_part] i_tgt_faces = adj.element_faces - i_src_elems = adj.neighbors + i_src_meshwide_elems = adj.neighbors i_src_faces = adj.neighbor_faces - i_src_grps = np.array([src_mesh.find_igrp(e) for e in i_src_elems]) + i_src_grps = np.array([src_mesh.find_igrp(e) + for e in i_src_meshwide_elems]) + i_src_elems = np.empty_like(i_src_meshwide_elems) for i, i_grp in enumerate(i_src_grps): elem_base = src_mesh.groups[i_grp].element_nr_base - i_src_elems[i] -= elem_base + i_src_elems[i] = i_src_meshwide_elems[i] - elem_base for i_src_grp in np.unique(i_src_grps): diff --git a/test/test_meshmode.py b/test/test_meshmode.py index ccab714b..2f8eb258 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -54,12 +54,12 @@ logger = logging.getLogger(__name__) PolynomialWarpAndBlendGroupFactory, #InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize("num_parts", [2])#, 3]) +@pytest.mark.parametrize("num_parts", [2]) # , 3]) # FIXME: Mostly fails for multiple groups. 
@pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [10, 20, 30]), - #(3, [3, 5]) + #(3, [10, 20]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): -- GitLab From a71b60f1b73740a148148e002799af2f525737e4 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 21 May 2017 19:56:28 -0500 Subject: [PATCH 048/266] Testing --- test/test_meshmode.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 2f8eb258..431cf2b5 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -54,12 +54,12 @@ logger = logging.getLogger(__name__) PolynomialWarpAndBlendGroupFactory, #InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize("num_parts", [2]) # , 3]) +@pytest.mark.parametrize("num_parts", [2]) # FIXME: Mostly fails for multiple groups. -@pytest.mark.parametrize("num_groups", [1]) +@pytest.mark.parametrize("num_groups", [3]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [10, 20, 30]), - #(3, [10, 20]) + #(3, [3, 5]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -138,10 +138,12 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, err = la.norm((bdry_f-bdry_f_2).get(), np.inf) eoc_rec[(i_tgt_part, i_src_part)].add_data_point(1./n, err) - print(eoc_rec[(0, 1)]) - - assert (eoc_rec[(0, 1)].order_estimate() >= order-0.5 - or eoc_rec[(0, 1)].max_error() < 1e-13) + for i in range(num_parts): + for j in range(num_parts): + if i != j: + print(eoc_rec[(i, j)]) + #assert(eoc_rec[(i, j)].order_estimate() >= order - 0.5 + # or eoc_rec[(i, j)].max_error() < 1e-13) # }}} @@ -487,7 +489,7 @@ def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars, PolynomialWarpAndBlendGroupFactory ]) @pytest.mark.parametrize(("mesh_name", "dim", "mesh_pars"), [ - ("blob", 2, [1e-1, 8e-2, 5e-2]), + 
#("blob", 2, [1e-1, 8e-2, 5e-2]), ("warp", 2, [3, 5, 7]), ("warp", 3, [3, 5]), ]) -- GitLab From 5aaf283856bb6086037ecded5926f160ea042499 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 21 May 2017 19:56:42 -0500 Subject: [PATCH 049/266] Fixed bug when there is a partition of multiple groups. If on paritition does not contain all groups then its connection was left with an empty batch. --- meshmode/discretization/connection/opposite_face.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 76c2b404..9a69ee5c 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -434,8 +434,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): adj_grps = tgt_mesh.interpart_adj_groups - ntgt_groups = len(tgt_mesh.groups) - part_batches = ntgt_groups * [[]] + part_batches = dict() # FIXME: Is this an ok way to grab a queue? with cl.CommandQueue(tgt_vol.cl_context) as queue: @@ -445,6 +444,8 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): # Skip because i_tgt_grp is not connected to i_src_part. continue + part_batches[i_tgt_grp] = [] + adj = adj_parts[i_src_part] i_tgt_faces = adj.element_faces @@ -491,7 +492,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): from_discr=src_bdry, # Is this right? 
to_discr=tgt_bdry, groups=[DiscretizationConnectionElementGroup(batches=batches) - for batches in part_batches], + for batches in part_batches.values()], is_surjective=True) # }}} -- GitLab From 2a408d8fc9c7b89a4fc18521c258224a412e65c2 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 21 May 2017 20:26:54 -0500 Subject: [PATCH 050/266] Added comments --- meshmode/discretization/connection/opposite_face.py | 2 ++ test/test_meshmode.py | 12 +++++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 9a69ee5c..1c3542bb 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -444,6 +444,8 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): # Skip because i_tgt_grp is not connected to i_src_part. continue + # FIXME: Here we avoid creating empty batches. But now the + # number of batches does not match the number of groups. part_batches[i_tgt_grp] = [] adj = adj_parts[i_src_part] diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 431cf2b5..ea947472 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,13 @@ logger = logging.getLogger(__name__) ]) @pytest.mark.parametrize("num_parts", [2]) # FIXME: Mostly fails for multiple groups. -@pytest.mark.parametrize("num_groups", [3]) +# The problem is that when multiple groups are partitioned +# some partitions may not contain all groups. In that case +# there will be a connection between two partitions with +# empty batches because there will be a group that doesn't +# connect to the other partition. I need to deal with these +# empty batches. 
+@pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [10, 20, 30]), #(3, [3, 5]) @@ -142,8 +148,8 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for j in range(num_parts): if i != j: print(eoc_rec[(i, j)]) - #assert(eoc_rec[(i, j)].order_estimate() >= order - 0.5 - # or eoc_rec[(i, j)].max_error() < 1e-13) + assert(eoc_rec[(i, j)].order_estimate() >= order - 0.5 + or eoc_rec[(i, j)].max_error() < 1e-13) # }}} -- GitLab From 7ccc5115bf0d3ffb9909fd97c41a5a205b58598e Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 22 May 2017 21:23:42 -0500 Subject: [PATCH 051/266] Interpartition interpolation works as expected with one group --- .../connection/opposite_face.py | 11 ++-- test/test_meshmode.py | 63 +++++++++++-------- 2 files changed, 40 insertions(+), 34 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 1c3542bb..0b1ae69b 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -434,20 +434,17 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): adj_grps = tgt_mesh.interpart_adj_groups - part_batches = dict() + part_batches = [] # FIXME: Is this an ok way to grab a queue? with cl.CommandQueue(tgt_vol.cl_context) as queue: for i_tgt_grp, adj_parts in enumerate(adj_grps): + part_batches.append([]) if i_src_part not in adj_parts: # Skip because i_tgt_grp is not connected to i_src_part. continue - # FIXME: Here we avoid creating empty batches. But now the - # number of batches does not match the number of groups. 
- part_batches[i_tgt_grp] = [] - adj = adj_parts[i_src_part] i_tgt_faces = adj.element_faces @@ -471,7 +468,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): index_flags = np.logical_and(i_src_grps == i_src_grp, i_tgt_faces == i_tgt_face) - if True not in index_flags: + if not np.any(index_flags): continue vbc_tgt_grp_face_batch = _find_ibatch_for_face( @@ -494,7 +491,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): from_discr=src_bdry, # Is this right? to_discr=tgt_bdry, groups=[DiscretizationConnectionElementGroup(batches=batches) - for batches in part_batches.values()], + for batches in part_batches], is_surjective=True) # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index ea947472..7673691d 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -52,9 +52,9 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("group_factory", [ PolynomialWarpAndBlendGroupFactory, - #InterpolatoryQuadratureSimplexGroupFactory + InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize("num_parts", [2]) +@pytest.mark.parametrize("num_parts", [2, 3, 4]) # FIXME: Mostly fails for multiple groups. # The problem is that when multiple groups are partitioned # some partitions may not contain all groups. In that case @@ -64,14 +64,14 @@ logger = logging.getLogger(__name__) # empty batches. 
@pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ - (2, [10, 20, 30]), - #(3, [3, 5]) + (2, [3, 5, 7]), + (3, [3, 5, 7]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) - order = 3 + order = 5 from pytools.convergence import EOCRecorder eoc_rec = dict() @@ -82,8 +82,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, eoc_rec[(i, j)] = EOCRecorder() def f(x): - return x - #return 0.1*cl.clmath.sin(30*x) + return 0.1*cl.clmath.sin(30*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -115,41 +114,51 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for i_tgt_part in range(num_parts): for i_src_part in range(num_parts): - if i_tgt_part == i_src_part: + if (i_tgt_part == i_src_part + or eoc_rec[(i_tgt_part, i_src_part)] == None): + eoc_rec[(i_tgt_part, i_src_part)] = None continue - # Connections within tgt_mesh to src_mesh + # Mark faces within tgt_mesh that are connected to src_mesh tgt_to_src_conn = make_face_restriction(vol_discrs[i_tgt_part], group_factory(order), BTAG_PARTITION(i_src_part)) - # Connections within src_mesh to tgt_mesh + # If these parts are not connected, don't bother checking the error + bdry_nodes = tgt_to_src_conn.to_discr.nodes()[0].with_queue(queue) + if bdry_nodes.size == 0: + eoc_rec[(i_tgt_part, i_src_part)] = None + continue + + # Mark faces within src_mesh that are connected to tgt_mesh src_to_tgt_conn = make_face_restriction(vol_discrs[i_src_part], group_factory(order), BTAG_PARTITION(i_tgt_part)) # Connect tgt_mesh to src_mesh - connection = make_partition_connection(tgt_to_src_conn, - src_to_tgt_conn, i_src_part) + tgt_conn = make_partition_connection(tgt_to_src_conn, + src_to_tgt_conn, i_src_part) - check_connection(connection) + # Connect src_mesh to tgt_mesh + src_conn = 
make_partition_connection(src_to_tgt_conn, + tgt_to_src_conn, i_tgt_part) - # Should this be src_to_tgt_conn? - bdry_x = tgt_to_src_conn.to_discr.nodes()[0].with_queue(queue) - if bdry_x.size != 0: - bdry_f = f(bdry_x) + check_connection(tgt_conn) + check_connection(src_conn) - bdry_f_2 = connection(queue, bdry_f) + bdry_t = f(tgt_conn.to_discr.nodes()[0].with_queue(queue)) + bdry_s = tgt_conn(queue, bdry_t) + bdry_t_2 = src_conn(queue, bdry_s) - err = la.norm((bdry_f-bdry_f_2).get(), np.inf) - eoc_rec[(i_tgt_part, i_src_part)].add_data_point(1./n, err) + err = la.norm((bdry_t - bdry_t_2).get(), np.inf) + eoc_rec[(i_tgt_part, i_src_part)].add_data_point(1./n, err) - for i in range(num_parts): - for j in range(num_parts): - if i != j: - print(eoc_rec[(i, j)]) - assert(eoc_rec[(i, j)].order_estimate() >= order - 0.5 - or eoc_rec[(i, j)].max_error() < 1e-13) + for (i, j), e in eoc_rec.items(): + if e != None: + print("Error of connection from part %i to part %i." % (i, j)) + print(e) + assert(e.order_estimate() >= order - 0.5 + or e.max_error() < 1e-12) # }}} @@ -495,7 +504,7 @@ def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars, PolynomialWarpAndBlendGroupFactory ]) @pytest.mark.parametrize(("mesh_name", "dim", "mesh_pars"), [ - #("blob", 2, [1e-1, 8e-2, 5e-2]), + ("blob", 2, [1e-1, 8e-2, 5e-2]), ("warp", 2, [3, 5, 7]), ("warp", 3, [3, 5]), ]) -- GitLab From 333758d57e1fbfdeb91c0183b6608f4a79ae2e1f Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 22 May 2017 21:26:11 -0500 Subject: [PATCH 052/266] Format fix --- test/test_meshmode.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 7673691d..5dc3f038 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -115,7 +115,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for i_tgt_part in range(num_parts): for i_src_part in range(num_parts): if (i_tgt_part == i_src_part - or 
eoc_rec[(i_tgt_part, i_src_part)] == None): + or eoc_rec[(i_tgt_part, i_src_part)] is None): eoc_rec[(i_tgt_part, i_src_part)] = None continue @@ -154,7 +154,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, eoc_rec[(i_tgt_part, i_src_part)].add_data_point(1./n, err) for (i, j), e in eoc_rec.items(): - if e != None: + if e is not None: print("Error of connection from part %i to part %i." % (i, j)) print(e) assert(e.order_estimate() >= order - 0.5 -- GitLab From 68c09da72332b8354add706f20e7a9732fbcd31b Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 23 May 2017 11:10:20 -0500 Subject: [PATCH 053/266] Small changes --- meshmode/discretization/connection/opposite_face.py | 3 +-- test/test_meshmode.py | 9 ++++----- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 0b1ae69b..169a5b2a 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -57,7 +57,6 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, dim = src_grp.dim ambient_dim, nelements, ntgt_unit_nodes = tgt_bdry_nodes.shape - # FIXME: Not sure if this is a valid assertion. assert tgt_bdry_nodes.shape == src_bdry_nodes.shape # {{{ invert face map (using Gauss-Newton) @@ -419,7 +418,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): .. versionadded:: 2017.1 - .. warning:: Interface is not final. It doesn't even work yet...:( + .. warning:: Interface is not final. """ from meshmode.discretization.connection import ( diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 5dc3f038..c04a3d5c 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -64,14 +64,14 @@ logger = logging.getLogger(__name__) # empty batches. 
@pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ - (2, [3, 5, 7]), - (3, [3, 5, 7]) + (2, [10, 20, 30]), + (3, [3, 5]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) - order = 5 + order = 4 from pytools.convergence import EOCRecorder eoc_rec = dict() @@ -157,8 +157,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, if e is not None: print("Error of connection from part %i to part %i." % (i, j)) print(e) - assert(e.order_estimate() >= order - 0.5 - or e.max_error() < 1e-12) + assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-12) # }}} -- GitLab From 8c9cde9e2106caefb7ac9c54d3099bb2201dc9ab Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 29 May 2017 10:25:05 -0500 Subject: [PATCH 054/266] Small changes --- meshmode/mesh/processing.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index cdac92f1..c0ccf5ba 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -130,9 +130,8 @@ def partition_mesh(mesh, part_per_element, part_nr): required_indices == original_index)[0] new_mesh_groups = [] - for group_num in range(num_groups): + for group_num, mesh_group in enumerate(mesh.groups): if group_num not in skip_groups: - mesh_group = mesh.groups[group_num] new_mesh_groups.append( type(mesh_group)(mesh_group.order, new_indices[group_num], new_nodes[group_num], unit_nodes=mesh_group.unit_nodes)) -- GitLab From 682c140c74f9575afd6965723ffef71772f8d434 Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 14 Jul 2017 20:16:24 -0500 Subject: [PATCH 055/266] Easy fixes --- meshmode/discretization/connection/opposite_face.py | 1 - meshmode/mesh/__init__.py | 7 ++----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py 
b/meshmode/discretization/connection/opposite_face.py index 169a5b2a..3095d8d8 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -435,7 +435,6 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): part_batches = [] - # FIXME: Is this an ok way to grab a queue? with cl.CommandQueue(tgt_vol.cl_context) as queue: for i_tgt_grp, adj_parts in enumerate(adj_grps): diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index aecd5c47..83f18922 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -102,11 +102,8 @@ class BTAG_PARTITION(object): # noqa def __init__(self, part_nr): self.part_nr = int(part_nr) - # TODO is this acceptable? - # __eq__ is also defined so maybe the hash value isn't too important - # for dictionaries. def __hash__(self): - return self.part_nr + return hash((type(self), self.part_nr)) def __eq__(self, other): if isinstance(other, BTAG_PARTITION): @@ -114,7 +111,7 @@ class BTAG_PARTITION(object): # noqa else: return False - def __nq__(self, other): + def __ne__(self, other): return not self.__eq__(other) -- GitLab From 4294bbaa98462077edc856ad5bd95ed6f0e262e5 Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 15 Jul 2017 14:18:26 -0500 Subject: [PATCH 056/266] InterpartitionAdj now includes a lookup table that maps elements and faces to their neighbors --- meshmode/mesh/__init__.py | 47 +++++++++++++++++++++++-------------- meshmode/mesh/processing.py | 11 +++++---- test/test_meshmode.py | 12 +++++----- 3 files changed, 41 insertions(+), 29 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 83f18922..17ae6f37 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -438,7 +438,13 @@ class InterPartitionAdj(): ``neighbor_faces[i]`` gives face index within the neighboring partition of the face connected to ``elements[i]`` - .. automethod:: get_neighbor + .. 
attribute:: neighbor_lookup_table + + A dictionary that maps the tuple ``(element, face)`` to the tuple + ``(neighbor_element, neighbor_face)``. May be ``None`` if it has not + been generated. + + .. automethod:: append_connection .. versionadded:: 2017.1 """ @@ -449,22 +455,26 @@ class InterPartitionAdj(): self.neighbors = np.array([], dtype=int) self.neighbor_faces = np.array([], dtype=int) - def get_neighbor(self, elem, face): + def append_connection(self, elem, face, nelem, nface): """ - :arg elem - :arg face - :returns: A tuple ``(neighbor_elem, neighbor_face)`` of - neighboring elements within another :class:`Mesh` - or (-1, -1) if the face does not have a neighbor. - Note that ``neighbor_elem`` is mesh-wide and includes - its ``element_nr_base``. + :arg elem: + :arg face: + :arg nelem: + :arg nface: + Connects element ``elem`` with face ``face`` to its neighboring element + ``nelem`` with face ``nface``. """ - for idx in range(len(self.elements)): - if elem == self.elements[idx] and face == self.element_faces[idx]: - return (self.neighbors[idx], - self.neighbor_faces[idx]) - #raise RuntimeError("This face does not have a neighbor") - return (-1, -1) + self.elements = np.append(self.elements, elem) + self.element_faces = np.append(self.element_faces, face) + self.neighbors = np.append(self.neighbors, nelem) + self.neighbor_faces = np.append(self.neighbor_faces, nface) + + def _generate_neighbor_lookup_table(self): + self.neighbor_lookup_table = dict() + for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): + nelem = self.neighbors[idx] + nface = self.neighbor_faces[idx] + self.neighbor_lookup_table[(elem, face)] = (nelem, nface) # }}} @@ -862,14 +872,15 @@ class Mesh(Record): def adjacency_list(self): """ - :returns: An :class:`np.array` with dtype `set`. `adjacency[i]` is the set + :returns: An list of sets. `adjacency[i]` is the set of all elements that are adjacent to element `i`. Useful for `pymetis.part_graph`. 
""" - adjacency_list = np.zeros((self.nelements,), dtype=set) + adjacency_list = [] + for _ in range(self.nelements): + adjacency_list.append(set()) nodal_adj = self.nodal_adjacency for elem in range(self.nelements): - adjacency_list[elem] = set() starts = nodal_adj.neighbors_starts for n in range(starts[elem], starts[elem + 1]): adjacency_list[elem].add(nodal_adj.neighbors[n]) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 63779002..3c64952a 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -187,11 +187,12 @@ def partition_mesh(mesh, part_per_element, part_nr): # We cannot compute the neighbor group because the other # partitions may not have been built yet. - adj = adj_grps[igrp][n_part_num] - adj.elements = np.append(adj.elements, elem) - adj.element_faces = np.append(adj.element_faces, face) - adj.neighbors = np.append(adj.neighbors, n_meshwide_elem) - adj.neighbor_faces = np.append(adj.neighbor_faces, n_face) + adj_grps[igrp][n_part_num].\ + append_connection(elem, face, n_meshwide_elem, n_face) + + for adj_dict in adj_grps: + for adj_grp in adj_dict.values(): + adj_grp._generate_neighbor_lookup_table() connected_mesh = part_mesh.copy() connected_mesh.interpart_adj_groups = adj_grps diff --git a/test/test_meshmode.py b/test/test_meshmode.py index f4dc7621..4affd1a6 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -53,7 +53,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("group_factory", [ PolynomialWarpAndBlendGroupFactory, - InterpolatoryQuadratureSimplexGroupFactory + #InterpolatoryQuadratureSimplexGroupFactory ]) @pytest.mark.parametrize("num_parts", [2, 3, 4]) # FIXME: Mostly fails for multiple groups. 
@@ -66,7 +66,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [10, 20, 30]), - (3, [3, 5]) + #(3, [3, 5]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -212,15 +212,15 @@ def test_partition_mesh(num_parts, num_meshes, dim): if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: num_tags[n_part_num] += 1 - (n_meshwide_elem, n_face) = adj.get_neighbor(elem, face) + (n_meshwide_elem, n_face) =\ + adj.neighbor_lookup_table[(elem, face)] n_grp_num = n_part.find_igrp(n_meshwide_elem) n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base assert (elem + elem_base, face) ==\ - n_adj.get_neighbor(n_elem, n_face),\ - "InterPartitionAdj is not consistent" - + n_adj.neighbor_lookup_table[(n_elem, n_face)],\ + "InterPartitionAdj is not consistent" n_part_to_global = new_meshes[n_part_num][1] p_meshwide_elem = part_to_global[elem + elem_base] p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] -- GitLab From 1249bb9153499996be54a99eb6c995a8993700aa Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 16 Jul 2017 13:16:03 -0500 Subject: [PATCH 057/266] Partition testing no longer relies on pymetis.part_graph --- test/test_meshmode.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 4affd1a6..f57b39fe 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -55,7 +55,7 @@ logger = logging.getLogger(__name__) PolynomialWarpAndBlendGroupFactory, #InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize("num_parts", [2, 3, 4]) +@pytest.mark.parametrize("num_parts", [2, 3]) # FIXME: Mostly fails for multiple groups. # The problem is that when multiple groups are partitioned # some partitions may not contain all groups. 
In that case @@ -65,8 +65,8 @@ logger = logging.getLogger(__name__) # empty batches. @pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ - (2, [10, 20, 30]), - #(3, [3, 5]) + (2, [3, 5, 10]), + (3, [3, 5]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -99,6 +99,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, from pymetis import part_graph (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) part_per_element = np.array(p) + #part_per_element = np.random.randint(num_parts, size=mesh.nelements) from meshmode.mesh.processing import partition_mesh part_meshes = [ @@ -177,9 +178,10 @@ def test_partition_mesh(num_parts, num_meshes, dim): from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes(meshes) - from pymetis import part_graph - (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) - part_per_element = np.array(p) + #from pymetis import part_graph + #(_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) + #part_per_element = np.array(p) + part_per_element = np.random.randint(num_parts, size=mesh.nelements) from meshmode.mesh.processing import partition_mesh # TODO: The same part_per_element array must be used to partition each mesh. 
-- GitLab From c297f0996aafda170c6277f1127e5771b29084b4 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 16 Jul 2017 23:55:06 -0500 Subject: [PATCH 058/266] Fix FaceConnection: from_element_indices are group-local --- meshmode/discretization/connection/face.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/meshmode/discretization/connection/face.py b/meshmode/discretization/connection/face.py index 680d0686..1160ec3f 100644 --- a/meshmode/discretization/connection/face.py +++ b/meshmode/discretization/connection/face.py @@ -83,8 +83,7 @@ def _build_boundary_connection(queue, vol_discr, bdry_discr, connection_data, from_group_index=igrp, from_element_indices=cl.array.to_device( queue, - vol_grp.mesh_el_group.element_nr_base - + data.group_source_element_indices) + data.group_source_element_indices) .with_queue(None), to_element_indices=cl.array.to_device( queue, -- GitLab From 6b058afaa739cd1de649953bb2beb6406c030c25 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 16 Jul 2017 23:55:30 -0500 Subject: [PATCH 059/266] Opposite-face connection: better document _make_bdry_el_lookup_table --- meshmode/discretization/connection/opposite_face.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 3095d8d8..3cced8f7 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -272,7 +272,11 @@ def _find_ibatch_for_face(vbc_tgt_grp_batches, iface): return vbc_tgt_grp_face_batch -def _make_el_lookup_table(queue, connection, igrp): +def _make_bdry_el_lookup_table(queue, connection, igrp): + """Given a voluem-to-boundary connection as *connection*, return + a table of shape ``(from_nelements, nfaces)`` to look up the + element number of the boundary element for that face. 
+ """ from_nelements = connection.from_discr.groups[igrp].nelements from_nfaces = connection.from_discr.mesh.groups[igrp].nfaces @@ -316,7 +320,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): groups = [[] for i_tgt_grp in range(ngrps)] for i_src_grp in range(ngrps): - src_grp_el_lookup = _make_el_lookup_table( + src_grp_el_lookup = _make_bdry_el_lookup_table( queue, volume_to_bdry_conn, i_src_grp) for i_tgt_grp in range(ngrps): @@ -459,7 +463,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): for i_src_grp in np.unique(i_src_grps): src_el_lookup =\ - _make_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) + _make_bdry_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) for i_tgt_face in i_tgt_faces: -- GitLab From bde418ee3205de0900ed217b275c0270e035a70a Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 17 Jul 2017 11:39:34 -0500 Subject: [PATCH 060/266] Test partition interpolation for multiple groups --- meshmode/mesh/__init__.py | 1 - test/test_meshmode.py | 23 +++++++++-------------- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 17ae6f37..7772bca3 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -1061,7 +1061,6 @@ def _compute_facial_adjacency_from_vertices(mesh): for ineighbor_group in range(len(mesh.groups)): nb_count = group_count.get((igroup, ineighbor_group)) - # FIXME nb_count is None sometimes when it maybe shouldn't be. if nb_count is not None: elements = np.empty(nb_count, dtype=mesh.element_id_dtype) element_faces = np.empty(nb_count, dtype=mesh.face_id_dtype) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index f57b39fe..6aedef4d 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,20 +56,14 @@ logger = logging.getLogger(__name__) #InterpolatoryQuadratureSimplexGroupFactory ]) @pytest.mark.parametrize("num_parts", [2, 3]) -# FIXME: Mostly fails for multiple groups. 
-# The problem is that when multiple groups are partitioned -# some partitions may not contain all groups. In that case -# there will be a connection between two partitions with -# empty batches because there will be a group that doesn't -# connect to the other partition. I need to deal with these -# empty batches. -@pytest.mark.parametrize("num_groups", [1]) +@pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ - (2, [3, 5, 10]), - (3, [3, 5]) + (2, [3, 4, 7]), + (3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): + np.random.seed(42) cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) order = 4 @@ -96,10 +90,10 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, else: mesh = meshes[0] - from pymetis import part_graph - (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) - part_per_element = np.array(p) - #part_per_element = np.random.randint(num_parts, size=mesh.nelements) + #from pymetis import part_graph + #(_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) + #part_per_element = np.array(p) + part_per_element = np.random.randint(num_parts, size=mesh.nelements) from meshmode.mesh.processing import partition_mesh part_meshes = [ @@ -170,6 +164,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, @pytest.mark.parametrize("num_parts", [4, 5, 7]) @pytest.mark.parametrize("num_meshes", [1, 2, 7]) def test_partition_mesh(num_parts, num_meshes, dim): + np.random.seed(42) n = (5,) * dim from meshmode.mesh.generation import generate_regular_rect_mesh meshes = [generate_regular_rect_mesh(a=(0 + i,) * dim, b=(1 + i,) * dim, n=n) -- GitLab From 699f602d8978160aec1cba728d4de34ba6e30894 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 17 Jul 2017 21:57:24 -0500 Subject: [PATCH 061/266] Make InterPartitionAdjacency better --- .../connection/opposite_face.py | 2 +- meshmode/mesh/__init__.py | 
61 ++++++++----------- meshmode/mesh/processing.py | 20 +++--- 3 files changed, 38 insertions(+), 45 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 3cced8f7..9dca9cab 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -490,7 +490,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): src_bdry_element_indices)) return DirectDiscretizationConnection( - from_discr=src_bdry, # Is this right? + from_discr=src_bdry, to_discr=tgt_bdry, groups=[DiscretizationConnectionElementGroup(batches=batches) for batches in part_batches], diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 7772bca3..f069d899 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -411,7 +411,7 @@ class NodalAdjacency(Record): # {{{ partition adjacency -class InterPartitionAdj(): +class InterPartitionAdjacency(object): """ Describes facial adjacency information of elements in one :class:`Mesh` to elements in another :class:`Mesh`. The element's boundary tag gives the @@ -419,55 +419,45 @@ class InterPartitionAdj(): .. attribute:: elements - `:class:Mesh`-local element numbers that have neighbors. + Group-local element numbers. + Element ``element_id_dtype elements[i]`` and face + ``face_id_dtype element_faces[i]`` is connected to neighbor element + ``element_id_dtype neighbors[i]`` with face + ``face_id_dtype neighbor_faces[i]``. .. attribute:: element_faces - ``element_faces[i]`` is the face of ``elements[i]`` that has a neighbor. + ``face_id_dtype element_faces[i]`` gives the face of + ``element_id_dtype elements[i]`` that is connected to ``neighbors[i]``. .. attribute:: neighbors - ``neighbors[i]`` gives the element number within the neighboring partiton - of the element connected to ``elements[i]``. This gives a mesh-wide element - numbering. 
Use ``Mesh.find_igrp()`` to find the group that the element - belongs to, then subtract ``element_nr_base`` to find the element of the - group. + Mesh-wide element numbers. + ``element_id_dtype neighbors[i]`` gives the element number within the + neighboring partiton of the element connected to + ``element_id_dtype elements[i]``. Use ``Mesh.find_igrp()`` to find the group + that the element belongs to, then subtract ``element_nr_base`` to find the + element of the group. .. attribute:: neighbor_faces - ``neighbor_faces[i]`` gives face index within the neighboring partition - of the face connected to ``elements[i]`` + ``face_id_dtype neighbor_faces[i]`` gives face index within the neighboring + partition of the face connected to ``element_id_dtype elements[i]`` .. attribute:: neighbor_lookup_table A dictionary that maps the tuple ``(element, face)`` to the tuple - ``(neighbor_element, neighbor_face)``. May be ``None`` if it has not - been generated. - - .. automethod:: append_connection + ``(neighbor_element, neighbor_face)``. .. versionadded:: 2017.1 """ - def __init__(self): - self.elements = np.array([], dtype=int) - self.element_faces = np.array([], dtype=int) - self.neighbors = np.array([], dtype=int) - self.neighbor_faces = np.array([], dtype=int) - - def append_connection(self, elem, face, nelem, nface): - """ - :arg elem: - :arg face: - :arg nelem: - :arg nface: - Connects element ``elem`` with face ``face`` to its neighboring element - ``nelem`` with face ``nface``. 
- """ - self.elements = np.append(self.elements, elem) - self.element_faces = np.append(self.element_faces, face) - self.neighbors = np.append(self.neighbors, nelem) - self.neighbor_faces = np.append(self.neighbor_faces, nface) + def __init__(self, elements, element_faces, neighbors, neighbor_faces): + self.elements = np.array(elements, dtype=Mesh.element_id_dtype) + self.element_faces = np.array(element_faces, dtype=Mesh.face_id_dtype) + self.neighbors = np.array(neighbors, dtype=Mesh.element_id_dtype) + self.neighbor_faces = np.array(neighbor_faces, dtype=Mesh.face_id_dtype) + self._generate_neighbor_lookup_table() def _generate_neighbor_lookup_table(self): self.neighbor_lookup_table = dict() @@ -629,13 +619,16 @@ class Mesh(Record): .. attribute:: element_id_dtype + .. attribute:: face_id_dtype + .. automethod:: __eq__ .. automethod:: __ne__ .. automethod:: find_igrp .. automethos:: adjacency_list """ - face_id_dtype = np.int8 + face_id_dtype=np.int8 + element_id_dtype=np.int32 def __init__(self, vertices, groups, skip_tests=False, node_vertex_consistency_tolerance=None, diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 3c64952a..70a7fd1c 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -143,8 +143,7 @@ def partition_mesh(mesh, part_per_element, part_nr): part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None, boundary_tags=boundary_tags) - from meshmode.mesh import InterPartitionAdj - adj_grps = [{} for _ in range(len(part_mesh.groups))] + adj_grps = [dict() for _ in range(len(part_mesh.groups))] for igrp, grp in enumerate(part_mesh.groups): elem_base = grp.element_nr_base @@ -183,16 +182,17 @@ def partition_mesh(mesh, part_per_element, part_nr): part_per_element[:rank_neighbor] == n_part_num) if n_part_num not in adj_grps[igrp]: - adj_grps[igrp][n_part_num] = InterPartitionAdj() + adj_grps[igrp][n_part_num] = [] - # We cannot compute the neighbor group because the other - # 
partitions may not have been built yet. adj_grps[igrp][n_part_num].\ - append_connection(elem, face, n_meshwide_elem, n_face) - - for adj_dict in adj_grps: - for adj_grp in adj_dict.values(): - adj_grp._generate_neighbor_lookup_table() + append((elem, face, n_meshwide_elem, n_face)) + + from meshmode.mesh import InterPartitionAdjacency + for igrp, adj_dict in enumerate(adj_grps): + for n_part_num, adj_data in adj_dict.items(): + elems, faces, n_elems, n_faces = np.array(adj_data).T + adj_grps[igrp][n_part_num] =\ + InterPartitionAdjacency(elems, faces, n_elems, n_faces) connected_mesh = part_mesh.copy() connected_mesh.interpart_adj_groups = adj_grps -- GitLab From 6a9ec3a7fb3f13ffa74ab47894a404c6454fb9d1 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 17 Jul 2017 22:00:50 -0500 Subject: [PATCH 062/266] Fix typo --- meshmode/discretization/connection/opposite_face.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 9dca9cab..2adbc8b7 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -273,7 +273,7 @@ def _find_ibatch_for_face(vbc_tgt_grp_batches, iface): def _make_bdry_el_lookup_table(queue, connection, igrp): - """Given a voluem-to-boundary connection as *connection*, return + """Given a volume-to-boundary connection as *connection*, return a table of shape ``(from_nelements, nfaces)`` to look up the element number of the boundary element for that face. 
""" -- GitLab From 115888a624ad674eb6da16cfdbd6eddb024901fc Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 17 Jul 2017 22:03:59 -0500 Subject: [PATCH 063/266] Fix typo --- meshmode/mesh/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index f069d899..5c81a5c8 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -601,7 +601,7 @@ class Mesh(Record): A list of mappings from neighbor partition numbers to instances of :class:`InterPartitionAdj`. - ``interpart_adj_gorups[igrp][ineighbor_part]`` gives + ``interpart_adj_groups[igrp][ineighbor_part]`` gives the set of facial adjacency relations between group *igrp* and partition *ineighbor_part*. -- GitLab From 310ae5795a6c6007874578aa5a95584fb1123f1d Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 18 Jul 2017 18:25:26 -0500 Subject: [PATCH 064/266] Fix whitespace --- meshmode/mesh/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 5c81a5c8..3c0a9a14 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -627,8 +627,8 @@ class Mesh(Record): .. 
automethos:: adjacency_list """ - face_id_dtype=np.int8 - element_id_dtype=np.int32 + face_id_dtype = np.int8 + element_id_dtype = np.int32 def __init__(self, vertices, groups, skip_tests=False, node_vertex_consistency_tolerance=None, -- GitLab From 6ebff51a3c82957c5a5a03900ff423cdc197ac2d Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 18 Jul 2017 19:05:37 -0500 Subject: [PATCH 065/266] Replace find_igrp with a batched version --- .../connection/opposite_face.py | 3 +-- meshmode/mesh/__init__.py | 19 +++++++++++-------- meshmode/mesh/processing.py | 7 ++++--- test/test_meshmode.py | 11 +++++++---- 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 2adbc8b7..addaa267 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -452,8 +452,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): i_tgt_faces = adj.element_faces i_src_meshwide_elems = adj.neighbors i_src_faces = adj.neighbor_faces - i_src_grps = np.array([src_mesh.find_igrp(e) - for e in i_src_meshwide_elems]) + i_src_grps = src_mesh.find_igrps(i_src_meshwide_elems) i_src_elems = np.empty_like(i_src_meshwide_elems) for i, i_grp in enumerate(i_src_grps): diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 3c0a9a14..1eece780 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -623,7 +623,7 @@ class Mesh(Record): .. automethod:: __eq__ .. automethod:: __ne__ - .. automethod:: find_igrp + .. automethod:: find_igrps .. automethos:: adjacency_list """ @@ -852,16 +852,19 @@ class Mesh(Record): def __ne__(self, other): return not self.__eq__(other) - def find_igrp(self, meshwide_elem): + def find_igrps(self, meshwide_elems): """ - :arg meshwide_elem: Think of it as ``elem + element_nr_base``. 
- :returns: The index of the group that `meshwide_elem` belongs to. + :arg meshwide_elems: A :class:``numpy.ndarray`` of mesh-wide element numbers + Usually computed by ``elem + element_nr_base``. + :returns: A :class:``numpy.ndarray`` of group numbers that ``meshwide_elem`` + belongs to. """ + grps = np.zeros_like(meshwide_elems) + next_grp_boundary = 0 for igrp, grp in enumerate(self.groups): - if meshwide_elem < grp.nelements: - return igrp - meshwide_elem -= grp.nelements - raise RuntimeError("Could not find group with element %d." % meshwide_elem) + next_grp_boundary += grp.nelements + grps += meshwide_elems >= next_grp_boundary + return grps def adjacency_list(self): """ diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 70a7fd1c..4946bcef 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -150,15 +150,16 @@ def partition_mesh(mesh, part_per_element, part_nr): boundary_adj = part_mesh.facial_adjacency_groups[igrp][None] boundary_elems = boundary_adj.elements boundary_faces = boundary_adj.element_faces + p_meshwide_elems = queried_elems[boundary_elems + elem_base] + parent_igrps = mesh.find_igrps(p_meshwide_elems) for adj_idx, elem in enumerate(boundary_elems): face = boundary_faces[adj_idx] tags = -boundary_adj.neighbors[adj_idx] assert tags >= 0, "Expected boundary tag in adjacency group." 
- p_meshwide_elem = queried_elems[elem + elem_base] - parent_igrp = mesh.find_igrp(p_meshwide_elem) + parent_igrp = parent_igrps[adj_idx] parent_elem_base = mesh.groups[parent_igrp].element_nr_base - parent_elem = p_meshwide_elem - parent_elem_base + parent_elem = p_meshwide_elems[adj_idx] - parent_elem_base parent_adj = mesh.facial_adjacency_groups[parent_igrp] diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 6aedef4d..d3a50f6b 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -53,7 +53,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("group_factory", [ PolynomialWarpAndBlendGroupFactory, - #InterpolatoryQuadratureSimplexGroupFactory + InterpolatoryQuadratureSimplexGroupFactory ]) @pytest.mark.parametrize("num_parts", [2, 3]) @pytest.mark.parametrize("num_groups", [1, 2]) @@ -211,7 +211,10 @@ def test_partition_mesh(num_parts, num_meshes, dim): (n_meshwide_elem, n_face) =\ adj.neighbor_lookup_table[(elem, face)] - n_grp_num = n_part.find_igrp(n_meshwide_elem) + # Hack: find_igrps expects a numpy.ndarray and returns + # a numpy.ndarray. But if a single integer is fed + # into find_igrps, an integer is returned. 
+ n_grp_num = n_part.find_igrps(n_meshwide_elem) n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base @@ -222,8 +225,8 @@ def test_partition_mesh(num_parts, num_meshes, dim): p_meshwide_elem = part_to_global[elem + elem_base] p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] - p_grp_num = mesh.find_igrp(p_meshwide_elem) - p_n_grp_num = mesh.find_igrp(p_meshwide_n_elem) + p_grp_num = mesh.find_igrps(p_meshwide_elem) + p_n_grp_num = mesh.find_igrps(p_meshwide_n_elem) p_elem_base = mesh.groups[p_grp_num].element_nr_base p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base -- GitLab From 52234749f193d649dbb0241f484665bdfd7be7bb Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 22 Jul 2017 18:19:26 -0500 Subject: [PATCH 066/266] Mesh.adjacency_list now returns a list of numpy arrays --- meshmode/mesh/__init__.py | 13 ++++--------- test/test_meshmode.py | 8 ++++---- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 1eece780..6258c8df 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -868,18 +868,13 @@ class Mesh(Record): def adjacency_list(self): """ - :returns: An list of sets. `adjacency[i]` is the set - of all elements that are adjacent to element `i`. - Useful for `pymetis.part_graph`. + :returns: `adjacency[i]` is a list of all elements that are adjacent to + element `i`. Useful for `pymetis.part_graph`. 
""" adjacency_list = [] - for _ in range(self.nelements): - adjacency_list.append(set()) - nodal_adj = self.nodal_adjacency for elem in range(self.nelements): - starts = nodal_adj.neighbors_starts - for n in range(starts[elem], starts[elem + 1]): - adjacency_list[elem].add(nodal_adj.neighbors[n]) + start, end = self.nodal_adjacency.neighbors_starts[elem:elem+2] + adjacency_list.append(self.nodal_adjacency.neighbors[start:end]) return adjacency_list # Design experience: Try not to add too many global data structures to the diff --git a/test/test_meshmode.py b/test/test_meshmode.py index d3a50f6b..df5dc509 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -173,10 +173,10 @@ def test_partition_mesh(num_parts, num_meshes, dim): from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes(meshes) - #from pymetis import part_graph - #(_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) - #part_per_element = np.array(p) - part_per_element = np.random.randint(num_parts, size=mesh.nelements) + from pymetis import part_graph + (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) + part_per_element = np.array(p) + #part_per_element = np.random.randint(num_parts, size=mesh.nelements) from meshmode.mesh.processing import partition_mesh # TODO: The same part_per_element array must be used to partition each mesh. 
-- GitLab From f5766f6af82f0873b6332693b9c53b4dc0f9ce0e Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 22 Jul 2017 19:08:40 -0500 Subject: [PATCH 067/266] Clean up test_partition_mesh --- test/test_meshmode.py | 87 ++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 42 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index df5dc509..d7137e77 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -200,48 +200,51 @@ def test_partition_mesh(num_parts, num_meshes, dim): for grp_num, f_groups in enumerate(part.facial_adjacency_groups): f_grp = f_groups[None] elem_base = part.groups[grp_num].element_nr_base - for idx, elem in enumerate(f_grp.elements): - tag = -f_grp.neighbors[idx] - assert tag >= 0 - face = f_grp.element_faces[idx] - for n_part_num, adj in part.interpart_adj_groups[grp_num].items(): - n_part, n_part_to_global = new_meshes[n_part_num] - if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: - num_tags[n_part_num] += 1 - - (n_meshwide_elem, n_face) =\ - adj.neighbor_lookup_table[(elem, face)] - # Hack: find_igrps expects a numpy.ndarray and returns - # a numpy.ndarray. But if a single integer is fed - # into find_igrps, an integer is returned. 
- n_grp_num = n_part.find_igrps(n_meshwide_elem) - n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] - n_elem_base = n_part.groups[n_grp_num].element_nr_base - n_elem = n_meshwide_elem - n_elem_base - assert (elem + elem_base, face) ==\ - n_adj.neighbor_lookup_table[(n_elem, n_face)],\ - "InterPartitionAdj is not consistent" - n_part_to_global = new_meshes[n_part_num][1] - p_meshwide_elem = part_to_global[elem + elem_base] - p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] - - p_grp_num = mesh.find_igrps(p_meshwide_elem) - p_n_grp_num = mesh.find_igrps(p_meshwide_n_elem) - - p_elem_base = mesh.groups[p_grp_num].element_nr_base - p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base - p_elem = p_meshwide_elem - p_elem_base - p_n_elem = p_meshwide_n_elem - p_n_elem_base - - f_groups = mesh.facial_adjacency_groups[p_grp_num] - for p_bnd_adj in f_groups.values(): - for idx in range(len(p_bnd_adj.elements)): - if (p_elem == p_bnd_adj.elements[idx] and - face == p_bnd_adj.element_faces[idx]): - assert p_n_elem == p_bnd_adj.neighbors[idx],\ - "Tag does not give correct neighbor" - assert n_face == p_bnd_adj.neighbor_faces[idx],\ - "Tag does not give correct neighbor" + for n_part_num, adj in part.interpart_adj_groups[grp_num].items(): + n_part, n_part_to_global = new_meshes[n_part_num] + tags = -f_grp.neighbors + assert np.all(tags >= 0) + def is_connected_to_part(i): + return (part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) + & tags[i]) + for idx in filter(is_connected_to_part, range(len(tags))): + elem = f_grp.elements[idx] + face = f_grp.element_faces[idx] + num_tags[n_part_num] += 1 + + (n_meshwide_elem, n_face) =\ + adj.neighbor_lookup_table[(elem, face)] + # Hack: find_igrps expects a numpy.ndarray and returns + # a numpy.ndarray. But if a single integer is fed + # into find_igrps, an integer is returned. 
+ n_grp_num = n_part.find_igrps(n_meshwide_elem) + n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] + n_elem_base = n_part.groups[n_grp_num].element_nr_base + n_elem = n_meshwide_elem - n_elem_base + assert (elem + elem_base, face) ==\ + n_adj.neighbor_lookup_table[(n_elem, n_face)],\ + "InterPartitionAdj is not consistent" + n_part_to_global = new_meshes[n_part_num][1] + p_meshwide_elem = part_to_global[elem + elem_base] + p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] + + p_grp_num = mesh.find_igrps(p_meshwide_elem) + p_n_grp_num = mesh.find_igrps(p_meshwide_n_elem) + + p_elem_base = mesh.groups[p_grp_num].element_nr_base + p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base + p_elem = p_meshwide_elem - p_elem_base + p_n_elem = p_meshwide_n_elem - p_n_elem_base + + f_groups = mesh.facial_adjacency_groups[p_grp_num] + for p_bnd_adj in f_groups.values(): + for idx in range(len(p_bnd_adj.elements)): + if (p_elem == p_bnd_adj.elements[idx] and + face == p_bnd_adj.element_faces[idx]): + assert p_n_elem == p_bnd_adj.neighbors[idx],\ + "Tag does not give correct neighbor" + assert n_face == p_bnd_adj.neighbor_faces[idx],\ + "Tag does not give correct neighbor" for i_tag in range(num_parts): tag_sum = 0 -- GitLab From e9c00aa86265dfccfd3d0ee049074d2a4ab2d334 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 23 Jul 2017 01:31:04 -0500 Subject: [PATCH 068/266] Fix whitespace --- test/test_meshmode.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index d7137e77..50f37acb 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -204,6 +204,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): n_part, n_part_to_global = new_meshes[n_part_num] tags = -f_grp.neighbors assert np.all(tags >= 0) + def is_connected_to_part(i): return (part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) & tags[i]) -- GitLab From 15cec834a2cf54bfcf8c406b6ccd343dfecdc894 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 
26 Jul 2017 18:18:48 -0500 Subject: [PATCH 069/266] Small change to make_partition_connection --- .../discretization/connection/opposite_face.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index addaa267..c7ef3a49 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -454,15 +454,11 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): i_src_faces = adj.neighbor_faces i_src_grps = src_mesh.find_igrps(i_src_meshwide_elems) - i_src_elems = np.empty_like(i_src_meshwide_elems) - for i, i_grp in enumerate(i_src_grps): - elem_base = src_mesh.groups[i_grp].element_nr_base - i_src_elems[i] = i_src_meshwide_elems[i] - elem_base - for i_src_grp in np.unique(i_src_grps): + elem_base = src_mesh.groups[i_src_grp].element_nr_base src_el_lookup =\ - _make_bdry_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) + _make_bdry_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) for i_tgt_face in i_tgt_faces: @@ -478,9 +474,9 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ to_element_indices.get(queue=queue) - src_bdry_element_indices = src_el_lookup[ - i_src_elems[index_flags], - i_src_faces[index_flags]] + elems = i_src_meshwide_elems[index_flags] - elem_base + faces = i_src_faces[index_flags] + src_bdry_element_indices = src_el_lookup[elems, faces] part_batches[i_tgt_grp].extend(_make_cross_face_batches(queue, tgt_bdry, src_bdry, -- GitLab From 0d99e6a1cb982281b65282c8d20c79da4fbfaa01 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 26 Jul 2017 18:18:56 -0500 Subject: [PATCH 070/266] Mesh can now be initialized with face_id_dtype --- meshmode/mesh/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py 
index 6258c8df..8bf8b27c 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -637,6 +637,7 @@ class Mesh(Record): interpart_adj_groups=False, boundary_tags=None, vertex_id_dtype=np.int32, + face_id_dtype=np.int8, element_id_dtype=np.int32): """ The following are keyword-only: @@ -720,6 +721,7 @@ class Mesh(Record): boundary_tags=boundary_tags, btag_to_index=btag_to_index, vertex_id_dtype=np.dtype(vertex_id_dtype), + face_id_dtype=np.dtype(face_id_dtype), element_id_dtype=np.dtype(element_id_dtype), ) -- GitLab From 041beaf3313f9707c11752a7d1705e78686d0d3a Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 30 Jul 2017 18:07:31 -0500 Subject: [PATCH 071/266] Change test_partition_interpolation inputs --- test/test_meshmode.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 50f37acb..4e949e2b 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -51,10 +51,7 @@ logger = logging.getLogger(__name__) # {{{ partition_interpolation -@pytest.mark.parametrize("group_factory", [ - PolynomialWarpAndBlendGroupFactory, - InterpolatoryQuadratureSimplexGroupFactory - ]) +@pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) @pytest.mark.parametrize("num_parts", [2, 3]) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ -- GitLab From 4e73293120964f349e93c735fd1d7507346a953a Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 30 Jul 2017 18:49:20 -0500 Subject: [PATCH 072/266] Update docs --- .../discretization/connection/opposite_face.py | 15 ++++++++------- test/test_meshmode.py | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index c7ef3a49..5540096b 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -409,16 
+409,17 @@ def make_opposite_face_connection(volume_to_bdry_conn): def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): """ - Given a two boundary restriction connections *tgt_conn* and *src_conn*, - return a :class:`DirectDiscretizationConnection` that performs data - exchange across adjacent faces of different partitions. + Given a two boundary restriction connections *tgt_to_src_conn* and + *src_to_tgt_conn*, return a :class:`DirectDiscretizationConnection` that + performs data exchange across adjacent faces of different partitions. - :arg tgt_conn: A :class:`Discretization` for the target partition. - :arg src_conn: A :class:`Discretization` for the source partition. - :arg i_src_part: The partition number corresponding to *src_conn*. + :arg tgt_to_src_conn: A :class:`Discretization` of the target partition. + :arg src_to_tgt_conn: A :class:`Discretization` of the source partition. + :arg i_src_part: The partition number of the src partition. :returns: A :class:`DirectDiscretizationConnection` that performs data - exchange across faces in different partitions. + exchange across faces from partition `src_to_tgt_conn` to + `tgt_to_src_conn`. .. 
versionadded:: 2017.1 diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 4e949e2b..c5f54334 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -118,7 +118,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, BTAG_PARTITION(i_src_part)) # If these parts are not connected, don't bother checking the error - bdry_nodes = tgt_to_src_conn.to_discr.nodes()[0].with_queue(queue) + bdry_nodes = tgt_to_src_conn.to_discr.nodes() if bdry_nodes.size == 0: eoc_rec[(i_tgt_part, i_src_part)] = None continue -- GitLab From c56a277435e85548794683a1cb99f43b4c13cdb5 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 30 Jul 2017 19:57:01 -0500 Subject: [PATCH 073/266] Working --- .../connection/opposite_face.py | 7 ++- meshmode/mesh/__init__.py | 63 +++++++------------ meshmode/mesh/processing.py | 20 +++++- test/test_meshmode.py | 9 +-- 4 files changed, 50 insertions(+), 49 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 5540096b..e6f3be62 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -426,6 +426,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): .. warning:: Interface is not final. 
""" + from meshmode.mesh.processing import find_group_indices from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) @@ -451,9 +452,9 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): adj = adj_parts[i_src_part] i_tgt_faces = adj.element_faces - i_src_meshwide_elems = adj.neighbors - i_src_faces = adj.neighbor_faces - i_src_grps = src_mesh.find_igrps(i_src_meshwide_elems) + i_src_meshwide_elems = adj.global_neighbors + i_src_faces = adj.global_neighbor_faces + i_src_grps = find_group_indices(src_mesh.groups, i_src_meshwide_elems) for i_src_grp in np.unique(i_src_grps): diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 8bf8b27c..cbb22af6 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -422,48 +422,51 @@ class InterPartitionAdjacency(object): Group-local element numbers. Element ``element_id_dtype elements[i]`` and face ``face_id_dtype element_faces[i]`` is connected to neighbor element - ``element_id_dtype neighbors[i]`` with face - ``face_id_dtype neighbor_faces[i]``. + ``element_id_dtype global_neighbors[i]`` with face + ``face_id_dtype global_neighbor_faces[i]``. .. attribute:: element_faces ``face_id_dtype element_faces[i]`` gives the face of - ``element_id_dtype elements[i]`` that is connected to ``neighbors[i]``. + ``element_id_dtype elements[i]`` that is connected to + ``globla_neighbors[i]``. - .. attribute:: neighbors + .. attribute:: global_neighbors Mesh-wide element numbers. - ``element_id_dtype neighbors[i]`` gives the element number within the + ``element_id_dtype global_neighbors[i]`` gives the element number within the neighboring partiton of the element connected to - ``element_id_dtype elements[i]``. Use ``Mesh.find_igrp()`` to find the group - that the element belongs to, then subtract ``element_nr_base`` to find the - element of the group. + ``element_id_dtype elements[i]``. 
Use ``find_group_instances()`` to find the + group that the element belongs to, then subtract ``element_nr_base`` to find + the element of the group. - .. attribute:: neighbor_faces + .. attribute:: global_neighbor_faces - ``face_id_dtype neighbor_faces[i]`` gives face index within the neighboring - partition of the face connected to ``element_id_dtype elements[i]`` + ``face_id_dtype global_neighbor_faces[i]`` gives face index within the + neighboring partition of the face connected to + ``element_id_dtype elements[i]`` .. attribute:: neighbor_lookup_table A dictionary that maps the tuple ``(element, face)`` to the tuple - ``(neighbor_element, neighbor_face)``. + ``(global_neighbor_element, global_neighbor_face)``. .. versionadded:: 2017.1 """ - def __init__(self, elements, element_faces, neighbors, neighbor_faces): - self.elements = np.array(elements, dtype=Mesh.element_id_dtype) - self.element_faces = np.array(element_faces, dtype=Mesh.face_id_dtype) - self.neighbors = np.array(neighbors, dtype=Mesh.element_id_dtype) - self.neighbor_faces = np.array(neighbor_faces, dtype=Mesh.face_id_dtype) + def __init__(self, elements, element_faces, + global_neighbors, global_neighbor_faces): + self.elements = elements + self.element_faces = element_faces + self.global_neighbors = global_neighbors + self.global_neighbor_faces = global_neighbor_faces self._generate_neighbor_lookup_table() def _generate_neighbor_lookup_table(self): self.neighbor_lookup_table = dict() for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): - nelem = self.neighbors[idx] - nface = self.neighbor_faces[idx] + nelem = self.global_neighbors[idx] + nface = self.global_neighbor_faces[idx] self.neighbor_lookup_table[(elem, face)] = (nelem, nface) # }}} @@ -619,16 +622,12 @@ class Mesh(Record): .. attribute:: element_id_dtype - .. attribute:: face_id_dtype - .. automethod:: __eq__ .. automethod:: __ne__ - .. automethod:: find_igrps .. 
automethos:: adjacency_list """ face_id_dtype = np.int8 - element_id_dtype = np.int32 def __init__(self, vertices, groups, skip_tests=False, node_vertex_consistency_tolerance=None, @@ -637,7 +636,6 @@ class Mesh(Record): interpart_adj_groups=False, boundary_tags=None, vertex_id_dtype=np.int32, - face_id_dtype=np.int8, element_id_dtype=np.int32): """ The following are keyword-only: @@ -721,8 +719,7 @@ class Mesh(Record): boundary_tags=boundary_tags, btag_to_index=btag_to_index, vertex_id_dtype=np.dtype(vertex_id_dtype), - face_id_dtype=np.dtype(face_id_dtype), - element_id_dtype=np.dtype(element_id_dtype), + element_id_dtype=np.dtype(element_id_dtype) ) if not skip_tests: @@ -854,20 +851,6 @@ class Mesh(Record): def __ne__(self, other): return not self.__eq__(other) - def find_igrps(self, meshwide_elems): - """ - :arg meshwide_elems: A :class:``numpy.ndarray`` of mesh-wide element numbers - Usually computed by ``elem + element_nr_base``. - :returns: A :class:``numpy.ndarray`` of group numbers that ``meshwide_elem`` - belongs to. - """ - grps = np.zeros_like(meshwide_elems) - next_grp_boundary = 0 - for igrp, grp in enumerate(self.groups): - next_grp_boundary += grp.nelements - grps += meshwide_elems >= next_grp_boundary - return grps - def adjacency_list(self): """ :returns: `adjacency[i]` is a list of all elements that are adjacent to diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 4946bcef..b0a88a05 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -31,6 +31,7 @@ import modepy as mp __doc__ = """ +.. autofunction:: find_group_indices .. autofunction:: partition_mesh .. autofunction:: find_volume_mesh_element_orientations .. autofunction:: perform_flips @@ -40,6 +41,21 @@ __doc__ = """ .. autofunction:: affine_map """ +def find_group_indices(groups, meshwide_elems): + """ + :arg groups: A list of :class:``MeshElementGroup`` instances that contain + ``meshwide_elems``. 
+ :arg meshwide_elems: A :class:``numpy.ndarray`` of mesh-wide element numbers + Usually computed by ``elem + element_nr_base``. + :returns: A :class:``numpy.ndarray`` of group numbers that ``meshwide_elem`` + belongs to. + """ + grps = np.zeros_like(meshwide_elems) + next_grp_boundary = 0 + for igrp, grp in enumerate(groups): + next_grp_boundary += grp.nelements + grps += meshwide_elems >= next_grp_boundary + return grps # {{{ partition_mesh @@ -151,7 +167,7 @@ def partition_mesh(mesh, part_per_element, part_nr): boundary_elems = boundary_adj.elements boundary_faces = boundary_adj.element_faces p_meshwide_elems = queried_elems[boundary_elems + elem_base] - parent_igrps = mesh.find_igrps(p_meshwide_elems) + parent_igrps = find_group_indices(mesh.groups, p_meshwide_elems) for adj_idx, elem in enumerate(boundary_elems): face = boundary_faces[adj_idx] tags = -boundary_adj.neighbors[adj_idx] @@ -193,7 +209,7 @@ def partition_mesh(mesh, part_per_element, part_nr): for n_part_num, adj_data in adj_dict.items(): elems, faces, n_elems, n_faces = np.array(adj_data).T adj_grps[igrp][n_part_num] =\ - InterPartitionAdjacency(elems, faces, n_elems, n_faces) + InterPartitionAdjacency(elems, faces, n_elems, n_faces) connected_mesh = part_mesh.copy() connected_mesh.interpart_adj_groups = adj_grps diff --git a/test/test_meshmode.py b/test/test_meshmode.py index c5f54334..450cd7e6 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), - (3, [3, 4]) + #(3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -190,6 +190,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): "part_mesh has the wrong number of BTAG_ALL boundaries" from meshmode.mesh import BTAG_PARTITION + from meshmode.mesh.processing import find_group_indices num_tags = 
np.zeros((num_parts,)) for part_num in range(num_parts): @@ -215,7 +216,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): # Hack: find_igrps expects a numpy.ndarray and returns # a numpy.ndarray. But if a single integer is fed # into find_igrps, an integer is returned. - n_grp_num = n_part.find_igrps(n_meshwide_elem) + n_grp_num = find_group_indices(n_part.groups, n_meshwide_elem) n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base @@ -226,8 +227,8 @@ def test_partition_mesh(num_parts, num_meshes, dim): p_meshwide_elem = part_to_global[elem + elem_base] p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] - p_grp_num = mesh.find_igrps(p_meshwide_elem) - p_n_grp_num = mesh.find_igrps(p_meshwide_n_elem) + p_grp_num = find_group_indices(mesh.groups, p_meshwide_elem) + p_n_grp_num = find_group_indices(mesh.groups, p_meshwide_n_elem) p_elem_base = mesh.groups[p_grp_num].element_nr_base p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base -- GitLab From 5ce66b8104ca12f243faeee1378f18fd6e87a76f Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 30 Jul 2017 20:02:20 -0500 Subject: [PATCH 074/266] Fix whitespace --- meshmode/mesh/processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index b0a88a05..2dc65d92 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -41,9 +41,10 @@ __doc__ = """ .. autofunction:: affine_map """ + def find_group_indices(groups, meshwide_elems): """ - :arg groups: A list of :class:``MeshElementGroup`` instances that contain + :arg groups: A list of :class:``MeshElementGroup`` instances that contain ``meshwide_elems``. :arg meshwide_elems: A :class:``numpy.ndarray`` of mesh-wide element numbers Usually computed by ``elem + element_nr_base``. 
@@ -57,6 +58,7 @@ def find_group_indices(groups, meshwide_elems): grps += meshwide_elems >= next_grp_boundary return grps + # {{{ partition_mesh def partition_mesh(mesh, part_per_element, part_nr): -- GitLab From 656b5dfa37474655c166b74e2ac4ba0e90645cd5 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 31 Jul 2017 22:30:43 -0500 Subject: [PATCH 075/266] Working --- .../connection/opposite_face.py | 25 +++--- meshmode/mesh/__init__.py | 21 ++++- meshmode/mesh/processing.py | 20 ++--- test/test_meshmode.py | 87 +++++++++---------- 4 files changed, 82 insertions(+), 71 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index e6f3be62..a384e8ff 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -437,23 +437,28 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): tgt_mesh = tgt_vol.mesh src_mesh = src_vol.mesh - adj_grps = tgt_mesh.interpart_adj_groups - part_batches = [] with cl.CommandQueue(tgt_vol.cl_context) as queue: - for i_tgt_grp, adj_parts in enumerate(adj_grps): + for i_tgt_grp, adj in tgt_mesh.interpart_adj_groups.items(): part_batches.append([]) - if i_src_part not in adj_parts: - # Skip because i_tgt_grp is not connected to i_src_part. - continue + #if i_src_part not in adj_parts: + # # Skip because i_tgt_grp is not connected to i_src_part. 
+ # continue - adj = adj_parts[i_src_part] + #adj = adj_parts[i_src_part] - i_tgt_faces = adj.element_faces - i_src_meshwide_elems = adj.global_neighbors - i_src_faces = adj.global_neighbor_faces + idxes = i_src_part == adj.neighbor_parts + if not np.any(idxes): + continue + i_tgt_faces = adj.element_faces[idxes] + i_src_meshwide_elems = adj.global_neighbors[idxes] + i_src_faces = adj.global_neighbor_faces[idxes] + + #i_tgt_faces = adj.element_faces + #i_src_meshwide_elems = adj.global_neighbors + #i_src_faces = adj.global_neighbor_faces i_src_grps = find_group_indices(src_mesh.groups, i_src_meshwide_elems) for i_src_grp in np.unique(i_src_grps): diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index cbb22af6..e9feb743 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -454,20 +454,35 @@ class InterPartitionAdjacency(object): .. versionadded:: 2017.1 """ - def __init__(self, elements, element_faces, - global_neighbors, global_neighbor_faces): + def __init__(self, elements, + element_faces, + neighbor_parts, + global_neighbors, + global_neighbor_faces): self.elements = elements self.element_faces = element_faces + self.neighbor_parts = neighbor_parts self.global_neighbors = global_neighbors self.global_neighbor_faces = global_neighbor_faces self._generate_neighbor_lookup_table() + def __eq__(self, other): + return ( + type(self) == type(other) + and np.array_equal(self.elements, other.elements) + and np.array_equal(self.element_faces, other.element_faces) + and np.array_equal(self.neighbors, other.neighbors) + and np.array_equal(self.neighbor_faces, other.neighbor_faces) + and np.array_equal(self.neighbor_part, other.neighbor_part) + ) + def _generate_neighbor_lookup_table(self): self.neighbor_lookup_table = dict() for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): nelem = self.global_neighbors[idx] nface = self.global_neighbor_faces[idx] - self.neighbor_lookup_table[(elem, face)] = (nelem, nface) + 
npart = self.neighbor_parts[idx] + self.neighbor_lookup_table[(elem, face)] = (npart, nelem, nface) # }}} diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 2dc65d92..3b409df1 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -161,7 +161,7 @@ def partition_mesh(mesh, part_per_element, part_nr): part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None, boundary_tags=boundary_tags) - adj_grps = [dict() for _ in range(len(part_mesh.groups))] + adj_data = [[] for _ in range(len(part_mesh.groups))] for igrp, grp in enumerate(part_mesh.groups): elem_base = grp.element_nr_base @@ -200,18 +200,16 @@ def partition_mesh(mesh, part_per_element, part_nr): n_meshwide_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) - if n_part_num not in adj_grps[igrp]: - adj_grps[igrp][n_part_num] = [] - - adj_grps[igrp][n_part_num].\ - append((elem, face, n_meshwide_elem, n_face)) + adj_data[igrp].append((elem, face, + n_part_num, n_meshwide_elem, n_face)) from meshmode.mesh import InterPartitionAdjacency - for igrp, adj_dict in enumerate(adj_grps): - for n_part_num, adj_data in adj_dict.items(): - elems, faces, n_elems, n_faces = np.array(adj_data).T - adj_grps[igrp][n_part_num] =\ - InterPartitionAdjacency(elems, faces, n_elems, n_faces) + adj_grps = dict() + for igrp, connection in enumerate(adj_data): + if connection: + elems, faces, n_parts, n_elems, n_faces = np.array(connection).T + adj_grps[igrp] =\ + InterPartitionAdjacency(elems, faces, n_parts, n_elems, n_faces) connected_mesh = part_mesh.copy() connected_mesh.interpart_adj_groups = adj_grps diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 450cd7e6..17a595ce 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -195,55 +195,48 @@ def test_partition_mesh(num_parts, num_meshes, dim): for part_num in range(num_parts): part, part_to_global = new_meshes[part_num] - for grp_num, f_groups in 
enumerate(part.facial_adjacency_groups): - f_grp = f_groups[None] + for grp_num, adj in part.interpart_adj_groups.items(): + f_grp = part.facial_adjacency_groups[grp_num][None] + tags = -f_grp.neighbors + assert np.all(tags >= 0) elem_base = part.groups[grp_num].element_nr_base - for n_part_num, adj in part.interpart_adj_groups[grp_num].items(): + for elem, face, n_part_num, n_meshwide_elem, n_face in\ + zip(adj.elements, adj.element_faces, + adj.neighbor_parts, adj.global_neighbors, + adj.global_neighbor_faces): + num_tags[n_part_num] += 1 n_part, n_part_to_global = new_meshes[n_part_num] - tags = -f_grp.neighbors - assert np.all(tags >= 0) - - def is_connected_to_part(i): - return (part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) - & tags[i]) - for idx in filter(is_connected_to_part, range(len(tags))): - elem = f_grp.elements[idx] - face = f_grp.element_faces[idx] - num_tags[n_part_num] += 1 - - (n_meshwide_elem, n_face) =\ - adj.neighbor_lookup_table[(elem, face)] - # Hack: find_igrps expects a numpy.ndarray and returns - # a numpy.ndarray. But if a single integer is fed - # into find_igrps, an integer is returned. 
- n_grp_num = find_group_indices(n_part.groups, n_meshwide_elem) - n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] - n_elem_base = n_part.groups[n_grp_num].element_nr_base - n_elem = n_meshwide_elem - n_elem_base - assert (elem + elem_base, face) ==\ - n_adj.neighbor_lookup_table[(n_elem, n_face)],\ - "InterPartitionAdj is not consistent" - n_part_to_global = new_meshes[n_part_num][1] - p_meshwide_elem = part_to_global[elem + elem_base] - p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] - - p_grp_num = find_group_indices(mesh.groups, p_meshwide_elem) - p_n_grp_num = find_group_indices(mesh.groups, p_meshwide_n_elem) - - p_elem_base = mesh.groups[p_grp_num].element_nr_base - p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base - p_elem = p_meshwide_elem - p_elem_base - p_n_elem = p_meshwide_n_elem - p_n_elem_base - - f_groups = mesh.facial_adjacency_groups[p_grp_num] - for p_bnd_adj in f_groups.values(): - for idx in range(len(p_bnd_adj.elements)): - if (p_elem == p_bnd_adj.elements[idx] and - face == p_bnd_adj.element_faces[idx]): - assert p_n_elem == p_bnd_adj.neighbors[idx],\ - "Tag does not give correct neighbor" - assert n_face == p_bnd_adj.neighbor_faces[idx],\ - "Tag does not give correct neighbor" + # Hack: find_igrps expects a numpy.ndarray and returns + # a numpy.ndarray. But if a single integer is fed + # into find_igrps, an integer is returned. 
+ n_grp_num = find_group_indices(n_part.groups, n_meshwide_elem) + n_adj = n_part.interpart_adj_groups[int(n_grp_num)] + n_elem_base = n_part.groups[n_grp_num].element_nr_base + n_elem = n_meshwide_elem - n_elem_base + assert (part_num, elem + elem_base, face) ==\ + n_adj.neighbor_lookup_table[(n_elem, n_face)],\ + "InterPartitionAdj is not consistent" + n_part_to_global = new_meshes[n_part_num][1] + p_meshwide_elem = part_to_global[elem + elem_base] + p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] + + p_grp_num = find_group_indices(mesh.groups, p_meshwide_elem) + p_n_grp_num = find_group_indices(mesh.groups, p_meshwide_n_elem) + + p_elem_base = mesh.groups[p_grp_num].element_nr_base + p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base + p_elem = p_meshwide_elem - p_elem_base + p_n_elem = p_meshwide_n_elem - p_n_elem_base + + f_groups = mesh.facial_adjacency_groups[p_grp_num] + for p_bnd_adj in f_groups.values(): + for idx in range(len(p_bnd_adj.elements)): + if (p_elem == p_bnd_adj.elements[idx] and + face == p_bnd_adj.element_faces[idx]): + assert p_n_elem == p_bnd_adj.neighbors[idx],\ + "Tag does not give correct neighbor" + assert n_face == p_bnd_adj.neighbor_faces[idx],\ + "Tag does not give correct neighbor" for i_tag in range(num_parts): tag_sum = 0 -- GitLab From 289204acc295c025c49f894b91311628a737f796 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 1 Aug 2017 02:49:55 -0500 Subject: [PATCH 076/266] Working --- .../connection/opposite_face.py | 22 +-- meshmode/mesh/__init__.py | 165 ++++++++---------- meshmode/mesh/processing.py | 16 +- test/test_meshmode.py | 28 +-- 4 files changed, 108 insertions(+), 123 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index a384e8ff..f5461b77 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -441,24 +441,18 @@ def 
make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): with cl.CommandQueue(tgt_vol.cl_context) as queue: - for i_tgt_grp, adj in tgt_mesh.interpart_adj_groups.items(): + for i_tgt_grp in range(len(tgt_mesh.groups)): part_batches.append([]) - #if i_src_part not in adj_parts: - # # Skip because i_tgt_grp is not connected to i_src_part. - # continue + adj = tgt_mesh.facial_adjacency_groups[i_tgt_grp]['part'] - #adj = adj_parts[i_src_part] - - idxes = i_src_part == adj.neighbor_parts - if not np.any(idxes): + indices = i_src_part == adj.neighbor_parts + if not np.any(indices): + # Skip because i_tgt_grp is not connected to i_src_part. continue - i_tgt_faces = adj.element_faces[idxes] - i_src_meshwide_elems = adj.global_neighbors[idxes] - i_src_faces = adj.global_neighbor_faces[idxes] + i_tgt_faces = adj.element_faces[indices] + i_src_meshwide_elems = adj.global_neighbors[indices] + i_src_faces = adj.neighbor_faces[indices] - #i_tgt_faces = adj.element_faces - #i_src_meshwide_elems = adj.global_neighbors - #i_src_faces = adj.global_neighbor_faces i_src_grps = find_group_indices(src_mesh.groups, i_src_meshwide_elems) for i_src_grp in np.unique(i_src_grps): diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index e9feb743..f1fe9b5a 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -409,84 +409,6 @@ class NodalAdjacency(Record): # }}} -# {{{ partition adjacency - -class InterPartitionAdjacency(object): - """ - Describes facial adjacency information of elements in one :class:`Mesh` to - elements in another :class:`Mesh`. The element's boundary tag gives the - partition that it is connected to. - - .. attribute:: elements - - Group-local element numbers. - Element ``element_id_dtype elements[i]`` and face - ``face_id_dtype element_faces[i]`` is connected to neighbor element - ``element_id_dtype global_neighbors[i]`` with face - ``face_id_dtype global_neighbor_faces[i]``. - - .. 
attribute:: element_faces - - ``face_id_dtype element_faces[i]`` gives the face of - ``element_id_dtype elements[i]`` that is connected to - ``globla_neighbors[i]``. - - .. attribute:: global_neighbors - - Mesh-wide element numbers. - ``element_id_dtype global_neighbors[i]`` gives the element number within the - neighboring partiton of the element connected to - ``element_id_dtype elements[i]``. Use ``find_group_instances()`` to find the - group that the element belongs to, then subtract ``element_nr_base`` to find - the element of the group. - - .. attribute:: global_neighbor_faces - - ``face_id_dtype global_neighbor_faces[i]`` gives face index within the - neighboring partition of the face connected to - ``element_id_dtype elements[i]`` - - .. attribute:: neighbor_lookup_table - - A dictionary that maps the tuple ``(element, face)`` to the tuple - ``(global_neighbor_element, global_neighbor_face)``. - - .. versionadded:: 2017.1 - """ - - def __init__(self, elements, - element_faces, - neighbor_parts, - global_neighbors, - global_neighbor_faces): - self.elements = elements - self.element_faces = element_faces - self.neighbor_parts = neighbor_parts - self.global_neighbors = global_neighbors - self.global_neighbor_faces = global_neighbor_faces - self._generate_neighbor_lookup_table() - - def __eq__(self, other): - return ( - type(self) == type(other) - and np.array_equal(self.elements, other.elements) - and np.array_equal(self.element_faces, other.element_faces) - and np.array_equal(self.neighbors, other.neighbors) - and np.array_equal(self.neighbor_faces, other.neighbor_faces) - and np.array_equal(self.neighbor_part, other.neighbor_part) - ) - - def _generate_neighbor_lookup_table(self): - self.neighbor_lookup_table = dict() - for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): - nelem = self.global_neighbors[idx] - nface = self.global_neighbor_faces[idx] - npart = self.neighbor_parts[idx] - self.neighbor_lookup_table[(elem, face)] = (npart, 
nelem, nface) - -# }}} - - # {{{ facial adjacency class FacialAdjacencyGroup(Record): @@ -561,6 +483,82 @@ class FacialAdjacencyGroup(Record): # }}} +# {{{ partition adjacency + +class InterPartitionAdjacency(FacialAdjacencyGroup): + """ + Describes facial adjacency information of elements in one :class:`Mesh` to + elements in another :class:`Mesh`. The element's boundary tag gives the + partition that it is connected to. + + .. attribute:: elements + + Group-local element numbers. + Element ``element_id_dtype elements[i]`` and face + ``face_id_dtype element_faces[i]`` is connected to neighbor element + ``element_id_dtype global_neighbors[i]`` with face + ``face_id_dtype global_neighbor_faces[i]``. + + .. attribute:: element_faces + + ``face_id_dtype element_faces[i]`` gives the face of + ``element_id_dtype elements[i]`` that is connected to + ``globla_neighbors[i]``. + + .. attribute:: global_neighbors + + Mesh-wide element numbers. + ``element_id_dtype global_neighbors[i]`` gives the element number within the + neighboring partiton of the element connected to + ``element_id_dtype elements[i]``. Use ``find_group_instances()`` to find the + group that the element belongs to, then subtract ``element_nr_base`` to find + the element of the group. + + .. attribute:: global_neighbor_faces + + ``face_id_dtype global_neighbor_faces[i]`` gives face index within the + neighboring partition of the face connected to + ``element_id_dtype elements[i]`` + + .. attribute:: neighbor_lookup_table + + A dictionary that maps the tuple ``(element, face)`` to the tuple + ``(global_neighbor_element, global_neighbor_face)``. + + .. 
versionadded:: 2017.1 + """ + + def __init__(self, elements, + element_faces, + neighbor_parts, + global_neighbors, + neighbor_faces): + self.elements = elements + self.element_faces = element_faces + self.neighbor_parts = neighbor_parts + self.global_neighbors = global_neighbors + self.neighbor_faces = neighbor_faces + self._generate_neighbor_lookup_table() + + def __eq__(self, other): + return (type(self) == type(other) + and np.array_equal(self.elements, other.elements) + and np.array_equal(self.element_faces, other.element_faces) + and np.array_equal(self.global_neighbors, other.global_neighbors) + and np.array_equal(self.neighbor_faces, other.neighbor_faces) + and np.array_equal(self.neighbor_part, other.neighbor_part)) + + def _generate_neighbor_lookup_table(self): + self.neighbor_lookup_table = dict() + for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): + nelem = self.global_neighbors[idx] + nface = self.neighbor_faces[idx] + npart = self.neighbor_parts[idx] + self.neighbor_lookup_table[(elem, face)] = (npart, nelem, nface) + +# }}} + + # {{{ mesh class Mesh(Record): @@ -614,15 +612,6 @@ class Mesh(Record): (Note that element groups are not necessarily contiguous like the figure may suggest.) - .. attribute:: interpart_adj_groups - - A list of mappings from neighbor partition numbers to instances of - :class:`InterPartitionAdj`. - - ``interpart_adj_groups[igrp][ineighbor_part]`` gives - the set of facial adjacency relations between group *igrp* - and partition *ineighbor_part*. - .. attribute:: boundary_tags A tuple of boundary tag identifiers. 
:class:`BTAG_ALL` and @@ -648,7 +637,6 @@ class Mesh(Record): node_vertex_consistency_tolerance=None, nodal_adjacency=False, facial_adjacency_groups=False, - interpart_adj_groups=False, boundary_tags=None, vertex_id_dtype=np.int32, element_id_dtype=np.int32): @@ -730,7 +718,6 @@ class Mesh(Record): self, vertices=vertices, groups=new_groups, _nodal_adjacency=nodal_adjacency, _facial_adjacency_groups=facial_adjacency_groups, - interpart_adj_groups=interpart_adj_groups, boundary_tags=boundary_tags, btag_to_index=btag_to_index, vertex_id_dtype=np.dtype(vertex_id_dtype), diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 3b409df1..d5c5d0d1 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -203,16 +203,16 @@ def partition_mesh(mesh, part_per_element, part_nr): adj_data[igrp].append((elem, face, n_part_num, n_meshwide_elem, n_face)) + connected_mesh = part_mesh.copy() + from meshmode.mesh import InterPartitionAdjacency - adj_grps = dict() - for igrp, connection in enumerate(adj_data): - if connection: - elems, faces, n_parts, n_elems, n_faces = np.array(connection).T - adj_grps[igrp] =\ - InterPartitionAdjacency(elems, faces, n_parts, n_elems, n_faces) + for igrp, adj in enumerate(adj_data): + if adj: + elems, faces, n_parts, n_elems, n_faces = np.array(adj).T + connected_mesh.facial_adjacency_groups[igrp]['part'] =\ + InterPartitionAdjacency(elems, faces, + n_parts, n_elems, n_faces) - connected_mesh = part_mesh.copy() - connected_mesh.interpart_adj_groups = adj_grps return connected_mesh, queried_elems # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 17a595ce..477ff1d4 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -189,28 +189,33 @@ def test_partition_mesh(num_parts, num_meshes, dim): [count_tags(new_meshes[i][0], BTAG_ALL) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" - from meshmode.mesh import BTAG_PARTITION + from meshmode.mesh 
import BTAG_PARTITION, InterPartitionAdjacency from meshmode.mesh.processing import find_group_indices num_tags = np.zeros((num_parts,)) for part_num in range(num_parts): part, part_to_global = new_meshes[part_num] - for grp_num, adj in part.interpart_adj_groups.items(): - f_grp = part.facial_adjacency_groups[grp_num][None] - tags = -f_grp.neighbors - assert np.all(tags >= 0) + for grp_num in range(len(part.groups)): + #f_grp = part.facial_adjacency_groups[grp_num][None] + #tags = -f_grp.neighbors + #assert np.all(tags >= 0) + if not 'part' in part.facial_adjacency_groups[grp_num]: + continue + adj = part.facial_adjacency_groups[grp_num]['part'] + if not isinstance(adj, InterPartitionAdjacency): + continue elem_base = part.groups[grp_num].element_nr_base for elem, face, n_part_num, n_meshwide_elem, n_face in\ zip(adj.elements, adj.element_faces, adj.neighbor_parts, adj.global_neighbors, - adj.global_neighbor_faces): + adj.neighbor_faces): num_tags[n_part_num] += 1 n_part, n_part_to_global = new_meshes[n_part_num] # Hack: find_igrps expects a numpy.ndarray and returns # a numpy.ndarray. But if a single integer is fed # into find_igrps, an integer is returned. 
n_grp_num = find_group_indices(n_part.groups, n_meshwide_elem) - n_adj = n_part.interpart_adj_groups[int(n_grp_num)] + n_adj = n_part.facial_adjacency_groups[int(n_grp_num)]['part'] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base assert (part_num, elem + elem_base, face) ==\ @@ -249,11 +254,10 @@ def test_partition_mesh(num_parts, num_meshes, dim): def count_tags(mesh, tag): num_bnds = 0 for adj_dict in mesh.facial_adjacency_groups: - for _, bdry_group in adj_dict.items(): - for neighbors in bdry_group.neighbors: - if neighbors < 0: - if -neighbors & mesh.boundary_tag_bit(tag) != 0: - num_bnds += 1 + for neighbors in adj_dict[None].neighbors: + if neighbors < 0: + if -neighbors & mesh.boundary_tag_bit(tag) != 0: + num_bnds += 1 return num_bnds # }}} -- GitLab From 0a93a3ffc4124ae60f7026195dab4eebd86e3198 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 1 Aug 2017 20:59:28 -0500 Subject: [PATCH 077/266] InterPartitionAdjacency inherits FacialAdjacencyGroup --- meshmode/mesh/__init__.py | 35 +++++++++++++++++++++-------------- meshmode/mesh/processing.py | 36 +++++++++++++++++++++++++++--------- test/test_meshmode.py | 31 +++++++++++++++++-------------- 3 files changed, 65 insertions(+), 37 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index f1fe9b5a..354021fb 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -514,13 +514,13 @@ class InterPartitionAdjacency(FacialAdjacencyGroup): group that the element belongs to, then subtract ``element_nr_base`` to find the element of the group. - .. attribute:: global_neighbor_faces + .. attribute:: neighbor_faces ``face_id_dtype global_neighbor_faces[i]`` gives face index within the neighboring partition of the face connected to ``element_id_dtype elements[i]`` - .. attribute:: neighbor_lookup_table + .. 
attribute:: index_lookup_table A dictionary that maps the tuple ``(element, face)`` to the tuple ``(global_neighbor_element, global_neighbor_face)``. @@ -528,33 +528,41 @@ class InterPartitionAdjacency(FacialAdjacencyGroup): .. versionadded:: 2017.1 """ + ''' + I don't like the idea of having InterPartitionAdjacency replace the boundary + group for FacialAdjacencyGroup. A boundary may be a real boundary or it may + have a partition adjacent to it. FacialAdjacency and InterPartitionAdjacecy + will not have the same elements. They should be separate. facial_adjacency_groups + should have groups for real boundaries and for 'fake' boundaries. + ''' + def __init__(self, elements, element_faces, + neighbors, + igroup, + i_neighbor_group, neighbor_parts, global_neighbors, neighbor_faces): self.elements = elements self.element_faces = element_faces + self.neighbors = neighbors + self.igroup = igroup + self.i_neighbor_group = i_neighbor_group self.neighbor_parts = neighbor_parts self.global_neighbors = global_neighbors self.neighbor_faces = neighbor_faces - self._generate_neighbor_lookup_table() + self._generate_index_lookup_table() def __eq__(self, other): - return (type(self) == type(other) - and np.array_equal(self.elements, other.elements) - and np.array_equal(self.element_faces, other.element_faces) + return (super.__eq__(self, other) and np.array_equal(self.global_neighbors, other.global_neighbors) - and np.array_equal(self.neighbor_faces, other.neighbor_faces) and np.array_equal(self.neighbor_part, other.neighbor_part)) - def _generate_neighbor_lookup_table(self): - self.neighbor_lookup_table = dict() + def _generate_index_lookup_table(self): + self.index_lookup_table = dict() for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): - nelem = self.global_neighbors[idx] - nface = self.neighbor_faces[idx] - npart = self.neighbor_parts[idx] - self.neighbor_lookup_table[(elem, face)] = (npart, nelem, nface) + self.index_lookup_table[(elem, face)] = idx 
# }}} @@ -847,7 +855,6 @@ class Mesh(Record): == other._nodal_adjacency) and (self._facial_adjacency_groups == other._facial_adjacency_groups) - and self.interpart_adj_groups == other.interpart_adj_groups and self.boundary_tags == other.boundary_tags) def __ne__(self, other): diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index d5c5d0d1..1f3c5099 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -172,8 +172,8 @@ def partition_mesh(mesh, part_per_element, part_nr): parent_igrps = find_group_indices(mesh.groups, p_meshwide_elems) for adj_idx, elem in enumerate(boundary_elems): face = boundary_faces[adj_idx] - tags = -boundary_adj.neighbors[adj_idx] - assert tags >= 0, "Expected boundary tag in adjacency group." + tag = -boundary_adj.neighbors[adj_idx] + assert tag >= 0, "Expected boundary tag in adjacency group." parent_igrp = parent_igrps[adj_idx] parent_elem_base = mesh.groups[parent_igrp].element_nr_base @@ -191,16 +191,16 @@ def partition_mesh(mesh, part_per_element, part_nr): n_face = parent_facial_group.neighbor_faces[idx] n_part_num = part_per_element[rank_neighbor] - tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) - tags = tags | part_mesh.boundary_tag_bit( + tag = tag & ~part_mesh.boundary_tag_bit(BTAG_ALL) + tag = tag | part_mesh.boundary_tag_bit( BTAG_PARTITION(n_part_num)) - boundary_adj.neighbors[adj_idx] = -tags + boundary_adj.neighbors[adj_idx] = -tag # Find the neighbor element from the other partition. 
n_meshwide_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) - adj_data[igrp].append((elem, face, + adj_data[igrp].append((elem, face, n_part_num, n_meshwide_elem, n_face)) connected_mesh = part_mesh.copy() @@ -208,9 +208,27 @@ def partition_mesh(mesh, part_per_element, part_nr): from meshmode.mesh import InterPartitionAdjacency for igrp, adj in enumerate(adj_data): if adj: - elems, faces, n_parts, n_elems, n_faces = np.array(adj).T - connected_mesh.facial_adjacency_groups[igrp]['part'] =\ - InterPartitionAdjacency(elems, faces, + boundary_adj = connected_mesh.facial_adjacency_groups[igrp][None] + n_parts = np.zeros_like(boundary_adj.elements) + n_parts.fill(-1) + n_elems = np.copy(n_parts) + n_faces = np.copy(n_parts) + for elem, face, n_part, n_elem, n_face in adj: + idx = np.where(np.logical_and(elem == boundary_adj.elements, + face == boundary_adj.element_faces))[0] + n_parts[idx] = n_part + n_elems[idx] = n_elem + n_faces[idx] = n_face + #bdry_perm = np.argsort(boundary_adj.elements) + #bdry_elems = boundary_adj.elements[perm] + #bdry_faces = boundary_adj.element_faces[perm] + #elems, faces, n_parts, n_elems, n_faces = np.array(adj).T + connected_mesh.facial_adjacency_groups[igrp][None] =\ + InterPartitionAdjacency(boundary_adj.elements, + boundary_adj.element_faces, + boundary_adj.neighbors, + boundary_adj.igroup, + boundary_adj.ineighbor_group, n_parts, n_elems, n_faces) return connected_mesh, queried_elems diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 477ff1d4..d0f1904e 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -196,32 +196,35 @@ def test_partition_mesh(num_parts, num_meshes, dim): for part_num in range(num_parts): part, part_to_global = new_meshes[part_num] for grp_num in range(len(part.groups)): - #f_grp = part.facial_adjacency_groups[grp_num][None] - #tags = -f_grp.neighbors - #assert np.all(tags >= 0) - if not 'part' in part.facial_adjacency_groups[grp_num]: - continue - adj = 
part.facial_adjacency_groups[grp_num]['part'] + adj = part.facial_adjacency_groups[grp_num][None] + tags = -part.facial_adjacency_groups[grp_num][None].neighbors + assert np.all(tags >= 0) if not isinstance(adj, InterPartitionAdjacency): continue elem_base = part.groups[grp_num].element_nr_base - for elem, face, n_part_num, n_meshwide_elem, n_face in\ - zip(adj.elements, adj.element_faces, - adj.neighbor_parts, adj.global_neighbors, - adj.neighbor_faces): + for idx in range(len(adj.elements)): + if adj.global_neighbors[idx] == -1: + continue + elem = adj.elements[idx] + face = adj.element_faces[idx] + n_part_num = adj.neighbor_parts[idx] + n_meshwide_elem = adj.global_neighbors[idx] + n_face = adj.neighbor_faces[idx] num_tags[n_part_num] += 1 n_part, n_part_to_global = new_meshes[n_part_num] # Hack: find_igrps expects a numpy.ndarray and returns # a numpy.ndarray. But if a single integer is fed # into find_igrps, an integer is returned. n_grp_num = find_group_indices(n_part.groups, n_meshwide_elem) - n_adj = n_part.facial_adjacency_groups[int(n_grp_num)]['part'] + n_adj = n_part.facial_adjacency_groups[int(n_grp_num)][None] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base - assert (part_num, elem + elem_base, face) ==\ - n_adj.neighbor_lookup_table[(n_elem, n_face)],\ + n_idx = n_adj.index_lookup_table[(n_elem, n_face)] + assert (part_num == n_adj.neighbor_parts[n_idx] + and elem + elem_base == n_adj.global_neighbors[n_idx] + and face == n_adj.neighbor_faces[n_idx]),\ "InterPartitionAdj is not consistent" - n_part_to_global = new_meshes[n_part_num][1] + _, n_part_to_global = new_meshes[n_part_num] p_meshwide_elem = part_to_global[elem + elem_base] p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] -- GitLab From a5388844061f34b8895c01e6d39aa8318eebe653 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 7 Aug 2017 15:12:18 -0500 Subject: [PATCH 078/266] Working --- .../connection/opposite_face.py | 2 +- 
meshmode/mesh/processing.py | 42 ++++++++++++------- test/test_meshmode.py | 2 +- 3 files changed, 29 insertions(+), 17 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index f5461b77..61834402 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -443,7 +443,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): for i_tgt_grp in range(len(tgt_mesh.groups)): part_batches.append([]) - adj = tgt_mesh.facial_adjacency_groups[i_tgt_grp]['part'] + adj = tgt_mesh.facial_adjacency_groups[i_tgt_grp][None] indices = i_src_part == adj.neighbor_parts if not np.any(indices): diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 1f3c5099..935aa662 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -211,25 +211,37 @@ def partition_mesh(mesh, part_per_element, part_nr): boundary_adj = connected_mesh.facial_adjacency_groups[igrp][None] n_parts = np.zeros_like(boundary_adj.elements) n_parts.fill(-1) - n_elems = np.copy(n_parts) + global_n_elems = np.copy(n_parts) n_faces = np.copy(n_parts) - for elem, face, n_part, n_elem, n_face in adj: - idx = np.where(np.logical_and(elem == boundary_adj.elements, - face == boundary_adj.element_faces))[0] - n_parts[idx] = n_part - n_elems[idx] = n_elem - n_faces[idx] = n_face - #bdry_perm = np.argsort(boundary_adj.elements) - #bdry_elems = boundary_adj.elements[perm] - #bdry_faces = boundary_adj.element_faces[perm] - #elems, faces, n_parts, n_elems, n_faces = np.array(adj).T + bdry_perm = np.lexsort([boundary_adj.element_faces, boundary_adj.elements]) + bdry_elems = boundary_adj.elements[bdry_perm] + bdry_faces = boundary_adj.element_faces[bdry_perm] + bdry_neighbors = boundary_adj.neighbors[bdry_perm] + adj_elems, adj_faces, adj_n_parts, adj_gl_n_elems, adj_n_faces = np.array(adj).T + adj_perm = np.lexsort([adj_faces, 
adj_elems]) + adj_elems = adj_elems[adj_perm] + adj_faces = adj_faces[adj_perm] + adj_n_parts = adj_n_parts[adj_perm] + adj_gl_n_elems = adj_gl_n_elems[adj_perm] + adj_n_faces = adj_n_faces[adj_perm] + adj_idx = 0 + for bdry_idx in range(len(bdry_elems)): + if adj_idx >= len(adj_elems): + break + if (adj_elems[adj_idx] == bdry_elems[bdry_idx] + and adj_faces[adj_idx] == bdry_faces[bdry_idx]): + n_parts[bdry_idx] = adj_n_parts[adj_idx] + global_n_elems[bdry_idx] = adj_gl_n_elems[adj_idx] + n_faces[bdry_idx] = adj_n_faces[adj_idx] + adj_idx += 1 + connected_mesh.facial_adjacency_groups[igrp][None] =\ - InterPartitionAdjacency(boundary_adj.elements, - boundary_adj.element_faces, - boundary_adj.neighbors, + InterPartitionAdjacency(bdry_elems, + bdry_faces, + bdry_neighbors, boundary_adj.igroup, boundary_adj.ineighbor_group, - n_parts, n_elems, n_faces) + n_parts, global_n_elems, n_faces) return connected_mesh, queried_elems diff --git a/test/test_meshmode.py b/test/test_meshmode.py index d0f1904e..aef12f94 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), - #(3, [3, 4]) + (3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): -- GitLab From c911f672cad0a82b42de05ac9c62dff6a1157fe1 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 7 Aug 2017 15:51:15 -0500 Subject: [PATCH 079/266] Merge InterPartitionAdjacency with FacialAdjacencyGroups --- .../connection/opposite_face.py | 4 +- meshmode/mesh/__init__.py | 56 +++++++++++-------- meshmode/mesh/processing.py | 34 ++++++----- test/test_meshmode.py | 4 +- 4 files changed, 56 insertions(+), 42 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 61834402..c79b72a8 100644 --- 
a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -445,14 +445,14 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): part_batches.append([]) adj = tgt_mesh.facial_adjacency_groups[i_tgt_grp][None] - indices = i_src_part == adj.neighbor_parts + indices = (i_src_part == adj.neighbor_partitions) if not np.any(indices): # Skip because i_tgt_grp is not connected to i_src_part. continue i_tgt_faces = adj.element_faces[indices] i_src_meshwide_elems = adj.global_neighbors[indices] i_src_faces = adj.neighbor_faces[indices] - + i_src_grps = find_group_indices(src_mesh.groups, i_src_meshwide_elems) for i_src_grp in np.unique(i_src_grps): diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 354021fb..ca267daa 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -487,9 +487,12 @@ class FacialAdjacencyGroup(Record): class InterPartitionAdjacency(FacialAdjacencyGroup): """ - Describes facial adjacency information of elements in one :class:`Mesh` to - elements in another :class:`Mesh`. The element's boundary tag gives the - partition that it is connected to. + Describes boundary adjacency information of elements in + :class:`MeshElementGroup`. + + .. attribute:: igroup + + The group number of this group. .. attribute:: elements @@ -497,19 +500,26 @@ class InterPartitionAdjacency(FacialAdjacencyGroup): Element ``element_id_dtype elements[i]`` and face ``face_id_dtype element_faces[i]`` is connected to neighbor element ``element_id_dtype global_neighbors[i]`` with face - ``face_id_dtype global_neighbor_faces[i]``. + ``face_id_dtype global_neighbor_faces[i]``. The partition number it connects + to is ``neighbor_partitions[i]``. .. attribute:: element_faces ``face_id_dtype element_faces[i]`` gives the face of ``element_id_dtype elements[i]`` that is connected to - ``globla_neighbors[i]``. + ``global_neighbors[i]``. + + .. 
attribute:: neighbors + + Since this is a boundary, ``element_id_dtype neighbors[i]`` is interpreted + as a boundary tag. ``-neighbors[i]`` should be interpreted according to + :class:``Mesh.boundary_tags``. .. attribute:: global_neighbors Mesh-wide element numbers. ``element_id_dtype global_neighbors[i]`` gives the element number within the - neighboring partiton of the element connected to + neighboring partition of the element connected to ``element_id_dtype elements[i]``. Use ``find_group_instances()`` to find the group that the element belongs to, then subtract ``element_nr_base`` to find the element of the group. @@ -520,44 +530,43 @@ class InterPartitionAdjacency(FacialAdjacencyGroup): neighboring partition of the face connected to ``element_id_dtype elements[i]`` + .. attribute:: neighbor_partitions + + ``neighbor_partitions[i]`` gives the partition number that ``elements[i]`` + is connected to. + + If ``neighbor_partitions[i]`` is negative, ``elements[i]`` is on a true + boundary and is not connected to any other :class:``Mesh``. + .. attribute:: index_lookup_table - A dictionary that maps the tuple ``(element, face)`` to the tuple - ``(global_neighbor_element, global_neighbor_face)``. + A dictionary that maps the tuple ``(element, face)`` to an index ``i`` such + that ``elements[i] == element and element_faces[i] == face``. .. versionadded:: 2017.1 """ - ''' - I don't like the idea of having InterPartitionAdjacency replace the boundary - group for FacialAdjacencyGroup. A boundary may be a real boundary or it may - have a partition adjacent to it. FacialAdjacency and InterPartitionAdjacecy - will not have the same elements. They should be separate. facial_adjacency_groups - should have groups for real boundaries and for 'fake' boundaries. 
- ''' - def __init__(self, elements, element_faces, neighbors, igroup, - i_neighbor_group, - neighbor_parts, + neighbor_partitions, global_neighbors, neighbor_faces): self.elements = elements self.element_faces = element_faces self.neighbors = neighbors self.igroup = igroup - self.i_neighbor_group = i_neighbor_group - self.neighbor_parts = neighbor_parts + self.ineighbor_group = None + self.neighbor_partitions = neighbor_partitions self.global_neighbors = global_neighbors self.neighbor_faces = neighbor_faces self._generate_index_lookup_table() def __eq__(self, other): return (super.__eq__(self, other) - and np.array_equal(self.global_neighbors, other.global_neighbors) - and np.array_equal(self.neighbor_part, other.neighbor_part)) + and np.array_equal(self.global_neighbors, other.global_neighbors) + and np.array_equal(self.neighbor_partitions, other.neighbor_partitions)) def _generate_index_lookup_table(self): self.index_lookup_table = dict() @@ -596,7 +605,8 @@ class Mesh(Record): the set of facial adjacency relations between group *igrp* and *ineighbor_group*. *ineighbor_group* and *igrp* may be identical, or *ineighbor_group* may be *None*, in which case - a group containing boundary faces is returned. + an :class:``InterPartitionAdjacency`` group containing boundary + faces is returned. Referencing this attribute may raise :exc:`meshmode.DataUnavailable`. 
diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 935aa662..d69f52b5 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -208,39 +208,43 @@ def partition_mesh(mesh, part_per_element, part_nr): from meshmode.mesh import InterPartitionAdjacency for igrp, adj in enumerate(adj_data): if adj: - boundary_adj = connected_mesh.facial_adjacency_groups[igrp][None] - n_parts = np.zeros_like(boundary_adj.elements) + bdry = connected_mesh.facial_adjacency_groups[igrp][None] + # Initialize connections + n_parts = np.zeros_like(bdry.elements) n_parts.fill(-1) global_n_elems = np.copy(n_parts) n_faces = np.copy(n_parts) - bdry_perm = np.lexsort([boundary_adj.element_faces, boundary_adj.elements]) - bdry_elems = boundary_adj.elements[bdry_perm] - bdry_faces = boundary_adj.element_faces[bdry_perm] - bdry_neighbors = boundary_adj.neighbors[bdry_perm] - adj_elems, adj_faces, adj_n_parts, adj_gl_n_elems, adj_n_faces = np.array(adj).T + + # Sort both sets of elements so that we can quickly merge + # the two data structures + bdry_perm = np.lexsort([bdry.element_faces, bdry.elements]) + elems = bdry.elements[bdry_perm] + faces = bdry.element_faces[bdry_perm] + neighbors = bdry.neighbors[bdry_perm] + adj_elems, adj_faces, adj_n_parts, adj_gl_n_elems, adj_n_faces =\ + np.array(adj).T adj_perm = np.lexsort([adj_faces, adj_elems]) adj_elems = adj_elems[adj_perm] adj_faces = adj_faces[adj_perm] adj_n_parts = adj_n_parts[adj_perm] adj_gl_n_elems = adj_gl_n_elems[adj_perm] adj_n_faces = adj_n_faces[adj_perm] + + # Merge interpartition adjacency data with FacialAdjacencyGroup adj_idx = 0 - for bdry_idx in range(len(bdry_elems)): + for bdry_idx in range(len(elems)): if adj_idx >= len(adj_elems): break - if (adj_elems[adj_idx] == bdry_elems[bdry_idx] - and adj_faces[adj_idx] == bdry_faces[bdry_idx]): + if (adj_elems[adj_idx] == elems[bdry_idx] + and adj_faces[adj_idx] == faces[bdry_idx]): n_parts[bdry_idx] = adj_n_parts[adj_idx] 
global_n_elems[bdry_idx] = adj_gl_n_elems[adj_idx] n_faces[bdry_idx] = adj_n_faces[adj_idx] adj_idx += 1 connected_mesh.facial_adjacency_groups[igrp][None] =\ - InterPartitionAdjacency(bdry_elems, - bdry_faces, - bdry_neighbors, - boundary_adj.igroup, - boundary_adj.ineighbor_group, + InterPartitionAdjacency(elems, faces, neighbors, + bdry.igroup, n_parts, global_n_elems, n_faces) return connected_mesh, queried_elems diff --git a/test/test_meshmode.py b/test/test_meshmode.py index aef12f94..64000416 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -207,7 +207,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): continue elem = adj.elements[idx] face = adj.element_faces[idx] - n_part_num = adj.neighbor_parts[idx] + n_part_num = adj.neighbor_partitions[idx] n_meshwide_elem = adj.global_neighbors[idx] n_face = adj.neighbor_faces[idx] num_tags[n_part_num] += 1 @@ -220,7 +220,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base n_idx = n_adj.index_lookup_table[(n_elem, n_face)] - assert (part_num == n_adj.neighbor_parts[n_idx] + assert (part_num == n_adj.neighbor_partitions[n_idx] and elem + elem_base == n_adj.global_neighbors[n_idx] and face == n_adj.neighbor_faces[n_idx]),\ "InterPartitionAdj is not consistent" -- GitLab From 1d1a5d8fcefa1438ce8bf83ec5ad40d1efc04cc9 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 7 Aug 2017 16:00:04 -0500 Subject: [PATCH 080/266] Improve documentation --- meshmode/mesh/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index ca267daa..8f7161f7 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -524,12 +524,18 @@ class InterPartitionAdjacency(FacialAdjacencyGroup): group that the element belongs to, then subtract ``element_nr_base`` to find the element of the group. 
+ If ``global_neighbors[i]`` is negative, ``elements[i]`` is on a true + boundary and is not connected to any other :class:``Mesh``. + .. attribute:: neighbor_faces ``face_id_dtype global_neighbor_faces[i]`` gives face index within the neighboring partition of the face connected to ``element_id_dtype elements[i]`` + If ``neighbor_partitions[i]`` is negative, ``elements[i]`` is on a true + boundary and is not connected to any other :class:``Mesh``. + .. attribute:: neighbor_partitions ``neighbor_partitions[i]`` gives the partition number that ``elements[i]`` -- GitLab From 53f628dd7164044ee4777db0afc145cebae6e2ab Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 7 Aug 2017 16:45:09 -0500 Subject: [PATCH 081/266] Send less data to make_partition_connection --- .../connection/opposite_face.py | 38 ++++++++----------- test/test_meshmode.py | 24 +++++++++--- 2 files changed, 35 insertions(+), 27 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index c79b72a8..d96cd5e8 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -407,19 +407,21 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): +def make_partition_connection(src_to_tgt_conn, i_src_part, + tgt_bdry, tgt_adj_groups, tgt_batches): """ - Given a two boundary restriction connections *tgt_to_src_conn* and - *src_to_tgt_conn*, return a :class:`DirectDiscretizationConnection` that - performs data exchange across adjacent faces of different partitions. + Connects ``src_to_tgt_conn`` to a neighboring partition. - :arg tgt_to_src_conn: A :class:`Discretization` of the target partition. :arg src_to_tgt_conn: A :class:`Discretization` of the source partition. :arg i_src_part: The partition number of the src partition. 
+ :arg tgt_adj_groups: A list of :class:`InterPartitionAdjacency`` of the target + partition. + :arg tgt_bdry: A :class:`Discretization` of the boundary of the + target partition. + :arg tgt_batches: A list of batches of the target partition. :returns: A :class:`DirectDiscretizationConnection` that performs data - exchange across faces from partition `src_to_tgt_conn` to - `tgt_to_src_conn`. + exchange across faces from partition `i_src_part` to the target partition. .. versionadded:: 2017.1 @@ -430,21 +432,14 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - tgt_vol = tgt_to_src_conn.from_discr - src_vol = src_to_tgt_conn.from_discr - tgt_bdry = tgt_to_src_conn.to_discr src_bdry = src_to_tgt_conn.to_discr - tgt_mesh = tgt_vol.mesh - src_mesh = src_vol.mesh + src_groups = src_to_tgt_conn.from_discr.mesh.groups - part_batches = [] + part_batches = [[] for _ in tgt_adj_groups] - with cl.CommandQueue(tgt_vol.cl_context) as queue: - - for i_tgt_grp in range(len(tgt_mesh.groups)): - part_batches.append([]) - adj = tgt_mesh.facial_adjacency_groups[i_tgt_grp][None] + with cl.CommandQueue(src_to_tgt_conn.cl_context) as queue: + for i_tgt_grp, adj in enumerate(tgt_adj_groups): indices = (i_src_part == adj.neighbor_partitions) if not np.any(indices): # Skip because i_tgt_grp is not connected to i_src_part. 
@@ -453,11 +448,11 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): i_src_meshwide_elems = adj.global_neighbors[indices] i_src_faces = adj.neighbor_faces[indices] - i_src_grps = find_group_indices(src_mesh.groups, i_src_meshwide_elems) + i_src_grps = find_group_indices(src_groups, i_src_meshwide_elems) for i_src_grp in np.unique(i_src_grps): - elem_base = src_mesh.groups[i_src_grp].element_nr_base + elem_base = src_groups[i_src_grp].element_nr_base src_el_lookup =\ _make_bdry_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) @@ -465,12 +460,11 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): index_flags = np.logical_and(i_src_grps == i_src_grp, i_tgt_faces == i_tgt_face) - if not np.any(index_flags): continue vbc_tgt_grp_face_batch = _find_ibatch_for_face( - tgt_to_src_conn.groups[i_tgt_grp].batches, i_tgt_face) + tgt_batches[i_tgt_grp], i_tgt_face) tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ to_element_indices.get(queue=queue) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 64000416..cdad4758 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -128,14 +128,28 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, group_factory(order), BTAG_PARTITION(i_tgt_part)) + # Gather just enough information for the connection + tgt_bdry = tgt_to_src_conn.to_discr + tgt_mesh = tgt_to_src_conn.from_discr.mesh + tgt_adj_groups = [tgt_mesh.facial_adjacency_groups[i][None] + for i in range(len(tgt_mesh.groups))] + tgt_batches = [tgt_to_src_conn.groups[i].batches + for i in range(len(tgt_mesh.groups))] + + src_bdry = src_to_tgt_conn.to_discr + src_mesh = src_to_tgt_conn.from_discr.mesh + src_adj_groups = [src_mesh.facial_adjacency_groups[i][None] + for i in range(len(src_mesh.groups))] + src_batches = [src_to_tgt_conn.groups[i].batches + for i in range(len(src_mesh.groups))] + # Connect tgt_mesh to src_mesh - tgt_conn = 
make_partition_connection(tgt_to_src_conn, - src_to_tgt_conn, i_src_part) + tgt_conn = make_partition_connection(src_to_tgt_conn, i_src_part, + tgt_bdry, tgt_adj_groups, tgt_batches) # Connect src_mesh to tgt_mesh - src_conn = make_partition_connection(src_to_tgt_conn, - tgt_to_src_conn, i_tgt_part) - + src_conn = make_partition_connection(tgt_to_src_conn, i_tgt_part, + src_bdry, src_adj_groups, src_batches) check_connection(tgt_conn) check_connection(src_conn) -- GitLab From a66ad48a3d67d157cc2705729d48c2305b227a42 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 9 Aug 2017 16:29:37 -0500 Subject: [PATCH 082/266] Change name --- meshmode/mesh/__init__.py | 2 +- meshmode/mesh/processing.py | 8 ++++---- test/test_meshmode.py | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 8f7161f7..cae5f058 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -485,7 +485,7 @@ class FacialAdjacencyGroup(Record): # {{{ partition adjacency -class InterPartitionAdjacency(FacialAdjacencyGroup): +class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): """ Describes boundary adjacency information of elements in :class:`MeshElementGroup`. 
diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index d69f52b5..a47ee7fb 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -205,7 +205,7 @@ def partition_mesh(mesh, part_per_element, part_nr): connected_mesh = part_mesh.copy() - from meshmode.mesh import InterPartitionAdjacency + from meshmode.mesh import InterPartitionAdjacencyGroup for igrp, adj in enumerate(adj_data): if adj: bdry = connected_mesh.facial_adjacency_groups[igrp][None] @@ -243,9 +243,9 @@ def partition_mesh(mesh, part_per_element, part_nr): adj_idx += 1 connected_mesh.facial_adjacency_groups[igrp][None] =\ - InterPartitionAdjacency(elems, faces, neighbors, - bdry.igroup, - n_parts, global_n_elems, n_faces) + InterPartitionAdjacencyGroup(elems, faces, neighbors, + bdry.igroup, + n_parts, global_n_elems, n_faces) return connected_mesh, queried_elems diff --git a/test/test_meshmode.py b/test/test_meshmode.py index cdad4758..68cd7e0d 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), - (3, [3, 4]) + #(3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -203,7 +203,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): [count_tags(new_meshes[i][0], BTAG_ALL) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" - from meshmode.mesh import BTAG_PARTITION, InterPartitionAdjacency + from meshmode.mesh import BTAG_PARTITION, InterPartitionAdjacencyGroup from meshmode.mesh.processing import find_group_indices num_tags = np.zeros((num_parts,)) @@ -213,7 +213,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): adj = part.facial_adjacency_groups[grp_num][None] tags = -part.facial_adjacency_groups[grp_num][None].neighbors assert np.all(tags >= 0) - if not isinstance(adj, 
InterPartitionAdjacency): + if not isinstance(adj, InterPartitionAdjacencyGroup): continue elem_base = part.groups[grp_num].element_nr_base for idx in range(len(adj.elements)): @@ -237,7 +237,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): assert (part_num == n_adj.neighbor_partitions[n_idx] and elem + elem_base == n_adj.global_neighbors[n_idx] and face == n_adj.neighbor_faces[n_idx]),\ - "InterPartitionAdj is not consistent" + "InterPartitionAdjacencyGroup is not consistent" _, n_part_to_global = new_meshes[n_part_num] p_meshwide_elem = part_to_global[elem + elem_base] p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] -- GitLab From c311c89804b1dceb1d31650e0806fb77ec00b846 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 9 Aug 2017 16:55:23 -0500 Subject: [PATCH 083/266] Remove Mesh.adjacency_list --- meshmode/mesh/__init__.py | 11 ----------- test/test_meshmode.py | 12 ++++++++---- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index cae5f058..7120f14b 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -876,17 +876,6 @@ class Mesh(Record): def __ne__(self, other): return not self.__eq__(other) - def adjacency_list(self): - """ - :returns: `adjacency[i]` is a list of all elements that are adjacent to - element `i`. Useful for `pymetis.part_graph`. - """ - adjacency_list = [] - for elem in range(self.nelements): - start, end = self.nodal_adjacency.neighbors_starts[elem:elem+2] - adjacency_list.append(self.nodal_adjacency.neighbors[start:end]) - return adjacency_list - # Design experience: Try not to add too many global data structures to the # mesh. Let the element groups be responsible for that at the mesh level. 
# diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 68cd7e0d..51fd056c 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), - #(3, [3, 4]) + (3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -88,7 +88,9 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, mesh = meshes[0] #from pymetis import part_graph - #(_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) + #_, p = part_graph(num_parts, + # xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), + # adjncy=mesh.nodal_adjacency.neighbors.tolist()) #part_per_element = np.array(p) part_per_element = np.random.randint(num_parts, size=mesh.nelements) @@ -158,7 +160,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, bdry_t_2 = src_conn(queue, bdry_s) err = la.norm((bdry_t - bdry_t_2).get(), np.inf) - eoc_rec[(i_tgt_part, i_src_part)].add_data_point(1./n, err) + eoc_rec[i_tgt_part, i_src_part].add_data_point(1./n, err) for (i, j), e in eoc_rec.items(): if e is not None: @@ -185,7 +187,9 @@ def test_partition_mesh(num_parts, num_meshes, dim): mesh = merge_disjoint_meshes(meshes) from pymetis import part_graph - (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) + _, p = part_graph(num_parts, + xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), + adjncy=mesh.nodal_adjacency.neighbors.tolist()) part_per_element = np.array(p) #part_per_element = np.random.randint(num_parts, size=mesh.nelements) -- GitLab From 01aaf70fc143408912961dcd44ed0c5850ed90af Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 9 Aug 2017 17:19:15 -0500 Subject: [PATCH 084/266] Improve InterPartitionAdjacency constructor --- meshmode/mesh/__init__.py | 14 +++++++------- test/test_meshmode.py | 6 +++--- 2 files changed, 10 
insertions(+), 10 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 7120f14b..347a5068 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -559,14 +559,14 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): neighbor_partitions, global_neighbors, neighbor_faces): - self.elements = elements - self.element_faces = element_faces - self.neighbors = neighbors - self.igroup = igroup - self.ineighbor_group = None + FacialAdjacencyGroup.__init__(self, elements=elements, + element_faces=element_faces, + neighbors=neighbors, + neighbor_faces=neighbor_faces, + igroup=igroup, + ineighbor_group=None) self.neighbor_partitions = neighbor_partitions self.global_neighbors = global_neighbors - self.neighbor_faces = neighbor_faces self._generate_index_lookup_table() def __eq__(self, other): @@ -577,7 +577,7 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): def _generate_index_lookup_table(self): self.index_lookup_table = dict() for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): - self.index_lookup_table[(elem, face)] = idx + self.index_lookup_table[elem, face] = idx # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 51fd056c..df34b8fc 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -233,11 +233,11 @@ def test_partition_mesh(num_parts, num_meshes, dim): # Hack: find_igrps expects a numpy.ndarray and returns # a numpy.ndarray. But if a single integer is fed # into find_igrps, an integer is returned. 
- n_grp_num = find_group_indices(n_part.groups, n_meshwide_elem) - n_adj = n_part.facial_adjacency_groups[int(n_grp_num)][None] + n_grp_num = int(find_group_indices(n_part.groups, n_meshwide_elem)) + n_adj = n_part.facial_adjacency_groups[n_grp_num][None] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base - n_idx = n_adj.index_lookup_table[(n_elem, n_face)] + n_idx = n_adj.index_lookup_table[n_elem, n_face] assert (part_num == n_adj.neighbor_partitions[n_idx] and elem + elem_base == n_adj.global_neighbors[n_idx] and face == n_adj.neighbor_faces[n_idx]),\ -- GitLab From 0ea3145308fc47586f120a47d25ba2078f9d2d7c Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 9 Aug 2017 19:57:11 -0500 Subject: [PATCH 085/266] Remove index_lookup_table from InterPartitionAdjacencyGroup --- .../connection/opposite_face.py | 10 ++-- meshmode/mesh/__init__.py | 11 ---- test/test_meshmode.py | 59 +++++++++++-------- 3 files changed, 40 insertions(+), 40 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index d96cd5e8..1d7efb63 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -407,7 +407,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def make_partition_connection(src_to_tgt_conn, i_src_part, +def make_partition_connection(src_bdry_discr, i_src_part, tgt_bdry, tgt_adj_groups, tgt_batches): """ Connects ``src_to_tgt_conn`` to a neighboring partition. 
@@ -432,12 +432,12 @@ def make_partition_connection(src_to_tgt_conn, i_src_part, from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - src_bdry = src_to_tgt_conn.to_discr - src_groups = src_to_tgt_conn.from_discr.mesh.groups + src_bdry = src_bdry_discr.to_discr + src_groups = src_bdry_discr.from_discr.mesh.groups part_batches = [[] for _ in tgt_adj_groups] - with cl.CommandQueue(src_to_tgt_conn.cl_context) as queue: + with cl.CommandQueue(src_bdry_discr.cl_context) as queue: for i_tgt_grp, adj in enumerate(tgt_adj_groups): indices = (i_src_part == adj.neighbor_partitions) @@ -454,7 +454,7 @@ def make_partition_connection(src_to_tgt_conn, i_src_part, elem_base = src_groups[i_src_grp].element_nr_base src_el_lookup =\ - _make_bdry_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) + _make_bdry_el_lookup_table(queue, src_bdry_discr, i_src_grp) for i_tgt_face in i_tgt_faces: diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 347a5068..f07f9431 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -544,11 +544,6 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): If ``neighbor_partitions[i]`` is negative, ``elements[i]`` is on a true boundary and is not connected to any other :class:``Mesh``. - .. attribute:: index_lookup_table - - A dictionary that maps the tuple ``(element, face)`` to an index ``i`` such - that ``elements[i] == element and element_faces[i] == face``. - .. 
versionadded:: 2017.1 """ @@ -567,18 +562,12 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): ineighbor_group=None) self.neighbor_partitions = neighbor_partitions self.global_neighbors = global_neighbors - self._generate_index_lookup_table() def __eq__(self, other): return (super.__eq__(self, other) and np.array_equal(self.global_neighbors, other.global_neighbors) and np.array_equal(self.neighbor_partitions, other.neighbor_partitions)) - def _generate_index_lookup_table(self): - self.index_lookup_table = dict() - for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): - self.index_lookup_table[elem, face] = idx - # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index df34b8fc..76fa5b1f 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), - (3, [3, 4]) + #(3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -71,7 +71,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for j in range(num_parts): if i == j: continue - eoc_rec[(i, j)] = EOCRecorder() + eoc_rec[i, j] = EOCRecorder() def f(x): return 0.1*cl.clmath.sin(30*x) @@ -110,47 +110,47 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for i_tgt_part in range(num_parts): for i_src_part in range(num_parts): if (i_tgt_part == i_src_part - or eoc_rec[(i_tgt_part, i_src_part)] is None): - eoc_rec[(i_tgt_part, i_src_part)] = None + or eoc_rec[i_tgt_part, i_src_part] is None): + eoc_rec[i_tgt_part, i_src_part] = None continue # Mark faces within tgt_mesh that are connected to src_mesh - tgt_to_src_conn = make_face_restriction(vol_discrs[i_tgt_part], - group_factory(order), - BTAG_PARTITION(i_src_part)) + tgt_bdry_discr = make_face_restriction(vol_discrs[i_tgt_part], + 
group_factory(order), + BTAG_PARTITION(i_src_part)) # If these parts are not connected, don't bother checking the error - bdry_nodes = tgt_to_src_conn.to_discr.nodes() + bdry_nodes = tgt_bdry_discr.to_discr.nodes() if bdry_nodes.size == 0: - eoc_rec[(i_tgt_part, i_src_part)] = None + eoc_rec[i_tgt_part, i_src_part] = None continue # Mark faces within src_mesh that are connected to tgt_mesh - src_to_tgt_conn = make_face_restriction(vol_discrs[i_src_part], - group_factory(order), - BTAG_PARTITION(i_tgt_part)) + src_bdry_discr = make_face_restriction(vol_discrs[i_src_part], + group_factory(order), + BTAG_PARTITION(i_tgt_part)) # Gather just enough information for the connection - tgt_bdry = tgt_to_src_conn.to_discr - tgt_mesh = tgt_to_src_conn.from_discr.mesh + tgt_bdry = tgt_bdry_discr.to_discr + tgt_mesh = tgt_bdry_discr.from_discr.mesh tgt_adj_groups = [tgt_mesh.facial_adjacency_groups[i][None] for i in range(len(tgt_mesh.groups))] - tgt_batches = [tgt_to_src_conn.groups[i].batches + tgt_batches = [tgt_bdry_discr.groups[i].batches for i in range(len(tgt_mesh.groups))] - src_bdry = src_to_tgt_conn.to_discr - src_mesh = src_to_tgt_conn.from_discr.mesh + src_bdry = src_bdry_discr.to_discr + src_mesh = src_bdry_discr.from_discr.mesh src_adj_groups = [src_mesh.facial_adjacency_groups[i][None] for i in range(len(src_mesh.groups))] - src_batches = [src_to_tgt_conn.groups[i].batches + src_batches = [src_bdry_discr.groups[i].batches for i in range(len(src_mesh.groups))] - # Connect tgt_mesh to src_mesh - tgt_conn = make_partition_connection(src_to_tgt_conn, i_src_part, + # Connect src_mesh to tgt_mesh + src_conn = make_partition_connection(src_bdry_discr, i_src_part, tgt_bdry, tgt_adj_groups, tgt_batches) - # Connect src_mesh to tgt_mesh - src_conn = make_partition_connection(tgt_to_src_conn, i_tgt_part, + # Connect tgt_mesh to src_mesh + tgt_conn = make_partition_connection(tgt_bdry_discr, i_tgt_part, src_bdry, src_adj_groups, src_batches) check_connection(tgt_conn) 
check_connection(src_conn) @@ -166,7 +166,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, if e is not None: print("Error of connection from part %i to part %i." % (i, j)) print(e) - assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-12) + assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-14) # }}} @@ -211,6 +211,15 @@ def test_partition_mesh(num_parts, num_meshes, dim): from meshmode.mesh.processing import find_group_indices num_tags = np.zeros((num_parts,)) + index_lookup_table = dict() + for ipart, (m, _) in enumerate(new_meshes): + for igrp in range(len(m.groups)): + adj = m.facial_adjacency_groups[igrp][None] + if not isinstance(adj, InterPartitionAdjacencyGroup): + continue + for i, (elem, face) in enumerate(zip(adj.elements, adj.element_faces)): + index_lookup_table[ipart, igrp, elem, face] = i + for part_num in range(num_parts): part, part_to_global = new_meshes[part_num] for grp_num in range(len(part.groups)): @@ -218,6 +227,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): tags = -part.facial_adjacency_groups[grp_num][None].neighbors assert np.all(tags >= 0) if not isinstance(adj, InterPartitionAdjacencyGroup): + # This group is not connected to another partition. 
continue elem_base = part.groups[grp_num].element_nr_base for idx in range(len(adj.elements)): @@ -237,7 +247,8 @@ def test_partition_mesh(num_parts, num_meshes, dim): n_adj = n_part.facial_adjacency_groups[n_grp_num][None] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base - n_idx = n_adj.index_lookup_table[n_elem, n_face] + #n_idx = n_adj.index_lookup_table[n_elem, n_face] + n_idx = index_lookup_table[n_part_num, n_grp_num, n_elem, n_face] assert (part_num == n_adj.neighbor_partitions[n_idx] and elem + elem_base == n_adj.global_neighbors[n_idx] and face == n_adj.neighbor_faces[n_idx]),\ -- GitLab From cecca60af5cab1569213f11dc9019d7f0ae0ebf2 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 9 Aug 2017 20:03:28 -0500 Subject: [PATCH 086/266] Small changes --- test/test_meshmode.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 76fa5b1f..17127703 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), - #(3, [3, 4]) + (3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -216,6 +216,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): for igrp in range(len(m.groups)): adj = m.facial_adjacency_groups[igrp][None] if not isinstance(adj, InterPartitionAdjacencyGroup): + # This group is not connected to another partition. 
continue for i, (elem, face) in enumerate(zip(adj.elements, adj.element_faces)): index_lookup_table[ipart, igrp, elem, face] = i @@ -247,7 +248,6 @@ def test_partition_mesh(num_parts, num_meshes, dim): n_adj = n_part.facial_adjacency_groups[n_grp_num][None] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base - #n_idx = n_adj.index_lookup_table[n_elem, n_face] n_idx = index_lookup_table[n_part_num, n_grp_num, n_elem, n_face] assert (part_num == n_adj.neighbor_partitions[n_idx] and elem + elem_base == n_adj.global_neighbors[n_idx] -- GitLab From 8a05ec99f8ee7a024fb07b8e8e3e28479e50cac0 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 9 Aug 2017 21:06:14 -0500 Subject: [PATCH 087/266] Small fix --- test/test_meshmode.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 17127703..f086dc41 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -74,7 +74,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, eoc_rec[i, j] = EOCRecorder() def f(x): - return 0.1*cl.clmath.sin(30*x) + return 0.5*cl.clmath.sin(30*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -166,7 +166,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, if e is not None: print("Error of connection from part %i to part %i." 
% (i, j)) print(e) - assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-14) + assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-13) # }}} -- GitLab From b37fe06fa7be66584c9356112d25f2c0f57031ce Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 10 Aug 2017 21:04:52 -0500 Subject: [PATCH 088/266] Clean up names --- .../connection/opposite_face.py | 86 ++++++------ test/test_meshmode.py | 132 ++++++++++-------- 2 files changed, 117 insertions(+), 101 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 1d7efb63..181e059e 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -407,21 +407,22 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def make_partition_connection(src_bdry_discr, i_src_part, - tgt_bdry, tgt_adj_groups, tgt_batches): +def make_partition_connection(local_bdry_conn, i_local_part, + remote_bdry, remote_adj_groups, remote_batches): """ - Connects ``src_to_tgt_conn`` to a neighboring partition. + Connects ``local_bdry_conn`` to a neighboring partition. - :arg src_to_tgt_conn: A :class:`Discretization` of the source partition. - :arg i_src_part: The partition number of the src partition. - :arg tgt_adj_groups: A list of :class:`InterPartitionAdjacency`` of the target + :arg local_bdry_conn: A :class:`DirectDiscretizationConnection` of the local partition. - :arg tgt_bdry: A :class:`Discretization` of the boundary of the - target partition. - :arg tgt_batches: A list of batches of the target partition. + :arg i_local_part: The partition number of the local partition. + :arg remote_adj_groups: A list of :class:`InterPartitionAdjacency`` of the + remote partition. + :arg remote_bdry: A :class:`Discretization` of the boundary of the + remote partition. + :arg remote_batches: A list of batches of the remote partition. 
:returns: A :class:`DirectDiscretizationConnection` that performs data - exchange across faces from partition `i_src_part` to the target partition. + exchange across faces from partition `i_local_part` to the remote partition. .. versionadded:: 2017.1 @@ -432,56 +433,59 @@ def make_partition_connection(src_bdry_discr, i_src_part, from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - src_bdry = src_bdry_discr.to_discr - src_groups = src_bdry_discr.from_discr.mesh.groups + local_bdry = local_bdry_conn.to_discr + local_groups = local_bdry_conn.from_discr.mesh.groups - part_batches = [[] for _ in tgt_adj_groups] + part_batches = [[] for _ in remote_adj_groups] - with cl.CommandQueue(src_bdry_discr.cl_context) as queue: + with cl.CommandQueue(local_bdry_conn.cl_context) as queue: - for i_tgt_grp, adj in enumerate(tgt_adj_groups): - indices = (i_src_part == adj.neighbor_partitions) + for i_remote_grp, adj in enumerate(remote_adj_groups): + indices = (i_local_part == adj.neighbor_partitions) if not np.any(indices): - # Skip because i_tgt_grp is not connected to i_src_part. + # Skip because i_remote_grp is not connected to i_local_part. 
continue - i_tgt_faces = adj.element_faces[indices] - i_src_meshwide_elems = adj.global_neighbors[indices] - i_src_faces = adj.neighbor_faces[indices] + i_remote_faces = adj.element_faces[indices] + i_local_meshwide_elems = adj.global_neighbors[indices] + i_local_faces = adj.neighbor_faces[indices] - i_src_grps = find_group_indices(src_groups, i_src_meshwide_elems) + i_local_grps = find_group_indices(local_groups, i_local_meshwide_elems) - for i_src_grp in np.unique(i_src_grps): + for i_local_grp in np.unique(i_local_grps): - elem_base = src_groups[i_src_grp].element_nr_base - src_el_lookup =\ - _make_bdry_el_lookup_table(queue, src_bdry_discr, i_src_grp) + elem_base = local_groups[i_local_grp].element_nr_base + local_el_lookup = _make_bdry_el_lookup_table(queue, + local_bdry_conn, + i_local_grp) - for i_tgt_face in i_tgt_faces: + for i_remote_face in i_remote_faces: - index_flags = np.logical_and(i_src_grps == i_src_grp, - i_tgt_faces == i_tgt_face) + index_flags = np.logical_and(i_local_grps == i_local_grp, + i_remote_faces == i_remote_face) if not np.any(index_flags): continue - vbc_tgt_grp_face_batch = _find_ibatch_for_face( - tgt_batches[i_tgt_grp], i_tgt_face) + vbc_remote_grp_face_batch = _find_ibatch_for_face( + remote_batches[i_remote_grp], i_remote_face) - tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ + remote_bdry_element_indices = vbc_remote_grp_face_batch.\ to_element_indices.get(queue=queue) - elems = i_src_meshwide_elems[index_flags] - elem_base - faces = i_src_faces[index_flags] - src_bdry_element_indices = src_el_lookup[elems, faces] + elems = i_local_meshwide_elems[index_flags] - elem_base + faces = i_local_faces[index_flags] + local_bdry_element_indices = local_el_lookup[elems, faces] + + batches = _make_cross_face_batches(queue, + remote_bdry, local_bdry, + i_remote_grp, i_local_grp, + remote_bdry_element_indices, + local_bdry_element_indices) - part_batches[i_tgt_grp].extend(_make_cross_face_batches(queue, - tgt_bdry, src_bdry, - 
i_tgt_grp, i_src_grp, - tgt_bdry_element_indices, - src_bdry_element_indices)) + part_batches[i_remote_grp].extend(batches) return DirectDiscretizationConnection( - from_discr=src_bdry, - to_discr=tgt_bdry, + from_discr=local_bdry, + to_discr=remote_bdry, groups=[DiscretizationConnectionElementGroup(batches=batches) for batches in part_batches], is_surjective=True) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index f086dc41..cdc8c9e7 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -59,7 +59,7 @@ logger = logging.getLogger(__name__) (3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, - num_parts, num_groups): + num_parts, num_groups, scramble_partitions=True): np.random.seed(42) cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) @@ -74,7 +74,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, eoc_rec[i, j] = EOCRecorder() def f(x): - return 0.5*cl.clmath.sin(30*x) + return 0.5*cl.clmath.sin(30.*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -87,12 +87,14 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, else: mesh = meshes[0] - #from pymetis import part_graph - #_, p = part_graph(num_parts, - # xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), - # adjncy=mesh.nodal_adjacency.neighbors.tolist()) - #part_per_element = np.array(p) - part_per_element = np.random.randint(num_parts, size=mesh.nelements) + if scramble_partitions: + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + else: + from pymetis import part_graph + _, p = part_graph(num_parts, + xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), + adjncy=mesh.nodal_adjacency.neighbors.tolist()) + part_per_element = np.array(p) from meshmode.mesh.processing import partition_mesh part_meshes = [ @@ -107,60 +109,68 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, make_partition_connection, 
check_connection) - for i_tgt_part in range(num_parts): - for i_src_part in range(num_parts): - if (i_tgt_part == i_src_part - or eoc_rec[i_tgt_part, i_src_part] is None): - eoc_rec[i_tgt_part, i_src_part] = None + for i_local_part in range(num_parts): + for i_remote_part in range(num_parts): + if (i_local_part == i_remote_part + or eoc_rec[i_local_part, i_remote_part] is None): + eoc_rec[i_local_part, i_remote_part] = None continue - # Mark faces within tgt_mesh that are connected to src_mesh - tgt_bdry_discr = make_face_restriction(vol_discrs[i_tgt_part], - group_factory(order), - BTAG_PARTITION(i_src_part)) + # Mark faces within local_mesh that are connected to remote_mesh + local_bdry_conn = make_face_restriction(vol_discrs[i_local_part], + group_factory(order), + BTAG_PARTITION(i_remote_part)) # If these parts are not connected, don't bother checking the error - bdry_nodes = tgt_bdry_discr.to_discr.nodes() + bdry_nodes = local_bdry_conn.to_discr.nodes() if bdry_nodes.size == 0: - eoc_rec[i_tgt_part, i_src_part] = None + eoc_rec[i_local_part, i_remote_part] = None continue - # Mark faces within src_mesh that are connected to tgt_mesh - src_bdry_discr = make_face_restriction(vol_discrs[i_src_part], - group_factory(order), - BTAG_PARTITION(i_tgt_part)) + # Mark faces within remote_mesh that are connected to local_mesh + remote_bdry_conn = make_face_restriction(vol_discrs[i_remote_part], + group_factory(order), + BTAG_PARTITION(i_local_part)) # Gather just enough information for the connection - tgt_bdry = tgt_bdry_discr.to_discr - tgt_mesh = tgt_bdry_discr.from_discr.mesh - tgt_adj_groups = [tgt_mesh.facial_adjacency_groups[i][None] - for i in range(len(tgt_mesh.groups))] - tgt_batches = [tgt_bdry_discr.groups[i].batches - for i in range(len(tgt_mesh.groups))] - - src_bdry = src_bdry_discr.to_discr - src_mesh = src_bdry_discr.from_discr.mesh - src_adj_groups = [src_mesh.facial_adjacency_groups[i][None] - for i in range(len(src_mesh.groups))] - src_batches = 
[src_bdry_discr.groups[i].batches - for i in range(len(src_mesh.groups))] - - # Connect src_mesh to tgt_mesh - src_conn = make_partition_connection(src_bdry_discr, i_src_part, - tgt_bdry, tgt_adj_groups, tgt_batches) - - # Connect tgt_mesh to src_mesh - tgt_conn = make_partition_connection(tgt_bdry_discr, i_tgt_part, - src_bdry, src_adj_groups, src_batches) - check_connection(tgt_conn) - check_connection(src_conn) - - bdry_t = f(tgt_conn.to_discr.nodes()[0].with_queue(queue)) - bdry_s = tgt_conn(queue, bdry_t) - bdry_t_2 = src_conn(queue, bdry_s) - - err = la.norm((bdry_t - bdry_t_2).get(), np.inf) - eoc_rec[i_tgt_part, i_src_part].add_data_point(1./n, err) + local_bdry = local_bdry_conn.to_discr + local_mesh = local_bdry_conn.from_discr.mesh + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conn.groups[i].batches + for i in range(len(local_mesh.groups))] + + remote_bdry = remote_bdry_conn.to_discr + remote_mesh = remote_bdry_conn.from_discr.mesh + remote_adj_groups = [remote_mesh.facial_adjacency_groups[i][None] + for i in range(len(remote_mesh.groups))] + remote_batches = [remote_bdry_conn.groups[i].batches + for i in range(len(remote_mesh.groups))] + + # Connect local_mesh to remote_mesh + local_part_conn = make_partition_connection(local_bdry_conn, + i_local_part, + remote_bdry, + remote_adj_groups, + remote_batches) + + # Connect remote mesh to local mesh + remote_part_conn = make_partition_connection(remote_bdry_conn, + i_remote_part, + local_bdry, + local_adj_groups, + local_batches) + + check_connection(local_part_conn) + check_connection(remote_part_conn) + + true_local_points = f(local_part_conn.to_discr.nodes()[0] + .with_queue(queue)) + remote_points = local_part_conn(queue, true_local_points) + local_points = remote_part_conn(queue, remote_points) + + err = la.norm((true_local_points - local_points).get(), np.inf) + eoc_rec[i_local_part, 
i_remote_part].add_data_point(1./n, err) for (i, j), e in eoc_rec.items(): if e is not None: @@ -176,7 +186,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, @pytest.mark.parametrize("dim", [2, 3]) @pytest.mark.parametrize("num_parts", [4, 5, 7]) @pytest.mark.parametrize("num_meshes", [1, 2, 7]) -def test_partition_mesh(num_parts, num_meshes, dim): +def test_partition_mesh(num_parts, num_meshes, dim, scramble_partitions=False): np.random.seed(42) n = (5,) * dim from meshmode.mesh.generation import generate_regular_rect_mesh @@ -186,12 +196,14 @@ def test_partition_mesh(num_parts, num_meshes, dim): from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes(meshes) - from pymetis import part_graph - _, p = part_graph(num_parts, - xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), - adjncy=mesh.nodal_adjacency.neighbors.tolist()) - part_per_element = np.array(p) - #part_per_element = np.random.randint(num_parts, size=mesh.nelements) + if scramble_partitions: + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + else: + from pymetis import part_graph + _, p = part_graph(num_parts, + xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), + adjncy=mesh.nodal_adjacency.neighbors.tolist()) + part_per_element = np.array(p) from meshmode.mesh.processing import partition_mesh # TODO: The same part_per_element array must be used to partition each mesh. 
-- GitLab From 9dd3cdaa2a920c68fe3f4494ef417ee43194b4c6 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 22 Aug 2017 18:31:17 -0500 Subject: [PATCH 089/266] Add to system tags --- meshmode/mesh/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index f07f9431..b9f8e9fe 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -115,7 +115,8 @@ class BTAG_PARTITION(object): # noqa return not self.__eq__(other) -SYSTEM_TAGS = set([BTAG_NONE, BTAG_ALL, BTAG_REALLY_ALL, BTAG_NO_BOUNDARY]) +SYSTEM_TAGS = set([BTAG_NONE, BTAG_ALL, BTAG_REALLY_ALL, BTAG_NO_BOUNDARY, + BTAG_PARTITION]) # }}} -- GitLab From 2b4b414a8328259473b9431e3672083b476bd4fd Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 22 Aug 2017 19:03:09 -0500 Subject: [PATCH 090/266] Relax error check --- test/test_meshmode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index cdc8c9e7..a3b1ca31 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -176,7 +176,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, if e is not None: print("Error of connection from part %i to part %i." 
% (i, j)) print(e) - assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-13) + assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-12) # }}} -- GitLab From 4e853a73c519d6c92c1ee98e60d3b7949073ce8f Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 22 Aug 2017 20:08:02 -0500 Subject: [PATCH 091/266] Add MPI test file --- testmpi.py | 107 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 testmpi.py diff --git a/testmpi.py b/testmpi.py new file mode 100644 index 00000000..fbc13883 --- /dev/null +++ b/testmpi.py @@ -0,0 +1,107 @@ +from mpi4py import MPI +import numpy as np +import pyopencl + +comm = MPI.COMM_WORLD +rank = comm.Get_rank() + +num_parts = 3 +if rank == 0: + np.random.seed(42) + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + + from meshmode.mesh.processing import partition_mesh + parts = [partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] + + reqs = [] + for r in range(num_parts): + reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) + print('Sent all mesh parts.') + for req in reqs: + req.wait() + +elif (rank - 1) in range(num_parts): + mesh = comm.recv(source=0, tag=1) + print('Recieved mesh') + + cl_ctx = pyopencl.create_some_context() + + from meshmode.discretization.poly_element\ + import PolynomialWarpAndBlendGroupFactory + group_factory = PolynomialWarpAndBlendGroupFactory(4) + + from meshmode.discretization import Discretization + vol_discr = Discretization(cl_ctx, mesh, group_factory) + + send_reqs = [] + i_local_part = rank - 1 + local_bdry_conns = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + # Mark faces within local_mesh that are connected 
to remote_mesh + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION + local_bdry_conns[i_remote_part] =\ + make_face_restriction(vol_discr, group_factory, + BTAG_PARTITION(i_remote_part)) + + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() + if bdry_nodes.size == 0: + # local_mesh is not connected to remote_mesh, send None + send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) + continue + + # Gather information to send to other ranks + local_bdry = local_bdry_conns[i_remote_part].to_discr + local_mesh = local_bdry_conns[i_remote_part].from_discr.mesh + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_data = {'bdry': local_bdry, + 'adj': local_adj_groups, + 'batches': local_batches} + send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) + + recv_reqs = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + recv_reqs[i_remote_part] = comm.irecv(source=i_remote_part+1, tag=2) + + remote_data = {} + for i_part, req in recv_reqs.items(): + remote_data[i_part] = req.wait() + for req in send_reqs: + req.wait() + + + connection = {} + for i_remote_part, data in remote_data.items(): + if data is None: + # Local mesh is not connected to remote mesh + continue + remote_bdry = data['bdry'] + remote_adj_groups =data['adj'] + remote_batches = data['batches'] + # Connect local_mesh to remote_mesh + from meshmode.discretization.connection import make_partition_connection + connection[i_remote_part] =\ + make_partition_connection(local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_batches) + from meshmode.discretization.connection import 
check_connection + check_connection(connection[i_remote_part]) + -- GitLab From 2b94106e1e8ac8d2e88d4431f540edffb79f8124 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 23 Aug 2017 10:08:54 -0500 Subject: [PATCH 092/266] Reduce arguments to `make_partition_connection`. --- .../connection/opposite_face.py | 27 +++++++++++-------- test/test_meshmode.py | 18 +++++++++++-- testmpi.py | 23 +++++++++++----- 3 files changed, 49 insertions(+), 19 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 181e059e..cccae33c 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -408,7 +408,8 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection def make_partition_connection(local_bdry_conn, i_local_part, - remote_bdry, remote_adj_groups, remote_batches): + remote_bdry, remote_adj_groups, + remote_to_elem_faces, remote_to_elem_indices): """ Connects ``local_bdry_conn`` to a neighboring partition. @@ -419,7 +420,11 @@ def make_partition_connection(local_bdry_conn, i_local_part, remote partition. :arg remote_bdry: A :class:`Discretization` of the boundary of the remote partition. - :arg remote_batches: A list of batches of the remote partition. + :arg remote_to_elem_faces: `remote_to_elem_faces[igrp][idx]` gives the face + that batch `idx` interpolates from group `igrp`. + :arg remote_to_elem_indices: `remote_to_elem_indices[igrp][idx]` gives a + :class:`np.array` of element indices that batch `idx` interpolates from + group `igrp`. :returns: A :class:`DirectDiscretizationConnection` that performs data exchange across faces from partition `i_local_part` to the remote partition. 
@@ -465,29 +470,29 @@ def make_partition_connection(local_bdry_conn, i_local_part, if not np.any(index_flags): continue - vbc_remote_grp_face_batch = _find_ibatch_for_face( - remote_batches[i_remote_grp], i_remote_face) + batch_idx = np.where(remote_to_elem_faces[i_remote_grp] + == i_remote_face)[0] - remote_bdry_element_indices = vbc_remote_grp_face_batch.\ - to_element_indices.get(queue=queue) + remote_bdry_indices =\ + remote_to_elem_indices[i_remote_grp][batch_idx] elems = i_local_meshwide_elems[index_flags] - elem_base faces = i_local_faces[index_flags] - local_bdry_element_indices = local_el_lookup[elems, faces] + local_bdry_indices = local_el_lookup[elems, faces] batches = _make_cross_face_batches(queue, remote_bdry, local_bdry, i_remote_grp, i_local_grp, - remote_bdry_element_indices, - local_bdry_element_indices) + remote_bdry_indices, + local_bdry_indices) part_batches[i_remote_grp].extend(batches) return DirectDiscretizationConnection( from_discr=local_bdry, to_discr=remote_bdry, - groups=[DiscretizationConnectionElementGroup(batches=batches) - for batches in part_batches], + groups=[DiscretizationConnectionElementGroup(batches=grp_batches) + for grp_batches in part_batches], is_surjective=True) # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index a3b1ca31..5b5d4537 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -139,6 +139,12 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for i in range(len(local_mesh.groups))] local_batches = [local_bdry_conn.groups[i].batches for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face + for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] remote_bdry = remote_bdry_conn.to_discr remote_mesh = remote_bdry_conn.from_discr.mesh @@ -146,20 +152,28 @@ def 
test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for i in range(len(remote_mesh.groups))] remote_batches = [remote_bdry_conn.groups[i].batches for i in range(len(remote_mesh.groups))] + remote_to_elem_faces = [[batch.to_element_face + for batch in grp_batches] + for grp_batches in remote_batches] + remote_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in remote_batches] # Connect local_mesh to remote_mesh local_part_conn = make_partition_connection(local_bdry_conn, i_local_part, remote_bdry, remote_adj_groups, - remote_batches) + remote_to_elem_faces, + remote_to_elem_indices) # Connect remote mesh to local mesh remote_part_conn = make_partition_connection(remote_bdry_conn, i_remote_part, local_bdry, local_adj_groups, - local_batches) + local_to_elem_faces, + local_to_elem_indices) check_connection(local_part_conn) check_connection(remote_part_conn) diff --git a/testmpi.py b/testmpi.py index fbc13883..bb3c1978 100644 --- a/testmpi.py +++ b/testmpi.py @@ -1,6 +1,6 @@ from mpi4py import MPI import numpy as np -import pyopencl +import pyopencl as cl comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -30,8 +30,9 @@ elif (rank - 1) in range(num_parts): mesh = comm.recv(source=0, tag=1) print('Recieved mesh') - cl_ctx = pyopencl.create_some_context() - + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + from meshmode.discretization.poly_element\ import PolynomialWarpAndBlendGroupFactory group_factory = PolynomialWarpAndBlendGroupFactory(4) @@ -68,9 +69,17 @@ elif (rank - 1) in range(num_parts): for i in range(len(local_mesh.groups))] local_batches = [local_bdry_conns[i_remote_part].groups[i].batches for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in 
local_batches] + + print(local_bdry.groups) local_data = {'bdry': local_bdry, 'adj': local_adj_groups, - 'batches': local_batches} + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) recv_reqs = {} @@ -93,7 +102,8 @@ elif (rank - 1) in range(num_parts): continue remote_bdry = data['bdry'] remote_adj_groups =data['adj'] - remote_batches = data['batches'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] # Connect local_mesh to remote_mesh from meshmode.discretization.connection import make_partition_connection connection[i_remote_part] =\ @@ -101,7 +111,8 @@ elif (rank - 1) in range(num_parts): i_local_part, remote_bdry, remote_adj_groups, - remote_batches) + remote_to_elem_faces, + remote_to_elem_indices) from meshmode.discretization.connection import check_connection check_connection(connection[i_remote_part]) -- GitLab From 1f740fd0e90dafcac636dc270855b8ebba398831 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 23 Aug 2017 11:59:47 -0500 Subject: [PATCH 093/266] working --- meshmode/mesh/__init__.py | 10 +++ test/test_meshmode.py | 11 +++- test/testmpi.py | 130 ++++++++++++++++++++++++++++++++++++++ testmpi.py | 118 ---------------------------------- 4 files changed, 149 insertions(+), 120 deletions(-) create mode 100644 test/testmpi.py delete mode 100644 testmpi.py diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index b9f8e9fe..f5147001 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -548,6 +548,16 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): .. versionadded:: 2017.1 """ + #FIXME + ''' + This is a weird error. When we try to pickle and unpickle a mesh, + neighbor_partitions does not exist anymore in + mesh.facial_adjacency_groups[i][None]. My guess was that pickle did not know + that property existed, so I created it. 
+ ''' + neighbor_partitions = None + global_neighbors = None + def __init__(self, elements, element_faces, neighbors, diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 5b5d4537..a49994d0 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -49,6 +49,13 @@ import logging logger = logging.getLogger(__name__) +@pytest.mark.parametrize("num_parts", [3]) +def test_interpartition_comm(num_parts): + from pytools.mpi import run_with_mpi_ranks + run_with_mpi_ranks("testmpi.py", num_parts + 1, interpartition_communication, + (num_parts,)) + + # {{{ partition_interpolation @pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) @@ -134,7 +141,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, # Gather just enough information for the connection local_bdry = local_bdry_conn.to_discr - local_mesh = local_bdry_conn.from_discr.mesh + local_mesh = part_meshes[i_local_part] local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] for i in range(len(local_mesh.groups))] local_batches = [local_bdry_conn.groups[i].batches @@ -147,7 +154,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for grp_batches in local_batches] remote_bdry = remote_bdry_conn.to_discr - remote_mesh = remote_bdry_conn.from_discr.mesh + remote_mesh = part_meshes[i_remote_mesh] remote_adj_groups = [remote_mesh.facial_adjacency_groups[i][None] for i in range(len(remote_mesh.groups))] remote_batches = [remote_bdry_conn.groups[i].batches diff --git a/test/testmpi.py b/test/testmpi.py new file mode 100644 index 00000000..5c142c10 --- /dev/null +++ b/test/testmpi.py @@ -0,0 +1,130 @@ +import numpy as np +import pyopencl as cl +import pytest + +def interpartition_communication(num_parts): + from mpi4py import MPI + comm = MPI.COMM_WORLD + rank = comm.Get_rank() + + if rank == 0: + np.random.seed(42) + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(3, 
order=4, n=5) for _ in range(2)] + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + + from meshmode.mesh.processing import partition_mesh + parts = [partition_mesh(mesh, part_per_element, i)[0] + for i in range(num_parts)] + + reqs = [] + for r in range(num_parts): + reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) + print('Sent all mesh partitions.') + for req in reqs: + req.wait() + + elif (rank - 1) in range(num_parts): + status = MPI.Status() + local_mesh = comm.recv(source=0, tag=1, status=status) + print('Recieved mesh (size = {0})'.format(status.count)) + + from meshmode.discretization.poly_element\ + import PolynomialWarpAndBlendGroupFactory + group_factory = PolynomialWarpAndBlendGroupFactory(4) + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + + from meshmode.discretization import Discretization + vol_discr = Discretization(cl_ctx, local_mesh, group_factory) + + send_reqs = [] + i_local_part = rank - 1 + local_bdry_conns = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + # Mark faces within local_mesh that are connected to remote_mesh + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION + local_bdry_conns[i_remote_part] =\ + make_face_restriction(vol_discr, group_factory, + BTAG_PARTITION(i_remote_part)) + + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() + if bdry_nodes.size == 0: + # local_mesh is not connected to remote_mesh, send None + send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) + continue + + # Gather information to send to other ranks + local_bdry = local_bdry_conns[i_remote_part].to_discr + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in 
range(len(local_mesh.groups))] + local_batches = [local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) + + recv_reqs = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + status = MPI.Status() + #TODO: Send size of data before to allocate a buffer. + recv_reqs[i_remote_part] = comm.irecv(buf=1000000, + source=i_remote_part+1, + tag=2) + remote_data = {} + for i_part, req in recv_reqs.items(): + remote_data[i_part] = req.wait(status=status) + print('Received remote data (size = {0})'.format(status.count)) + for req in send_reqs: + req.wait() + + connection = {} + for i_remote_part, data in remote_data.items(): + if data is None: + # Local mesh is not connected to remote mesh + continue + remote_bdry_mesh = data['bdry_mesh'] + remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) + remote_adj_groups = data['adj'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] + # Connect local_mesh to remote_mesh + from meshmode.discretization.connection import make_partition_connection + connection[i_remote_part] =\ + make_partition_connection(local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + from meshmode.discretization.connection import check_connection + check_connection(connection[i_remote_part]) + +if __name__ == "__main__": + import sys + from pytools.mpi import 
check_for_mpi_relaunch + check_for_mpi_relaunch(sys.argv) + + if len(sys.argv) > 1: + exec sys.argv[1] + diff --git a/testmpi.py b/testmpi.py deleted file mode 100644 index bb3c1978..00000000 --- a/testmpi.py +++ /dev/null @@ -1,118 +0,0 @@ -from mpi4py import MPI -import numpy as np -import pyopencl as cl - -comm = MPI.COMM_WORLD -rank = comm.Get_rank() - -num_parts = 3 -if rank == 0: - np.random.seed(42) - from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] - - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - - part_per_element = np.random.randint(num_parts, size=mesh.nelements) - - from meshmode.mesh.processing import partition_mesh - parts = [partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] - - reqs = [] - for r in range(num_parts): - reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) - print('Sent all mesh parts.') - for req in reqs: - req.wait() - -elif (rank - 1) in range(num_parts): - mesh = comm.recv(source=0, tag=1) - print('Recieved mesh') - - cl_ctx = cl.create_some_context() - queue = cl.CommandQueue(cl_ctx) - - from meshmode.discretization.poly_element\ - import PolynomialWarpAndBlendGroupFactory - group_factory = PolynomialWarpAndBlendGroupFactory(4) - - from meshmode.discretization import Discretization - vol_discr = Discretization(cl_ctx, mesh, group_factory) - - send_reqs = [] - i_local_part = rank - 1 - local_bdry_conns = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - # Mark faces within local_mesh that are connected to remote_mesh - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - local_bdry_conns[i_remote_part] =\ - make_face_restriction(vol_discr, group_factory, - BTAG_PARTITION(i_remote_part)) - - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - 
continue - bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() - if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh, send None - send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) - continue - - # Gather information to send to other ranks - local_bdry = local_bdry_conns[i_remote_part].to_discr - local_mesh = local_bdry_conns[i_remote_part].from_discr.mesh - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - print(local_bdry.groups) - local_data = {'bdry': local_bdry, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) - - recv_reqs = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - recv_reqs[i_remote_part] = comm.irecv(source=i_remote_part+1, tag=2) - - remote_data = {} - for i_part, req in recv_reqs.items(): - remote_data[i_part] = req.wait() - for req in send_reqs: - req.wait() - - - connection = {} - for i_remote_part, data in remote_data.items(): - if data is None: - # Local mesh is not connected to remote mesh - continue - remote_bdry = data['bdry'] - remote_adj_groups =data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - connection[i_remote_part] =\ - make_partition_connection(local_bdry_conns[i_remote_part], - i_local_part, - remote_bdry, - remote_adj_groups, - 
remote_to_elem_faces, - remote_to_elem_indices) - from meshmode.discretization.connection import check_connection - check_connection(connection[i_remote_part]) - -- GitLab From 584250fdc4c5bb35c97f035f574b589e2f0034dd Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 23 Aug 2017 12:15:49 -0500 Subject: [PATCH 094/266] Add MPI test --- test/test_meshmode.py | 7 --- test/testmpi.py | 130 ------------------------------------------ testmpi.py | 121 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 121 insertions(+), 137 deletions(-) delete mode 100644 test/testmpi.py create mode 100644 testmpi.py diff --git a/test/test_meshmode.py b/test/test_meshmode.py index a49994d0..07483f31 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -49,13 +49,6 @@ import logging logger = logging.getLogger(__name__) -@pytest.mark.parametrize("num_parts", [3]) -def test_interpartition_comm(num_parts): - from pytools.mpi import run_with_mpi_ranks - run_with_mpi_ranks("testmpi.py", num_parts + 1, interpartition_communication, - (num_parts,)) - - # {{{ partition_interpolation @pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) diff --git a/test/testmpi.py b/test/testmpi.py deleted file mode 100644 index 5c142c10..00000000 --- a/test/testmpi.py +++ /dev/null @@ -1,130 +0,0 @@ -import numpy as np -import pyopencl as cl -import pytest - -def interpartition_communication(num_parts): - from mpi4py import MPI - comm = MPI.COMM_WORLD - rank = comm.Get_rank() - - if rank == 0: - np.random.seed(42) - from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] - - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - - part_per_element = np.random.randint(num_parts, size=mesh.nelements) - - from meshmode.mesh.processing import partition_mesh - parts = [partition_mesh(mesh, part_per_element, i)[0] - for i in range(num_parts)] - - reqs = 
[] - for r in range(num_parts): - reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) - print('Sent all mesh partitions.') - for req in reqs: - req.wait() - - elif (rank - 1) in range(num_parts): - status = MPI.Status() - local_mesh = comm.recv(source=0, tag=1, status=status) - print('Recieved mesh (size = {0})'.format(status.count)) - - from meshmode.discretization.poly_element\ - import PolynomialWarpAndBlendGroupFactory - group_factory = PolynomialWarpAndBlendGroupFactory(4) - cl_ctx = cl.create_some_context() - queue = cl.CommandQueue(cl_ctx) - - from meshmode.discretization import Discretization - vol_discr = Discretization(cl_ctx, local_mesh, group_factory) - - send_reqs = [] - i_local_part = rank - 1 - local_bdry_conns = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - # Mark faces within local_mesh that are connected to remote_mesh - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - local_bdry_conns[i_remote_part] =\ - make_face_restriction(vol_discr, group_factory, - BTAG_PARTITION(i_remote_part)) - - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() - if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh, send None - send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) - continue - - # Gather information to send to other ranks - local_bdry = local_bdry_conns[i_remote_part].to_discr - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in 
local_batches] - - local_data = {'bdry_mesh': local_bdry.mesh, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) - - recv_reqs = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - status = MPI.Status() - #TODO: Send size of data before to allocate a buffer. - recv_reqs[i_remote_part] = comm.irecv(buf=1000000, - source=i_remote_part+1, - tag=2) - remote_data = {} - for i_part, req in recv_reqs.items(): - remote_data[i_part] = req.wait(status=status) - print('Received remote data (size = {0})'.format(status.count)) - for req in send_reqs: - req.wait() - - connection = {} - for i_remote_part, data in remote_data.items(): - if data is None: - # Local mesh is not connected to remote mesh - continue - remote_bdry_mesh = data['bdry_mesh'] - remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) - remote_adj_groups = data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - connection[i_remote_part] =\ - make_partition_connection(local_bdry_conns[i_remote_part], - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - from meshmode.discretization.connection import check_connection - check_connection(connection[i_remote_part]) - -if __name__ == "__main__": - import sys - from pytools.mpi import check_for_mpi_relaunch - check_for_mpi_relaunch(sys.argv) - - if len(sys.argv) > 1: - exec sys.argv[1] - diff --git a/testmpi.py b/testmpi.py new file mode 100644 index 00000000..62a7aaef --- /dev/null +++ b/testmpi.py @@ -0,0 +1,121 @@ +import numpy as np + +num_parts = 3 + +from mpi4py import MPI +comm = MPI.COMM_WORLD +rank = comm.Get_rank() + +if rank == 0: + np.random.seed(42) + 
from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + + from meshmode.mesh.processing import partition_mesh + parts = [partition_mesh(mesh, part_per_element, i)[0] + for i in range(num_parts)] + + reqs = [] + for r in range(num_parts): + reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) + print('Sent all mesh partitions.') + for req in reqs: + req.wait() + +elif (rank - 1) in range(num_parts): + status = MPI.Status() + local_mesh = comm.recv(source=0, tag=1, status=status) + print('Recieved mesh (size = {0})'.format(status.count)) + + from meshmode.discretization.poly_element\ + import PolynomialWarpAndBlendGroupFactory + group_factory = PolynomialWarpAndBlendGroupFactory(4) + import pyopencl as cl + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + + from meshmode.discretization import Discretization + vol_discr = Discretization(cl_ctx, local_mesh, group_factory) + + send_reqs = [] + i_local_part = rank - 1 + local_bdry_conns = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + # Mark faces within local_mesh that are connected to remote_mesh + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION + local_bdry_conns[i_remote_part] =\ + make_face_restriction(vol_discr, group_factory, + BTAG_PARTITION(i_remote_part)) + + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() + if bdry_nodes.size == 0: + # local_mesh is not connected to remote_mesh, send None + send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) + continue + + # Gather information to send to other ranks + local_bdry = 
local_bdry_conns[i_remote_part].to_discr + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) + + recv_reqs = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + status = MPI.Status() + #TODO: Send size of data before to allocate a buffer. + recv_reqs[i_remote_part] = comm.irecv(buf=1000000, + source=i_remote_part+1, + tag=2) + remote_data = {} + for i_part, req in recv_reqs.items(): + remote_data[i_part] = req.wait(status=status) + print('Received remote data (size = {0})'.format(status.count)) + for req in send_reqs: + req.wait() + + connection = {} + for i_remote_part, data in remote_data.items(): + if data is None: + # Local mesh is not connected to remote mesh + continue + remote_bdry_mesh = data['bdry_mesh'] + remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) + remote_adj_groups = data['adj'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] + # Connect local_mesh to remote_mesh + from meshmode.discretization.connection import make_partition_connection + connection[i_remote_part] =\ + make_partition_connection(local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + from meshmode.discretization.connection import check_connection + 
check_connection(connection[i_remote_part]) -- GitLab From af247d771e4b390a6339697c020e9e6e10c8d24a Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 23 Aug 2017 12:17:05 -0500 Subject: [PATCH 095/266] small fix --- test/test_meshmode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 07483f31..481fa4c7 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -147,7 +147,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for grp_batches in local_batches] remote_bdry = remote_bdry_conn.to_discr - remote_mesh = part_meshes[i_remote_mesh] + remote_mesh = part_meshes[i_remote_part] remote_adj_groups = [remote_mesh.facial_adjacency_groups[i][None] for i in range(len(remote_mesh.groups))] remote_batches = [remote_bdry_conn.groups[i].batches -- GitLab From dccd0e0863f86baaa0eae5bfbcc40990cd118c16 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 23 Aug 2017 14:21:15 -0500 Subject: [PATCH 096/266] Fix errors --- meshmode/discretization/connection/opposite_face.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index cccae33c..ab460da8 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -470,11 +470,11 @@ def make_partition_connection(local_bdry_conn, i_local_part, if not np.any(index_flags): continue - batch_idx = np.where(remote_to_elem_faces[i_remote_grp] - == i_remote_face)[0] - - remote_bdry_indices =\ - remote_to_elem_indices[i_remote_grp][batch_idx] + for idxs, to_face in zip(remote_to_elem_indices[i_remote_grp], + remote_to_elem_faces[i_remote_grp]): + if to_face == i_remote_face: + remote_bdry_indices = idxs + break elems = i_local_meshwide_elems[index_flags] - elem_base faces = i_local_faces[index_flags] -- GitLab From 
76d7d280c3d390538d082336deb876e9c5544d4b Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 24 Aug 2017 15:13:34 -0500 Subject: [PATCH 097/266] Make pytest for mpi communication --- test/test_meshmode.py | 17 ++- testmpi.py | 245 ++++++++++++++++++++++-------------------- 2 files changed, 141 insertions(+), 121 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 481fa4c7..54bdd3a7 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -49,12 +49,24 @@ import logging logger = logging.getLogger(__name__) +@pytest.mark.parametrize("num_partitions", [3, 6]) +def test_mpi_communication(num_partitions): + num_ranks = num_partitions + 1 + import subprocess, sys, os + newenv = os.environ.copy() + newenv["PYTOOLS_RUN_WITHIN_MPI"] = "1" + subprocess.check_call(["mpirun", "-np", str(num_ranks), + sys.executable, "testmpi.py", str(num_partitions)], + env=newenv) + + # {{{ partition_interpolation @pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) @pytest.mark.parametrize("num_parts", [2, 3]) @pytest.mark.parametrize("num_groups", [1, 2]) -@pytest.mark.parametrize(("dim", "mesh_pars"), [ +@pytest.mark.parametrize(("dim", "mesh_pars"), + [ (2, [3, 4, 7]), (3, [3, 4]) ]) @@ -178,8 +190,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, check_connection(local_part_conn) check_connection(remote_part_conn) - true_local_points = f(local_part_conn.to_discr.nodes()[0] - .with_queue(queue)) + true_local_points = f(local_bdry.nodes()[0].with_queue(queue)) remote_points = local_part_conn(queue, true_local_points) local_points = remote_part_conn(queue, remote_points) diff --git a/testmpi.py b/testmpi.py index 62a7aaef..e24ce730 100644 --- a/testmpi.py +++ b/testmpi.py @@ -1,121 +1,130 @@ import numpy as np -num_parts = 3 - -from mpi4py import MPI -comm = MPI.COMM_WORLD -rank = comm.Get_rank() - -if rank == 0: - np.random.seed(42) - from meshmode.mesh.generation import generate_warped_rect_mesh - 
meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] - - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - - part_per_element = np.random.randint(num_parts, size=mesh.nelements) - - from meshmode.mesh.processing import partition_mesh - parts = [partition_mesh(mesh, part_per_element, i)[0] - for i in range(num_parts)] - - reqs = [] - for r in range(num_parts): - reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) - print('Sent all mesh partitions.') - for req in reqs: - req.wait() - -elif (rank - 1) in range(num_parts): - status = MPI.Status() - local_mesh = comm.recv(source=0, tag=1, status=status) - print('Recieved mesh (size = {0})'.format(status.count)) - - from meshmode.discretization.poly_element\ - import PolynomialWarpAndBlendGroupFactory - group_factory = PolynomialWarpAndBlendGroupFactory(4) - import pyopencl as cl - cl_ctx = cl.create_some_context() - queue = cl.CommandQueue(cl_ctx) - - from meshmode.discretization import Discretization - vol_discr = Discretization(cl_ctx, local_mesh, group_factory) - - send_reqs = [] - i_local_part = rank - 1 - local_bdry_conns = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - # Mark faces within local_mesh that are connected to remote_mesh - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - local_bdry_conns[i_remote_part] =\ - make_face_restriction(vol_discr, group_factory, - BTAG_PARTITION(i_remote_part)) - - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() - if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh, send None - send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) - continue - - # Gather information to send to other ranks - local_bdry = local_bdry_conns[i_remote_part].to_discr - local_adj_groups = 
[local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - local_data = {'bdry_mesh': local_bdry.mesh, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) - - recv_reqs = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue +def mpi_comm(num_parts): + + from mpi4py import MPI + comm = MPI.COMM_WORLD + rank = comm.Get_rank() + + # This rank only partitions a mesh and sends them to their respective ranks. + if rank == 0: + np.random.seed(42) + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + + from meshmode.mesh.processing import partition_mesh + parts = [partition_mesh(mesh, part_per_element, i)[0] + for i in range(num_parts)] + + reqs = [] + for r in range(num_parts): + reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) + print('Rank 0: Sent all mesh partitions.') + for req in reqs: + req.wait() + + # These ranks recieve a mesh and comunicates boundary data to the other ranks. + elif (rank - 1) in range(num_parts): status = MPI.Status() - #TODO: Send size of data before to allocate a buffer. 
- recv_reqs[i_remote_part] = comm.irecv(buf=1000000, - source=i_remote_part+1, - tag=2) - remote_data = {} - for i_part, req in recv_reqs.items(): - remote_data[i_part] = req.wait(status=status) - print('Received remote data (size = {0})'.format(status.count)) - for req in send_reqs: - req.wait() - - connection = {} - for i_remote_part, data in remote_data.items(): - if data is None: - # Local mesh is not connected to remote mesh - continue - remote_bdry_mesh = data['bdry_mesh'] - remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) - remote_adj_groups = data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - connection[i_remote_part] =\ - make_partition_connection(local_bdry_conns[i_remote_part], - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - from meshmode.discretization.connection import check_connection - check_connection(connection[i_remote_part]) + local_mesh = comm.recv(source=0, tag=1, status=status) + print('Rank {0}: Recieved full mesh (size = {1})'.format(rank, status.count)) + + from meshmode.discretization.poly_element\ + import PolynomialWarpAndBlendGroupFactory + group_factory = PolynomialWarpAndBlendGroupFactory(4) + import pyopencl as cl + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + + from meshmode.discretization import Discretization + vol_discr = Discretization(cl_ctx, local_mesh, group_factory) + + i_local_part = rank - 1 + local_bdry_conns = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + # Mark faces within local_mesh that are connected to remote_mesh + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION + local_bdry_conns[i_remote_part] =\ + make_face_restriction(vol_discr, 
group_factory, + BTAG_PARTITION(i_remote_part)) + + # Send boundary data + send_reqs = [] + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() + if bdry_nodes.size == 0: + # local_mesh is not connected to remote_mesh, send None + send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) + continue + + # Gather information to send to other ranks + local_bdry = local_bdry_conns[i_remote_part].to_discr + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) + + # Receive boundary data + remote_data = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + remote_rank = i_remote_part + 1 + status = MPI.Status() + remote_data[i_remote_part] = comm.recv(source=remote_rank, + tag=2, + status=status) + print('Rank {0}: Received rank {1} data (size = {2})' + .format(rank, remote_rank, status.count)) + + for req in send_reqs: + req.wait() + + for i_remote_part, data in remote_data.items(): + if data is None: + # Local mesh is not connected to remote mesh + continue + remote_bdry_mesh = data['bdry_mesh'] + remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) + remote_adj_groups = data['adj'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] + # 
Connect local_mesh to remote_mesh + from meshmode.discretization.connection import make_partition_connection + connection = make_partition_connection(local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + from meshmode.discretization.connection import check_connection + check_connection(connection) + +if __name__ == "__main__": + import sys + + assert(len(sys.argv) == 2, 'Invalid number of arguments') + + num_parts = int(sys.argv[1]) + mpi_comm(num_parts) -- GitLab From 39c4c3eb05b76d036bd564bdacbdee053783007f Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 24 Aug 2017 15:15:16 -0500 Subject: [PATCH 098/266] Fix whitespace --- testmpi.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/testmpi.py b/testmpi.py index e24ce730..87a51914 100644 --- a/testmpi.py +++ b/testmpi.py @@ -1,5 +1,6 @@ import numpy as np + def mpi_comm(num_parts): from mpi4py import MPI @@ -121,10 +122,11 @@ def mpi_comm(num_parts): from meshmode.discretization.connection import check_connection check_connection(connection) + if __name__ == "__main__": import sys - assert(len(sys.argv) == 2, 'Invalid number of arguments') + assert len(sys.argv) == 2, 'Invalid number of arguments' num_parts = int(sys.argv[1]) mpi_comm(num_parts) -- GitLab From 65b3fb6643c1c0adaf40fd9dd66bca415cb4ccc0 Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 25 Aug 2017 10:02:56 -0500 Subject: [PATCH 099/266] Fix errors --- test/test_meshmode.py | 10 ++++++---- testmpi.py => test/testmpi.py | 1 + 2 files changed, 7 insertions(+), 4 deletions(-) rename testmpi.py => test/testmpi.py (98%) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 54bdd3a7..deda084c 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -52,12 +52,14 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_partitions", [3, 6]) def test_mpi_communication(num_partitions): num_ranks = num_partitions + 1 - import 
subprocess, sys, os + from subprocess import check_call + import sys + import os newenv = os.environ.copy() newenv["PYTOOLS_RUN_WITHIN_MPI"] = "1" - subprocess.check_call(["mpirun", "-np", str(num_ranks), - sys.executable, "testmpi.py", str(num_partitions)], - env=newenv) + check_call(["mpirun", "-np", str(num_ranks), + sys.executable, "test/testmpi.py", str(num_partitions)], + env=newenv) # {{{ partition_interpolation diff --git a/testmpi.py b/test/testmpi.py similarity index 98% rename from testmpi.py rename to test/testmpi.py index 87a51914..7f84e501 100644 --- a/testmpi.py +++ b/test/testmpi.py @@ -1,3 +1,4 @@ +from __future__ import division, absolute_import, print_function import numpy as np -- GitLab From 9f1b2497e54118cb4645bfa6a73e8648a023a0e9 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 5 Sep 2017 17:07:57 -0500 Subject: [PATCH 100/266] Fix testmpi error --- test/test_meshmode.py | 3 +-- test/testmpi.py | 10 +++------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index deda084c..42a1a1cc 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -57,8 +57,7 @@ def test_mpi_communication(num_partitions): import os newenv = os.environ.copy() newenv["PYTOOLS_RUN_WITHIN_MPI"] = "1" - check_call(["mpirun", "-np", str(num_ranks), - sys.executable, "test/testmpi.py", str(num_partitions)], + check_call(["mpirun", "-np", str(num_ranks), sys.executable, "testmpi.py"], env=newenv) diff --git a/test/testmpi.py b/test/testmpi.py index 7f84e501..511b1fe9 100644 --- a/test/testmpi.py +++ b/test/testmpi.py @@ -2,11 +2,12 @@ from __future__ import division, absolute_import, print_function import numpy as np -def mpi_comm(num_parts): +def mpi_comm(): from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() + num_parts = comm.Get_size() - 1 # This rank only partitions a mesh and sends them to their respective ranks. 
if rank == 0: @@ -125,9 +126,4 @@ def mpi_comm(num_parts): if __name__ == "__main__": - import sys - - assert len(sys.argv) == 2, 'Invalid number of arguments' - - num_parts = int(sys.argv[1]) - mpi_comm(num_parts) + mpi_comm() -- GitLab From b5df65b98c794d4a6a7c7396cbfd0ec9457ec3be Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 5 Sep 2017 17:37:34 -0500 Subject: [PATCH 101/266] Install mpi4py --- .gitlab-ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4668a2b0..1cfab6cb 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -2,7 +2,7 @@ Python 2.7 AMD CPU: script: - export PY_EXE=python2.7 - export PYOPENCL_TEST=amd:pu - - export EXTRA_INSTALL="numpy mako" + - export EXTRA_INSTALL="numpy mako mpi4py" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh - ". ./build-and-test-py-project.sh" tags: @@ -15,7 +15,7 @@ Python 2.7 POCL: script: - export PY_EXE=python2.7 - export PYOPENCL_TEST=portable - - export EXTRA_INSTALL="numpy mako" + - export EXTRA_INSTALL="numpy mako mpi4py" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh - ". ./build-and-test-py-project.sh" tags: @@ -28,7 +28,7 @@ Python 3.5 POCL: script: - export PY_EXE=python3.5 - export PYOPENCL_TEST=portable - - export EXTRA_INSTALL="numpy mako" + - export EXTRA_INSTALL="numpy mako mpi4py" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh - ". 
./build-and-test-py-project.sh" tags: -- GitLab From 435e7cd32a03a30f0a0198d6d02f60c2fb26936c Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 11 Sep 2017 10:18:06 -0500 Subject: [PATCH 102/266] Rename MPI test helper to avoid confusion with test entrypoints --- test/{testmpi.py => mpi_test_helper.py} | 0 test/test_meshmode.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename test/{testmpi.py => mpi_test_helper.py} (100%) diff --git a/test/testmpi.py b/test/mpi_test_helper.py similarity index 100% rename from test/testmpi.py rename to test/mpi_test_helper.py diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 42a1a1cc..5972a977 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -57,7 +57,7 @@ def test_mpi_communication(num_partitions): import os newenv = os.environ.copy() newenv["PYTOOLS_RUN_WITHIN_MPI"] = "1" - check_call(["mpirun", "-np", str(num_ranks), sys.executable, "testmpi.py"], + check_call(["mpirun", "-np", str(num_ranks), sys.executable, "mpi_test_helper.py"], env=newenv) -- GitLab From c5afcaff1a1b8347d29ddec5371d386251d54aab Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 11 Sep 2017 10:20:40 -0500 Subject: [PATCH 103/266] Add 'mpi' tag to CI jobs to make sure MPI is available where tests run --- .gitlab-ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1cfab6cb..03d40314 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -8,6 +8,7 @@ Python 2.7 AMD CPU: tags: - python2.7 - amd-cl-cpu + - mpi except: - tags @@ -21,6 +22,7 @@ Python 2.7 POCL: tags: - python2.7 - pocl + - mpi except: - tags @@ -34,6 +36,7 @@ Python 3.5 POCL: tags: - python3.5 - pocl + - mpi except: - tags -- GitLab From 4244d3522a0a492d15dc526172d22be76c08e95c Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 11 Sep 2017 13:06:37 -0500 Subject: [PATCH 104/266] Deprecated ctx_getter -> ctx_factory --- test/test_meshmode.py | 328 
++++-------------------------------------- 1 file changed, 26 insertions(+), 302 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 5972a977..4f28ffc6 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -49,290 +49,6 @@ import logging logger = logging.getLogger(__name__) -@pytest.mark.parametrize("num_partitions", [3, 6]) -def test_mpi_communication(num_partitions): - num_ranks = num_partitions + 1 - from subprocess import check_call - import sys - import os - newenv = os.environ.copy() - newenv["PYTOOLS_RUN_WITHIN_MPI"] = "1" - check_call(["mpirun", "-np", str(num_ranks), sys.executable, "mpi_test_helper.py"], - env=newenv) - - -# {{{ partition_interpolation - -@pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) -@pytest.mark.parametrize("num_parts", [2, 3]) -@pytest.mark.parametrize("num_groups", [1, 2]) -@pytest.mark.parametrize(("dim", "mesh_pars"), - [ - (2, [3, 4, 7]), - (3, [3, 4]) - ]) -def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, - num_parts, num_groups, scramble_partitions=True): - np.random.seed(42) - cl_ctx = ctx_getter() - queue = cl.CommandQueue(cl_ctx) - order = 4 - - from pytools.convergence import EOCRecorder - eoc_rec = dict() - for i in range(num_parts): - for j in range(num_parts): - if i == j: - continue - eoc_rec[i, j] = EOCRecorder() - - def f(x): - return 0.5*cl.clmath.sin(30.*x) - - for n in mesh_pars: - from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(dim, order=order, n=n) - for _ in range(num_groups)] - - if num_groups > 1: - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - else: - mesh = meshes[0] - - if scramble_partitions: - part_per_element = np.random.randint(num_parts, size=mesh.nelements) - else: - from pymetis import part_graph - _, p = part_graph(num_parts, - xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), - 
adjncy=mesh.nodal_adjacency.neighbors.tolist()) - part_per_element = np.array(p) - - from meshmode.mesh.processing import partition_mesh - part_meshes = [ - partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] - - from meshmode.discretization import Discretization - vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) - for i in range(num_parts)] - - from meshmode.mesh import BTAG_PARTITION - from meshmode.discretization.connection import (make_face_restriction, - make_partition_connection, - check_connection) - - for i_local_part in range(num_parts): - for i_remote_part in range(num_parts): - if (i_local_part == i_remote_part - or eoc_rec[i_local_part, i_remote_part] is None): - eoc_rec[i_local_part, i_remote_part] = None - continue - - # Mark faces within local_mesh that are connected to remote_mesh - local_bdry_conn = make_face_restriction(vol_discrs[i_local_part], - group_factory(order), - BTAG_PARTITION(i_remote_part)) - - # If these parts are not connected, don't bother checking the error - bdry_nodes = local_bdry_conn.to_discr.nodes() - if bdry_nodes.size == 0: - eoc_rec[i_local_part, i_remote_part] = None - continue - - # Mark faces within remote_mesh that are connected to local_mesh - remote_bdry_conn = make_face_restriction(vol_discrs[i_remote_part], - group_factory(order), - BTAG_PARTITION(i_local_part)) - - # Gather just enough information for the connection - local_bdry = local_bdry_conn.to_discr - local_mesh = part_meshes[i_local_part] - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conn.groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face - for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - remote_bdry = 
remote_bdry_conn.to_discr - remote_mesh = part_meshes[i_remote_part] - remote_adj_groups = [remote_mesh.facial_adjacency_groups[i][None] - for i in range(len(remote_mesh.groups))] - remote_batches = [remote_bdry_conn.groups[i].batches - for i in range(len(remote_mesh.groups))] - remote_to_elem_faces = [[batch.to_element_face - for batch in grp_batches] - for grp_batches in remote_batches] - remote_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in remote_batches] - - # Connect local_mesh to remote_mesh - local_part_conn = make_partition_connection(local_bdry_conn, - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - - # Connect remote mesh to local mesh - remote_part_conn = make_partition_connection(remote_bdry_conn, - i_remote_part, - local_bdry, - local_adj_groups, - local_to_elem_faces, - local_to_elem_indices) - - check_connection(local_part_conn) - check_connection(remote_part_conn) - - true_local_points = f(local_bdry.nodes()[0].with_queue(queue)) - remote_points = local_part_conn(queue, true_local_points) - local_points = remote_part_conn(queue, remote_points) - - err = la.norm((true_local_points - local_points).get(), np.inf) - eoc_rec[i_local_part, i_remote_part].add_data_point(1./n, err) - - for (i, j), e in eoc_rec.items(): - if e is not None: - print("Error of connection from part %i to part %i." 
% (i, j)) - print(e) - assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-12) - -# }}} - - -# {{{ partition_mesh - -@pytest.mark.parametrize("dim", [2, 3]) -@pytest.mark.parametrize("num_parts", [4, 5, 7]) -@pytest.mark.parametrize("num_meshes", [1, 2, 7]) -def test_partition_mesh(num_parts, num_meshes, dim, scramble_partitions=False): - np.random.seed(42) - n = (5,) * dim - from meshmode.mesh.generation import generate_regular_rect_mesh - meshes = [generate_regular_rect_mesh(a=(0 + i,) * dim, b=(1 + i,) * dim, n=n) - for i in range(num_meshes)] - - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - - if scramble_partitions: - part_per_element = np.random.randint(num_parts, size=mesh.nelements) - else: - from pymetis import part_graph - _, p = part_graph(num_parts, - xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), - adjncy=mesh.nodal_adjacency.neighbors.tolist()) - part_per_element = np.array(p) - - from meshmode.mesh.processing import partition_mesh - # TODO: The same part_per_element array must be used to partition each mesh. - # Maybe the interface should be changed to guarantee this. 
- new_meshes = [ - partition_mesh(mesh, part_per_element, i) for i in range(num_parts)] - - assert mesh.nelements == np.sum( - [new_meshes[i][0].nelements for i in range(num_parts)]), \ - "part_mesh has the wrong number of elements" - - assert count_tags(mesh, BTAG_ALL) == np.sum( - [count_tags(new_meshes[i][0], BTAG_ALL) for i in range(num_parts)]), \ - "part_mesh has the wrong number of BTAG_ALL boundaries" - - from meshmode.mesh import BTAG_PARTITION, InterPartitionAdjacencyGroup - from meshmode.mesh.processing import find_group_indices - num_tags = np.zeros((num_parts,)) - - index_lookup_table = dict() - for ipart, (m, _) in enumerate(new_meshes): - for igrp in range(len(m.groups)): - adj = m.facial_adjacency_groups[igrp][None] - if not isinstance(adj, InterPartitionAdjacencyGroup): - # This group is not connected to another partition. - continue - for i, (elem, face) in enumerate(zip(adj.elements, adj.element_faces)): - index_lookup_table[ipart, igrp, elem, face] = i - - for part_num in range(num_parts): - part, part_to_global = new_meshes[part_num] - for grp_num in range(len(part.groups)): - adj = part.facial_adjacency_groups[grp_num][None] - tags = -part.facial_adjacency_groups[grp_num][None].neighbors - assert np.all(tags >= 0) - if not isinstance(adj, InterPartitionAdjacencyGroup): - # This group is not connected to another partition. - continue - elem_base = part.groups[grp_num].element_nr_base - for idx in range(len(adj.elements)): - if adj.global_neighbors[idx] == -1: - continue - elem = adj.elements[idx] - face = adj.element_faces[idx] - n_part_num = adj.neighbor_partitions[idx] - n_meshwide_elem = adj.global_neighbors[idx] - n_face = adj.neighbor_faces[idx] - num_tags[n_part_num] += 1 - n_part, n_part_to_global = new_meshes[n_part_num] - # Hack: find_igrps expects a numpy.ndarray and returns - # a numpy.ndarray. But if a single integer is fed - # into find_igrps, an integer is returned. 
- n_grp_num = int(find_group_indices(n_part.groups, n_meshwide_elem)) - n_adj = n_part.facial_adjacency_groups[n_grp_num][None] - n_elem_base = n_part.groups[n_grp_num].element_nr_base - n_elem = n_meshwide_elem - n_elem_base - n_idx = index_lookup_table[n_part_num, n_grp_num, n_elem, n_face] - assert (part_num == n_adj.neighbor_partitions[n_idx] - and elem + elem_base == n_adj.global_neighbors[n_idx] - and face == n_adj.neighbor_faces[n_idx]),\ - "InterPartitionAdjacencyGroup is not consistent" - _, n_part_to_global = new_meshes[n_part_num] - p_meshwide_elem = part_to_global[elem + elem_base] - p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] - - p_grp_num = find_group_indices(mesh.groups, p_meshwide_elem) - p_n_grp_num = find_group_indices(mesh.groups, p_meshwide_n_elem) - - p_elem_base = mesh.groups[p_grp_num].element_nr_base - p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base - p_elem = p_meshwide_elem - p_elem_base - p_n_elem = p_meshwide_n_elem - p_n_elem_base - - f_groups = mesh.facial_adjacency_groups[p_grp_num] - for p_bnd_adj in f_groups.values(): - for idx in range(len(p_bnd_adj.elements)): - if (p_elem == p_bnd_adj.elements[idx] and - face == p_bnd_adj.element_faces[idx]): - assert p_n_elem == p_bnd_adj.neighbors[idx],\ - "Tag does not give correct neighbor" - assert n_face == p_bnd_adj.neighbor_faces[idx],\ - "Tag does not give correct neighbor" - - for i_tag in range(num_parts): - tag_sum = 0 - for mesh, _ in new_meshes: - tag_sum += count_tags(mesh, BTAG_PARTITION(i_tag)) - assert num_tags[i_tag] == tag_sum,\ - "part_mesh has the wrong number of BTAG_PARTITION boundaries" - - -def count_tags(mesh, tag): - num_bnds = 0 - for adj_dict in mesh.facial_adjacency_groups: - for neighbors in adj_dict[None].neighbors: - if neighbors < 0: - if -neighbors & mesh.boundary_tag_bit(tag) != 0: - num_bnds += 1 - return num_bnds - -# }}} - - # {{{ circle mesh def test_circle_mesh(do_plot=False): @@ -377,9 +93,9 @@ def test_circle_mesh(do_plot=False): 
("warp", 3, [10, 20, 30]), ]) @pytest.mark.parametrize("per_face_groups", [False, True]) -def test_boundary_interpolation(ctx_getter, group_factory, boundary_tag, +def test_boundary_interpolation(ctx_factory, group_factory, boundary_tag, mesh_name, dim, mesh_pars, per_face_groups): - cl_ctx = ctx_getter() + cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) from meshmode.discretization import Discretization @@ -465,9 +181,9 @@ def test_boundary_interpolation(ctx_getter, group_factory, boundary_tag, ("warp", 3, [10, 20, 30]), ]) @pytest.mark.parametrize("per_face_groups", [False, True]) -def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars, +def test_all_faces_interpolation(ctx_factory, mesh_name, dim, mesh_pars, per_face_groups): - cl_ctx = ctx_getter() + cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) from meshmode.discretization import Discretization @@ -577,11 +293,11 @@ def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars, ("warp", 2, [3, 5, 7]), ("warp", 3, [3, 5]), ]) -def test_opposite_face_interpolation(ctx_getter, group_factory, +def test_opposite_face_interpolation(ctx_factory, group_factory, mesh_name, dim, mesh_pars): logging.basicConfig(level=logging.INFO) - cl_ctx = ctx_getter() + cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) from meshmode.discretization import Discretization @@ -693,12 +409,12 @@ def test_element_orientation(): ("ball", lambda: mgen.generate_icosahedron(1, 1)), ("torus", lambda: mgen.generate_torus(5, 1)), ]) -def test_3d_orientation(ctx_getter, what, mesh_gen_func, visualize=False): +def test_3d_orientation(ctx_factory, what, mesh_gen_func, visualize=False): pytest.importorskip("pytential") logging.basicConfig(level=logging.INFO) - ctx = ctx_getter() + ctx = ctx_factory() queue = cl.CommandQueue(ctx) mesh = mesh_gen_func() @@ -748,7 +464,7 @@ def test_3d_orientation(ctx_getter, what, mesh_gen_func, visualize=False): # {{{ merge and map -def test_merge_and_map(ctx_getter, 
visualize=False): +def test_merge_and_map(ctx_factory, visualize=False): from meshmode.mesh.io import generate_gmsh, FileSource from meshmode.mesh.generation import generate_box_mesh from meshmode.mesh import TensorProductElementGroup @@ -789,7 +505,7 @@ def test_merge_and_map(ctx_getter, visualize=False): if visualize: from meshmode.discretization import Discretization - cl_ctx = ctx_getter() + cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) discr = Discretization(cl_ctx, mesh3, discr_grp_factory) @@ -805,10 +521,10 @@ def test_merge_and_map(ctx_getter, visualize=False): @pytest.mark.parametrize("dim", [2, 3]) @pytest.mark.parametrize("order", [1, 3]) -def test_sanity_single_element(ctx_getter, dim, order, visualize=False): +def test_sanity_single_element(ctx_factory, dim, order, visualize=False): pytest.importorskip("pytential") - cl_ctx = ctx_getter() + cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) from modepy.tools import unit_vertices @@ -892,12 +608,12 @@ def test_sanity_single_element(ctx_getter, dim, order, visualize=False): @pytest.mark.parametrize("dim", [2, 3, 4]) @pytest.mark.parametrize("order", [3]) -def test_sanity_qhull_nd(ctx_getter, dim, order): +def test_sanity_qhull_nd(ctx_factory, dim, order): pytest.importorskip("scipy") logging.basicConfig(level=logging.INFO) - ctx = ctx_getter() + ctx = ctx_factory() queue = cl.CommandQueue(ctx) from scipy.spatial import Delaunay @@ -946,13 +662,13 @@ def test_sanity_qhull_nd(ctx_getter, dim, order): ("ball-radius-1.step", 3), ]) @pytest.mark.parametrize("mesh_order", [1, 2]) -def test_sanity_balls(ctx_getter, src_file, dim, mesh_order, +def test_sanity_balls(ctx_factory, src_file, dim, mesh_order, visualize=False): pytest.importorskip("pytential") logging.basicConfig(level=logging.INFO) - ctx = ctx_getter() + ctx = ctx_factory() queue = cl.CommandQueue(ctx) from pytools.convergence import EOCRecorder @@ -1070,7 +786,7 @@ def test_rect_mesh(do_plot=False): pt.show() -def 
test_box_mesh(ctx_getter, visualize=False): +def test_box_mesh(ctx_factory, visualize=False): from meshmode.mesh.generation import generate_box_mesh mesh = generate_box_mesh(3*(np.linspace(0, 1, 5),)) @@ -1078,7 +794,7 @@ def test_box_mesh(ctx_getter, visualize=False): from meshmode.discretization import Discretization from meshmode.discretization.poly_element import \ PolynomialWarpAndBlendGroupFactory - cl_ctx = ctx_getter() + cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) discr = Discretization(cl_ctx, mesh, @@ -1227,6 +943,8 @@ def no_test_quad_mesh_3d(): # }}} +# {{{ test_quad_single_element + def test_quad_single_element(): from meshmode.mesh.generation import make_group_from_vertices from meshmode.mesh import Mesh, TensorProductElementGroup @@ -1250,6 +968,10 @@ def test_quad_single_element(): mg.nodes[1].reshape(-1), "o") plt.show() +# }}} + + +# {{{ test_quad_multi_element def test_quad_multi_element(): from meshmode.mesh.generation import generate_box_mesh @@ -1270,6 +992,8 @@ def test_quad_multi_element(): mg.nodes[1].reshape(-1), "o") plt.show() +# }}} + if __name__ == "__main__": import sys -- GitLab From 301b1ff1b77b1b27d65cff23d7e00699fd41ec69 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 11 Sep 2017 13:07:03 -0500 Subject: [PATCH 105/266] Shuffle MPI/partition into single/independent file --- test/mpi_test_helper.py | 129 ----------- test/test_partition.py | 482 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 482 insertions(+), 129 deletions(-) delete mode 100644 test/mpi_test_helper.py create mode 100644 test/test_partition.py diff --git a/test/mpi_test_helper.py b/test/mpi_test_helper.py deleted file mode 100644 index 511b1fe9..00000000 --- a/test/mpi_test_helper.py +++ /dev/null @@ -1,129 +0,0 @@ -from __future__ import division, absolute_import, print_function -import numpy as np - - -def mpi_comm(): - - from mpi4py import MPI - comm = MPI.COMM_WORLD - rank = comm.Get_rank() - num_parts = comm.Get_size() - 1 - - # 
This rank only partitions a mesh and sends them to their respective ranks. - if rank == 0: - np.random.seed(42) - from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] - - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - - part_per_element = np.random.randint(num_parts, size=mesh.nelements) - - from meshmode.mesh.processing import partition_mesh - parts = [partition_mesh(mesh, part_per_element, i)[0] - for i in range(num_parts)] - - reqs = [] - for r in range(num_parts): - reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) - print('Rank 0: Sent all mesh partitions.') - for req in reqs: - req.wait() - - # These ranks recieve a mesh and comunicates boundary data to the other ranks. - elif (rank - 1) in range(num_parts): - status = MPI.Status() - local_mesh = comm.recv(source=0, tag=1, status=status) - print('Rank {0}: Recieved full mesh (size = {1})'.format(rank, status.count)) - - from meshmode.discretization.poly_element\ - import PolynomialWarpAndBlendGroupFactory - group_factory = PolynomialWarpAndBlendGroupFactory(4) - import pyopencl as cl - cl_ctx = cl.create_some_context() - queue = cl.CommandQueue(cl_ctx) - - from meshmode.discretization import Discretization - vol_discr = Discretization(cl_ctx, local_mesh, group_factory) - - i_local_part = rank - 1 - local_bdry_conns = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - # Mark faces within local_mesh that are connected to remote_mesh - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - local_bdry_conns[i_remote_part] =\ - make_face_restriction(vol_discr, group_factory, - BTAG_PARTITION(i_remote_part)) - - # Send boundary data - send_reqs = [] - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - bdry_nodes = 
local_bdry_conns[i_remote_part].to_discr.nodes() - if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh, send None - send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) - continue - - # Gather information to send to other ranks - local_bdry = local_bdry_conns[i_remote_part].to_discr - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - local_data = {'bdry_mesh': local_bdry.mesh, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) - - # Receive boundary data - remote_data = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - remote_rank = i_remote_part + 1 - status = MPI.Status() - remote_data[i_remote_part] = comm.recv(source=remote_rank, - tag=2, - status=status) - print('Rank {0}: Received rank {1} data (size = {2})' - .format(rank, remote_rank, status.count)) - - for req in send_reqs: - req.wait() - - for i_remote_part, data in remote_data.items(): - if data is None: - # Local mesh is not connected to remote mesh - continue - remote_bdry_mesh = data['bdry_mesh'] - remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) - remote_adj_groups = data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - connection = make_partition_connection(local_bdry_conns[i_remote_part], - 
i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - from meshmode.discretization.connection import check_connection - check_connection(connection) - - -if __name__ == "__main__": - mpi_comm() diff --git a/test/test_partition.py b/test/test_partition.py new file mode 100644 index 00000000..9a8cfb60 --- /dev/null +++ b/test/test_partition.py @@ -0,0 +1,482 @@ +from __future__ import division, absolute_import, print_function + +__copyright__ = """ +Copyright (C) 2017 Ellis Hoag +Copyright (C) 2017 Andreas Kloeckner +""" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + +from six.moves import range +import numpy as np +import numpy.linalg as la +import pyopencl as cl +import pyopencl.array # noqa +import pyopencl.clmath # noqa + +from pyopencl.tools import ( # noqa + pytest_generate_tests_for_pyopencl + as pytest_generate_tests) + +from meshmode.discretization.poly_element import ( + PolynomialWarpAndBlendGroupFactory) +from meshmode.mesh import BTAG_ALL + +import pytest +import os + +import logging +logger = logging.getLogger(__name__) + + +# {{{ partition_interpolation + +@pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) +@pytest.mark.parametrize("num_parts", [2, 3]) +@pytest.mark.parametrize("num_groups", [1, 2]) +@pytest.mark.parametrize(("dim", "mesh_pars"), + [ + (2, [3, 4, 7]), + (3, [3, 4]) + ]) +def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, + num_parts, num_groups, scramble_partitions=True): + np.random.seed(42) + cl_ctx = ctx_factory() + queue = cl.CommandQueue(cl_ctx) + order = 4 + + from pytools.convergence import EOCRecorder + eoc_rec = dict() + for i in range(num_parts): + for j in range(num_parts): + if i == j: + continue + eoc_rec[i, j] = EOCRecorder() + + def f(x): + return 0.5*cl.clmath.sin(30.*x) + + for n in mesh_pars: + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(dim, order=order, n=n) + for _ in range(num_groups)] + + if num_groups > 1: + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + else: + mesh = meshes[0] + + if scramble_partitions: + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + else: + from pymetis import part_graph + _, p = part_graph(num_parts, + xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), + adjncy=mesh.nodal_adjacency.neighbors.tolist()) + part_per_element = np.array(p) + + from meshmode.mesh.processing import partition_mesh + part_meshes = [ + partition_mesh(mesh, part_per_element, i)[0] for 
i in range(num_parts)] + + from meshmode.discretization import Discretization + vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) + for i in range(num_parts)] + + from meshmode.mesh import BTAG_PARTITION + from meshmode.discretization.connection import (make_face_restriction, + make_partition_connection, + check_connection) + + for i_local_part in range(num_parts): + for i_remote_part in range(num_parts): + if (i_local_part == i_remote_part + or eoc_rec[i_local_part, i_remote_part] is None): + eoc_rec[i_local_part, i_remote_part] = None + continue + + # Mark faces within local_mesh that are connected to remote_mesh + local_bdry_conn = make_face_restriction(vol_discrs[i_local_part], + group_factory(order), + BTAG_PARTITION(i_remote_part)) + + # If these parts are not connected, don't bother checking the error + bdry_nodes = local_bdry_conn.to_discr.nodes() + if bdry_nodes.size == 0: + eoc_rec[i_local_part, i_remote_part] = None + continue + + # Mark faces within remote_mesh that are connected to local_mesh + remote_bdry_conn = make_face_restriction(vol_discrs[i_remote_part], + group_factory(order), + BTAG_PARTITION(i_local_part)) + + # Gather just enough information for the connection + local_bdry = local_bdry_conn.to_discr + local_mesh = part_meshes[i_local_part] + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conn.groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face + for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + remote_bdry = remote_bdry_conn.to_discr + remote_mesh = part_meshes[i_remote_part] + remote_adj_groups = [remote_mesh.facial_adjacency_groups[i][None] + for i in range(len(remote_mesh.groups))] + remote_batches = 
[remote_bdry_conn.groups[i].batches + for i in range(len(remote_mesh.groups))] + remote_to_elem_faces = [[batch.to_element_face + for batch in grp_batches] + for grp_batches in remote_batches] + remote_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in remote_batches] + + # Connect local_mesh to remote_mesh + local_part_conn = make_partition_connection(local_bdry_conn, + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + + # Connect remote mesh to local mesh + remote_part_conn = make_partition_connection(remote_bdry_conn, + i_remote_part, + local_bdry, + local_adj_groups, + local_to_elem_faces, + local_to_elem_indices) + + check_connection(local_part_conn) + check_connection(remote_part_conn) + + true_local_points = f(local_bdry.nodes()[0].with_queue(queue)) + remote_points = local_part_conn(queue, true_local_points) + local_points = remote_part_conn(queue, remote_points) + + err = la.norm((true_local_points - local_points).get(), np.inf) + eoc_rec[i_local_part, i_remote_part].add_data_point(1./n, err) + + for (i, j), e in eoc_rec.items(): + if e is not None: + print("Error of connection from part %i to part %i." 
% (i, j)) + print(e) + assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-12) + +# }}} + + +# {{{ partition_mesh + +@pytest.mark.parametrize("dim", [2, 3]) +@pytest.mark.parametrize("num_parts", [4, 5, 7]) +@pytest.mark.parametrize("num_meshes", [1, 2, 7]) +def test_partition_mesh(num_parts, num_meshes, dim, scramble_partitions=False): + np.random.seed(42) + n = (5,) * dim + from meshmode.mesh.generation import generate_regular_rect_mesh + meshes = [generate_regular_rect_mesh(a=(0 + i,) * dim, b=(1 + i,) * dim, n=n) + for i in range(num_meshes)] + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + if scramble_partitions: + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + else: + from pymetis import part_graph + _, p = part_graph(num_parts, + xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), + adjncy=mesh.nodal_adjacency.neighbors.tolist()) + part_per_element = np.array(p) + + from meshmode.mesh.processing import partition_mesh + # TODO: The same part_per_element array must be used to partition each mesh. + # Maybe the interface should be changed to guarantee this. 
+ new_meshes = [ + partition_mesh(mesh, part_per_element, i) for i in range(num_parts)] + + assert mesh.nelements == np.sum( + [new_meshes[i][0].nelements for i in range(num_parts)]), \ + "part_mesh has the wrong number of elements" + + assert count_tags(mesh, BTAG_ALL) == np.sum( + [count_tags(new_meshes[i][0], BTAG_ALL) for i in range(num_parts)]), \ + "part_mesh has the wrong number of BTAG_ALL boundaries" + + from meshmode.mesh import BTAG_PARTITION, InterPartitionAdjacencyGroup + from meshmode.mesh.processing import find_group_indices + num_tags = np.zeros((num_parts,)) + + index_lookup_table = dict() + for ipart, (m, _) in enumerate(new_meshes): + for igrp in range(len(m.groups)): + adj = m.facial_adjacency_groups[igrp][None] + if not isinstance(adj, InterPartitionAdjacencyGroup): + # This group is not connected to another partition. + continue + for i, (elem, face) in enumerate(zip(adj.elements, adj.element_faces)): + index_lookup_table[ipart, igrp, elem, face] = i + + for part_num in range(num_parts): + part, part_to_global = new_meshes[part_num] + for grp_num in range(len(part.groups)): + adj = part.facial_adjacency_groups[grp_num][None] + tags = -part.facial_adjacency_groups[grp_num][None].neighbors + assert np.all(tags >= 0) + if not isinstance(adj, InterPartitionAdjacencyGroup): + # This group is not connected to another partition. + continue + elem_base = part.groups[grp_num].element_nr_base + for idx in range(len(adj.elements)): + if adj.global_neighbors[idx] == -1: + continue + elem = adj.elements[idx] + face = adj.element_faces[idx] + n_part_num = adj.neighbor_partitions[idx] + n_meshwide_elem = adj.global_neighbors[idx] + n_face = adj.neighbor_faces[idx] + num_tags[n_part_num] += 1 + n_part, n_part_to_global = new_meshes[n_part_num] + # Hack: find_igrps expects a numpy.ndarray and returns + # a numpy.ndarray. But if a single integer is fed + # into find_igrps, an integer is returned. 
+ n_grp_num = int(find_group_indices(n_part.groups, n_meshwide_elem)) + n_adj = n_part.facial_adjacency_groups[n_grp_num][None] + n_elem_base = n_part.groups[n_grp_num].element_nr_base + n_elem = n_meshwide_elem - n_elem_base + n_idx = index_lookup_table[n_part_num, n_grp_num, n_elem, n_face] + assert (part_num == n_adj.neighbor_partitions[n_idx] + and elem + elem_base == n_adj.global_neighbors[n_idx] + and face == n_adj.neighbor_faces[n_idx]),\ + "InterPartitionAdjacencyGroup is not consistent" + _, n_part_to_global = new_meshes[n_part_num] + p_meshwide_elem = part_to_global[elem + elem_base] + p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] + + p_grp_num = find_group_indices(mesh.groups, p_meshwide_elem) + p_n_grp_num = find_group_indices(mesh.groups, p_meshwide_n_elem) + + p_elem_base = mesh.groups[p_grp_num].element_nr_base + p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base + p_elem = p_meshwide_elem - p_elem_base + p_n_elem = p_meshwide_n_elem - p_n_elem_base + + f_groups = mesh.facial_adjacency_groups[p_grp_num] + for p_bnd_adj in f_groups.values(): + for idx in range(len(p_bnd_adj.elements)): + if (p_elem == p_bnd_adj.elements[idx] and + face == p_bnd_adj.element_faces[idx]): + assert p_n_elem == p_bnd_adj.neighbors[idx],\ + "Tag does not give correct neighbor" + assert n_face == p_bnd_adj.neighbor_faces[idx],\ + "Tag does not give correct neighbor" + + for i_tag in range(num_parts): + tag_sum = 0 + for mesh, _ in new_meshes: + tag_sum += count_tags(mesh, BTAG_PARTITION(i_tag)) + assert num_tags[i_tag] == tag_sum,\ + "part_mesh has the wrong number of BTAG_PARTITION boundaries" + + +def count_tags(mesh, tag): + num_bnds = 0 + for adj_dict in mesh.facial_adjacency_groups: + for neighbors in adj_dict[None].neighbors: + if neighbors < 0: + if -neighbors & mesh.boundary_tag_bit(tag) != 0: + num_bnds += 1 + return num_bnds + +# }}} + + +# {{{ MPI test rank entrypoint + +def mpi_test_rank_entrypoint(): + from mpi4py import MPI + comm = 
MPI.COMM_WORLD + rank = comm.Get_rank() + num_parts = comm.Get_size() - 1 + + # This rank only partitions a mesh and sends them to their respective ranks. + if rank == 0: + np.random.seed(42) + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + + from meshmode.mesh.processing import partition_mesh + parts = [partition_mesh(mesh, part_per_element, i)[0] + for i in range(num_parts)] + + reqs = [] + for r in range(num_parts): + reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) + print('Rank 0: Sent all mesh partitions.') + for req in reqs: + req.wait() + + # These ranks recieve a mesh and comunicates boundary data to the other ranks. + elif (rank - 1) in range(num_parts): + status = MPI.Status() + local_mesh = comm.recv(source=0, tag=1, status=status) + print('Rank {0}: Recieved full mesh (size = {1})'.format(rank, status.count)) + + from meshmode.discretization.poly_element\ + import PolynomialWarpAndBlendGroupFactory + group_factory = PolynomialWarpAndBlendGroupFactory(4) + import pyopencl as cl + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + + from meshmode.discretization import Discretization + vol_discr = Discretization(cl_ctx, local_mesh, group_factory) + + i_local_part = rank - 1 + local_bdry_conns = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + # Mark faces within local_mesh that are connected to remote_mesh + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION + local_bdry_conns[i_remote_part] =\ + make_face_restriction(vol_discr, group_factory, + BTAG_PARTITION(i_remote_part)) + + print("Rank %d send begin" % rank) + + # Send boundary data + send_reqs = [] + for 
i_remote_part in range(num_parts): + print(i_remote_part) + if i_local_part == i_remote_part: + continue + bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() + if bdry_nodes.size == 0: + # local_mesh is not connected to remote_mesh, send None + send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) + continue + + # Gather information to send to other ranks + local_bdry = local_bdry_conns[i_remote_part].to_discr + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) + + # Receive boundary data + remote_data = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + remote_rank = i_remote_part + 1 + status = MPI.Status() + remote_data[i_remote_part] = comm.recv(source=remote_rank, + tag=2, + status=status) + print('Rank {0}: Received rank {1} data (size = {2})' + .format(rank, remote_rank, status.count)) + + for req in send_reqs: + req.wait() + + for i_remote_part, data in remote_data.items(): + if data is None: + # Local mesh is not connected to remote mesh + continue + remote_bdry_mesh = data['bdry_mesh'] + remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) + remote_adj_groups = data['adj'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] + # Connect local_mesh to remote_mesh + from meshmode.discretization.connection 
import make_partition_connection + connection = make_partition_connection(local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + from meshmode.discretization.connection import check_connection + check_connection(connection) + + print("Rank %d exiting" % rank) + +# }}} + + +# {{{ MPI test pytest entrypoint + +@pytest.mark.mpi +@pytest.mark.parametrize("num_partitions", [3, 6]) +def test_mpi_communication(num_partitions): + num_ranks = num_partitions + 1 + from subprocess import check_call + import sys + newenv = os.environ.copy() + newenv["RUN_WITHIN_MPI"] = "1" + check_call([ + "mpiexec", "-np", str(num_ranks), "-x", "RUN_WITHIN_MPI", + sys.executable, __file__], + env=newenv) + +# }}} + + +if __name__ == "__main__": + if "RUN_WITHIN_MPI" in os.environ: + mpi_test_rank_entrypoint() + else: + import sys + if len(sys.argv) > 1: + exec(sys.argv[1]) + else: + from py.test.cmdline import main + main([__file__]) + +# vim: fdm=marker -- GitLab From 7d2688ee6c974a21334aff900e9265ec4b76f66e Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 18 Sep 2017 14:11:09 -0500 Subject: [PATCH 106/266] Minor doc tweak --- meshmode/discretization/connection/opposite_face.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index ab460da8..106c6350 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -413,7 +413,7 @@ def make_partition_connection(local_bdry_conn, i_local_part, """ Connects ``local_bdry_conn`` to a neighboring partition. - :arg local_bdry_conn: A :class:`DirectDiscretizationConnection` of the local + :arg local_bdry_conn: A :class:`DiscretizationConnection` of the local partition. :arg i_local_part: The partition number of the local partition. 
:arg remote_adj_groups: A list of :class:`InterPartitionAdjacency`` of the -- GitLab From 66cc87a9d11fbdf2baeaf3f038aa87c4cc282b29 Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 23 Sep 2017 20:19:41 -0500 Subject: [PATCH 107/266] Make MPI receives non-blocking --- test/test_partition.py | 43 ++++++++++++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 9a8cfb60..30d8354c 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -327,6 +327,9 @@ def mpi_test_rank_entrypoint(): rank = comm.Get_rank() num_parts = comm.Get_size() - 1 + TAG_DISTRIBUTE_MESHES = 1 + TAG_SEND_MESH = 2 + # This rank only partitions a mesh and sends them to their respective ranks. if rank == 0: np.random.seed(42) @@ -344,7 +347,7 @@ def mpi_test_rank_entrypoint(): reqs = [] for r in range(num_parts): - reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) + reqs.append(comm.isend(parts[r], dest=r+1, tag=TAG_DISTRIBUTE_MESHES)) print('Rank 0: Sent all mesh partitions.') for req in reqs: req.wait() @@ -352,7 +355,7 @@ def mpi_test_rank_entrypoint(): # These ranks recieve a mesh and comunicates boundary data to the other ranks. 
elif (rank - 1) in range(num_parts): status = MPI.Status() - local_mesh = comm.recv(source=0, tag=1, status=status) + local_mesh = comm.recv(source=0, tag=TAG_DISTRIBUTE_MESHES, status=status) print('Rank {0}: Recieved full mesh (size = {1})'.format(rank, status.count)) from meshmode.discretization.poly_element\ @@ -382,13 +385,14 @@ def mpi_test_rank_entrypoint(): # Send boundary data send_reqs = [] for i_remote_part in range(num_parts): - print(i_remote_part) if i_local_part == i_remote_part: continue bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh, send None - send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) + # local_mesh is not connected to remote_mesh; send None + send_reqs.append(comm.isend(None, + dest=i_remote_part+1, + tag=TAG_SEND_MESH)) continue # Gather information to send to other ranks @@ -407,20 +411,35 @@ def mpi_test_rank_entrypoint(): 'adj': local_adj_groups, 'to_elem_faces': local_to_elem_faces, 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) + send_reqs.append(comm.isend(local_data, + dest=i_remote_part+1, + tag=TAG_SEND_MESH)) # Receive boundary data - remote_data = {} + remote_buf = {} for i_remote_part in range(num_parts): if i_local_part == i_remote_part: continue remote_rank = i_remote_part + 1 status = MPI.Status() - remote_data[i_remote_part] = comm.recv(source=remote_rank, - tag=2, - status=status) - print('Rank {0}: Received rank {1} data (size = {2})' - .format(rank, remote_rank, status.count)) + comm.probe(source=remote_rank, tag=TAG_SEND_MESH, status=status) + remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in remote_buf.items(): + remote_rank = i_remote_part + 1 + recv_reqs[i_remote_part] = comm.irecv(buf=buf, + source=remote_rank, + tag=TAG_SEND_MESH) + + remote_data = {} + for i_remote_part, req in 
recv_reqs.items(): + status = MPI.Status() + remote_data[i_remote_part] = req.wait(status=status) + # Free the buffer + remote_buf[i_remote_part] = None # FIXME: Is this a good idea? + print('Rank {0}: Received rank {1} data ({2} bytes)' + .format(rank, i_remote_part + 1, status.count)) for req in send_reqs: req.wait() -- GitLab From bd66f2f5cac8ca5abe1cea83f70ec2d8bdd4a7cb Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 23 Sep 2017 21:27:47 -0500 Subject: [PATCH 108/266] Make tag constants global --- test/test_partition.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 30d8354c..6809e523 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -320,6 +320,9 @@ def count_tags(mesh, tag): # {{{ MPI test rank entrypoint +TAG_DISTRIBUTE_MESHES = 1 +TAG_SEND_MESH = 2 + def mpi_test_rank_entrypoint(): from mpi4py import MPI @@ -327,9 +330,6 @@ def mpi_test_rank_entrypoint(): rank = comm.Get_rank() num_parts = comm.Get_size() - 1 - TAG_DISTRIBUTE_MESHES = 1 - TAG_SEND_MESH = 2 - # This rank only partitions a mesh and sends them to their respective ranks. 
if rank == 0: np.random.seed(42) -- GitLab From 9844a35aa62473b479a82f45ef2a4338f6a571ce Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 24 Sep 2017 22:10:25 -0500 Subject: [PATCH 109/266] working --- test/test_partition.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_partition.py b/test/test_partition.py index 6809e523..c4323597 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -397,6 +397,7 @@ def mpi_test_rank_entrypoint(): # Gather information to send to other ranks local_bdry = local_bdry_conns[i_remote_part].to_discr + local_mesh = local_bdry_conns[i_remote_part].from_discr.mesh local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] for i in range(len(local_mesh.groups))] local_batches = [local_bdry_conns[i_remote_part].groups[i].batches -- GitLab From 7fe4e326c2aa593fea092535284fdd5dd53f033d Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 25 Sep 2017 11:30:18 -0500 Subject: [PATCH 110/266] Add debugging info --- test/test_partition.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index c4323597..7c9b4217 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -348,7 +348,7 @@ def mpi_test_rank_entrypoint(): reqs = [] for r in range(num_parts): reqs.append(comm.isend(parts[r], dest=r+1, tag=TAG_DISTRIBUTE_MESHES)) - print('Rank 0: Sent all mesh partitions.') + print('Rank 0: Sent all mesh partitions') for req in reqs: req.wait() @@ -356,7 +356,7 @@ def mpi_test_rank_entrypoint(): elif (rank - 1) in range(num_parts): status = MPI.Status() local_mesh = comm.recv(source=0, tag=TAG_DISTRIBUTE_MESHES, status=status) - print('Rank {0}: Recieved full mesh (size = {1})'.format(rank, status.count)) + print('Rank {0}: Recieved local mesh (size = {1})'.format(rank, status.count)) from meshmode.discretization.poly_element\ import PolynomialWarpAndBlendGroupFactory @@ -434,6 +434,7 @@ def mpi_test_rank_entrypoint(): tag=TAG_SEND_MESH) 
remote_data = {} + total_bytes_recvd = 0 for i_remote_part, req in recv_reqs.items(): status = MPI.Status() remote_data[i_remote_part] = req.wait(status=status) @@ -441,6 +442,9 @@ def mpi_test_rank_entrypoint(): remote_buf[i_remote_part] = None # FIXME: Is this a good idea? print('Rank {0}: Received rank {1} data ({2} bytes)' .format(rank, i_remote_part + 1, status.count)) + total_bytes_recvd += status.count + + print('Rank {0}: Recieved {1} bytes in total'.format(rank, total_bytes_recvd)) for req in send_reqs: req.wait() -- GitLab From 7c6bc07b08d83c41c1dfabb2d64f2a9a67318b43 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 25 Sep 2017 13:27:02 -0500 Subject: [PATCH 111/266] Fix whitespace --- test/test_partition.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 7c9b4217..41f3888f 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -356,7 +356,8 @@ def mpi_test_rank_entrypoint(): elif (rank - 1) in range(num_parts): status = MPI.Status() local_mesh = comm.recv(source=0, tag=TAG_DISTRIBUTE_MESHES, status=status) - print('Rank {0}: Recieved local mesh (size = {1})'.format(rank, status.count)) + print('Rank {0}: Recieved local mesh (size = {1})' + .format(rank, status.count)) from meshmode.discretization.poly_element\ import PolynomialWarpAndBlendGroupFactory @@ -444,7 +445,8 @@ def mpi_test_rank_entrypoint(): .format(rank, i_remote_part + 1, status.count)) total_bytes_recvd += status.count - print('Rank {0}: Recieved {1} bytes in total'.format(rank, total_bytes_recvd)) + print('Rank {0}: Recieved {1} bytes in total' + .format(rank, total_bytes_recvd)) for req in send_reqs: req.wait() -- GitLab From 395b50c4b106f660ca22a2db4f11a137c6cde9ae Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 5 Oct 2017 19:44:32 -0500 Subject: [PATCH 112/266] partition test: build a mesh distributor utility, make all ranks do useful work --- test/test_partition.py | 298 
+++++++++++++++++++++++------------------ 1 file changed, 169 insertions(+), 129 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 41f3888f..005af2c9 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -320,156 +320,194 @@ def count_tags(mesh, tag): # {{{ MPI test rank entrypoint -TAG_DISTRIBUTE_MESHES = 1 -TAG_SEND_MESH = 2 +TAG_BASE = 83411 +TAG_DISTRIBUTE_MESHES = TAG_BASE + 1 +TAG_SEND_MESH = TAG_BASE + 2 -def mpi_test_rank_entrypoint(): - from mpi4py import MPI - comm = MPI.COMM_WORLD - rank = comm.Get_rank() - num_parts = comm.Get_size() - 1 - # This rank only partitions a mesh and sends them to their respective ranks. - if rank == 0: - np.random.seed(42) - from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] +class MPIMeshDistributor(object): + def __init__(self, mpi_comm, manager_rank=0): + self.mpi_comm = mpi_comm + self.manager_rank = manager_rank - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) + def is_mananger_rank(self): + return self.mpi_comm.Get_rank() == self.manager_rank - part_per_element = np.random.randint(num_parts, size=mesh.nelements) + def send_mesh_parts(self, mesh, part_per_element, num_parts): + mpi_comm = self.mpi_comm + rank = mpi_comm.Get_rank() + assert num_parts <= mpi_comm.Get_size() + + assert self.is_mananger_rank() from meshmode.mesh.processing import partition_mesh parts = [partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] + local_part = None + reqs = [] - for r in range(num_parts): - reqs.append(comm.isend(parts[r], dest=r+1, tag=TAG_DISTRIBUTE_MESHES)) - print('Rank 0: Sent all mesh partitions') + for r, part in enumerate(parts): + if r == self.manager_rank: + local_part = part + else: + reqs.append(mpi_comm.isend(part, dest=r, tag=TAG_DISTRIBUTE_MESHES)) + + logger.info('rank %d: sent all mesh partitions', rank) for req in 
reqs: req.wait() - # These ranks recieve a mesh and comunicates boundary data to the other ranks. - elif (rank - 1) in range(num_parts): + return local_part + + def receive_mesh_part(self): + from mpi4py import MPI + + mpi_comm = self.mpi_comm + rank = mpi_comm.Get_rank() + status = MPI.Status() - local_mesh = comm.recv(source=0, tag=TAG_DISTRIBUTE_MESHES, status=status) - print('Rank {0}: Recieved local mesh (size = {1})' - .format(rank, status.count)) + result = self.mpi_comm.recv( + source=self.manager_rank, tag=TAG_DISTRIBUTE_MESHES, + status=status) + logger.info('rank %d: recieved local mesh (size = %d)', rank, status.count) - from meshmode.discretization.poly_element\ - import PolynomialWarpAndBlendGroupFactory - group_factory = PolynomialWarpAndBlendGroupFactory(4) - import pyopencl as cl - cl_ctx = cl.create_some_context() - queue = cl.CommandQueue(cl_ctx) + return result - from meshmode.discretization import Discretization - vol_discr = Discretization(cl_ctx, local_mesh, group_factory) - i_local_part = rank - 1 - local_bdry_conns = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - # Mark faces within local_mesh that are connected to remote_mesh - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - local_bdry_conns[i_remote_part] =\ - make_face_restriction(vol_discr, group_factory, - BTAG_PARTITION(i_remote_part)) - - print("Rank %d send begin" % rank) - - # Send boundary data - send_reqs = [] - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() - if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh; send None - send_reqs.append(comm.isend(None, - dest=i_remote_part+1, - tag=TAG_SEND_MESH)) - continue +def mpi_test_rank_entrypoint(): + from mpi4py import MPI + comm = MPI.COMM_WORLD + rank = comm.Get_rank() + num_parts = comm.Get_size() - 
# Gather information to send to other ranks - local_bdry = local_bdry_conns[i_remote_part].to_discr - local_mesh = local_bdry_conns[i_remote_part].from_discr.mesh - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - local_data = {'bdry_mesh': local_bdry.mesh, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, - dest=i_remote_part+1, - tag=TAG_SEND_MESH)) + mesh_dist = MPIMeshDistributor(comm) - # Receive boundary data - remote_buf = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - remote_rank = i_remote_part + 1 - status = MPI.Status() - comm.probe(source=remote_rank, tag=TAG_SEND_MESH, status=status) - remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) - - recv_reqs = {} - for i_remote_part, buf in remote_buf.items(): - remote_rank = i_remote_part + 1 - recv_reqs[i_remote_part] = comm.irecv(buf=buf, - source=remote_rank, - tag=TAG_SEND_MESH) - - remote_data = {} - total_bytes_recvd = 0 - for i_remote_part, req in recv_reqs.items(): - status = MPI.Status() - remote_data[i_remote_part] = req.wait(status=status) - # Free the buffer - remote_buf[i_remote_part] = None # FIXME: Is this a good idea? 
- print('Rank {0}: Received rank {1} data ({2} bytes)' - .format(rank, i_remote_part + 1, status.count)) - total_bytes_recvd += status.count - - print('Rank {0}: Recieved {1} bytes in total' - .format(rank, total_bytes_recvd)) - - for req in send_reqs: - req.wait() + if mesh_dist.is_mananger_rank(): + np.random.seed(42) + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] - for i_remote_part, data in remote_data.items(): - if data is None: - # Local mesh is not connected to remote mesh - continue - remote_bdry_mesh = data['bdry_mesh'] - remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) - remote_adj_groups = data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - connection = make_partition_connection(local_bdry_conns[i_remote_part], - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - from meshmode.discretization.connection import check_connection - check_connection(connection) + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + + local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element, num_parts) + else: + local_mesh = mesh_dist.receive_mesh_part() + + from meshmode.discretization.poly_element\ + import PolynomialWarpAndBlendGroupFactory + group_factory = PolynomialWarpAndBlendGroupFactory(4) + import pyopencl as cl + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + + from meshmode.discretization import Discretization + vol_discr = Discretization(cl_ctx, local_mesh, group_factory) + + i_local_part = rank + local_bdry_conns = {} + for i_remote_part in range(num_parts): + if i_local_part == 
i_remote_part: + continue + # Mark faces within local_mesh that are connected to remote_mesh + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION + local_bdry_conns[i_remote_part] =\ + make_face_restriction(vol_discr, group_factory, + BTAG_PARTITION(i_remote_part)) + + print("Rank %d send begin" % rank) + + # Send boundary data + send_reqs = [] + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() + if bdry_nodes.size == 0: + # local_mesh is not connected to remote_mesh; send None + send_reqs.append(comm.isend(None, + dest=i_remote_part, + tag=TAG_SEND_MESH)) + continue + + # Gather information to send to other ranks + local_bdry = local_bdry_conns[i_remote_part].to_discr + local_mesh = local_bdry_conns[i_remote_part].from_discr.mesh + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + send_reqs.append(comm.isend(local_data, + dest=i_remote_part, + tag=TAG_SEND_MESH)) + + # Receive boundary data + remote_buf = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + remote_rank = i_remote_part + status = MPI.Status() + comm.probe(source=remote_rank, tag=TAG_SEND_MESH, status=status) + remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in remote_buf.items(): 
+ remote_rank = i_remote_part + recv_reqs[i_remote_part] = comm.irecv(buf=buf, + source=remote_rank, + tag=TAG_SEND_MESH) + + remote_data = {} + total_bytes_recvd = 0 + for i_remote_part, req in recv_reqs.items(): + status = MPI.Status() + remote_data[i_remote_part] = req.wait(status=status) + # Free the buffer + remote_buf[i_remote_part] = None # FIXME: Is this a good idea? + print('Rank {0}: Received rank {1} data ({2} bytes)' + .format(rank, i_remote_part, status.count)) + total_bytes_recvd += status.count + + print('Rank {0}: Recieved {1} bytes in total' + .format(rank, total_bytes_recvd)) + + for req in send_reqs: + req.wait() + + for i_remote_part, data in remote_data.items(): + if data is None: + # Local mesh is not connected to remote mesh + continue + remote_bdry_mesh = data['bdry_mesh'] + remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) + remote_adj_groups = data['adj'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] + # Connect local_mesh to remote_mesh + from meshmode.discretization.connection import make_partition_connection + connection = make_partition_connection(local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + from meshmode.discretization.connection import check_connection + check_connection(connection) print("Rank %d exiting" % rank) @@ -481,6 +519,8 @@ def mpi_test_rank_entrypoint(): @pytest.mark.mpi @pytest.mark.parametrize("num_partitions", [3, 6]) def test_mpi_communication(num_partitions): + pytest.importorskip("mpi4py") + num_ranks = num_partitions + 1 from subprocess import check_call import sys -- GitLab From fce3c96d284c57930a46138719e10b14ea253def Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 5 Oct 2017 23:08:34 -0500 Subject: [PATCH 113/266] Refactor mesh distribution and boundary communication into reusable components --- meshmode/distributed.py | 233 
++++++++++++++++++++++++++++++++++++++++ test/test_partition.py | 166 ++-------------------------- 2 files changed, 242 insertions(+), 157 deletions(-) create mode 100644 meshmode/distributed.py diff --git a/meshmode/distributed.py b/meshmode/distributed.py new file mode 100644 index 00000000..8fcc239c --- /dev/null +++ b/meshmode/distributed.py @@ -0,0 +1,233 @@ +from __future__ import division, absolute_import, print_function + +__copyright__ = """ +Copyright (C) 2017 Ellis Hoag +Copyright (C) 2017 Andreas Kloeckner +""" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + +import six + +import numpy as np + +import logging +logger = logging.getLogger(__name__) + +TAG_BASE = 83411 +TAG_DISTRIBUTE_MESHES = TAG_BASE + 1 +TAG_SEND_MESH = TAG_BASE + 2 + + +# {{{ mesh distributor + +class MPIMeshDistributor(object): + def __init__(self, mpi_comm, manager_rank=0): + self.mpi_comm = mpi_comm + self.manager_rank = manager_rank + + def is_mananger_rank(self): + return self.mpi_comm.Get_rank() == self.manager_rank + + def send_mesh_parts(self, mesh, part_per_element, num_parts): + mpi_comm = self.mpi_comm + rank = mpi_comm.Get_rank() + assert num_parts <= mpi_comm.Get_size() + + assert self.is_mananger_rank() + + from meshmode.mesh.processing import partition_mesh + parts = [partition_mesh(mesh, part_per_element, i)[0] + for i in range(num_parts)] + + local_part = None + + reqs = [] + for r, part in enumerate(parts): + if r == self.manager_rank: + local_part = part + else: + reqs.append(mpi_comm.isend(part, dest=r, tag=TAG_DISTRIBUTE_MESHES)) + + logger.info('rank %d: sent all mesh partitions', rank) + for req in reqs: + req.wait() + + return local_part + + def receive_mesh_part(self): + from mpi4py import MPI + + mpi_comm = self.mpi_comm + rank = mpi_comm.Get_rank() + + status = MPI.Status() + result = self.mpi_comm.recv( + source=self.manager_rank, tag=TAG_DISTRIBUTE_MESHES, + status=status) + logger.info('rank %d: recieved local mesh (size = %d)', rank, status.count) + + return result + +# }}} + + +# {{{ boundary communicator + +class MPIBoundaryCommunicator(object): + def __init__(self, mpi_comm, queue, part_discr, bdry_group_factory): + self.mpi_comm = mpi_comm + self.part_discr = part_discr + + self.i_local_part = mpi_comm.Get_rank() + + self.bdry_group_factory = bdry_group_factory + + # FIXME: boundary tags for unconnected parts should not exist + from meshmode.mesh import BTAG_PARTITION + self.connected_parts = set( + btag.part_nr + for btag in part_discr.mesh.boundary_tags + if isinstance(btag, BTAG_PARTITION)) + # /!\ Not 
final--mutated melow + + from meshmode.discretization.connection import make_face_restriction + + self.local_bdry_conns = {} + for i_remote_part in list(self.connected_parts): + bdry_conn = make_face_restriction(part_discr, bdry_group_factory, + BTAG_PARTITION(i_remote_part)) + + # FIXME This is a really inefficient way of figuring out that that + # part of the boundary is empty. + if bdry_conn.to_discr.nnodes: + self.local_bdry_conns[i_remote_part] = bdry_conn + else: + self.connected_parts.remove(i_remote_part) + + assert self.i_local_part not in self.connected_parts + + self._setup(queue) + + def _post_boundary_data_sends(self, queue): + send_reqs = [] + for i_remote_part in self.connected_parts: + local_bdry = self.local_bdry_conns[i_remote_part].to_discr + local_mesh = self.local_bdry_conns[i_remote_part].from_discr.mesh + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [self.local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + send_reqs.append(self.mpi_comm.isend( + local_data, dest=i_remote_part, tag=TAG_SEND_MESH)) + + return send_reqs + + def _receive_boundary_data(self, queue): + rank = self.mpi_comm.Get_rank() + i_local_part = rank + + from mpi4py import MPI + + remote_buf = {} + for i_remote_part in self.connected_parts: + status = MPI.Status() + self.mpi_comm.probe( + source=i_remote_part, tag=TAG_SEND_MESH, status=status) + remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in 
remote_buf.items(): + recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_MESH) + + remote_data = {} + total_bytes_recvd = 0 + for i_remote_part, req in recv_reqs.items(): + status = MPI.Status() + remote_data[i_remote_part] = req.wait(status=status) + + # Free the buffer + remote_buf[i_remote_part] = None + logger.debug('rank %d: Received rank %d data (%d bytes)', + rank, i_remote_part, status.count) + + total_bytes_recvd += status.count + + logger.debug('rank %d: recieved %d bytes in total', rank, total_bytes_recvd) + + self.remote_to_local_bdry_conns = {} + + from meshmode.discretization import Discretization + + for i_remote_part, data in remote_data.items(): + remote_bdry_mesh = data['bdry_mesh'] + remote_bdry = Discretization( + queue.context, + remote_bdry_mesh, + self.bdry_group_factory) + remote_adj_groups = data['adj'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] + + # Connect local_mesh to remote_mesh + from meshmode.discretization.connection import make_partition_connection + self.remote_to_local_bdry_conns[i_remote_part] = \ + make_partition_connection( + self.local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + + def _setup(self, queue): + logger.info("bdry comm rank %d send begin", self.mpi_comm.Get_rank()) + + send_reqs = self._post_boundary_data_sends(queue) + self._receive_boundary_data(queue) + + for req in send_reqs: + req.wait() + + logger.info("bdry comm rank %d send completed", self.mpi_comm.Get_rank()) + + def check(self): + from meshmode.discretization.connection import check_connection + + for i, conn in six.iteritems(self.remote_to_local_bdry_conns): + check_connection(conn) + +# }}} + + +# vim: foldmethod=marker diff --git a/test/test_partition.py b/test/test_partition.py index 005af2c9..f464597f 100644 --- a/test/test_partition.py +++ b/test/test_partition.py 
@@ -25,6 +25,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +import six from six.moves import range import numpy as np import numpy.linalg as la @@ -321,61 +322,9 @@ def count_tags(mesh, tag): # {{{ MPI test rank entrypoint -TAG_BASE = 83411 -TAG_DISTRIBUTE_MESHES = TAG_BASE + 1 -TAG_SEND_MESH = TAG_BASE + 2 - - -class MPIMeshDistributor(object): - def __init__(self, mpi_comm, manager_rank=0): - self.mpi_comm = mpi_comm - self.manager_rank = manager_rank - - def is_mananger_rank(self): - return self.mpi_comm.Get_rank() == self.manager_rank - - def send_mesh_parts(self, mesh, part_per_element, num_parts): - mpi_comm = self.mpi_comm - rank = mpi_comm.Get_rank() - assert num_parts <= mpi_comm.Get_size() - - assert self.is_mananger_rank() - - from meshmode.mesh.processing import partition_mesh - parts = [partition_mesh(mesh, part_per_element, i)[0] - for i in range(num_parts)] - - local_part = None - - reqs = [] - for r, part in enumerate(parts): - if r == self.manager_rank: - local_part = part - else: - reqs.append(mpi_comm.isend(part, dest=r, tag=TAG_DISTRIBUTE_MESHES)) - - logger.info('rank %d: sent all mesh partitions', rank) - for req in reqs: - req.wait() - - return local_part - - def receive_mesh_part(self): - from mpi4py import MPI - - mpi_comm = self.mpi_comm - rank = mpi_comm.Get_rank() - - status = MPI.Status() - result = self.mpi_comm.recv( - source=self.manager_rank, tag=TAG_DISTRIBUTE_MESHES, - status=status) - logger.info('rank %d: recieved local mesh (size = %d)', rank, status.count) - - return result - - def mpi_test_rank_entrypoint(): + from meshmode.distributed import MPIMeshDistributor, MPIBoundaryCommunicator + from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -407,109 +356,12 @@ def mpi_test_rank_entrypoint(): from meshmode.discretization import Discretization vol_discr = Discretization(cl_ctx, local_mesh, group_factory) - i_local_part = rank - local_bdry_conns = {} - for 
i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - # Mark faces within local_mesh that are connected to remote_mesh - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - local_bdry_conns[i_remote_part] =\ - make_face_restriction(vol_discr, group_factory, - BTAG_PARTITION(i_remote_part)) - - print("Rank %d send begin" % rank) - - # Send boundary data - send_reqs = [] - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() - if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh; send None - send_reqs.append(comm.isend(None, - dest=i_remote_part, - tag=TAG_SEND_MESH)) - continue - - # Gather information to send to other ranks - local_bdry = local_bdry_conns[i_remote_part].to_discr - local_mesh = local_bdry_conns[i_remote_part].from_discr.mesh - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - local_data = {'bdry_mesh': local_bdry.mesh, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, - dest=i_remote_part, - tag=TAG_SEND_MESH)) - - # Receive boundary data - remote_buf = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - remote_rank = i_remote_part - status = MPI.Status() - comm.probe(source=remote_rank, tag=TAG_SEND_MESH, status=status) - remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) - - 
recv_reqs = {} - for i_remote_part, buf in remote_buf.items(): - remote_rank = i_remote_part - recv_reqs[i_remote_part] = comm.irecv(buf=buf, - source=remote_rank, - tag=TAG_SEND_MESH) - - remote_data = {} - total_bytes_recvd = 0 - for i_remote_part, req in recv_reqs.items(): - status = MPI.Status() - remote_data[i_remote_part] = req.wait(status=status) - # Free the buffer - remote_buf[i_remote_part] = None # FIXME: Is this a good idea? - print('Rank {0}: Received rank {1} data ({2} bytes)' - .format(rank, i_remote_part, status.count)) - total_bytes_recvd += status.count - - print('Rank {0}: Recieved {1} bytes in total' - .format(rank, total_bytes_recvd)) - - for req in send_reqs: - req.wait() - - for i_remote_part, data in remote_data.items(): - if data is None: - # Local mesh is not connected to remote mesh - continue - remote_bdry_mesh = data['bdry_mesh'] - remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) - remote_adj_groups = data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - connection = make_partition_connection(local_bdry_conns[i_remote_part], - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - from meshmode.discretization.connection import check_connection - check_connection(connection) - - print("Rank %d exiting" % rank) + bdry_comm = MPIBoundaryCommunicator(comm, queue, vol_discr, group_factory) + bdry_comm.check() + + # FIXME: Actually test communicating data with this + + logger.debug("Rank %d exiting", rank) # }}} -- GitLab From fc48b112bcd9d3e8a9d87513cf7df7ddc4379c80 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 5 Oct 2017 23:14:34 -0500 Subject: [PATCH 114/266] Remove unused six import --- test/test_partition.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/test_partition.py 
b/test/test_partition.py index f464597f..f422c61f 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -25,7 +25,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ -import six from six.moves import range import numpy as np import numpy.linalg as la -- GitLab From 951571d1f8ae4e5bbb41095aa1a4a93673770632 Mon Sep 17 00:00:00 2001 From: Ellis Date: Fri, 13 Oct 2017 20:46:45 -0500 Subject: [PATCH 115/266] working --- meshmode/distributed.py | 17 ++++++++++++++++- meshmode/mesh/__init__.py | 26 -------------------------- meshmode/mesh/processing.py | 10 +++++++--- 3 files changed, 23 insertions(+), 30 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 8fcc239c..c2d93c23 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -107,7 +107,21 @@ class MPIBoundaryCommunicator(object): btag.part_nr for btag in part_discr.mesh.boundary_tags if isinstance(btag, BTAG_PARTITION)) - # /!\ Not final--mutated melow + # /!\ Not final--mutated below + # self.connected_parts = np.array([]) + # for adj in part_discr.mesh.facial_adjacency_groups: + # from meshmode.mesh import InterPartitionAdjacencyGroup + # print(adj[None]) + # if isinstance(adj[None], InterPartitionAdjacencyGroup): + # indices = adj[None].neighbor_partitions >= 0 + # self.connected_parts.append(np.unique(adj[None].neighbor_partitions[indices])) + # self.connected_parts = np.unique(self.connected_parts) + #self.connected_parts = set( + # part_nr + # for adj in part_discr.mesh.facial_adjacency_groups + # for part_nr in adj[None].neighbor_partitions + # if part_nr >= 0 + #) from meshmode.discretization.connection import make_face_restriction @@ -122,6 +136,7 @@ class MPIBoundaryCommunicator(object): self.local_bdry_conns[i_remote_part] = bdry_conn else: self.connected_parts.remove(i_remote_part) + # self.local_bdry_conns[i_remote_part] = bdry_conn assert self.i_local_part not in self.connected_parts diff --git 
a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index f5147001..8e6e18b9 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -548,32 +548,6 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): .. versionadded:: 2017.1 """ - #FIXME - ''' - This is a weird error. When we try to pickle and unpickle a mesh, - neighbor_partitions does not exist anymore in - mesh.facial_adjacency_groups[i][None]. My guess was that pickle did not know - that property existed, so I created it. - ''' - neighbor_partitions = None - global_neighbors = None - - def __init__(self, elements, - element_faces, - neighbors, - igroup, - neighbor_partitions, - global_neighbors, - neighbor_faces): - FacialAdjacencyGroup.__init__(self, elements=elements, - element_faces=element_faces, - neighbors=neighbors, - neighbor_faces=neighbor_faces, - igroup=igroup, - ineighbor_group=None) - self.neighbor_partitions = neighbor_partitions - self.global_neighbors = global_neighbors - def __eq__(self, other): return (super.__eq__(self, other) and np.array_equal(self.global_neighbors, other.global_neighbors) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index a47ee7fb..b8743eb8 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -243,9 +243,13 @@ def partition_mesh(mesh, part_per_element, part_nr): adj_idx += 1 connected_mesh.facial_adjacency_groups[igrp][None] =\ - InterPartitionAdjacencyGroup(elems, faces, neighbors, - bdry.igroup, - n_parts, global_n_elems, n_faces) + InterPartitionAdjacencyGroup(elements=elems, + element_faces=faces, + neighbors=neighbors, + igroup=bdry.igroup, + neighbor_partitions=n_parts, + global_neighbors=global_n_elems, + neighbor_faces=n_faces) return connected_mesh, queried_elems -- GitLab From c9820ded509df07e075ed139a3efc3aece317783 Mon Sep 17 00:00:00 2001 From: Ellis Date: Mon, 16 Oct 2017 10:33:04 -0500 Subject: [PATCH 116/266] Make mpi communication only between connected partitions --- 
meshmode/distributed.py | 42 ++++++++++++----------------------------- test/test_partition.py | 17 ++++++++++++++--- 2 files changed, 26 insertions(+), 33 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index c2d93c23..21c09d4f 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -101,44 +101,26 @@ class MPIBoundaryCommunicator(object): self.bdry_group_factory = bdry_group_factory - # FIXME: boundary tags for unconnected parts should not exist - from meshmode.mesh import BTAG_PARTITION - self.connected_parts = set( - btag.part_nr - for btag in part_discr.mesh.boundary_tags - if isinstance(btag, BTAG_PARTITION)) - # /!\ Not final--mutated below - # self.connected_parts = np.array([]) - # for adj in part_discr.mesh.facial_adjacency_groups: - # from meshmode.mesh import InterPartitionAdjacencyGroup - # print(adj[None]) - # if isinstance(adj[None], InterPartitionAdjacencyGroup): - # indices = adj[None].neighbor_partitions >= 0 - # self.connected_parts.append(np.unique(adj[None].neighbor_partitions[indices])) - # self.connected_parts = np.unique(self.connected_parts) - #self.connected_parts = set( - # part_nr - # for adj in part_discr.mesh.facial_adjacency_groups - # for part_nr in adj[None].neighbor_partitions - # if part_nr >= 0 - #) + from meshmode.mesh import InterPartitionAdjacencyGroup + self.connected_parts = set() + for adj in part_discr.mesh.facial_adjacency_groups: + if isinstance(adj[None], InterPartitionAdjacencyGroup): + indices = adj[None].neighbor_partitions >= 0 + self.connected_parts = self.connected_parts.union( + adj[None].neighbor_partitions[indices]) + assert self.i_local_part not in self.connected_parts from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION self.local_bdry_conns = {} for i_remote_part in list(self.connected_parts): bdry_conn = make_face_restriction(part_discr, bdry_group_factory, BTAG_PARTITION(i_remote_part)) - # FIXME This is a 
really inefficient way of figuring out that that - # part of the boundary is empty. - if bdry_conn.to_discr.nnodes: - self.local_bdry_conns[i_remote_part] = bdry_conn - else: - self.connected_parts.remove(i_remote_part) - # self.local_bdry_conns[i_remote_part] = bdry_conn - - assert self.i_local_part not in self.connected_parts + # Assert that everything in self.connected_parts is truly connected + assert bdry_conn.to_discr.nnodes > 0 + self.local_bdry_conns[i_remote_part] = bdry_conn self._setup(queue) diff --git a/test/test_partition.py b/test/test_partition.py index f422c61f..987d9a93 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -223,6 +223,17 @@ def test_partition_mesh(num_parts, num_meshes, dim, scramble_partitions=False): new_meshes = [ partition_mesh(mesh, part_per_element, i) for i in range(num_parts)] + import pickle + for m, _ in new_meshes: + for adj in m.facial_adjacency_groups: + data = {'adj': adj[None]} + pickle.dump(data, open('tmp.p', 'wb')) + data2 = pickle.load(open('tmp.p', 'rb')) + assert data == data2 + from meshmode.mesh import InterPartitionAdjacencyGroup + if isinstance(data['adj'], InterPartitionAdjacencyGroup): + assert np.equal(data['adj'].neighbor_partitions, data2['adj'].neighbor_partitions).all() + assert mesh.nelements == np.sum( [new_meshes[i][0].nelements for i in range(num_parts)]), \ "part_mesh has the wrong number of elements" @@ -334,7 +345,7 @@ def mpi_test_rank_entrypoint(): if mesh_dist.is_mananger_rank(): np.random.seed(42) from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] + meshes = [generate_warped_rect_mesh(3, order=4, n=4) for _ in range(2)] from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes(meshes) @@ -368,11 +379,11 @@ def mpi_test_rank_entrypoint(): # {{{ MPI test pytest entrypoint @pytest.mark.mpi -@pytest.mark.parametrize("num_partitions", [3, 6]) 
+@pytest.mark.parametrize("num_partitions", [3, 4]) def test_mpi_communication(num_partitions): pytest.importorskip("mpi4py") - num_ranks = num_partitions + 1 + num_ranks = num_partitions from subprocess import check_call import sys newenv = os.environ.copy() -- GitLab From f40e42da4b9e35ebca9a28ab49e8a8d7a0723b5f Mon Sep 17 00:00:00 2001 From: Ellis Date: Mon, 16 Oct 2017 13:33:49 -0500 Subject: [PATCH 117/266] Improve testing for mpi communication --- meshmode/distributed.py | 4 ++- test/test_partition.py | 60 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 62 insertions(+), 2 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 21c09d4f..a71cf699 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -108,13 +108,14 @@ class MPIBoundaryCommunicator(object): indices = adj[None].neighbor_partitions >= 0 self.connected_parts = self.connected_parts.union( adj[None].neighbor_partitions[indices]) + self.connected_parts = list(self.connected_parts) assert self.i_local_part not in self.connected_parts from meshmode.discretization.connection import make_face_restriction from meshmode.mesh import BTAG_PARTITION self.local_bdry_conns = {} - for i_remote_part in list(self.connected_parts): + for i_remote_part in self.connected_parts: bdry_conn = make_face_restriction(part_discr, bdry_group_factory, BTAG_PARTITION(i_remote_part)) @@ -198,6 +199,7 @@ class MPIBoundaryCommunicator(object): # Connect local_mesh to remote_mesh from meshmode.discretization.connection import make_partition_connection + # FIXME: rename to local_to_remote_bdry_conns?? 
self.remote_to_local_bdry_conns[i_remote_part] = \ make_partition_connection( self.local_bdry_conns[i_remote_part], diff --git a/test/test_partition.py b/test/test_partition.py index 987d9a93..6e1782b3 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -178,6 +178,11 @@ def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, check_connection(remote_part_conn) true_local_points = f(local_bdry.nodes()[0].with_queue(queue)) + s = true_local_points.shape + d = true_local_points.dtype + a = cl.array.Array(queue, shape=s, dtype=d) + a[:] = true_local_points.get() + true_local_points = a remote_points = local_part_conn(queue, true_local_points) local_points = remote_part_conn(queue, remote_points) @@ -369,7 +374,60 @@ def mpi_test_rank_entrypoint(): bdry_comm = MPIBoundaryCommunicator(comm, queue, vol_discr, group_factory) bdry_comm.check() - # FIXME: Actually test communicating data with this + def f(x): + return 0.1*cl.clmath.sin(30.*x) + + TAG_A = 123 + TAG_B = 234 + send_reqs = [] + for i_remote_part in bdry_comm.connected_parts: + conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] + bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr + bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + + true_local_f = f(bdry_x) + remote_f = conn(queue, true_local_f) + + data = {'remote_f': remote_f.get(queue=queue), + 'shape': remote_f.shape, + 'dtype': remote_f.dtype} + send_reqs.append(comm.isend(data, dest=i_remote_part, tag=TAG_A)) + + remote_to_local_f_data = {} + for i_remote_part in bdry_comm.connected_parts: + remote_to_local_f_data[i_remote_part] = comm.recv(source=i_remote_part, tag=TAG_A) + + for req in send_reqs: + req.wait() + + send_reqs = [] + for i_remote_part in bdry_comm.connected_parts: + conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] + shape = remote_to_local_f_data[i_remote_part]['shape'] + dtype = remote_to_local_f_data[i_remote_part]['dtype'] + local_f_np = 
remote_to_local_f_data[i_remote_part]['remote_f'] + local_f_cl = cl.array.Array(queue, shape=shape, dtype=dtype) + local_f_cl[:] = local_f_np + remote_f = conn(queue, local_f_cl).get(queue=queue) + + send_reqs.append(comm.isend(remote_f, dest=i_remote_part, tag=TAG_B)) + + local_f_data = {} + for i_remote_part in bdry_comm.connected_parts: + local_f_data[i_remote_part] = comm.recv(source=i_remote_part, tag=TAG_B) + + for req in send_reqs: + req.wait() + + for i_remote_part in bdry_comm.connected_parts: + bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr + bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + + true_local_f = f(bdry_x).get(queue=queue) + local_f = local_f_data[i_remote_part] + + err = la.norm(true_local_f - local_f, np.inf) + assert err < 1e-13, "Error (%f) too large" % err logger.debug("Rank %d exiting", rank) -- GitLab From 4b525b7a4f4816ef38c85c32815ced3214e99f28 Mon Sep 17 00:00:00 2001 From: Ellis Date: Mon, 16 Oct 2017 14:25:34 -0500 Subject: [PATCH 118/266] Clean up mpi communication tests --- meshmode/distributed.py | 72 ++++++++++++++++++++++++++++++++++++++--- test/test_partition.py | 67 +------------------------------------- 2 files changed, 68 insertions(+), 71 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index a71cf699..25b3a538 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -34,7 +34,9 @@ logger = logging.getLogger(__name__) TAG_BASE = 83411 TAG_DISTRIBUTE_MESHES = TAG_BASE + 1 -TAG_SEND_MESH = TAG_BASE + 2 +TAG_SEND_BOUNDARY = TAG_BASE + 2 +TAG_SEND_REMOTE_NODES = TAG_BASE + 3 +TAG_SEND_LOCAL_NODES = TAG_BASE + 4 # {{{ mesh distributor @@ -145,7 +147,7 @@ class MPIBoundaryCommunicator(object): 'to_elem_faces': local_to_elem_faces, 'to_elem_indices': local_to_elem_indices} send_reqs.append(self.mpi_comm.isend( - local_data, dest=i_remote_part, tag=TAG_SEND_MESH)) + local_data, dest=i_remote_part, tag=TAG_SEND_BOUNDARY)) return send_reqs @@ -159,14 +161,14 @@ class 
MPIBoundaryCommunicator(object): for i_remote_part in self.connected_parts: status = MPI.Status() self.mpi_comm.probe( - source=i_remote_part, tag=TAG_SEND_MESH, status=status) + source=i_remote_part, tag=TAG_SEND_BOUNDARY, status=status) remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) recv_reqs = {} for i_remote_part, buf in remote_buf.items(): recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, source=i_remote_part, - tag=TAG_SEND_MESH) + tag=TAG_SEND_BOUNDARY) remote_data = {} total_bytes_recvd = 0 @@ -199,7 +201,6 @@ class MPIBoundaryCommunicator(object): # Connect local_mesh to remote_mesh from meshmode.discretization.connection import make_partition_connection - # FIXME: rename to local_to_remote_bdry_conns?? self.remote_to_local_bdry_conns[i_remote_part] = \ make_partition_connection( self.local_bdry_conns[i_remote_part], @@ -226,6 +227,67 @@ class MPIBoundaryCommunicator(object): for i, conn in six.iteritems(self.remote_to_local_bdry_conns): check_connection(conn) + def test_data_transfer(self, queue): + import pyopencl as cl + + def f(x): + return 0.1*cl.clmath.sin(30.*x) + + send_reqs = [] + for i_remote_part in self.connected_parts: + conn = self.remote_to_local_bdry_conns[i_remote_part] + bdry_discr = self.local_bdry_conns[i_remote_part].to_discr + bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + + true_local_f = f(bdry_x) + remote_f = conn(queue, true_local_f) + + send_reqs.append(self.mpi_comm.isend(remote_f.get(queue=queue), + dest=i_remote_part, + tag=TAG_SEND_REMOTE_NODES)) + + remote_to_local_f_data = {} + for i_remote_part in self.connected_parts: + remote_to_local_f_data[i_remote_part] =\ + self.mpi_comm.recv(source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES) + + for req in send_reqs: + req.wait() + + send_reqs = [] + for i_remote_part in self.connected_parts: + conn = self.remote_to_local_bdry_conns[i_remote_part] + local_f_np = remote_to_local_f_data[i_remote_part] + local_f_cl = cl.array.Array(queue, + 
shape=local_f_np.shape, + dtype=local_f_np.dtype) + local_f_cl.set(local_f_np) + remote_f = conn(queue, local_f_cl).get(queue=queue) + + send_reqs.append(self.mpi_comm.isend(remote_f, + dest=i_remote_part, + tag=TAG_SEND_LOCAL_NODES)) + + local_f_data = {} + for i_remote_part in self.connected_parts: + local_f_data[i_remote_part] = self.mpi_comm.recv(source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES) + + for req in send_reqs: + req.wait() + + for i_remote_part in self.connected_parts: + bdry_discr = self.local_bdry_conns[i_remote_part].to_discr + bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + + true_local_f = f(bdry_x).get(queue=queue) + local_f = local_f_data[i_remote_part] + + from numpy.linalg import norm + err = norm(true_local_f - local_f, np.inf) + assert err < 1e-13, "Error (%f) too large" % err + # }}} diff --git a/test/test_partition.py b/test/test_partition.py index 6e1782b3..4ae9c741 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -228,17 +228,6 @@ def test_partition_mesh(num_parts, num_meshes, dim, scramble_partitions=False): new_meshes = [ partition_mesh(mesh, part_per_element, i) for i in range(num_parts)] - import pickle - for m, _ in new_meshes: - for adj in m.facial_adjacency_groups: - data = {'adj': adj[None]} - pickle.dump(data, open('tmp.p', 'wb')) - data2 = pickle.load(open('tmp.p', 'rb')) - assert data == data2 - from meshmode.mesh import InterPartitionAdjacencyGroup - if isinstance(data['adj'], InterPartitionAdjacencyGroup): - assert np.equal(data['adj'].neighbor_partitions, data2['adj'].neighbor_partitions).all() - assert mesh.nelements == np.sum( [new_meshes[i][0].nelements for i in range(num_parts)]), \ "part_mesh has the wrong number of elements" @@ -373,61 +362,7 @@ def mpi_test_rank_entrypoint(): bdry_comm = MPIBoundaryCommunicator(comm, queue, vol_discr, group_factory) bdry_comm.check() - - def f(x): - return 0.1*cl.clmath.sin(30.*x) - - TAG_A = 123 - TAG_B = 234 - send_reqs = [] - for i_remote_part in 
bdry_comm.connected_parts: - conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] - bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr - bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) - - true_local_f = f(bdry_x) - remote_f = conn(queue, true_local_f) - - data = {'remote_f': remote_f.get(queue=queue), - 'shape': remote_f.shape, - 'dtype': remote_f.dtype} - send_reqs.append(comm.isend(data, dest=i_remote_part, tag=TAG_A)) - - remote_to_local_f_data = {} - for i_remote_part in bdry_comm.connected_parts: - remote_to_local_f_data[i_remote_part] = comm.recv(source=i_remote_part, tag=TAG_A) - - for req in send_reqs: - req.wait() - - send_reqs = [] - for i_remote_part in bdry_comm.connected_parts: - conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] - shape = remote_to_local_f_data[i_remote_part]['shape'] - dtype = remote_to_local_f_data[i_remote_part]['dtype'] - local_f_np = remote_to_local_f_data[i_remote_part]['remote_f'] - local_f_cl = cl.array.Array(queue, shape=shape, dtype=dtype) - local_f_cl[:] = local_f_np - remote_f = conn(queue, local_f_cl).get(queue=queue) - - send_reqs.append(comm.isend(remote_f, dest=i_remote_part, tag=TAG_B)) - - local_f_data = {} - for i_remote_part in bdry_comm.connected_parts: - local_f_data[i_remote_part] = comm.recv(source=i_remote_part, tag=TAG_B) - - for req in send_reqs: - req.wait() - - for i_remote_part in bdry_comm.connected_parts: - bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr - bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) - - true_local_f = f(bdry_x).get(queue=queue) - local_f = local_f_data[i_remote_part] - - err = la.norm(true_local_f - local_f, np.inf) - assert err < 1e-13, "Error (%f) too large" % err + bdry_comm.test_data_transfer(queue) logger.debug("Rank %d exiting", rank) -- GitLab From 974ee71e3de4a61fad7a27270e5f473e3ba67f1e Mon Sep 17 00:00:00 2001 From: Ellis Date: Tue, 17 Oct 2017 19:55:55 -0500 Subject: [PATCH 119/266] Make nonblocking receives --- 
meshmode/distributed.py | 73 +++++++++++++++++++++++++++++++---------- 1 file changed, 56 insertions(+), 17 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 25b3a538..42ac8931 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -28,6 +28,7 @@ THE SOFTWARE. import six import numpy as np +from mpi4py import MPI import logging logger = logging.getLogger(__name__) @@ -76,8 +77,6 @@ class MPIMeshDistributor(object): return local_part def receive_mesh_part(self): - from mpi4py import MPI - mpi_comm = self.mpi_comm rank = mpi_comm.Get_rank() @@ -107,19 +106,18 @@ class MPIBoundaryCommunicator(object): self.connected_parts = set() for adj in part_discr.mesh.facial_adjacency_groups: if isinstance(adj[None], InterPartitionAdjacencyGroup): - indices = adj[None].neighbor_partitions >= 0 + indices = (adj[None].neighbor_partitions >= 0) self.connected_parts = self.connected_parts.union( adj[None].neighbor_partitions[indices]) - self.connected_parts = list(self.connected_parts) assert self.i_local_part not in self.connected_parts from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION self.local_bdry_conns = {} for i_remote_part in self.connected_parts: - bdry_conn = make_face_restriction(part_discr, bdry_group_factory, - BTAG_PARTITION(i_remote_part)) + bdry_conn = make_face_restriction(part_discr, + bdry_group_factory, + BTAG_PARTITION(i_remote_part)) # Assert that everything in self.connected_parts is truly connected assert bdry_conn.to_discr.nnodes > 0 @@ -155,8 +153,6 @@ class MPIBoundaryCommunicator(object): rank = self.mpi_comm.Get_rank() i_local_part = rank - from mpi4py import MPI - remote_buf = {} for i_remote_part in self.connected_parts: status = MPI.Status() @@ -233,6 +229,24 @@ class MPIBoundaryCommunicator(object): def f(x): return 0.1*cl.clmath.sin(30.*x) + ''' + Here is a simplified example of what happens from + the point of view of the local rank. 
+ + Local rank: + 1. Transfer local points from local boundary to remote boundary + to get remote points. + 2. Send remote points to remote rank. + Remote rank: + 3. Receive remote points from local rank. + 4. Transfer remote points from remote boundary to local boundary + to get local points. + 5. Send local points to local rank. + Local rank: + 6. Recieve local points from remote rank. + 7. Check if local points are the same as the original local points. + ''' + send_reqs = [] for i_remote_part in self.connected_parts: conn = self.remote_to_local_bdry_conns[i_remote_part] @@ -246,11 +260,23 @@ class MPIBoundaryCommunicator(object): dest=i_remote_part, tag=TAG_SEND_REMOTE_NODES)) - remote_to_local_f_data = {} + buffers = {} for i_remote_part in self.connected_parts: - remote_to_local_f_data[i_remote_part] =\ - self.mpi_comm.recv(source=i_remote_part, - tag=TAG_SEND_REMOTE_NODES) + status = MPI.Status() + self.mpi_comm.probe(source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES, + status=status) + buffers[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in buffers.items(): + recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES) + remote_to_local_f_data = {} + for i_remote_part, req in recv_reqs.items(): + remote_to_local_f_data[i_remote_part] = req.wait() + buffers[i_remote_part] = None # free buffer for req in send_reqs: req.wait() @@ -269,10 +295,23 @@ class MPIBoundaryCommunicator(object): dest=i_remote_part, tag=TAG_SEND_LOCAL_NODES)) - local_f_data = {} + buffers = {} for i_remote_part in self.connected_parts: - local_f_data[i_remote_part] = self.mpi_comm.recv(source=i_remote_part, - tag=TAG_SEND_LOCAL_NODES) + status = MPI.Status() + self.mpi_comm.probe(source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES, + status=status) + buffers[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in buffers.items(): + 
recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES) + local_f_data = {} + for i_remote_part, req in recv_reqs.items(): + local_f_data[i_remote_part] = req.wait() + buffers[i_remote_part] = None # free buffer for req in send_reqs: req.wait() @@ -286,7 +325,7 @@ class MPIBoundaryCommunicator(object): from numpy.linalg import norm err = norm(true_local_f - local_f, np.inf) - assert err < 1e-13, "Error (%f) too large" % err + assert err < 1e-13, "Error = %f is too large" % err # }}} -- GitLab From 37280a02c5a57c90efe9e4fc247a339b8157865b Mon Sep 17 00:00:00 2001 From: Ellis Date: Tue, 24 Oct 2017 19:51:16 -0500 Subject: [PATCH 120/266] Add missing attribute in InterPartitionAdjacency --- meshmode/mesh/__init__.py | 4 ++++ meshmode/mesh/processing.py | 1 + 2 files changed, 5 insertions(+) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 8e6e18b9..1147afb1 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -495,6 +495,10 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): The group number of this group. + .. attribute:: ineighbor_group + + *None* for boundary faces. + .. attribute:: elements Group-local element numbers. 
diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index b8743eb8..419db4e8 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -247,6 +247,7 @@ def partition_mesh(mesh, part_per_element, part_nr): element_faces=faces, neighbors=neighbors, igroup=bdry.igroup, + ineighbor_group=None, neighbor_partitions=n_parts, global_neighbors=global_n_elems, neighbor_faces=n_faces) -- GitLab From 9b6e55b3809973470bbdde7e753acc6635b65046 Mon Sep 17 00:00:00 2001 From: Ellis Date: Tue, 24 Oct 2017 20:27:14 -0500 Subject: [PATCH 121/266] Remove useless import --- test/test_partition.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 4ae9c741..e0e25dfb 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -350,8 +350,6 @@ def mpi_test_rank_entrypoint(): else: local_mesh = mesh_dist.receive_mesh_part() - from meshmode.discretization.poly_element\ - import PolynomialWarpAndBlendGroupFactory group_factory = PolynomialWarpAndBlendGroupFactory(4) import pyopencl as cl cl_ctx = cl.create_some_context() -- GitLab From 8c691c923aa9a6fafe1ac247fbfeb77b5e635fa7 Mon Sep 17 00:00:00 2001 From: Ellis Date: Tue, 24 Oct 2017 20:31:18 -0500 Subject: [PATCH 122/266] remove useless import --- test/test_partition.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/test_partition.py b/test/test_partition.py index e0e25dfb..15b0dbb7 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -351,7 +351,6 @@ def mpi_test_rank_entrypoint(): local_mesh = mesh_dist.receive_mesh_part() group_factory = PolynomialWarpAndBlendGroupFactory(4) - import pyopencl as cl cl_ctx = cl.create_some_context() queue = cl.CommandQueue(cl_ctx) -- GitLab From a1ffcddc94b07897b8f3211518f1f67c643031ca Mon Sep 17 00:00:00 2001 From: Ellis Date: Wed, 25 Oct 2017 15:25:36 -0500 Subject: [PATCH 123/266] Change variable name for flake8 --- meshmode/mesh/generation.py | 12 ++++++------ 1 file changed, 6 
insertions(+), 6 deletions(-) diff --git a/meshmode/mesh/generation.py b/meshmode/mesh/generation.py index c19c3dd1..5f647b95 100644 --- a/meshmode/mesh/generation.py +++ b/meshmode/mesh/generation.py @@ -395,12 +395,12 @@ def generate_icosahedron(r, order): top_point = 5 tris = [] - l = len(top_ring) - for i in range(l): - tris.append([top_ring[i], top_ring[(i+1) % l], top_point]) - tris.append([bottom_ring[i], bottom_point, bottom_ring[(i+1) % l], ]) - tris.append([bottom_ring[i], bottom_ring[(i+1) % l], top_ring[i]]) - tris.append([top_ring[i], bottom_ring[(i+1) % l], top_ring[(i+1) % l]]) + el = len(top_ring) + for i in range(el): + tris.append([top_ring[i], top_ring[(i+1) % el], top_point]) + tris.append([bottom_ring[i], bottom_point, bottom_ring[(i+1) % el], ]) + tris.append([bottom_ring[i], bottom_ring[(i+1) % el], top_ring[i]]) + tris.append([top_ring[i], bottom_ring[(i+1) % el], top_ring[(i+1) % el]]) vertices *= r/la.norm(vertices[:, 0]) -- GitLab From a6b2360aeeecb142b24a41ef33aad4a95a45a3eb Mon Sep 17 00:00:00 2001 From: Ellis Date: Thu, 2 Nov 2017 12:15:43 -0500 Subject: [PATCH 124/266] Fix documentation --- meshmode/discretization/connection/opposite_face.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 106c6350..cfd2b1ac 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -427,7 +427,7 @@ def make_partition_connection(local_bdry_conn, i_local_part, group `igrp`. :returns: A :class:`DirectDiscretizationConnection` that performs data - exchange across faces from partition `i_local_part` to the remote partition. + exchange across faces from the remote partition to partition `i_local_part`. .. 
versionadded:: 2017.1 -- GitLab From 3207e83b573bdc36a9c0650a4bd675d3c170d04f Mon Sep 17 00:00:00 2001 From: Ellis Date: Thu, 2 Nov 2017 12:16:05 -0500 Subject: [PATCH 125/266] Add documentation --- meshmode/distributed.py | 46 ++++++++++++++++++++++++++++++++++++++++- test/test_partition.py | 2 +- 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 42ac8931..2adc1c14 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -39,11 +39,24 @@ TAG_SEND_BOUNDARY = TAG_BASE + 2 TAG_SEND_REMOTE_NODES = TAG_BASE + 3 TAG_SEND_LOCAL_NODES = TAG_BASE + 4 +__doc__ = """ +.. autoclass:: MPIMeshDistributor +.. autoclass:: MPIBoundaryCommunicator +""" + # {{{ mesh distributor class MPIMeshDistributor(object): + """ + .. automethod:: is_mananger_rank + .. automethod:: send_mesh_parts + .. automethod:: recv_mesh_part + """ def __init__(self, mpi_comm, manager_rank=0): + """ + :arg mpi_comm: A :class:`MPI.Intracomm` + """ self.mpi_comm = mpi_comm self.manager_rank = manager_rank @@ -51,6 +64,16 @@ class MPIMeshDistributor(object): return self.mpi_comm.Get_rank() == self.manager_rank def send_mesh_parts(self, mesh, part_per_element, num_parts): + """ + :arg mesh: A :class:`Mesh` to distribute to other ranks. + :arg part_per_element: A :class:`numpy.ndarray` containing one + integer per element of *mesh* indicating which part of the + partitioned mesh the element is to become a part of. + :arg num_parts: The number of partitions to divide the mesh into. + + Sends each partition to a different rank. + Returns one partition that was not sent to any other rank. + """ mpi_comm = self.mpi_comm rank = mpi_comm.Get_rank() assert num_parts <= mpi_comm.Get_size() @@ -77,9 +100,14 @@ class MPIMeshDistributor(object): return local_part def receive_mesh_part(self): + """ + Returns the mesh sent by the manager rank. 
+ """ mpi_comm = self.mpi_comm rank = mpi_comm.Get_rank() + assert not self.is_mananger_rank(), "Manager rank cannot recieve mesh" + status = MPI.Status() result = self.mpi_comm.recv( source=self.manager_rank, tag=TAG_DISTRIBUTE_MESHES, @@ -94,7 +122,23 @@ class MPIMeshDistributor(object): # {{{ boundary communicator class MPIBoundaryCommunicator(object): + """ + .. attribute:: remote_to_local_bdry_conns + + Maps rank numbers to :class:`DirectDiscretizationConnection`. + + ``remote_to_local_bdry_conns[i_remote_part]`` gives the connection + that performs data exchange across faces from partition `i_remote_part` + to the local mesh. + """ def __init__(self, mpi_comm, queue, part_discr, bdry_group_factory): + """ + :arg mpi_comm: A :class:`MPI.Intracomm` + :arg queue: + :arg part_discr: A :class:`meshmode.Discretization` of the local mesh + to perform boundary communication on. + :arg bdry_group_factory: + """ self.mpi_comm = mpi_comm self.part_discr = part_discr @@ -223,7 +267,7 @@ class MPIBoundaryCommunicator(object): for i, conn in six.iteritems(self.remote_to_local_bdry_conns): check_connection(conn) - def test_data_transfer(self, queue): + def _test_data_transfer(self, queue): import pyopencl as cl def f(x): diff --git a/test/test_partition.py b/test/test_partition.py index 15b0dbb7..3fe56060 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -359,7 +359,7 @@ def mpi_test_rank_entrypoint(): bdry_comm = MPIBoundaryCommunicator(comm, queue, vol_discr, group_factory) bdry_comm.check() - bdry_comm.test_data_transfer(queue) + bdry_comm._test_data_transfer(queue) logger.debug("Rank %d exiting", rank) -- GitLab From 1035cd61ee5c280f4e4bd61364aad4140a0fa3f0 Mon Sep 17 00:00:00 2001 From: Ellis Date: Sun, 5 Nov 2017 18:32:49 -0600 Subject: [PATCH 126/266] Move data transfer test function to test folder --- meshmode/distributed.py | 107 +------------------------------------ test/test_partition.py | 113 +++++++++++++++++++++++++++++++++++++++- 2 files 
changed, 112 insertions(+), 108 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 2adc1c14..9b160a6a 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -36,8 +36,6 @@ logger = logging.getLogger(__name__) TAG_BASE = 83411 TAG_DISTRIBUTE_MESHES = TAG_BASE + 1 TAG_SEND_BOUNDARY = TAG_BASE + 2 -TAG_SEND_REMOTE_NODES = TAG_BASE + 3 -TAG_SEND_LOCAL_NODES = TAG_BASE + 4 __doc__ = """ .. autoclass:: MPIMeshDistributor @@ -132,6 +130,7 @@ class MPIBoundaryCommunicator(object): to the local mesh. """ def __init__(self, mpi_comm, queue, part_discr, bdry_group_factory): + # FIXME: Refactor so that we can specify which rank we want to recieve from """ :arg mpi_comm: A :class:`MPI.Intracomm` :arg queue: @@ -267,110 +266,6 @@ class MPIBoundaryCommunicator(object): for i, conn in six.iteritems(self.remote_to_local_bdry_conns): check_connection(conn) - def _test_data_transfer(self, queue): - import pyopencl as cl - - def f(x): - return 0.1*cl.clmath.sin(30.*x) - - ''' - Here is a simplified example of what happens from - the point of view of the local rank. - - Local rank: - 1. Transfer local points from local boundary to remote boundary - to get remote points. - 2. Send remote points to remote rank. - Remote rank: - 3. Receive remote points from local rank. - 4. Transfer remote points from remote boundary to local boundary - to get local points. - 5. Send local points to local rank. - Local rank: - 6. Recieve local points from remote rank. - 7. Check if local points are the same as the original local points. 
- ''' - - send_reqs = [] - for i_remote_part in self.connected_parts: - conn = self.remote_to_local_bdry_conns[i_remote_part] - bdry_discr = self.local_bdry_conns[i_remote_part].to_discr - bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) - - true_local_f = f(bdry_x) - remote_f = conn(queue, true_local_f) - - send_reqs.append(self.mpi_comm.isend(remote_f.get(queue=queue), - dest=i_remote_part, - tag=TAG_SEND_REMOTE_NODES)) - - buffers = {} - for i_remote_part in self.connected_parts: - status = MPI.Status() - self.mpi_comm.probe(source=i_remote_part, - tag=TAG_SEND_REMOTE_NODES, - status=status) - buffers[i_remote_part] = np.empty(status.count, dtype=bytes) - - recv_reqs = {} - for i_remote_part, buf in buffers.items(): - recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, - source=i_remote_part, - tag=TAG_SEND_REMOTE_NODES) - remote_to_local_f_data = {} - for i_remote_part, req in recv_reqs.items(): - remote_to_local_f_data[i_remote_part] = req.wait() - buffers[i_remote_part] = None # free buffer - - for req in send_reqs: - req.wait() - - send_reqs = [] - for i_remote_part in self.connected_parts: - conn = self.remote_to_local_bdry_conns[i_remote_part] - local_f_np = remote_to_local_f_data[i_remote_part] - local_f_cl = cl.array.Array(queue, - shape=local_f_np.shape, - dtype=local_f_np.dtype) - local_f_cl.set(local_f_np) - remote_f = conn(queue, local_f_cl).get(queue=queue) - - send_reqs.append(self.mpi_comm.isend(remote_f, - dest=i_remote_part, - tag=TAG_SEND_LOCAL_NODES)) - - buffers = {} - for i_remote_part in self.connected_parts: - status = MPI.Status() - self.mpi_comm.probe(source=i_remote_part, - tag=TAG_SEND_LOCAL_NODES, - status=status) - buffers[i_remote_part] = np.empty(status.count, dtype=bytes) - - recv_reqs = {} - for i_remote_part, buf in buffers.items(): - recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, - source=i_remote_part, - tag=TAG_SEND_LOCAL_NODES) - local_f_data = {} - for i_remote_part, req in recv_reqs.items(): - 
local_f_data[i_remote_part] = req.wait() - buffers[i_remote_part] = None # free buffer - - for req in send_reqs: - req.wait() - - for i_remote_part in self.connected_parts: - bdry_discr = self.local_bdry_conns[i_remote_part].to_discr - bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) - - true_local_f = f(bdry_x).get(queue=queue) - local_f = local_f_data[i_remote_part] - - from numpy.linalg import norm - err = norm(true_local_f - local_f, np.inf) - assert err < 1e-13, "Error = %f is too large" % err - # }}} diff --git a/test/test_partition.py b/test/test_partition.py index 3fe56060..7453ecbf 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -359,17 +359,126 @@ def mpi_test_rank_entrypoint(): bdry_comm = MPIBoundaryCommunicator(comm, queue, vol_discr, group_factory) bdry_comm.check() - bdry_comm._test_data_transfer(queue) + _test_data_transfer(bdry_comm, queue) logger.debug("Rank %d exiting", rank) + +def _test_data_transfer(bdry_comm, queue): + from mpi4py import MPI + # Is there a smart way of choosing this number? + TAG_BASE = 83411 + TAG_SEND_REMOTE_NODES = TAG_BASE + 3 + TAG_SEND_LOCAL_NODES = TAG_BASE + 4 + + def f(x): + return 0.1*cl.clmath.sin(30.*x) + + ''' + Here is a simplified example of what happens from + the point of view of the local rank. + + Local rank: + 1. Transfer local points from local boundary to remote boundary + to get remote points. + 2. Send remote points to remote rank. + Remote rank: + 3. Receive remote points from local rank. + 4. Transfer remote points from remote boundary to local boundary + to get local points. + 5. Send local points to local rank. + Local rank: + 6. Recieve local points from remote rank. + 7. Check if local points are the same as the original local points. 
+ ''' + + send_reqs = [] + for i_remote_part in bdry_comm.connected_parts: + conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] + bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr + bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + + true_local_f = f(bdry_x) + remote_f = conn(queue, true_local_f) + + send_reqs.append(bdry_comm.mpi_comm.isend(remote_f.get(queue=queue), + dest=i_remote_part, + tag=TAG_SEND_REMOTE_NODES)) + + buffers = {} + for i_remote_part in bdry_comm.connected_parts: + status = MPI.Status() + bdry_comm.mpi_comm.probe(source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES, + status=status) + buffers[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in buffers.items(): + recv_reqs[i_remote_part] = bdry_comm.mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES) + remote_to_local_f_data = {} + for i_remote_part, req in recv_reqs.items(): + remote_to_local_f_data[i_remote_part] = req.wait() + buffers[i_remote_part] = None # free buffer + + for req in send_reqs: + req.wait() + + send_reqs = [] + for i_remote_part in bdry_comm.connected_parts: + conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] + local_f_np = remote_to_local_f_data[i_remote_part] + local_f_cl = cl.array.Array(queue, + shape=local_f_np.shape, + dtype=local_f_np.dtype) + local_f_cl.set(local_f_np) + remote_f = conn(queue, local_f_cl).get(queue=queue) + + send_reqs.append(bdry_comm.mpi_comm.isend(remote_f, + dest=i_remote_part, + tag=TAG_SEND_LOCAL_NODES)) + + buffers = {} + for i_remote_part in bdry_comm.connected_parts: + status = MPI.Status() + bdry_comm.mpi_comm.probe(source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES, + status=status) + buffers[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in buffers.items(): + recv_reqs[i_remote_part] = bdry_comm.mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES) + local_f_data 
= {} + for i_remote_part, req in recv_reqs.items(): + local_f_data[i_remote_part] = req.wait() + buffers[i_remote_part] = None # free buffer + + for req in send_reqs: + req.wait() + + for i_remote_part in bdry_comm.connected_parts: + bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr + bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + + true_local_f = f(bdry_x).get(queue=queue) + local_f = local_f_data[i_remote_part] + + from numpy.linalg import norm + err = norm(true_local_f - local_f, np.inf) + assert err < 1e-13, "Error = %f is too large" % err + # }}} # {{{ MPI test pytest entrypoint @pytest.mark.mpi -@pytest.mark.parametrize("num_partitions", [3, 4]) +@pytest.mark.parametrize("num_partitions", [3, 6]) def test_mpi_communication(num_partitions): pytest.importorskip("mpi4py") -- GitLab From ea49d2c6b6ca83ea58177dec8b3f79fde212f53f Mon Sep 17 00:00:00 2001 From: Ellis Date: Sun, 5 Nov 2017 19:25:00 -0600 Subject: [PATCH 127/266] quiet flake8 --- test/test_partition.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 7453ecbf..2a1a4d19 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -46,6 +46,12 @@ import os import logging logger = logging.getLogger(__name__) +# Is there a smart way of choosing this number? +# Currenly it is the same as the base from MPIBoundaryCommunicator +TAG_BASE = 83411 +TAG_SEND_REMOTE_NODES = TAG_BASE + 3 +TAG_SEND_LOCAL_NODES = TAG_BASE + 4 + # {{{ partition_interpolation @@ -366,10 +372,6 @@ def mpi_test_rank_entrypoint(): def _test_data_transfer(bdry_comm, queue): from mpi4py import MPI - # Is there a smart way of choosing this number? 
- TAG_BASE = 83411 - TAG_SEND_REMOTE_NODES = TAG_BASE + 3 - TAG_SEND_LOCAL_NODES = TAG_BASE + 4 def f(x): return 0.1*cl.clmath.sin(30.*x) -- GitLab From 48ca24b088570c88709cc5b382db9d1207b93589 Mon Sep 17 00:00:00 2001 From: Ellis Date: Fri, 17 Nov 2017 20:45:42 -0600 Subject: [PATCH 128/266] MPIBoundaryCommunicator now works per rank and returns futures --- meshmode/distributed.py | 236 ++++++++++++++++++---------------------- test/test_partition.py | 106 +++++++++++------- 2 files changed, 171 insertions(+), 171 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 9b160a6a..2ba18c31 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -25,8 +25,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ -import six - import numpy as np from mpi4py import MPI @@ -104,13 +102,13 @@ class MPIMeshDistributor(object): mpi_comm = self.mpi_comm rank = mpi_comm.Get_rank() - assert not self.is_mananger_rank(), "Manager rank cannot recieve mesh" + assert not self.is_mananger_rank(), "Manager rank cannot receive mesh" status = MPI.Status() result = self.mpi_comm.recv( source=self.manager_rank, tag=TAG_DISTRIBUTE_MESHES, status=status) - logger.info('rank %d: recieved local mesh (size = %d)', rank, status.count) + logger.info('rank %d: received local mesh (size = %d)', rank, status.count) return result @@ -121,152 +119,128 @@ class MPIMeshDistributor(object): class MPIBoundaryCommunicator(object): """ - .. attribute:: remote_to_local_bdry_conns - - Maps rank numbers to :class:`DirectDiscretizationConnection`. - - ``remote_to_local_bdry_conns[i_remote_part]`` gives the connection - that performs data exchange across faces from partition `i_remote_part` - to the local mesh. + .. automethod:: __call__ + .. 
automethod:: is_ready """ - def __init__(self, mpi_comm, queue, part_discr, bdry_group_factory): - # FIXME: Refactor so that we can specify which rank we want to recieve from + def __init__(self, mpi_comm, queue, part_discr, bdry_grp_factory, i_remote_part): """ :arg mpi_comm: A :class:`MPI.Intracomm` :arg queue: :arg part_discr: A :class:`meshmode.Discretization` of the local mesh to perform boundary communication on. - :arg bdry_group_factory: + :arg bdry_grp_factory: + :arg i_remote_part: The part number of the remote partition """ self.mpi_comm = mpi_comm + self.queue = queue self.part_discr = part_discr - self.i_local_part = mpi_comm.Get_rank() - - self.bdry_group_factory = bdry_group_factory - - from meshmode.mesh import InterPartitionAdjacencyGroup - self.connected_parts = set() - for adj in part_discr.mesh.facial_adjacency_groups: - if isinstance(adj[None], InterPartitionAdjacencyGroup): - indices = (adj[None].neighbor_partitions >= 0) - self.connected_parts = self.connected_parts.union( - adj[None].neighbor_partitions[indices]) - assert self.i_local_part not in self.connected_parts + self.i_remote_part = i_remote_part + self.bdry_grp_factory = bdry_grp_factory from meshmode.discretization.connection import make_face_restriction from meshmode.mesh import BTAG_PARTITION - self.local_bdry_conns = {} - for i_remote_part in self.connected_parts: - bdry_conn = make_face_restriction(part_discr, - bdry_group_factory, - BTAG_PARTITION(i_remote_part)) - - # Assert that everything in self.connected_parts is truly connected - assert bdry_conn.to_discr.nnodes > 0 - self.local_bdry_conns[i_remote_part] = bdry_conn - - self._setup(queue) - - def _post_boundary_data_sends(self, queue): - send_reqs = [] - for i_remote_part in self.connected_parts: - local_bdry = self.local_bdry_conns[i_remote_part].to_discr - local_mesh = self.local_bdry_conns[i_remote_part].from_discr.mesh - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in 
range(len(local_mesh.groups))] - local_batches = [self.local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - local_data = {'bdry_mesh': local_bdry.mesh, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(self.mpi_comm.isend( - local_data, dest=i_remote_part, tag=TAG_SEND_BOUNDARY)) - - return send_reqs - - def _receive_boundary_data(self, queue): - rank = self.mpi_comm.Get_rank() - i_local_part = rank - - remote_buf = {} - for i_remote_part in self.connected_parts: - status = MPI.Status() - self.mpi_comm.probe( - source=i_remote_part, tag=TAG_SEND_BOUNDARY, status=status) - remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) - - recv_reqs = {} - for i_remote_part, buf in remote_buf.items(): - recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, - source=i_remote_part, - tag=TAG_SEND_BOUNDARY) - - remote_data = {} - total_bytes_recvd = 0 - for i_remote_part, req in recv_reqs.items(): - status = MPI.Status() - remote_data[i_remote_part] = req.wait(status=status) + self.local_bdry_conn = make_face_restriction(part_discr, + bdry_grp_factory, + BTAG_PARTITION(i_remote_part)) + self._setup() + self.remote_data = None + + def _setup(self): + logger.info("bdry comm rank %d send begin", self.i_local_part) + self.send_req = self._post_send_boundary_data() + self.recv_req = self._post_recv_boundary_data() + + def _post_send_boundary_data(self): + local_bdry = self.local_bdry_conn.to_discr + local_mesh = self.local_bdry_conn.from_discr.mesh + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [self.local_bdry_conn.groups[i].batches 
+ for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=self.queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + return self.mpi_comm.isend(local_data, + dest=self.i_remote_part, + tag=TAG_SEND_BOUNDARY) + + def _post_recv_boundary_data(self): + status = MPI.Status() + self.mpi_comm.probe(source=self.i_remote_part, + tag=TAG_SEND_BOUNDARY, status=status) + return self.mpi_comm.irecv(buf=np.empty(status.count, dtype=bytes), + source=self.i_remote_part, + tag=TAG_SEND_BOUNDARY) - # Free the buffer - remote_buf[i_remote_part] = None + def __call__(self): + """ + Returns the tuple (`remote_to_local_bdry_conn`, []) + where `remote_to_local_bdry_conn` is a + :class:`DirectDiscretizationConnection` that gives the connection that + performs data exchange across faces from partition `i_remote_part` to the + local mesh. 
+ """ + if self.remote_data is None: + status = MPI.Status() + self.remote_data = self.recv_req.wait(status=status) logger.debug('rank %d: Received rank %d data (%d bytes)', - rank, i_remote_part, status.count) - - total_bytes_recvd += status.count - - logger.debug('rank %d: recieved %d bytes in total', rank, total_bytes_recvd) - - self.remote_to_local_bdry_conns = {} + self.i_local_part, self.i_remote_part, status.count) from meshmode.discretization import Discretization - - for i_remote_part, data in remote_data.items(): - remote_bdry_mesh = data['bdry_mesh'] - remote_bdry = Discretization( - queue.context, - remote_bdry_mesh, - self.bdry_group_factory) - remote_adj_groups = data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - self.remote_to_local_bdry_conns[i_remote_part] = \ - make_partition_connection( - self.local_bdry_conns[i_remote_part], - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - - def _setup(self, queue): - logger.info("bdry comm rank %d send begin", self.mpi_comm.Get_rank()) - - send_reqs = self._post_boundary_data_sends(queue) - self._receive_boundary_data(queue) - - for req in send_reqs: - req.wait() - - logger.info("bdry comm rank %d send completed", self.mpi_comm.Get_rank()) - - def check(self): - from meshmode.discretization.connection import check_connection - - for i, conn in six.iteritems(self.remote_to_local_bdry_conns): - check_connection(conn) + remote_bdry_mesh = self.remote_data['bdry_mesh'] + remote_bdry = Discretization(self.queue.context, remote_bdry_mesh, + self.bdry_grp_factory) + remote_adj_groups = self.remote_data['adj'] + remote_to_elem_faces = self.remote_data['to_elem_faces'] + remote_to_elem_indices = self.remote_data['to_elem_indices'] + + # Connect local_mesh to remote_mesh + from 
meshmode.discretization.connection import make_partition_connection + remote_to_local_bdry_conn = make_partition_connection(self.local_bdry_conn, + self.i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + self.send_req.wait() + return remote_to_local_bdry_conn, [] + + def is_ready(self): + """ + Returns True if the rank boundary data is ready to be received. + """ + if self.remote_data is None: + status = MPI.Status() + did_receive, self.remote_data = self.recv_req.test(status=status) + if not did_receive: + return False + logger.debug('rank %d: Received rank %d data (%d bytes)', + self.i_local_part, self.i_remote_part, status.count) + return True # }}} +def get_connected_partitions(mesh): + """ + :arg mesh: A :class:`Mesh` + Returns the set of partition numbers that are connected to `mesh` + """ + connected_parts = set() + from meshmode.mesh import InterPartitionAdjacencyGroup + for adj in mesh.facial_adjacency_groups: + if isinstance(adj[None], InterPartitionAdjacencyGroup): + indices = (adj[None].neighbor_partitions >= 0) + connected_parts = connected_parts.union( + adj[None].neighbor_partitions[indices]) + return connected_parts + # vim: foldmethod=marker diff --git a/test/test_partition.py b/test/test_partition.py index 2a1a4d19..aca511fa 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -336,11 +336,11 @@ def mpi_test_rank_entrypoint(): from meshmode.distributed import MPIMeshDistributor, MPIBoundaryCommunicator from mpi4py import MPI - comm = MPI.COMM_WORLD - rank = comm.Get_rank() - num_parts = comm.Get_size() + mpi_comm = MPI.COMM_WORLD + i_local_part = mpi_comm.Get_rank() + num_parts = mpi_comm.Get_size() - mesh_dist = MPIMeshDistributor(comm) + mesh_dist = MPIMeshDistributor(mpi_comm) if mesh_dist.is_mananger_rank(): np.random.seed(42) @@ -363,14 +363,40 @@ def mpi_test_rank_entrypoint(): from meshmode.discretization import Discretization vol_discr = Discretization(cl_ctx, local_mesh, 
group_factory) - bdry_comm = MPIBoundaryCommunicator(comm, queue, vol_discr, group_factory) - bdry_comm.check() - _test_data_transfer(bdry_comm, queue) - - logger.debug("Rank %d exiting", rank) - - -def _test_data_transfer(bdry_comm, queue): + from meshmode.distributed import get_connected_partitions + connected_parts = get_connected_partitions(local_mesh) + assert i_local_part not in connected_parts + bdry_conn_futures = {} + local_bdry_conns = {} + for i_remote_part in connected_parts: + bdry_conn_futures[i_remote_part] = MPIBoundaryCommunicator(mpi_comm, + queue, + vol_discr, + group_factory, + i_remote_part) + local_bdry_conns[i_remote_part] =\ + bdry_conn_futures[i_remote_part].local_bdry_conn + + remote_to_local_bdry_conns = {} + from meshmode.discretization.connection import check_connection + while len(bdry_conn_futures) > 0: + for i_remote_part, future in bdry_conn_futures.items(): + if future.is_ready(): + conn, _ = bdry_conn_futures.pop(i_remote_part)() + check_connection(conn) + remote_to_local_bdry_conns[i_remote_part] = conn + break + _test_data_transfer(mpi_comm, + queue, + local_bdry_conns, + remote_to_local_bdry_conns, + connected_parts) + + logger.debug("Rank %d exiting", i_local_part) + + +def _test_data_transfer(mpi_comm, queue, local_bdry_conns, + remote_to_local_bdry_conns, connected_parts): from mpi4py import MPI def f(x): @@ -390,36 +416,36 @@ def _test_data_transfer(bdry_comm, queue): to get local points. 5. Send local points to local rank. Local rank: - 6. Recieve local points from remote rank. + 6. Receive local points from remote rank. 7. Check if local points are the same as the original local points. 
''' send_reqs = [] - for i_remote_part in bdry_comm.connected_parts: - conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] - bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr + for i_remote_part in connected_parts: + conn = remote_to_local_bdry_conns[i_remote_part] + bdry_discr = local_bdry_conns[i_remote_part].to_discr bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) true_local_f = f(bdry_x) remote_f = conn(queue, true_local_f) - send_reqs.append(bdry_comm.mpi_comm.isend(remote_f.get(queue=queue), - dest=i_remote_part, - tag=TAG_SEND_REMOTE_NODES)) + send_reqs.append(mpi_comm.isend(remote_f.get(queue=queue), + dest=i_remote_part, + tag=TAG_SEND_REMOTE_NODES)) buffers = {} - for i_remote_part in bdry_comm.connected_parts: + for i_remote_part in connected_parts: status = MPI.Status() - bdry_comm.mpi_comm.probe(source=i_remote_part, - tag=TAG_SEND_REMOTE_NODES, - status=status) + mpi_comm.probe(source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES, + status=status) buffers[i_remote_part] = np.empty(status.count, dtype=bytes) recv_reqs = {} for i_remote_part, buf in buffers.items(): - recv_reqs[i_remote_part] = bdry_comm.mpi_comm.irecv(buf=buf, - source=i_remote_part, - tag=TAG_SEND_REMOTE_NODES) + recv_reqs[i_remote_part] = mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES) remote_to_local_f_data = {} for i_remote_part, req in recv_reqs.items(): remote_to_local_f_data[i_remote_part] = req.wait() @@ -429,8 +455,8 @@ def _test_data_transfer(bdry_comm, queue): req.wait() send_reqs = [] - for i_remote_part in bdry_comm.connected_parts: - conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] + for i_remote_part in connected_parts: + conn = remote_to_local_bdry_conns[i_remote_part] local_f_np = remote_to_local_f_data[i_remote_part] local_f_cl = cl.array.Array(queue, shape=local_f_np.shape, @@ -438,23 +464,23 @@ def _test_data_transfer(bdry_comm, queue): local_f_cl.set(local_f_np) remote_f = conn(queue, 
local_f_cl).get(queue=queue) - send_reqs.append(bdry_comm.mpi_comm.isend(remote_f, - dest=i_remote_part, - tag=TAG_SEND_LOCAL_NODES)) + send_reqs.append(mpi_comm.isend(remote_f, + dest=i_remote_part, + tag=TAG_SEND_LOCAL_NODES)) buffers = {} - for i_remote_part in bdry_comm.connected_parts: + for i_remote_part in connected_parts: status = MPI.Status() - bdry_comm.mpi_comm.probe(source=i_remote_part, - tag=TAG_SEND_LOCAL_NODES, - status=status) + mpi_comm.probe(source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES, + status=status) buffers[i_remote_part] = np.empty(status.count, dtype=bytes) recv_reqs = {} for i_remote_part, buf in buffers.items(): - recv_reqs[i_remote_part] = bdry_comm.mpi_comm.irecv(buf=buf, - source=i_remote_part, - tag=TAG_SEND_LOCAL_NODES) + recv_reqs[i_remote_part] = mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES) local_f_data = {} for i_remote_part, req in recv_reqs.items(): local_f_data[i_remote_part] = req.wait() @@ -463,8 +489,8 @@ def _test_data_transfer(bdry_comm, queue): for req in send_reqs: req.wait() - for i_remote_part in bdry_comm.connected_parts: - bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr + for i_remote_part in connected_parts: + bdry_discr = local_bdry_conns[i_remote_part].to_discr bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) true_local_f = f(bdry_x).get(queue=queue) -- GitLab From 5b8f363dfef56210a27d13baccd84c28739d2156 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 21 Feb 2017 21:54:19 -0600 Subject: [PATCH 129/266] partition_mesh creates facial_adjacency_groups --- meshmode/mesh/processing.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 1c961e4b..ae33bcf2 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -141,13 +141,23 @@ def partition_mesh(mesh, part_per_element, part_nr): new_nodes[group_nr], unit_nodes=mesh_group.unit_nodes)) from 
meshmode.mesh import Mesh - part_mesh = Mesh(new_vertices, new_mesh_groups) + part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None) return part_mesh, queried_elems # }}} +def set_rank_boundaries(part_mesh, mesh, part_to_global): + """ + Looks through facial_adjacency_groups in part_mesh. + If a boundary is found, then it is possible that it + used to be connected to other faces from mesh. + If this is the case, then part_mesh will have special + boundary_tags where faces used to be connected. + """ + + # {{{ orientations def find_volume_mesh_element_group_orientation(vertices, grp): -- GitLab From 54a3adfe2b8fe08393630b3a3af10327c0442d9f Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 23 Feb 2017 23:49:08 -0600 Subject: [PATCH 130/266] boundary tags set in partition_mesh --- meshmode/mesh/processing.py | 37 ++++++++++++++++++++++++------------- test/test_meshmode.py | 27 ++++++++++++++++++++++----- 2 files changed, 46 insertions(+), 18 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index ae33bcf2..6ba63344 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -143,19 +143,30 @@ def partition_mesh(mesh, part_per_element, part_nr): from meshmode.mesh import Mesh part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None) - return part_mesh, queried_elems - -# }}} - - -def set_rank_boundaries(part_mesh, mesh, part_to_global): - """ - Looks through facial_adjacency_groups in part_mesh. - If a boundary is found, then it is possible that it - used to be connected to other faces from mesh. - If this is the case, then part_mesh will have special - boundary_tags where faces used to be connected. 
- """ + from meshmode.mesh import BTAG_ALL + + for igrp in range(num_groups): + f_group = part_mesh.facial_adjacency_groups[igrp][None] + grp_elems = f_group.elements + grp_faces = f_group.element_faces + for elem_idx in range(len(grp_elems)): + elem = grp_elems[elem_idx] + face = grp_faces[elem_idx] + tag = -f_group.neighbors[elem_idx] + parent_elem = queried_elems[elem] + parent_group = 0 + while parent_elem >= mesh.groups[parent_group].nelements: + parent_elem -= mesh.groups[parent_group].nelements + parent_group += 1 + assert parent_group < num_groups, "oops..." + parent_facial_group = mesh.facial_adjacency_groups[parent_group][None] + idxs = np.where(parent_facial_group.elements == parent_elem)[0] + for parent_face in parent_facial_group.element_faces[idxs]: + if face == parent_face: + f_group.neighbors[elem_idx] = -(tag ^ part_mesh.boundary_tag_bit(BTAG_ALL)) + #print("Boundary face", face, "of element", elem, "should be connected to", parent_elem, "in parent mesh.") + + return (part_mesh, queried_elems) # {{{ orientations diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 2cb0470a..97fd9048 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -71,11 +71,11 @@ def test_partition_boxes_mesh(): n = 5 num_parts = 7 from meshmode.mesh.generation import generate_regular_rect_mesh - mesh1 = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) - mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) + mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) + #mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes([mesh1, mesh2]) + #from meshmode.mesh.processing import merge_disjoint_meshes + #mesh = merge_disjoint_meshes([mesh1, mesh2]) adjacency_list = np.zeros((mesh.nelements,), dtype=set) for elem in range(mesh.nelements): @@ -93,7 +93,24 @@ def test_partition_boxes_mesh(): 
partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] assert mesh.nelements == np.sum( - [new_meshes[i].nelements for i in range(num_parts)]) + [new_meshes[i].nelements for i in range(num_parts)]), \ + "part_mesh has the wrong number of elements" + + print(count_BTAG_ALL(mesh)) + print(np.sum([count_BTAG_ALL(new_meshes[i]) for i in range(num_parts)])) + assert count_BTAG_ALL(mesh) == np.sum( + [count_BTAG_ALL(new_meshes[i]) for i in range(num_parts)]), \ + "part_mesh has the wrong number of BTAG_ALL boundaries" + + +def count_BTAG_ALL(mesh): + num_bnds = 0 + for adj_groups in mesh.facial_adjacency_groups: + bdry_group = adj_groups[None] + for mesh_tag in -bdry_group.neighbors: + if mesh_tag & mesh.boundary_tag_bit(BTAG_ALL) != 0: + num_bnds += 1 + return num_bnds # }}} -- GitLab From 250a6a15dd1b693e79bb9f606beed356a8fa7379 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 27 Feb 2017 21:15:49 -0600 Subject: [PATCH 131/266] Work on boundary tags within partition_mesh --- meshmode/mesh/processing.py | 13 +++++++------ test/test_meshmode.py | 19 +++++++++---------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 6ba63344..5a168846 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -159,12 +159,13 @@ def partition_mesh(mesh, part_per_element, part_nr): parent_elem -= mesh.groups[parent_group].nelements parent_group += 1 assert parent_group < num_groups, "oops..." 
- parent_facial_group = mesh.facial_adjacency_groups[parent_group][None] - idxs = np.where(parent_facial_group.elements == parent_elem)[0] - for parent_face in parent_facial_group.element_faces[idxs]: - if face == parent_face: - f_group.neighbors[elem_idx] = -(tag ^ part_mesh.boundary_tag_bit(BTAG_ALL)) - #print("Boundary face", face, "of element", elem, "should be connected to", parent_elem, "in parent mesh.") + parent_f_group = mesh.facial_adjacency_groups[parent_group] + for _, parent_facial_group in parent_f_group.items(): + for idx in np.where(parent_facial_group.elements == parent_elem)[0]: + if parent_facial_group.neighbors[idx] >= 0: + if face == parent_facial_group.element_faces[idx]: + f_group.neighbors[elem_idx] = -(tag & ~part_mesh.boundary_tag_bit(BTAG_ALL)) + #print("Boundary face", face, "of element", elem, "should be connected to element", parent_elem, "in parent group", parent_group) return (part_mesh, queried_elems) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 97fd9048..f38aa542 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -96,20 +96,19 @@ def test_partition_boxes_mesh(): [new_meshes[i].nelements for i in range(num_parts)]), \ "part_mesh has the wrong number of elements" - print(count_BTAG_ALL(mesh)) - print(np.sum([count_BTAG_ALL(new_meshes[i]) for i in range(num_parts)])) - assert count_BTAG_ALL(mesh) == np.sum( - [count_BTAG_ALL(new_meshes[i]) for i in range(num_parts)]), \ + assert count_btag_all(mesh) == np.sum( + [count_btag_all(new_meshes[i]) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" -def count_BTAG_ALL(mesh): +def count_btag_all(mesh): num_bnds = 0 - for adj_groups in mesh.facial_adjacency_groups: - bdry_group = adj_groups[None] - for mesh_tag in -bdry_group.neighbors: - if mesh_tag & mesh.boundary_tag_bit(BTAG_ALL) != 0: - num_bnds += 1 + for adj_dict in mesh.facial_adjacency_groups: + for _, bdry_group in adj_dict.items(): + for neighbors in 
bdry_group.neighbors: + if neighbors < 0: + if -neighbors & mesh.boundary_tag_bit(BTAG_ALL) != 0: + num_bnds += 1 return num_bnds # }}} -- GitLab From 6d4f3613795984dc45db65359328afe95592e93c Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 28 Feb 2017 00:20:10 -0600 Subject: [PATCH 132/266] Rank boundary tags --- meshmode/mesh/processing.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 5a168846..6ed7cfa0 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -140,8 +140,12 @@ def partition_mesh(mesh, part_per_element, part_nr): type(mesh_group)(mesh_group.order, new_indices[group_nr], new_nodes[group_nr], unit_nodes=mesh_group.unit_nodes)) + num_parts = np.max(part_per_element) + boundary_tags = list(range(num_parts)) + from meshmode.mesh import Mesh - part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None) + part_mesh = Mesh(new_vertices, new_mesh_groups, \ + facial_adjacency_groups=None, boundary_tags=boundary_tags) from meshmode.mesh import BTAG_ALL @@ -164,7 +168,12 @@ def partition_mesh(mesh, part_per_element, part_nr): for idx in np.where(parent_facial_group.elements == parent_elem)[0]: if parent_facial_group.neighbors[idx] >= 0: if face == parent_facial_group.element_faces[idx]: - f_group.neighbors[elem_idx] = -(tag & ~part_mesh.boundary_tag_bit(BTAG_ALL)) + rank_neighbor = parent_facial_group.neighbors[idx] + # TODO: With mulitple groups, rank_neighbors will be wrong. 
+ neighbor_part_num = part_per_element[rank_neighbor] + tag = tag & ~part_mesh.boundary_tag_bit(BTAG_ALL) + tag = tag | part_mesh.boundary_tag_bit(neighbor_part_num) + f_group.neighbors[elem_idx] = -tag #print("Boundary face", face, "of element", elem, "should be connected to element", parent_elem, "in parent group", parent_group) return (part_mesh, queried_elems) -- GitLab From 2e510669848b5c04465a88bbb7821006053e7b6f Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 28 Feb 2017 10:15:15 -0600 Subject: [PATCH 133/266] Small fixes. --- meshmode/mesh/processing.py | 14 ++++++++++---- test/test_meshmode.py | 8 +++++--- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 6ed7cfa0..367ae148 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -169,12 +169,18 @@ def partition_mesh(mesh, part_per_element, part_nr): if parent_facial_group.neighbors[idx] >= 0: if face == parent_facial_group.element_faces[idx]: rank_neighbor = parent_facial_group.neighbors[idx] - # TODO: With mulitple groups, rank_neighbors will be wrong. 
- neighbor_part_num = part_per_element[rank_neighbor] + grp_start_elem = 0 + for grp in range(parent_group): + grp_start_elem += mesh.groups[grp].nelements + neighbor_part_num = part_per_element[ + rank_neighbor + grp_start_elem] tag = tag & ~part_mesh.boundary_tag_bit(BTAG_ALL) - tag = tag | part_mesh.boundary_tag_bit(neighbor_part_num) + tag = tag | part_mesh.boundary_tag_bit( + neighbor_part_num) f_group.neighbors[elem_idx] = -tag - #print("Boundary face", face, "of element", elem, "should be connected to element", parent_elem, "in parent group", parent_group) + #print("Boundary face", face, "of element", elem, + # "should be connected to element", rank_neighbor, + # "in partition", neighbor_part_num) return (part_mesh, queried_elems) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index f38aa542..470faf27 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -50,7 +50,8 @@ logger = logging.getLogger(__name__) # {{{ partition_mesh - +''' +#TODO facial_adjacency_groups is not available in torus. def test_partition_torus_mesh(): from meshmode.mesh.generation import generate_torus my_mesh = generate_torus(2, 1, n_outer=2, n_inner=2) @@ -65,13 +66,14 @@ def test_partition_torus_mesh(): assert part_mesh0.nelements == 2 assert part_mesh1.nelements == 4 assert part_mesh2.nelements == 2 - +''' def test_partition_boxes_mesh(): n = 5 - num_parts = 7 + num_parts = 3 from meshmode.mesh.generation import generate_regular_rect_mesh mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) + #TODO facial_adjacency_groups is not available from merge_disjoint_meshes. 
#mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) #from meshmode.mesh.processing import merge_disjoint_meshes -- GitLab From 851dc6334d3a4d82396e23037a92357101e97795 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 1 Mar 2017 22:59:37 -0600 Subject: [PATCH 134/266] Added InterPartitionAdjacency class --- meshmode/mesh/__init__.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 44dba80b..ec217941 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -381,6 +381,27 @@ class NodalAdjacency(Record): # }}} +# {{{ partition adjacency + +class InterPartitionAdjacency(): + """ + Describes adjacency information of elements between partitions. + """ + + def __init__(self): + self.elements = [] + self.element_faces = [] + self.neighbors = [] + self.neighbor_faces = [] + + def add_connection(self, elem, face, neighbor, neighbor_face): + self.elements.append(elem) + self.element_faces.append(face) + self.neighbors.append(neighbor) + self.neighbor_faces.append(neighbor_face) + +# }}} + # {{{ facial adjacency -- GitLab From e700bf98649c2ad53bcc1ff7b61c9263066f0ebb Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 1 Mar 2017 23:00:15 -0600 Subject: [PATCH 135/266] partition_mesh implements InterPartitionAdjacency --- meshmode/mesh/processing.py | 20 ++++++++++++++++---- test/test_meshmode.py | 3 ++- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 367ae148..acc6fd70 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -144,11 +144,16 @@ def partition_mesh(mesh, part_per_element, part_nr): boundary_tags = list(range(num_parts)) from meshmode.mesh import Mesh - part_mesh = Mesh(new_vertices, new_mesh_groups, \ + part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None, boundary_tags=boundary_tags) from meshmode.mesh import BTAG_ALL + 
from meshmode.mesh import InterPartitionAdjacency + tags_to_part_adj = dict() + for tag in range(np.max(part_per_element) + 1): + tags_to_part_adj[tag] = InterPartitionAdjacency() + for igrp in range(num_groups): f_group = part_mesh.facial_adjacency_groups[igrp][None] grp_elems = f_group.elements @@ -169,11 +174,15 @@ def partition_mesh(mesh, part_per_element, part_nr): if parent_facial_group.neighbors[idx] >= 0: if face == parent_facial_group.element_faces[idx]: rank_neighbor = parent_facial_group.neighbors[idx] - grp_start_elem = 0 + rank_neighbor_face = parent_facial_group.neighbor_faces[idx] + mgrp_start_elem = 0 + pgrp_start_elem = 0 for grp in range(parent_group): - grp_start_elem += mesh.groups[grp].nelements + mgrp_start_elem += mesh.groups[grp].nelements + for grp in range(num_groups): + pgrp_start_elem += part_mesh.groups[grp].nelements neighbor_part_num = part_per_element[ - rank_neighbor + grp_start_elem] + rank_neighbor + mgrp_start_elem] tag = tag & ~part_mesh.boundary_tag_bit(BTAG_ALL) tag = tag | part_mesh.boundary_tag_bit( neighbor_part_num) @@ -181,6 +190,9 @@ def partition_mesh(mesh, part_per_element, part_nr): #print("Boundary face", face, "of element", elem, # "should be connected to element", rank_neighbor, # "in partition", neighbor_part_num) + tags_to_part_adj[neighbor_part_num].add_connection( + elem + pgrp_start_elem, face, + rank_neighbor + mgrp_start_elem, rank_neighbor_face) return (part_mesh, queried_elems) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 470faf27..df33c00d 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -51,7 +51,7 @@ logger = logging.getLogger(__name__) # {{{ partition_mesh ''' -#TODO facial_adjacency_groups is not available in torus. +#TODO facial_adjacency_groups is not available in generate_torus. 
def test_partition_torus_mesh(): from meshmode.mesh.generation import generate_torus my_mesh = generate_torus(2, 1, n_outer=2, n_inner=2) @@ -68,6 +68,7 @@ def test_partition_torus_mesh(): assert part_mesh2.nelements == 2 ''' + def test_partition_boxes_mesh(): n = 5 num_parts = 3 -- GitLab From e06d607a039804a27ed0006a4673e8c43d09e85c Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 2 Mar 2017 10:50:49 -0600 Subject: [PATCH 136/266] Fix whitespace --- meshmode/mesh/__init__.py | 1 + meshmode/mesh/processing.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index ec217941..5bea6e10 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -381,6 +381,7 @@ class NodalAdjacency(Record): # }}} + # {{{ partition adjacency class InterPartitionAdjacency(): diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index acc6fd70..21f91282 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -174,7 +174,8 @@ def partition_mesh(mesh, part_per_element, part_nr): if parent_facial_group.neighbors[idx] >= 0: if face == parent_facial_group.element_faces[idx]: rank_neighbor = parent_facial_group.neighbors[idx] - rank_neighbor_face = parent_facial_group.neighbor_faces[idx] + rank_neighbor_face = \ + parent_facial_group.neighbor_faces[idx] mgrp_start_elem = 0 pgrp_start_elem = 0 for grp in range(parent_group): -- GitLab From 2250d9fedf5761034475df9d9307d5589e75f865 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 2 Mar 2017 22:02:22 -0600 Subject: [PATCH 137/266] Add possible InterPartitionAdjacency class --- meshmode/mesh/__init__.py | 33 +++++++++++-- meshmode/mesh/processing.py | 94 +++++++++++++++++++++---------------- 2 files changed, 81 insertions(+), 46 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 5bea6e10..714f42db 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -392,15 +392,38 @@ 
class InterPartitionAdjacency(): def __init__(self): self.elements = [] self.element_faces = [] - self.neighbors = [] + self.neighbor_elems = [] self.neighbor_faces = [] + self.neighbor_groups = [] - def add_connection(self, elem, face, neighbor, neighbor_face): - self.elements.append(elem) - self.element_faces.append(face) - self.neighbors.append(neighbor) + def add_connection(self, elem, face, neighbor_group, neighbor_elem, neighbor_face): + self.elems.append(elem) + self.elem_faces.append(face) + self.neighbor_groups.append(neighbor_group) + self.neighbor_elems.append(neighbor_elem) self.neighbor_faces.append(neighbor_face) + def get_neighbor(self, elem, face): + for idx in range(len(self.elements)): + if elem == self.elements[idx] and face == self.element_faces[idx]: + return (self.neighbor_groups[idx], + self.neighbor_elem[idx], + self.neighbor_faces[idx]) + + +class OtherPossibility(): + """ + """ + + def __init__(self): + self.adjacent = dict() + + def add_connection(self, tag, elem, face, neighbor_group, neighbor_elem, neighbor_face): + self.adjacent[(tag, elem, face)] = (neighbor_group, neighbor_elem, neighbor_face) + + def get_neighbor(self, tag, elem, face): + return self.adjacent((tag, elem, face)) + # }}} diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 21f91282..974a4e04 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -149,51 +149,63 @@ def partition_mesh(mesh, part_per_element, part_nr): from meshmode.mesh import BTAG_ALL - from meshmode.mesh import InterPartitionAdjacency - tags_to_part_adj = dict() - for tag in range(np.max(part_per_element) + 1): - tags_to_part_adj[tag] = InterPartitionAdjacency() + #from meshmode.mesh import InterPartitionAdjacency + #num_connection_tags = np.max(part_per_element) + 1 + #tags_to_part_adj = [] + #for _ in range(num_connection_tags): + # tags_to_part_adj.append(InterPartitionAdjacency()) + + from meshmode.mesh import OtherPossibility + part_adjacency = 
OtherPossibility() for igrp in range(num_groups): - f_group = part_mesh.facial_adjacency_groups[igrp][None] - grp_elems = f_group.elements - grp_faces = f_group.element_faces - for elem_idx in range(len(grp_elems)): - elem = grp_elems[elem_idx] - face = grp_faces[elem_idx] - tag = -f_group.neighbors[elem_idx] + part_group = part_mesh.groups[igrp] + boundary_adj = part_mesh.facial_adjacency_groups[igrp][None] + boundary_elems = boundary_adj.elements + boundary_faces = boundary_adj.element_faces + for elem_idx in range(len(boundary_elems)): + elem = boundary_elems[elem_idx] + face = boundary_faces[elem_idx] + tags = -boundary_adj.neighbors[elem_idx] + assert tags >= 0, "Expected boundary tag in adjacency group." parent_elem = queried_elems[elem] - parent_group = 0 - while parent_elem >= mesh.groups[parent_group].nelements: - parent_elem -= mesh.groups[parent_group].nelements - parent_group += 1 - assert parent_group < num_groups, "oops..." - parent_f_group = mesh.facial_adjacency_groups[parent_group] - for _, parent_facial_group in parent_f_group.items(): + parent_group_num = 0 + while parent_elem >= mesh.groups[parent_group_num].nelements: + parent_elem -= mesh.groups[parent_group_num].nelements + parent_group_num += 1 + assert parent_group_num < num_groups, "Unable to find neighbor." 
+ parent_grp_elem_base = mesh.groups[parent_group_num].element_nr_base + parent_boundary_adj = mesh.facial_adjacency_groups[parent_group_num] + for _, parent_facial_group in parent_boundary_adj.items(): for idx in np.where(parent_facial_group.elements == parent_elem)[0]: - if parent_facial_group.neighbors[idx] >= 0: - if face == parent_facial_group.element_faces[idx]: - rank_neighbor = parent_facial_group.neighbors[idx] - rank_neighbor_face = \ - parent_facial_group.neighbor_faces[idx] - mgrp_start_elem = 0 - pgrp_start_elem = 0 - for grp in range(parent_group): - mgrp_start_elem += mesh.groups[grp].nelements - for grp in range(num_groups): - pgrp_start_elem += part_mesh.groups[grp].nelements - neighbor_part_num = part_per_element[ - rank_neighbor + mgrp_start_elem] - tag = tag & ~part_mesh.boundary_tag_bit(BTAG_ALL) - tag = tag | part_mesh.boundary_tag_bit( - neighbor_part_num) - f_group.neighbors[elem_idx] = -tag - #print("Boundary face", face, "of element", elem, - # "should be connected to element", rank_neighbor, - # "in partition", neighbor_part_num) - tags_to_part_adj[neighbor_part_num].add_connection( - elem + pgrp_start_elem, face, - rank_neighbor + mgrp_start_elem, rank_neighbor_face) + if parent_facial_group.neighbors[idx] >= 0 and \ + parent_facial_group.element_faces[idx] == face: + rank_neighbor = parent_facial_group.neighbors[idx] + rank_neighbor_face = parent_facial_group.neighbor_faces[idx] + + new_tag = part_per_element[rank_neighbor + + parent_grp_elem_base] + tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) + tags = tags | part_mesh.boundary_tag_bit(new_tag) + boundary_adj.neighbors[elem_idx] = -tags + + #print("Boundary face", face, "of element", elem, + # "should be connected to element", rank_neighbor, + # "in partition", neighbor_part_num) + + #tags_to_part_adj[new_tag].add_connection( + # elem + part_group.element_nr_base, + # face, + # rank_neighbor + parent_grp_elem_base, + # rank_neighbor_face, + # parent_group_num) + + 
part_adjacency.add_connection(new_tag, + elem + part_group.element_nr_base, + face, + rank_neighbor + parent_grp_elem_base, + rank_neighbor_face, + parent_group_num) return (part_mesh, queried_elems) -- GitLab From cc1d7a5f865cebe23ba2e118670965b7a1c2a8a4 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 2 Mar 2017 22:04:08 -0600 Subject: [PATCH 138/266] Whitespace fix --- meshmode/mesh/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 714f42db..fa31b258 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -396,7 +396,8 @@ class InterPartitionAdjacency(): self.neighbor_faces = [] self.neighbor_groups = [] - def add_connection(self, elem, face, neighbor_group, neighbor_elem, neighbor_face): + def add_connection(self, elem, face, + neighbor_group, neighbor_elem, neighbor_face): self.elems.append(elem) self.elem_faces.append(face) self.neighbor_groups.append(neighbor_group) -- GitLab From 5dda98c8ab70315cc2cc0ff0b53180268250fbd3 Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 4 Mar 2017 16:46:38 -0600 Subject: [PATCH 139/266] Fix whitespace --- meshmode/mesh/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index fa31b258..690ce8ab 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -419,8 +419,10 @@ class OtherPossibility(): def __init__(self): self.adjacent = dict() - def add_connection(self, tag, elem, face, neighbor_group, neighbor_elem, neighbor_face): - self.adjacent[(tag, elem, face)] = (neighbor_group, neighbor_elem, neighbor_face) + def add_connection(self, tag, elem, face, + neighbor_group, neighbor_elem, neighbor_face): + self.adjacent[(tag, elem, face)] = \ + (neighbor_group, neighbor_elem, neighbor_face) def get_neighbor(self, tag, elem, face): return self.adjacent((tag, elem, face)) -- GitLab From e6b6f21f0a1bbca51a1ef9f2992e0a92e7c60187 
Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 4 Mar 2017 17:20:20 -0600 Subject: [PATCH 140/266] Add test for partition tags --- meshmode/mesh/__init__.py | 2 +- test/test_meshmode.py | 14 +++++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 690ce8ab..38f8fd48 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -425,7 +425,7 @@ class OtherPossibility(): (neighbor_group, neighbor_elem, neighbor_face) def get_neighbor(self, tag, elem, face): - return self.adjacent((tag, elem, face)) + return self.adjacent[(tag, elem, face)] # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index df33c00d..dff6f7b8 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -102,6 +102,18 @@ def test_partition_boxes_mesh(): assert count_btag_all(mesh) == np.sum( [count_btag_all(new_meshes[i]) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" + + for part_num in range(num_parts): + for f_groups in new_meshes[part_num].facial_adjacency_groups: + f_grp = f_groups[None] + for idx in range(len(f_grp.elements)): + tag = -f_grp.neighbors[idx] + if tag >= 0: + elem = f_grp.elements[idx] + face = f_grp.element_faces[idx] + (n_part, n_elem, n_face) = ...get_neighbor(tag, elem, face) + assert (part_num, elem, face) = ...get_neighbor(n_part, n_elem, n_face) + def count_btag_all(mesh): @@ -112,7 +124,7 @@ def count_btag_all(mesh): if neighbors < 0: if -neighbors & mesh.boundary_tag_bit(BTAG_ALL) != 0: num_bnds += 1 - return num_bnds + return num_bnds # }}} -- GitLab From b421186e928f486a7e70a658781bd32217ba9b14 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 5 Mar 2017 20:36:26 -0600 Subject: [PATCH 141/266] InterpartitionAdj is consistent --- meshmode/mesh/__init__.py | 44 +++++++--------------------------- meshmode/mesh/processing.py | 48 +++++++++++++++---------------------- test/test_meshmode.py | 26 +++++++++++++------- 3 files 
changed, 44 insertions(+), 74 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 38f8fd48..937af910 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -382,50 +382,21 @@ class NodalAdjacency(Record): # }}} -# {{{ partition adjacency - -class InterPartitionAdjacency(): - """ - Describes adjacency information of elements between partitions. - """ - - def __init__(self): - self.elements = [] - self.element_faces = [] - self.neighbor_elems = [] - self.neighbor_faces = [] - self.neighbor_groups = [] - - def add_connection(self, elem, face, - neighbor_group, neighbor_elem, neighbor_face): - self.elems.append(elem) - self.elem_faces.append(face) - self.neighbor_groups.append(neighbor_group) - self.neighbor_elems.append(neighbor_elem) - self.neighbor_faces.append(neighbor_face) - - def get_neighbor(self, elem, face): - for idx in range(len(self.elements)): - if elem == self.elements[idx] and face == self.element_faces[idx]: - return (self.neighbor_groups[idx], - self.neighbor_elem[idx], - self.neighbor_faces[idx]) - +# {{{ partition adjacency -class OtherPossibility(): +class InterPartitionAdj(): """ + Interface is not final. """ def __init__(self): self.adjacent = dict() - def add_connection(self, tag, elem, face, - neighbor_group, neighbor_elem, neighbor_face): - self.adjacent[(tag, elem, face)] = \ - (neighbor_group, neighbor_elem, neighbor_face) + def add_connection(self, elem, face, neighbor_elem, neighbor_face): + self.adjacent[(elem, face)] = (neighbor_elem, neighbor_face) - def get_neighbor(self, tag, elem, face): - return self.adjacent[(tag, elem, face)] + def get_neighbor(self, elem, face): + return self.adjacent[(elem, face)] # }}} @@ -611,6 +582,7 @@ class Mesh(Record): will result in exceptions. Lastly, a data structure as described in :attr:`facial_adjacency_groups` may be passed. 
""" + el_nr = 0 node_nr = 0 diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 974a4e04..18d7afb4 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -149,14 +149,9 @@ def partition_mesh(mesh, part_per_element, part_nr): from meshmode.mesh import BTAG_ALL - #from meshmode.mesh import InterPartitionAdjacency - #num_connection_tags = np.max(part_per_element) + 1 - #tags_to_part_adj = [] - #for _ in range(num_connection_tags): - # tags_to_part_adj.append(InterPartitionAdjacency()) - - from meshmode.mesh import OtherPossibility - part_adjacency = OtherPossibility() + #TODO This should probably be in the Mesh class. + from meshmode.mesh import InterPartitionAdj + part_mesh.interpartition_adj = InterPartitionAdj() for igrp in range(num_groups): part_group = part_mesh.groups[igrp] @@ -167,6 +162,7 @@ def partition_mesh(mesh, part_per_element, part_nr): elem = boundary_elems[elem_idx] face = boundary_faces[elem_idx] tags = -boundary_adj.neighbors[elem_idx] + # Is is reasonable to expect this assertation? assert tags >= 0, "Expected boundary tag in adjacency group." 
parent_elem = queried_elems[elem] parent_group_num = 0 @@ -180,32 +176,26 @@ def partition_mesh(mesh, part_per_element, part_nr): for idx in np.where(parent_facial_group.elements == parent_elem)[0]: if parent_facial_group.neighbors[idx] >= 0 and \ parent_facial_group.element_faces[idx] == face: - rank_neighbor = parent_facial_group.neighbors[idx] + rank_neighbor = (parent_facial_group.neighbors[idx] + + parent_grp_elem_base) rank_neighbor_face = parent_facial_group.neighbor_faces[idx] - - new_tag = part_per_element[rank_neighbor - + parent_grp_elem_base] + + n_part_nr = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) - tags = tags | part_mesh.boundary_tag_bit(new_tag) + tags = tags | part_mesh.boundary_tag_bit(n_part_nr) boundary_adj.neighbors[elem_idx] = -tags - - #print("Boundary face", face, "of element", elem, - # "should be connected to element", rank_neighbor, - # "in partition", neighbor_part_num) - - #tags_to_part_adj[new_tag].add_connection( - # elem + part_group.element_nr_base, - # face, - # rank_neighbor + parent_grp_elem_base, - # rank_neighbor_face, - # parent_group_num) - - part_adjacency.add_connection(new_tag, + + # Find the neighbor element from the other partition + n_elem = np.count_nonzero( + part_per_element[:rank_neighbor] == n_part_nr) + + # TODO Test if this works with multiple groups + # Do I need to add the element number base? 
+ part_mesh.interpartition_adj.add_connection( elem + part_group.element_nr_base, face, - rank_neighbor + parent_grp_elem_base, - rank_neighbor_face, - parent_group_num) + n_elem, + rank_neighbor_face) return (part_mesh, queried_elems) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index dff6f7b8..27a622db 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -71,7 +71,7 @@ def test_partition_torus_mesh(): def test_partition_boxes_mesh(): n = 5 - num_parts = 3 + num_parts = 7 from meshmode.mesh.generation import generate_regular_rect_mesh mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) #TODO facial_adjacency_groups is not available from merge_disjoint_meshes. @@ -103,19 +103,27 @@ def test_partition_boxes_mesh(): [count_btag_all(new_meshes[i]) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" - for part_num in range(num_parts): - for f_groups in new_meshes[part_num].facial_adjacency_groups: + for part_nr in range(num_parts): + for f_groups in new_meshes[part_nr].facial_adjacency_groups: f_grp = f_groups[None] for idx in range(len(f_grp.elements)): + # Are all f_grp.neighbors guaranteed to be negative + # since I'm taking the boundary facial group? tag = -f_grp.neighbors[idx] - if tag >= 0: - elem = f_grp.elements[idx] - face = f_grp.element_faces[idx] - (n_part, n_elem, n_face) = ...get_neighbor(tag, elem, face) - assert (part_num, elem, face) = ...get_neighbor(n_part, n_elem, n_face) + elem = f_grp.elements[idx] + face = f_grp.element_faces[idx] + for n_part_nr in range(num_parts): + if tag >= 0 and \ + tag & new_meshes[part_nr].boundary_tag_bit(n_part_nr) != 0: + # Is this the best way to probe the tag? + # Can one tag have multiple partition neighbors? 
+ (n_elem, n_face) = new_meshes[part_nr].\ + interpartition_adj.get_neighbor(elem, face) + assert (elem, face) == new_meshes[n_part_nr].\ + interpartition_adj.get_neighbor(n_elem, n_face),\ + "InterpartitionAdj is not consistent" - def count_btag_all(mesh): num_bnds = 0 for adj_dict in mesh.facial_adjacency_groups: -- GitLab From a4504f35c24a90e55b6997a2d79d059af68e720a Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 5 Mar 2017 20:45:56 -0600 Subject: [PATCH 142/266] Fix whitespace --- meshmode/mesh/__init__.py | 12 ++++++------ meshmode/mesh/processing.py | 4 ++-- test/test_meshmode.py | 10 +++++----- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 937af910..4e3dff70 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -382,22 +382,22 @@ class NodalAdjacency(Record): # }}} -# {{{ partition adjacency - +# {{{ partition adjacency + class InterPartitionAdj(): """ Interface is not final. """ - + def __init__(self): self.adjacent = dict() - + def add_connection(self, elem, face, neighbor_elem, neighbor_face): self.adjacent[(elem, face)] = (neighbor_elem, neighbor_face) def get_neighbor(self, elem, face): return self.adjacent[(elem, face)] - + # }}} @@ -582,7 +582,7 @@ class Mesh(Record): will result in exceptions. Lastly, a data structure as described in :attr:`facial_adjacency_groups` may be passed. 
""" - + el_nr = 0 node_nr = 0 diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 18d7afb4..25061218 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -179,12 +179,12 @@ def partition_mesh(mesh, part_per_element, part_nr): rank_neighbor = (parent_facial_group.neighbors[idx] + parent_grp_elem_base) rank_neighbor_face = parent_facial_group.neighbor_faces[idx] - + n_part_nr = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) tags = tags | part_mesh.boundary_tag_bit(n_part_nr) boundary_adj.neighbors[elem_idx] = -tags - + # Find the neighbor element from the other partition n_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_nr) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 27a622db..ce037ab5 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -102,19 +102,19 @@ def test_partition_boxes_mesh(): assert count_btag_all(mesh) == np.sum( [count_btag_all(new_meshes[i]) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" - + for part_nr in range(num_parts): for f_groups in new_meshes[part_nr].facial_adjacency_groups: f_grp = f_groups[None] for idx in range(len(f_grp.elements)): - # Are all f_grp.neighbors guaranteed to be negative + # Are all f_grp.neighbors guaranteed to be negative # since I'm taking the boundary facial group? tag = -f_grp.neighbors[idx] elem = f_grp.elements[idx] face = f_grp.element_faces[idx] for n_part_nr in range(num_parts): if tag >= 0 and \ - tag & new_meshes[part_nr].boundary_tag_bit(n_part_nr) != 0: + tag & new_meshes[part_nr].boundary_tag_bit(n_part_nr) != 0: # Is this the best way to probe the tag? # Can one tag have multiple partition neighbors? 
(n_elem, n_face) = new_meshes[part_nr].\ @@ -122,7 +122,7 @@ def test_partition_boxes_mesh(): assert (elem, face) == new_meshes[n_part_nr].\ interpartition_adj.get_neighbor(n_elem, n_face),\ "InterpartitionAdj is not consistent" - + def count_btag_all(mesh): num_bnds = 0 @@ -132,7 +132,7 @@ def count_btag_all(mesh): if neighbors < 0: if -neighbors & mesh.boundary_tag_bit(BTAG_ALL) != 0: num_bnds += 1 - return num_bnds + return num_bnds # }}} -- GitLab From 0eb41543acd95051cb999bf7dcab5a552500c840 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 6 Mar 2017 12:32:43 -0600 Subject: [PATCH 143/266] Fix whitespace --- meshmode/mesh/__init__.py | 2 +- test/test_meshmode.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 4e3dff70..e9dbe3a6 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -391,7 +391,7 @@ class InterPartitionAdj(): def __init__(self): self.adjacent = dict() - + def add_connection(self, elem, face, neighbor_elem, neighbor_face): self.adjacent[(elem, face)] = (neighbor_elem, neighbor_face) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index ce037ab5..2765d7a6 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -113,8 +113,8 @@ def test_partition_boxes_mesh(): elem = f_grp.elements[idx] face = f_grp.element_faces[idx] for n_part_nr in range(num_parts): - if tag >= 0 and \ - tag & new_meshes[part_nr].boundary_tag_bit(n_part_nr) != 0: + # Is tag >= 0 always true? + if tag & new_meshes[part_nr].boundary_tag_bit(n_part_nr) != 0: # Is this the best way to probe the tag? # Can one tag have multiple partition neighbors? (n_elem, n_face) = new_meshes[part_nr].\ -- GitLab From b0d40cbff286c5d7b2ad587c9c8d82294221cf80 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 9 Mar 2017 00:15:10 -0600 Subject: [PATCH 144/266] Add documentation and fix bugs. 
--- meshmode/mesh/__init__.py | 89 +++++++++++++++++++++++++++++++++++-- meshmode/mesh/processing.py | 23 +++++----- test/test_meshmode.py | 65 ++++++++++++++++++--------- 3 files changed, 142 insertions(+), 35 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index e9dbe3a6..39681606 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -88,6 +88,35 @@ class BTAG_NO_BOUNDARY(object): # noqa pass +class BTAG_PARTITION(object): + """ + A boundary tag indicating that this edge is adjacent to an element of + another :class:`Mesh`. The partition number of the adjacent mesh + is given by ``part_nr``. + + .. attribute:: part_nr + + .. versionadded:: 2017.1 + """ + def __init__(self, part_nr): + self.part_nr = int(part_nr) + + # TODO is this acceptable? + # __eq__ is also defined so maybe the hash value isn't too important + # for dictionaries. + def __hash__(self): + return self.part_nr + + def __eq__(self, other): + if isinstance(other, BTAG_PARTITION): + return self.part_nr == other.part_nr + else: + return False + + def __nq__(self, other): + return not self.__eq__(other) + + SYSTEM_TAGS = set([BTAG_NONE, BTAG_ALL, BTAG_REALLY_ALL, BTAG_NO_BOUNDARY]) # }}} @@ -386,17 +415,66 @@ class NodalAdjacency(Record): class InterPartitionAdj(): """ - Interface is not final. + Describes facial adjacency information of elements in one :class:`Mesh` to + elements in another :class:`Mesh`. The element's boundary tag gives the + partition that it is connected to. + + .. attribute:: elements + + `:class:Mesh`-local element numbers that have neighbors. + + .. attribute:: element_faces + + ``element_faces[i]`` is the face of ``elements[i]`` that has a neighbor. + + .. attribute:: neighbors + + ``neighbors[i]`` gives the element number within the neighboring partiton + of the element connected to ``elements[i]``. + + .. 
attribute:: neighbor_faces + + ``neighbor_faces[i]`` gives face index within the neighboring partition + of the face connected to ``elements[i]`` + + .. automethod:: add_connection + .. automethod:: get_neighbor + + .. versionadded:: 2017.1 """ def __init__(self): - self.adjacent = dict() + self.elements = [] + self.element_faces = [] + self.neighbors = [] + self.neighbor_faces = [] def add_connection(self, elem, face, neighbor_elem, neighbor_face): - self.adjacent[(elem, face)] = (neighbor_elem, neighbor_face) + """ + Adds a connection from ``elem`` and ``face`` within :class:`Mesh` to + ``neighbor_elem`` and ``neighbor_face`` of another neighboring partion + of type :class:`Mesh`. + :arg elem + :arg face + :arg neighbor_elem + :arg neighbor_face + """ + self.elements.append(elem) + self.element_faces.append(face) + self.neighbors.append(neighbor_elem) + self.neighbor_faces.append(neighbor_face) def get_neighbor(self, elem, face): - return self.adjacent[(elem, face)] + """ + :arg elem + :arg face + :returns: A tuple ``(neighbor_elem, neighbor_face)`` of neighboring + elements within another :class:`Mesh`. 
+ """ + for idx in range(len(self.elements)): + if elem == self.elements[idx] and face == self.element_faces[idx]: + return (self.neighbors[idx], self.neighbor_faces[idx]) + raise RuntimeError("This face does not have a neighbor") # }}} @@ -552,6 +630,7 @@ class Mesh(Record): node_vertex_consistency_tolerance=None, nodal_adjacency=False, facial_adjacency_groups=False, + interpartition_adj=False, boundary_tags=None, vertex_id_dtype=np.int32, element_id_dtype=np.int32): @@ -633,6 +712,7 @@ class Mesh(Record): self, vertices=vertices, groups=new_groups, _nodal_adjacency=nodal_adjacency, _facial_adjacency_groups=facial_adjacency_groups, + interpartition_adj=interpartition_adj, boundary_tags=boundary_tags, btag_to_index=btag_to_index, vertex_id_dtype=np.dtype(vertex_id_dtype), @@ -762,6 +842,7 @@ class Mesh(Record): == other._nodal_adjacency) and (self._facial_adjacency_groups == other._facial_adjacency_groups) + and self.interpartition_adj == other.interpartition_adj and self.boundary_tags == other.boundary_tags) def __ne__(self, other): diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 25061218..b9e07e69 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -140,21 +140,19 @@ def partition_mesh(mesh, part_per_element, part_nr): type(mesh_group)(mesh_group.order, new_indices[group_nr], new_nodes[group_nr], unit_nodes=mesh_group.unit_nodes)) - num_parts = np.max(part_per_element) - boundary_tags = list(range(num_parts)) + from meshmode.mesh import BTAG_ALL, BTAG_PARTITION + boundary_tags = [BTAG_PARTITION(n) for n in range(np.max(part_per_element))] from meshmode.mesh import Mesh part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None, boundary_tags=boundary_tags) - from meshmode.mesh import BTAG_ALL - - #TODO This should probably be in the Mesh class. + # FIXME I get errors when I try to copy part_mesh. 
from meshmode.mesh import InterPartitionAdj part_mesh.interpartition_adj = InterPartitionAdj() for igrp in range(num_groups): - part_group = part_mesh.groups[igrp] + elem_base = part_mesh.groups[igrp].element_nr_base boundary_adj = part_mesh.facial_adjacency_groups[igrp][None] boundary_elems = boundary_adj.elements boundary_faces = boundary_adj.element_faces @@ -162,7 +160,6 @@ def partition_mesh(mesh, part_per_element, part_nr): elem = boundary_elems[elem_idx] face = boundary_faces[elem_idx] tags = -boundary_adj.neighbors[elem_idx] - # Is is reasonable to expect this assertation? assert tags >= 0, "Expected boundary tag in adjacency group." parent_elem = queried_elems[elem] parent_group_num = 0 @@ -171,8 +168,8 @@ def partition_mesh(mesh, part_per_element, part_nr): parent_group_num += 1 assert parent_group_num < num_groups, "Unable to find neighbor." parent_grp_elem_base = mesh.groups[parent_group_num].element_nr_base - parent_boundary_adj = mesh.facial_adjacency_groups[parent_group_num] - for _, parent_facial_group in parent_boundary_adj.items(): + parent_adj = mesh.facial_adjacency_groups[parent_group_num] + for _, parent_facial_group in parent_adj.items(): for idx in np.where(parent_facial_group.elements == parent_elem)[0]: if parent_facial_group.neighbors[idx] >= 0 and \ parent_facial_group.element_faces[idx] == face: @@ -182,7 +179,7 @@ def partition_mesh(mesh, part_per_element, part_nr): n_part_nr = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) - tags = tags | part_mesh.boundary_tag_bit(n_part_nr) + tags = tags | part_mesh.boundary_tag_bit(BTAG_PARTITION(n_part_nr)) boundary_adj.neighbors[elem_idx] = -tags # Find the neighbor element from the other partition @@ -192,7 +189,7 @@ def partition_mesh(mesh, part_per_element, part_nr): # TODO Test if this works with multiple groups # Do I need to add the element number base? 
part_mesh.interpartition_adj.add_connection( - elem + part_group.element_nr_base, + elem + elem_base, face, n_elem, rank_neighbor_face) @@ -435,6 +432,8 @@ def merge_disjoint_meshes(meshes, skip_tests=False, single_group=False): grp_cls = None order = None unit_nodes = None + nodal_adjacency = None + facial_adjacency_groups = None for mesh in meshes: if mesh._nodal_adjacency is not None: @@ -466,6 +465,8 @@ def merge_disjoint_meshes(meshes, skip_tests=False, single_group=False): else: new_groups = [] + nodal_adjacency = None + facial_adjacency_groups = None for mesh, vert_base in zip(meshes, vert_bases): if mesh._nodal_adjacency is not None: diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 2765d7a6..7237e5b5 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -93,44 +93,69 @@ def test_partition_boxes_mesh(): from meshmode.mesh.processing import partition_mesh new_meshes = [ - partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] + partition_mesh(mesh, part_per_element, i) for i in range(num_parts)] assert mesh.nelements == np.sum( - [new_meshes[i].nelements for i in range(num_parts)]), \ + [new_meshes[i][0].nelements for i in range(num_parts)]), \ "part_mesh has the wrong number of elements" - assert count_btag_all(mesh) == np.sum( - [count_btag_all(new_meshes[i]) for i in range(num_parts)]), \ + assert count_tags(mesh, BTAG_ALL) == np.sum( + [count_tags(new_meshes[i][0], BTAG_ALL) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" + from meshmode.mesh import BTAG_PARTITION + num_tags = np.zeros((num_parts,)) + for part_nr in range(num_parts): - for f_groups in new_meshes[part_nr].facial_adjacency_groups: + (part, part_to_global) = new_meshes[part_nr] + for f_groups in part.facial_adjacency_groups: f_grp = f_groups[None] for idx in range(len(f_grp.elements)): - # Are all f_grp.neighbors guaranteed to be negative - # since I'm taking the boundary facial group? 
tag = -f_grp.neighbors[idx] + assert tag >= 0 elem = f_grp.elements[idx] face = f_grp.element_faces[idx] for n_part_nr in range(num_parts): - # Is tag >= 0 always true? - if tag & new_meshes[part_nr].boundary_tag_bit(n_part_nr) != 0: - # Is this the best way to probe the tag? - # Can one tag have multiple partition neighbors? - (n_elem, n_face) = new_meshes[part_nr].\ - interpartition_adj.get_neighbor(elem, face) - assert (elem, face) == new_meshes[n_part_nr].\ - interpartition_adj.get_neighbor(n_elem, n_face),\ - "InterpartitionAdj is not consistent" - - -def count_btag_all(mesh): + (n_part, n_part_to_global) = new_meshes[n_part_nr] + if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_nr)) != 0: + num_tags[n_part_nr] += 1 + (n_elem, n_face) = part.interpartition_adj.\ + get_neighbor(elem, face) + assert (elem, face) == n_part.interpartition_adj.\ + get_neighbor(n_elem, n_face),\ + "InterpartitionAdj is not consistent" + p_elem = part_to_global[elem] + n_part_to_global = new_meshes[n_part_nr][1] + p_n_elem = n_part_to_global[n_elem] + p_grp_nr = 0 + while p_elem >= mesh.groups[p_grp_nr].nelements: + p_elem -= mesh.groups[p_grp_nr].nelements + p_grp_nr += 1 + p_elem_base = mesh.groups[p_grp_nr].element_nr_base + f_groups = mesh.facial_adjacency_groups[p_grp_nr] + for _, p_bnd_adj in f_groups.items(): + for idx in range(len(p_bnd_adj.elements)): + if (p_elem == p_bnd_adj.elements[idx] and + face == p_bnd_adj.element_faces[idx]): + assert p_n_elem == p_bnd_adj.neighbors[idx],\ + "Tag does not give correct neighbor" + assert n_face == p_bnd_adj.neighbor_faces[idx],\ + "Tag does not give correct neighbor" + + for tag_nr in range(num_parts): + tag_sum = 0 + for mesh, _ in new_meshes: + tag_sum += count_tags(mesh, BTAG_PARTITION(tag_nr)) + assert num_tags[tag_nr] == tag_sum,\ + "part_mesh has the wrong number of BTAG_PARTITION boundaries" + +def count_tags(mesh, tag): num_bnds = 0 for adj_dict in mesh.facial_adjacency_groups: for _, bdry_group in adj_dict.items(): for 
neighbors in bdry_group.neighbors: if neighbors < 0: - if -neighbors & mesh.boundary_tag_bit(BTAG_ALL) != 0: + if -neighbors & mesh.boundary_tag_bit(tag) != 0: num_bnds += 1 return num_bnds -- GitLab From ee19a1efc18f10857cb12ae2113cfc19503616e3 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 9 Mar 2017 00:22:21 -0600 Subject: [PATCH 145/266] Whitespace fixes --- meshmode/mesh/__init__.py | 3 ++- meshmode/mesh/processing.py | 3 ++- test/test_meshmode.py | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 39681606..1408d595 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -54,6 +54,7 @@ Predefined Boundary tags .. autoclass:: BTAG_ALL .. autoclass:: BTAG_REALLY_ALL .. autoclass:: BTAG_NO_BOUNDARY +.. autoclass:: BTAG_PARTITION """ @@ -88,7 +89,7 @@ class BTAG_NO_BOUNDARY(object): # noqa pass -class BTAG_PARTITION(object): +class BTAG_PARTITION(object): # noqa """ A boundary tag indicating that this edge is adjacent to an element of another :class:`Mesh`. 
The partition number of the adjacent mesh diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index b9e07e69..b427571d 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -179,7 +179,8 @@ def partition_mesh(mesh, part_per_element, part_nr): n_part_nr = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) - tags = tags | part_mesh.boundary_tag_bit(BTAG_PARTITION(n_part_nr)) + tags = tags | part_mesh.boundary_tag_bit( + BTAG_PARTITION(n_part_nr)) boundary_adj.neighbors[elem_idx] = -tags # Find the neighbor element from the other partition diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 7237e5b5..e2b45fa7 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -131,7 +131,7 @@ def test_partition_boxes_mesh(): while p_elem >= mesh.groups[p_grp_nr].nelements: p_elem -= mesh.groups[p_grp_nr].nelements p_grp_nr += 1 - p_elem_base = mesh.groups[p_grp_nr].element_nr_base + #p_elem_base = mesh.groups[p_grp_nr].element_nr_base f_groups = mesh.facial_adjacency_groups[p_grp_nr] for _, p_bnd_adj in f_groups.items(): for idx in range(len(p_bnd_adj.elements)): @@ -149,6 +149,7 @@ def test_partition_boxes_mesh(): assert num_tags[tag_nr] == tag_sum,\ "part_mesh has the wrong number of BTAG_PARTITION boundaries" + def count_tags(mesh, tag): num_bnds = 0 for adj_dict in mesh.facial_adjacency_groups: -- GitLab From b8a23986200996ad26e30646756591feb0ddd9ab Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 15 Mar 2017 00:43:35 -0500 Subject: [PATCH 146/266] Started partition_interpolation test --- test/test_meshmode.py | 46 ++++++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index e2b45fa7..9f31f4c9 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -49,26 +49,44 @@ import logging logger = logging.getLogger(__name__) -# {{{ partition_mesh -''' -#TODO facial_adjacency_groups 
is not available in generate_torus. -def test_partition_torus_mesh(): - from meshmode.mesh.generation import generate_torus - my_mesh = generate_torus(2, 1, n_outer=2, n_inner=2) +def test_partition_interpolation(ctx_getter): + cl_ctx = ctx_getter() + queue = cl.CommandQueue(cl_ctx) + order = 4 + group_factory = PolynomialWarpAndBlendGroupFactory(order) + n = 3 + dim = 2 + num_parts = 7 + from meshmode.mesh.generation import generate_warped_rect_mesh + mesh = generate_warped_rect_mesh(dim, order=order, n=n) + + adjacency_list = np.zeros((mesh.nelements,), dtype=set) + for elem in range(mesh.nelements): + adjacency_list[elem] = set() + starts = mesh.nodal_adjacency.neighbors_starts + for n in range(starts[elem], starts[elem + 1]): + adjacency_list[elem].add(mesh.nodal_adjacency.neighbors[n]) - part_per_element = np.array([0, 1, 2, 1, 1, 2, 1, 0]) + from pymetis import part_graph + (_, p) = part_graph(num_parts, adjacency=adjacency_list) + part_per_element = np.array(p) from meshmode.mesh.processing import partition_mesh - (part_mesh0, _) = partition_mesh(my_mesh, part_per_element, 0) - (part_mesh1, _) = partition_mesh(my_mesh, part_per_element, 1) - (part_mesh2, _) = partition_mesh(my_mesh, part_per_element, 2) + part_meshes = [ + partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] - assert part_mesh0.nelements == 2 - assert part_mesh1.nelements == 4 - assert part_mesh2.nelements == 2 -''' + from meshmode.discretization import Discretization + vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) + for i in range(num_parts)] + + from meshmode.discretization.connection import (make_face_restriction, + check_connection) + bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, + FRESTR_INTERIOR_FACES) for i in range(num_parts)] +# {{{ partition_mesh + def test_partition_boxes_mesh(): n = 5 num_parts = 7 -- GitLab From f639e3eec5e795e784e22afe54b9fc460f069f5d Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 16 Mar 2017 
21:08:12 -0500 Subject: [PATCH 147/266] Created make_opposite_partition_connection function --- .../discretization/connection/__init__.py | 2 +- .../connection/opposite_face.py | 36 +++++++++++++++++++ test/test_meshmode.py | 11 ++++-- 3 files changed, 46 insertions(+), 3 deletions(-) diff --git a/meshmode/discretization/connection/__init__.py b/meshmode/discretization/connection/__init__.py index da982c9d..20f562f0 100644 --- a/meshmode/discretization/connection/__init__.py +++ b/meshmode/discretization/connection/__init__.py @@ -35,7 +35,7 @@ from meshmode.discretization.connection.face import ( FACE_RESTR_INTERIOR, FACE_RESTR_ALL, make_face_restriction, make_face_to_all_faces_embedding) from meshmode.discretization.connection.opposite_face import \ - make_opposite_face_connection + make_opposite_face_connection, make_opposite_partition_connection from meshmode.discretization.connection.refinement import \ make_refinement_connection diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 6ce70b2a..629782ff 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -392,4 +392,40 @@ def make_opposite_face_connection(volume_to_bdry_conn): # }}} + +def make_opposite_partition_connection(vol_to_bdry_conns): + """ + Given a list of boundary restriction connections *volume_to_bdry_conn*, + return a :class:`DirectDiscretizationConnection` that performs data + exchange across adjacent faces of different partitions. + + :arg :vol_to_bdry_conns A list of *volume_to_bdry_conn* corresponding to + a partition of a parent mesh. 
+ """ + + disc_conns = [] + return disc_conns + nparts = len(vol_to_bdry_conns) + from meshmode.discretization.connection import ( + DirectDiscretizationConnection, DiscretizationConnectionElementGroup) + for part_idx in range(nparts): + vol_discr = vol_to_bdry_conns[part_idx].from_discr + vol_mesh = vol_discr.mesh + bdry_discr = vol_to_bdry_conns[part_idx].to_discr + + with cl.CommandQueue(vol_discr.cl_context) as queue: + # Create a list of batches. Each batch contains interpolation + # data from one partition to another. + nop + + disc_conns.append(DirectDiscretizationConnection( + from_discr=bdry_discr, + to_discr=bdry_discr, + groups=[ + DiscretizationConnectionElementGroup(batches=batches) + for batches in groups], + is_surjective=True)) + + return disc_conns + # vim: foldmethod=marker diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 9f31f4c9..6b8bc3de 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -79,11 +79,18 @@ def test_partition_interpolation(ctx_getter): vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) for i in range(num_parts)] - from meshmode.discretization.connection import (make_face_restriction, - check_connection) + from meshmode.discretization.connection import make_face_restriction bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, FRESTR_INTERIOR_FACES) for i in range(num_parts)] + from meshmode.discretization.connection import \ + make_opposite_partition_connection + opp_faces = make_opposite_partition_connection(bdry_connections) + + from meshmode.discretization.connection import check_connection + for opp_face in opp_faces: + check_connection(opp_face) + # {{{ partition_mesh -- GitLab From b2bb379daa06089b367a9c872e689044d09c9209 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 16 Mar 2017 23:56:17 -0500 Subject: [PATCH 148/266] Added part_idx array to InterPartitionalAdj --- .../discretization/connection/__init__.py | 2 +- .../connection/opposite_face.py | 53 
++++++++++++------- meshmode/mesh/__init__.py | 12 +++-- meshmode/mesh/processing.py | 1 + test/test_meshmode.py | 16 +++--- 5 files changed, 51 insertions(+), 33 deletions(-) diff --git a/meshmode/discretization/connection/__init__.py b/meshmode/discretization/connection/__init__.py index 20f562f0..6a8e2e53 100644 --- a/meshmode/discretization/connection/__init__.py +++ b/meshmode/discretization/connection/__init__.py @@ -35,7 +35,7 @@ from meshmode.discretization.connection.face import ( FACE_RESTR_INTERIOR, FACE_RESTR_ALL, make_face_restriction, make_face_to_all_faces_embedding) from meshmode.discretization.connection.opposite_face import \ - make_opposite_face_connection, make_opposite_partition_connection + make_opposite_face_connection, make_partition_connection from meshmode.discretization.connection.refinement import \ make_refinement_connection diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 629782ff..c04a444a 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -393,7 +393,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): # }}} -def make_opposite_partition_connection(vol_to_bdry_conns): +def make_partition_connection(vol_to_bdry_conns): """ Given a list of boundary restriction connections *volume_to_bdry_conn*, return a :class:`DirectDiscretizationConnection` that performs data @@ -404,27 +404,40 @@ def make_opposite_partition_connection(vol_to_bdry_conns): """ disc_conns = [] - return disc_conns nparts = len(vol_to_bdry_conns) from meshmode.discretization.connection import ( - DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - for part_idx in range(nparts): - vol_discr = vol_to_bdry_conns[part_idx].from_discr - vol_mesh = vol_discr.mesh - bdry_discr = vol_to_bdry_conns[part_idx].to_discr - - with cl.CommandQueue(vol_discr.cl_context) as queue: - # Create a list of batches. 
Each batch contains interpolation - # data from one partition to another. - nop - - disc_conns.append(DirectDiscretizationConnection( - from_discr=bdry_discr, - to_discr=bdry_discr, - groups=[ - DiscretizationConnectionElementGroup(batches=batches) - for batches in groups], - is_surjective=True)) + DirectDiscretizationConnection, DiscretizationConnectionElementGroup) + + # My intuition tells me that this should not live inside a for loop. + # However, I need to grab a cl_context. I'll assume that each context from + # each partition is the same and I'll use the first one. + cl_context = vol_to_bdry_conns[0].from_discr.cl_context + with cl.CommandQueue(cl_context) as queue: + # Create a list of batches. Each batch contains interpolation + # data from one partition to another. + for src_part_idx in range(nparts): + part_batches = [[] for _ in range(nparts)] + src_vol_conn = vol_to_bdry_conns[src_part_idx] + src_from_discr = src_vol_conn.from_discr + src_to_discr = src_vol_conn.to_discr + src_mesh = src_from_discr.mesh + adj = src_mesh.interpartition_adj + for elem_idx, elem in enumerate(adj.elements): + face = adj.element_faces[elem_idx] + (part_idx, n_elem, n_face) = adj.get_neighbor(elem, face) + + # Using the neighboring face and element, we need to create batches + # I'm not sure how I would do this. My guess is that it would look + # something like _make_cross_face_batches + + # Make one Discr connection for each partition. 
+ disc_conns.append(DirectDiscretizationConnection( + from_discr=src_from_discr, + to_discr=src_to_discr, + groups=[ + DiscretizationConnectionElementGroup(batches=batches) + for batches in part_batches], + is_surjective=True)) return disc_conns diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 1408d595..9a1a9019 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -449,19 +449,22 @@ class InterPartitionAdj(): self.element_faces = [] self.neighbors = [] self.neighbor_faces = [] + self.part_indices = [] - def add_connection(self, elem, face, neighbor_elem, neighbor_face): + def add_connection(self, elem, face, part_idx, neighbor_elem, neighbor_face): """ Adds a connection from ``elem`` and ``face`` within :class:`Mesh` to ``neighbor_elem`` and ``neighbor_face`` of another neighboring partion of type :class:`Mesh`. :arg elem :arg face + :arg part_idx :arg neighbor_elem :arg neighbor_face """ self.elements.append(elem) self.element_faces.append(face) + self.part_indices.append(part_idx) self.neighbors.append(neighbor_elem) self.neighbor_faces.append(neighbor_face) @@ -469,12 +472,13 @@ class InterPartitionAdj(): """ :arg elem :arg face - :returns: A tuple ``(neighbor_elem, neighbor_face)`` of neighboring - elements within another :class:`Mesh`. + :returns: A tuple ``(part_idx, neighbor_elem, neighbor_face)`` of + neighboring elements within another :class:`Mesh`. 
""" for idx in range(len(self.elements)): if elem == self.elements[idx] and face == self.element_faces[idx]: - return (self.neighbors[idx], self.neighbor_faces[idx]) + return (self.part_indices[idx], + self.neighbors[idx], self.neighbor_faces[idx]) raise RuntimeError("This face does not have a neighbor") # }}} diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index b427571d..3718ec6b 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -192,6 +192,7 @@ def partition_mesh(mesh, part_per_element, part_nr): part_mesh.interpartition_adj.add_connection( elem + elem_base, face, + n_part_nr, n_elem, rank_neighbor_face) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 6b8bc3de..b608a753 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -83,13 +83,12 @@ def test_partition_interpolation(ctx_getter): bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, FRESTR_INTERIOR_FACES) for i in range(num_parts)] - from meshmode.discretization.connection import \ - make_opposite_partition_connection - opp_faces = make_opposite_partition_connection(bdry_connections) + from meshmode.discretization.connection import make_partition_connection + opp_partitions = make_partition_connection(bdry_connections) - from meshmode.discretization.connection import check_connection - for opp_face in opp_faces: - check_connection(opp_face) + #from meshmode.discretization.connection import check_connection + #for opp_face in opp_faces: + #check_connection(opp_face) # {{{ partition_mesh @@ -144,9 +143,10 @@ def test_partition_boxes_mesh(): (n_part, n_part_to_global) = new_meshes[n_part_nr] if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_nr)) != 0: num_tags[n_part_nr] += 1 - (n_elem, n_face) = part.interpartition_adj.\ + (n_part_idx, n_elem, n_face) = part.interpartition_adj.\ get_neighbor(elem, face) - assert (elem, face) == n_part.interpartition_adj.\ + assert n_part_idx == n_part_nr + assert (part_nr, 
elem, face) == n_part.interpartition_adj.\ get_neighbor(n_elem, n_face),\ "InterpartitionAdj is not consistent" p_elem = part_to_global[elem] -- GitLab From 49c90d033013c759a989f26ab50886ce5964d0ae Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 17 Mar 2017 01:14:59 -0500 Subject: [PATCH 149/266] Added neighbor_group to InterPartitionAdj --- .../connection/opposite_face.py | 26 +++++--- meshmode/mesh/__init__.py | 14 +++-- meshmode/mesh/processing.py | 60 +++++++++---------- test/test_meshmode.py | 53 ++++++++-------- 4 files changed, 82 insertions(+), 71 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index c04a444a..3546c963 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -393,6 +393,9 @@ def make_opposite_face_connection(volume_to_bdry_conn): # }}} +def _make_cross_partition_batches(): + return [42] + def make_partition_connection(vol_to_bdry_conns): """ Given a list of boundary restriction connections *volume_to_bdry_conn*, @@ -416,19 +419,24 @@ def make_partition_connection(vol_to_bdry_conns): # Create a list of batches. Each batch contains interpolation # data from one partition to another. for src_part_idx in range(nparts): - part_batches = [[] for _ in range(nparts)] src_vol_conn = vol_to_bdry_conns[src_part_idx] src_from_discr = src_vol_conn.from_discr src_to_discr = src_vol_conn.to_discr src_mesh = src_from_discr.mesh - adj = src_mesh.interpartition_adj - for elem_idx, elem in enumerate(adj.elements): - face = adj.element_faces[elem_idx] - (part_idx, n_elem, n_face) = adj.get_neighbor(elem, face) - - # Using the neighboring face and element, we need to create batches - # I'm not sure how I would do this. 
My guess is that it would look - # something like _make_cross_face_batches + ngroups = len(src_mesh.groups) + part_batches = [[] for _ in range(ngroups)] + for group_num, adj in enumerate(src_mesh.interpart_adj_groups): + for elem_idx, elem in enumerate(adj.elements): + face = adj.element_faces[elem_idx] + (part_idx, group_num, n_elem, n_face) =\ + adj.get_neighbor(elem, face) + + # We need to create batches using the + # neighboring face, element, and group + # I'm not sure how I would do this. + # My guess is that it would look + # something like _make_cross_face_batches + part_batches[group_num].extend(_make_cross_partition_batches()) # Make one Discr connection for each partition. disc_conns.append(DirectDiscretizationConnection( diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 9a1a9019..14b6896f 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -449,9 +449,10 @@ class InterPartitionAdj(): self.element_faces = [] self.neighbors = [] self.neighbor_faces = [] + self.neighbor_groups = [] self.part_indices = [] - def add_connection(self, elem, face, part_idx, neighbor_elem, neighbor_face): + def add_connection(self, elem, face, part_idx, neighbor_group, neighbor_elem, neighbor_face): """ Adds a connection from ``elem`` and ``face`` within :class:`Mesh` to ``neighbor_elem`` and ``neighbor_face`` of another neighboring partion @@ -466,18 +467,19 @@ class InterPartitionAdj(): self.element_faces.append(face) self.part_indices.append(part_idx) self.neighbors.append(neighbor_elem) + self.neighbor_groups.append(neighbor_group) self.neighbor_faces.append(neighbor_face) def get_neighbor(self, elem, face): """ :arg elem :arg face - :returns: A tuple ``(part_idx, neighbor_elem, neighbor_face)`` of + :returns: A tuple ``(part_idx, neighbor_group, neighbor_elem, neighbor_face)`` of neighboring elements within another :class:`Mesh`. 
""" for idx in range(len(self.elements)): if elem == self.elements[idx] and face == self.element_faces[idx]: - return (self.part_indices[idx], + return (self.part_indices[idx], self.neighbor_groups[idx], self.neighbors[idx], self.neighbor_faces[idx]) raise RuntimeError("This face does not have a neighbor") @@ -635,7 +637,7 @@ class Mesh(Record): node_vertex_consistency_tolerance=None, nodal_adjacency=False, facial_adjacency_groups=False, - interpartition_adj=False, + interpart_adj_groups=False, boundary_tags=None, vertex_id_dtype=np.int32, element_id_dtype=np.int32): @@ -717,7 +719,7 @@ class Mesh(Record): self, vertices=vertices, groups=new_groups, _nodal_adjacency=nodal_adjacency, _facial_adjacency_groups=facial_adjacency_groups, - interpartition_adj=interpartition_adj, + interpart_adj_groups=interpart_adj_groups, boundary_tags=boundary_tags, btag_to_index=btag_to_index, vertex_id_dtype=np.dtype(vertex_id_dtype), @@ -847,7 +849,7 @@ class Mesh(Record): == other._nodal_adjacency) and (self._facial_adjacency_groups == other._facial_adjacency_groups) - and self.interpartition_adj == other.interpartition_adj + and self.interpart_adj_groups == other.interpart_adj_groups and self.boundary_tags == other.boundary_tags) def __ne__(self, other): diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 3718ec6b..3612bd47 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -41,15 +41,13 @@ __doc__ = """ """ -# {{{ partition_mesh - -def partition_mesh(mesh, part_per_element, part_nr): +def partition_mesh(mesh, part_per_element, part_num): """ :arg mesh: A :class:`meshmode.mesh.Mesh` to be partitioned. :arg part_per_element: A :class:`numpy.ndarray` containing one integer per element of *mesh* indicating which part of the partitioned mesh the element is to become a part of. - :arg part_nr: The part number of the mesh to return. + :arg part_num: The part number of the mesh to return. 
:returns: A tuple ``(part_mesh, part_to_global)``, where *part_mesh* is a :class:`meshmode.mesh.Mesh` that is a partition of mesh, and @@ -65,7 +63,7 @@ def partition_mesh(mesh, part_per_element, part_nr): "part_per_element must have shape (mesh.nelements,)") # Contains the indices of the elements requested. - queried_elems = np.where(np.array(part_per_element) == part_nr)[0] + queried_elems = np.where(np.array(part_per_element) == part_num)[0] num_groups = len(mesh.groups) new_indices = [] @@ -80,8 +78,8 @@ def partition_mesh(mesh, part_per_element, part_nr): skip_groups = [] num_prev_elems = 0 start_idx = 0 - for group_nr in range(num_groups): - mesh_group = mesh.groups[group_nr] + for group_num in range(num_groups): + mesh_group = mesh.groups[group_num] # Find the index of first element in the next group end_idx = len(queried_elems) @@ -91,7 +89,7 @@ def partition_mesh(mesh, part_per_element, part_nr): break if start_idx == end_idx: - skip_groups.append(group_nr) + skip_groups.append(group_num) new_indices.append(np.array([])) new_nodes.append(np.array([])) num_prev_elems += mesh_group.nelements @@ -107,10 +105,10 @@ def partition_mesh(mesh, part_per_element, part_nr): for j in range(start_idx, end_idx): elems = queried_elems[j] - num_prev_elems new_idx = j - start_idx - new_nodes[group_nr][i, new_idx, :] = mesh_group.nodes[i, elems, :] + new_nodes[group_num][i, new_idx, :] = mesh_group.nodes[i, elems, :] - #index_set = np.append(index_set, new_indices[group_nr].ravel()) - index_sets = np.append(index_sets, set(new_indices[group_nr].ravel())) + #index_set = np.append(index_set, new_indices[group_num].ravel()) + index_sets = np.append(index_sets, set(new_indices[group_num].ravel())) num_prev_elems += mesh_group.nelements start_idx = end_idx @@ -124,21 +122,21 @@ def partition_mesh(mesh, part_per_element, part_nr): new_vertices[dim] = mesh.vertices[dim][required_indices] # Our indices need to be in range [0, len(mesh.nelements)]. 
- for group_nr in range(num_groups): - if group_nr not in skip_groups: - for i in range(len(new_indices[group_nr])): - for j in range(len(new_indices[group_nr][0])): - original_index = new_indices[group_nr][i, j] - new_indices[group_nr][i, j] = np.where( - required_indices == original_index)[0] + for group_num in range(num_groups): + if group_num not in skip_groups: + for i in range(len(new_indices[group_num])): + for j in range(len(new_indices[group_num][0])): + original_index = new_indices[group_num][i, j] + new_indices[group_num][i, j] = np.where( + required_indices == original_index)[0] new_mesh_groups = [] - for group_nr in range(num_groups): - if group_nr not in skip_groups: - mesh_group = mesh.groups[group_nr] + for group_num in range(num_groups): + if group_num not in skip_groups: + mesh_group = mesh.groups[group_num] new_mesh_groups.append( - type(mesh_group)(mesh_group.order, new_indices[group_nr], - new_nodes[group_nr], unit_nodes=mesh_group.unit_nodes)) + type(mesh_group)(mesh_group.order, new_indices[group_num], + new_nodes[group_num], unit_nodes=mesh_group.unit_nodes)) from meshmode.mesh import BTAG_ALL, BTAG_PARTITION boundary_tags = [BTAG_PARTITION(n) for n in range(np.max(part_per_element))] @@ -149,7 +147,8 @@ def partition_mesh(mesh, part_per_element, part_nr): # FIXME I get errors when I try to copy part_mesh. from meshmode.mesh import InterPartitionAdj - part_mesh.interpartition_adj = InterPartitionAdj() + part_mesh.interpart_adj_groups = [ + InterPartitionAdj() for _ in range(num_groups)] for igrp in range(num_groups): elem_base = part_mesh.groups[igrp].element_nr_base @@ -169,7 +168,7 @@ def partition_mesh(mesh, part_per_element, part_nr): assert parent_group_num < num_groups, "Unable to find neighbor." 
parent_grp_elem_base = mesh.groups[parent_group_num].element_nr_base parent_adj = mesh.facial_adjacency_groups[parent_group_num] - for _, parent_facial_group in parent_adj.items(): + for n_grp_num, parent_facial_group in parent_adj.items(): for idx in np.where(parent_facial_group.elements == parent_elem)[0]: if parent_facial_group.neighbors[idx] >= 0 and \ parent_facial_group.element_faces[idx] == face: @@ -177,22 +176,23 @@ def partition_mesh(mesh, part_per_element, part_nr): + parent_grp_elem_base) rank_neighbor_face = parent_facial_group.neighbor_faces[idx] - n_part_nr = part_per_element[rank_neighbor] + n_part_num = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) tags = tags | part_mesh.boundary_tag_bit( - BTAG_PARTITION(n_part_nr)) + BTAG_PARTITION(n_part_num)) boundary_adj.neighbors[elem_idx] = -tags # Find the neighbor element from the other partition n_elem = np.count_nonzero( - part_per_element[:rank_neighbor] == n_part_nr) + part_per_element[:rank_neighbor] == n_part_num) # TODO Test if this works with multiple groups # Do I need to add the element number base? 
- part_mesh.interpartition_adj.add_connection( + part_mesh.interpart_adj_groups[igrp].add_connection( elem + elem_base, face, - n_part_nr, + n_part_num, + n_grp_num, n_elem, rank_neighbor_face) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index b608a753..d59186ac 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -84,16 +84,16 @@ def test_partition_interpolation(ctx_getter): FRESTR_INTERIOR_FACES) for i in range(num_parts)] from meshmode.discretization.connection import make_partition_connection - opp_partitions = make_partition_connection(bdry_connections) + connections = make_partition_connection(bdry_connections) - #from meshmode.discretization.connection import check_connection - #for opp_face in opp_faces: - #check_connection(opp_face) + from meshmode.discretization.connection import check_connection + for conn in connections: + check_connection(conn) # {{{ partition_mesh -def test_partition_boxes_mesh(): +def test_partition_mesh(): n = 5 num_parts = 7 from meshmode.mesh.generation import generate_regular_rect_mesh @@ -130,34 +130,35 @@ def test_partition_boxes_mesh(): from meshmode.mesh import BTAG_PARTITION num_tags = np.zeros((num_parts,)) - for part_nr in range(num_parts): - (part, part_to_global) = new_meshes[part_nr] - for f_groups in part.facial_adjacency_groups: + for part_num in range(num_parts): + (part, part_to_global) = new_meshes[part_num] + for grp_num, f_groups in enumerate(part.facial_adjacency_groups): f_grp = f_groups[None] for idx in range(len(f_grp.elements)): tag = -f_grp.neighbors[idx] assert tag >= 0 elem = f_grp.elements[idx] face = f_grp.element_faces[idx] - for n_part_nr in range(num_parts): - (n_part, n_part_to_global) = new_meshes[n_part_nr] - if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_nr)) != 0: - num_tags[n_part_nr] += 1 - (n_part_idx, n_elem, n_face) = part.interpartition_adj.\ - get_neighbor(elem, face) - assert n_part_idx == n_part_nr - assert (part_nr, elem, face) == 
n_part.interpartition_adj.\ + for n_part_num in range(num_parts): + (n_part, n_part_to_global) = new_meshes[n_part_num] + if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: + num_tags[n_part_num] += 1 + (n_part_idx, n_grp_num, n_elem, n_face) = part.\ + interpart_adj_groups[grp_num].get_neighbor(elem, face) + assert n_part_idx == n_part_num + assert (part_num, grp_num, elem, face) == n_part.\ + interpart_adj_groups[n_grp_num].\ get_neighbor(n_elem, n_face),\ "InterpartitionAdj is not consistent" p_elem = part_to_global[elem] - n_part_to_global = new_meshes[n_part_nr][1] + n_part_to_global = new_meshes[n_part_num][1] p_n_elem = n_part_to_global[n_elem] - p_grp_nr = 0 - while p_elem >= mesh.groups[p_grp_nr].nelements: - p_elem -= mesh.groups[p_grp_nr].nelements - p_grp_nr += 1 - #p_elem_base = mesh.groups[p_grp_nr].element_nr_base - f_groups = mesh.facial_adjacency_groups[p_grp_nr] + p_grp_num = 0 + while p_elem >= mesh.groups[p_grp_num].nelements: + p_elem -= mesh.groups[p_grp_num].nelements + p_grp_num += 1 + #p_elem_base = mesh.groups[p_grp_num].element_num_base + f_groups = mesh.facial_adjacency_groups[p_grp_num] for _, p_bnd_adj in f_groups.items(): for idx in range(len(p_bnd_adj.elements)): if (p_elem == p_bnd_adj.elements[idx] and @@ -167,11 +168,11 @@ def test_partition_boxes_mesh(): assert n_face == p_bnd_adj.neighbor_faces[idx],\ "Tag does not give correct neighbor" - for tag_nr in range(num_parts): + for tag_num in range(num_parts): tag_sum = 0 for mesh, _ in new_meshes: - tag_sum += count_tags(mesh, BTAG_PARTITION(tag_nr)) - assert num_tags[tag_nr] == tag_sum,\ + tag_sum += count_tags(mesh, BTAG_PARTITION(tag_num)) + assert num_tags[tag_num] == tag_sum,\ "part_mesh has the wrong number of BTAG_PARTITION boundaries" -- GitLab From e115c77deeefc1d302b4ac4677cb9bb4fc949ecb Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 17 Mar 2017 02:38:02 -0500 Subject: [PATCH 150/266] Added _make_cross_partition_batch --- .../connection/opposite_face.py | 
168 ++++++++++++++++-- 1 file changed, 158 insertions(+), 10 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 3546c963..4558730c 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -393,8 +393,153 @@ def make_opposite_face_connection(volume_to_bdry_conn): # }}} -def _make_cross_partition_batches(): - return [42] +def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, i_src_part, i_src_grp, from_elem, from_face): + + (i_tgt_part, i_tgt_grp, bdry_elem, bdry_face) = adj.get_neighbor(from_elem, from_face) + + src_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr + tgt_bdry_discr = vol_to_bdry_conns[i_scr_part].to_discr + + to_bdry_nodes = ( + # FIXME: This should view-then-transfer (but PyOpenCL doesn't do + # non-contiguous transfers for now). + tgt_bdry_discr.groups[i_tgt_grp].view( + tgt_bdry_discr.nodes().get(queue=queue)) + [:, to_bdry_element_indices]) + + tol = 1e4 * np.finfo(to_bdry_nodes.dtype).eps + + # TODO: Should this use vol_discr? + from_mesh_grp = src_bdry_discr.mesh.groups[i_src_grp] + from_grp = src_bdry_discr.groups[i_src_grp] + + dim = from_grp.dim + ambient_dim, nelements, nto_unit_nodes = to_bdry_nodes.shape + + initial_guess = np.mean(from_mesh_grp.vertex_unit_coordinates(), axis=0) + + from_unit_nodes = np.empty((dim, nelements, nto_unit_nodes)) + from_unit_nodes[:] = initial_guess.reshape(-1, 1, 1) + + import modepy as mp + from_vdm = mp.vandermonde(from_grp.basis(), from_grp.unit_nodes) + from_inv_t_vdm = la.inv(from_vdm.T) + from_nfuncs = len(from_grp.basis()) + + # (ambient_dim, nelements, nfrom_unit_nodes) + from_bdry_nodes = ( + # FIXME: This should view-then-transfer (but PyOpenCL doesn't do + # non-contiguous transfers for now). + # TODO: Should this be vol_discr? 
+ bdry_discr.groups[i_src_grp].view( + src_bdry_discr.nodes().get(queue=queue)) + [:, from_bdry_element_indices]) + + def apply_map(unit_nodes): + # unit_nodes: (dim, nelements, nto_unit_nodes) + # basis_at_unit_nodes + basis_at_unit_nodes = np.empty((from_nfuncs, nelements, nto_unit_nodes)) + for i, f in enumerate(from_grp.basis()): + basis_at_unit_nodes[i] = ( + f(unit_nodes.reshape(dim, -1)) + .reshape(nelements, nto_unit_nodes)) + intp_coeffs = np.einsum("fj,jet->fet", from_inv_t_vdm, basis_at_unit_nodes) + # If we're interpolating 1, we had better get 1 back. + one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) + assert (one_deviation < tol).all(), np.max(one_deviation) + return np.einsum("fet,aef->aet", intp_coeffs, from_bdry_nodes) + + def get_map_jacobian(unit_nodes): + # unit_nodes: (dim, nelements, nto_unit_nodes) + # basis_at_unit_nodes + dbasis_at_unit_nodes = np.empty( + (dim, from_nfuncs, nelements, nto_unit_nodes)) + for i, df in enumerate(from_grp.grad_basis()): + df_result = df(unit_nodes.reshape(dim, -1)) + for rst_axis, df_r in enumerate(df_result): + dbasis_at_unit_nodes[rst_axis, i] = ( + df_r.reshape(nelements, nto_unit_nodes)) + dintp_coeffs = np.einsum( + "fj,rjet->rfet", from_inv_t_vdm, dbasis_at_unit_nodes) + return np.einsum("rfet,aef->raet", dintp_coeffs, from_bdry_nodes) + + logger.info("make_opposite_face_connection: begin gauss-newton") + niter = 0 + while True: + resid = apply_map(from_unit_nodes) - to_bdry_nodes + df = get_map_jacobian(from_unit_nodes) + df_inv_resid = np.empty_like(from_unit_nodes) + # For the 1D/2D accelerated versions, we'll use the normal + # equations and Cramer's rule. If you're looking for high-end + # numerics, look no further than meshmode. 
+ if dim == 1: + # A is df.T + ata = np.einsum("iket,jket->ijet", df, df) + atb = np.einsum("iket,ket->iet", df, resid) + df_inv_resid = atb / ata[0, 0] + elif dim == 2: + # A is df.T + ata = np.einsum("iket,jket->ijet", df, df) + atb = np.einsum("iket,ket->iet", df, resid) + det = ata[0, 0]*ata[1, 1] - ata[0, 1]*ata[1, 0] + df_inv_resid = np.empty_like(from_unit_nodes) + df_inv_resid[0] = 1/det * (ata[1, 1] * atb[0] - ata[1, 0]*atb[1]) + df_inv_resid[1] = 1/det * (-ata[0, 1] * atb[0] + ata[0, 0]*atb[1]) + else: + # The boundary of a 3D mesh is 2D, so that's the + # highest-dimensional case we genuinely care about. + # + # This stinks, performance-wise, because it's not vectorized. + # But we'll only hit it for boundaries of 4+D meshes, in which + # case... good luck. :) + for e in range(nelements): + for t in range(nto_unit_nodes): + df_inv_resid[:, e, t], _, _, _ = \ + la.lstsq(df[:, :, e, t].T, resid[:, e, t]) + from_unit_nodes = from_unit_nodes - df_inv_resid + max_resid = np.max(np.abs(resid)) + logger.debug("gauss-newton residual: %g" % max_resid) + if max_resid < tol: + logger.info("make_opposite_face_connection: gauss-newton: done, " + "final residual: %g" % max_resid) + break + niter += 1 + if niter > 10: + raise RuntimeError("Gauss-Newton (for finding opposite-face reference " + "coordinates) did not converge") + + def to_dev(ary): + return cl.array.to_device(queue, ary, array_queue=None) + + done_elements = np.zeros(nelements, dtype=np.bool) + + # TODO: Still need to figure out what's happening here. 
+ while True: + todo_elements, = np.where(~done_elements) + if not len(todo_elements): + return + template_unit_nodes = from_unit_nodes[:, todo_elements[0], :] + unit_node_dist = np.max(np.max(np.abs( + from_unit_nodes[:, todo_elements, :] + - + template_unit_nodes.reshape(dim, 1, -1)), + axis=2), axis=0) + close_els = todo_elements[unit_node_dist < tol] + done_elements[close_els] = True + unit_node_dist = np.max(np.max(np.abs( + from_unit_nodes[:, todo_elements, :] + - + template_unit_nodes.reshape(dim, 1, -1)), + axis=2), axis=0) + + from meshmode.discretization.connection import InterpolationBatch + yield InterpolationBatch( + from_group_index=i_src_grp, + from_element_indices=to_dev(from_bdry_element_indices[close_els]), + to_element_indices=to_dev(to_bdry_element_indices[close_els]), + result_unit_nodes=template_unit_nodes, + to_element_face=None) + def make_partition_connection(vol_to_bdry_conns): """ @@ -418,25 +563,28 @@ def make_partition_connection(vol_to_bdry_conns): with cl.CommandQueue(cl_context) as queue: # Create a list of batches. Each batch contains interpolation # data from one partition to another. - for src_part_idx in range(nparts): - src_vol_conn = vol_to_bdry_conns[src_part_idx] - src_from_discr = src_vol_conn.from_discr - src_to_discr = src_vol_conn.to_discr + for i_src_part, src_vol_conn in enumerate(vol_to_bdry_conns): src_mesh = src_from_discr.mesh ngroups = len(src_mesh.groups) part_batches = [[] for _ in range(ngroups)] for group_num, adj in enumerate(src_mesh.interpart_adj_groups): for elem_idx, elem in enumerate(adj.elements): face = adj.element_faces[elem_idx] - (part_idx, group_num, n_elem, n_face) =\ - adj.get_neighbor(elem, face) - # We need to create batches using the + # We need to create a batch using the # neighboring face, element, and group # I'm not sure how I would do this. 
# My guess is that it would look # something like _make_cross_face_batches - part_batches[group_num].extend(_make_cross_partition_batches()) + part_batches[group_num].append( + _make_cross_partition_batch( + queue, + vol_to_bdry_conns, + adj, + i_src_part, + group_num, + elem, + face)) # Make one Discr connection for each partition. disc_conns.append(DirectDiscretizationConnection( -- GitLab From b47989c1acab3519e78c5a0c312553dd40a9edd4 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 23 Mar 2017 11:12:54 -0500 Subject: [PATCH 151/266] Work on _make_cross_partition_batch --- .../discretization/connection/__init__.py | 2 + .../connection/opposite_face.py | 153 ++++++++++-------- test/test_meshmode.py | 5 +- 3 files changed, 89 insertions(+), 71 deletions(-) diff --git a/meshmode/discretization/connection/__init__.py b/meshmode/discretization/connection/__init__.py index 6a8e2e53..c109d245 100644 --- a/meshmode/discretization/connection/__init__.py +++ b/meshmode/discretization/connection/__init__.py @@ -51,6 +51,7 @@ __all__ = [ "make_face_restriction", "make_face_to_all_faces_embedding", "make_opposite_face_connection", + "make_partition_connection", "make_refinement_connection" ] @@ -67,6 +68,7 @@ __doc__ = """ .. autofunction:: make_face_to_all_faces_embedding .. autofunction:: make_opposite_face_connection +.. autofunction:: make_partition_connection .. 
autofunction:: make_refinement_connection diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 4558730c..bfed445c 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -393,82 +393,96 @@ def make_opposite_face_connection(volume_to_bdry_conn): # }}} -def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, i_src_part, i_src_grp, from_elem, from_face): +def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, + i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face): + """ + Creates a batch that transfers data to a face from a face of another partition. + + :arg queue: + :arg vol_to_bdry_conns: A list of :class:`Direct` for each partition. + :arg adj: :class:`InterPartitionAdj` of partition `i_tgt_part`. + :arg i_tgt_part: The target partition number. + :arg i_tgt_grp: + :arg i_tgt_elem: + :arg i_tgt_face: - (i_tgt_part, i_tgt_grp, bdry_elem, bdry_face) = adj.get_neighbor(from_elem, from_face) + :returns: ??? + """ - src_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr - tgt_bdry_discr = vol_to_bdry_conns[i_scr_part].to_discr + (i_src_part, i_src_grp, i_src_elem, i_src_face) =\ + adj.get_neighbor(i_tgt_elem, i_tgt_face) - to_bdry_nodes = ( + src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr + tgt_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr + + tgt_bdry_nodes = ( # FIXME: This should view-then-transfer (but PyOpenCL doesn't do # non-contiguous transfers for now). tgt_bdry_discr.groups[i_tgt_grp].view( tgt_bdry_discr.nodes().get(queue=queue)) - [:, to_bdry_element_indices]) + [:, i_tgt_elem]) - tol = 1e4 * np.finfo(to_bdry_nodes.dtype).eps + ambient_dim, nelements, n_tgt_unit_nodes = tgt_bdry_nodes.shape - # TODO: Should this use vol_discr? 
- from_mesh_grp = src_bdry_discr.mesh.groups[i_src_grp] - from_grp = src_bdry_discr.groups[i_src_grp] + # (ambient_dim, nelements, nfrom_unit_nodes) + src_bdry_nodes = ( + # FIXME: This should view-then-transfer (but PyOpenCL doesn't do + # non-contiguous transfers for now). + src_bdry_discr.groups[i_src_grp].view( + src_bdry_discr.nodes().get(queue=queue)) + [:, i_src_elem]) - dim = from_grp.dim - ambient_dim, nelements, nto_unit_nodes = to_bdry_nodes.shape + tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps - initial_guess = np.mean(from_mesh_grp.vertex_unit_coordinates(), axis=0) + src_mesh_grp = src_bdry_discr.mesh.groups[i_src_grp] + src_grp = src_bdry_discr.groups[i_src_grp] - from_unit_nodes = np.empty((dim, nelements, nto_unit_nodes)) - from_unit_nodes[:] = initial_guess.reshape(-1, 1, 1) + dim = src_grp.dim - import modepy as mp - from_vdm = mp.vandermonde(from_grp.basis(), from_grp.unit_nodes) - from_inv_t_vdm = la.inv(from_vdm.T) - from_nfuncs = len(from_grp.basis()) + initial_guess = np.mean(src_mesh_grp.vertex_unit_coordinates(), axis=0) - # (ambient_dim, nelements, nfrom_unit_nodes) - from_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). - # TODO: Should this be vol_discr? 
- bdry_discr.groups[i_src_grp].view( - src_bdry_discr.nodes().get(queue=queue)) - [:, from_bdry_element_indices]) + src_unit_nodes = np.empty((dim, nelements, n_tgt_unit_nodes)) + src_unit_nodes[:] = initial_guess.reshape(-1, 1, 1) + + import modepy as mp + src_vdm = mp.vandermonde(src_grp.basis(), src_grp.unit_nodes) + src_inv_t_vdm = la.inv(src_vdm.T) + src_nfuncs = len(src_grp.basis()) def apply_map(unit_nodes): # unit_nodes: (dim, nelements, nto_unit_nodes) # basis_at_unit_nodes - basis_at_unit_nodes = np.empty((from_nfuncs, nelements, nto_unit_nodes)) - for i, f in enumerate(from_grp.basis()): + basis_at_unit_nodes = np.empty((src_nfuncs, nelements, n_tgt_unit_nodes)) + for i, f in enumerate(src_grp.basis()): basis_at_unit_nodes[i] = ( f(unit_nodes.reshape(dim, -1)) - .reshape(nelements, nto_unit_nodes)) - intp_coeffs = np.einsum("fj,jet->fet", from_inv_t_vdm, basis_at_unit_nodes) + .reshape(nelements, n_tgt_unit_nodes)) + intp_coeffs = np.einsum("fj,jet->fet", src_inv_t_vdm, basis_at_unit_nodes) # If we're interpolating 1, we had better get 1 back. 
one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) assert (one_deviation < tol).all(), np.max(one_deviation) - return np.einsum("fet,aef->aet", intp_coeffs, from_bdry_nodes) + return np.einsum("fet,aef->aet", intp_coeffs, src_bdry_nodes) def get_map_jacobian(unit_nodes): # unit_nodes: (dim, nelements, nto_unit_nodes) # basis_at_unit_nodes dbasis_at_unit_nodes = np.empty( - (dim, from_nfuncs, nelements, nto_unit_nodes)) - for i, df in enumerate(from_grp.grad_basis()): + (dim, src_nfuncs, nelements, n_tgt_unit_nodes)) + for i, df in enumerate(src_grp.grad_basis()): df_result = df(unit_nodes.reshape(dim, -1)) for rst_axis, df_r in enumerate(df_result): dbasis_at_unit_nodes[rst_axis, i] = ( - df_r.reshape(nelements, nto_unit_nodes)) + df_r.reshape(nelements, n_tgt_unit_nodes)) dintp_coeffs = np.einsum( - "fj,rjet->rfet", from_inv_t_vdm, dbasis_at_unit_nodes) - return np.einsum("rfet,aef->raet", dintp_coeffs, from_bdry_nodes) + "fj,rjet->rfet", src_inv_t_vdm, dbasis_at_unit_nodes) + return np.einsum("rfet,aef->raet", dintp_coeffs, src_bdry_nodes) - logger.info("make_opposite_face_connection: begin gauss-newton") + logger.info("make_partition_connection: begin gauss-newton") niter = 0 while True: - resid = apply_map(from_unit_nodes) - to_bdry_nodes - df = get_map_jacobian(from_unit_nodes) - df_inv_resid = np.empty_like(from_unit_nodes) + resid = apply_map(src_unit_nodes) - tgt_bdry_nodes + df = get_map_jacobian(src_unit_nodes) + df_inv_resid = np.empty_like(src_unit_nodes) # For the 1D/2D accelerated versions, we'll use the normal # equations and Cramer's rule. If you're looking for high-end # numerics, look no further than meshmode. 
@@ -482,7 +496,7 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, i_src_part, i_src ata = np.einsum("iket,jket->ijet", df, df) atb = np.einsum("iket,ket->iet", df, resid) det = ata[0, 0]*ata[1, 1] - ata[0, 1]*ata[1, 0] - df_inv_resid = np.empty_like(from_unit_nodes) + df_inv_resid = np.empty_like(src_unit_nodes) df_inv_resid[0] = 1/det * (ata[1, 1] * atb[0] - ata[1, 0]*atb[1]) df_inv_resid[1] = 1/det * (-ata[0, 1] * atb[0] + ata[0, 0]*atb[1]) else: @@ -493,21 +507,21 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, i_src_part, i_src # But we'll only hit it for boundaries of 4+D meshes, in which # case... good luck. :) for e in range(nelements): - for t in range(nto_unit_nodes): + for t in range(n_tgt_unit_nodes): df_inv_resid[:, e, t], _, _, _ = \ la.lstsq(df[:, :, e, t].T, resid[:, e, t]) - from_unit_nodes = from_unit_nodes - df_inv_resid + src_unit_nodes = src_unit_nodes - df_inv_resid max_resid = np.max(np.abs(resid)) logger.debug("gauss-newton residual: %g" % max_resid) if max_resid < tol: - logger.info("make_opposite_face_connection: gauss-newton: done, " + logger.info("make_partition_connection: gauss-newton: done, " "final residual: %g" % max_resid) break niter += 1 if niter > 10: - raise RuntimeError("Gauss-Newton (for finding opposite-face reference " - "coordinates) did not converge") - + raise RuntimeError("Gauss-Newton (for finding partition_connection " + "reference coordinates) did not converge") + def to_dev(ary): return cl.array.to_device(queue, ary, array_queue=None) @@ -518,16 +532,16 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, i_src_part, i_src todo_elements, = np.where(~done_elements) if not len(todo_elements): return - template_unit_nodes = from_unit_nodes[:, todo_elements[0], :] + template_unit_nodes = src_unit_nodes[:, todo_elements[0], :] unit_node_dist = np.max(np.max(np.abs( - from_unit_nodes[:, todo_elements, :] + src_unit_nodes[:, todo_elements, :] - template_unit_nodes.reshape(dim, 
1, -1)), axis=2), axis=0) close_els = todo_elements[unit_node_dist < tol] done_elements[close_els] = True unit_node_dist = np.max(np.max(np.abs( - from_unit_nodes[:, todo_elements, :] + src_unit_nodes[:, todo_elements, :] - template_unit_nodes.reshape(dim, 1, -1)), axis=2), axis=0) @@ -547,15 +561,17 @@ def make_partition_connection(vol_to_bdry_conns): return a :class:`DirectDiscretizationConnection` that performs data exchange across adjacent faces of different partitions. - :arg :vol_to_bdry_conns A list of *volume_to_bdry_conn* corresponding to + :arg vol_to_bdry_conns: A list of *volume_to_bdry_conn* corresponding to a partition of a parent mesh. + + :returns: A list of :class:`DirectDiscretizationConnection` corresponding to + each partition. """ disc_conns = [] - nparts = len(vol_to_bdry_conns) from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - + # My intuition tells me that this should not live inside a for loop. # However, I need to grab a cl_context. I'll assume that each context from # each partition is the same and I'll use the first one. @@ -563,33 +579,34 @@ def make_partition_connection(vol_to_bdry_conns): with cl.CommandQueue(cl_context) as queue: # Create a list of batches. Each batch contains interpolation # data from one partition to another. 
- for i_src_part, src_vol_conn in enumerate(vol_to_bdry_conns): - src_mesh = src_from_discr.mesh - ngroups = len(src_mesh.groups) + for i_tgt_part, tgt_vol_conn in enumerate(vol_to_bdry_conns): + bdry_discr = tgt_vol_conn.to_discr + tgt_mesh = tgt_vol_conn.to_discr.mesh + ngroups = len(tgt_mesh.groups) part_batches = [[] for _ in range(ngroups)] - for group_num, adj in enumerate(src_mesh.interpart_adj_groups): - for elem_idx, elem in enumerate(adj.elements): - face = adj.element_faces[elem_idx] + for tgt_group_num, adj in enumerate(tgt_mesh.interpart_adj_groups): + for idx, tgt_elem in enumerate(adj.elements): + tgt_face = adj.element_faces[idx] - # We need to create a batch using the + # We need to create a batch using the # neighboring face, element, and group # I'm not sure how I would do this. # My guess is that it would look # something like _make_cross_face_batches - part_batches[group_num].append( + part_batches[tgt_group_num].append( _make_cross_partition_batch( queue, vol_to_bdry_conns, adj, - i_src_part, - group_num, - elem, - face)) + i_tgt_part, + tgt_group_num, + tgt_elem, + tgt_face)) # Make one Discr connection for each partition. 
disc_conns.append(DirectDiscretizationConnection( - from_discr=src_from_discr, - to_discr=src_to_discr, + from_discr=bdry_discr, + to_discr=bdry_discr, groups=[ DiscretizationConnectionElementGroup(batches=batches) for batches in part_batches], diff --git a/test/test_meshmode.py b/test/test_meshmode.py index d59186ac..35be627e 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -51,7 +51,6 @@ logger = logging.getLogger(__name__) def test_partition_interpolation(ctx_getter): cl_ctx = ctx_getter() - queue = cl.CommandQueue(cl_ctx) order = 4 group_factory = PolynomialWarpAndBlendGroupFactory(order) n = 3 @@ -76,11 +75,11 @@ def test_partition_interpolation(ctx_getter): partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] from meshmode.discretization import Discretization - vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) + vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) for i in range(num_parts)] from meshmode.discretization.connection import make_face_restriction - bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, + bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, FRESTR_INTERIOR_FACES) for i in range(num_parts)] from meshmode.discretization.connection import make_partition_connection -- GitLab From 31731106a1a6bd3779f26dc1189ab187867ebfe7 Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 24 Mar 2017 00:57:52 -0500 Subject: [PATCH 152/266] partition_mesh passes tests involving multiple groups --- meshmode/mesh/__init__.py | 39 ++++++++++++++++++---------- meshmode/mesh/processing.py | 51 +++++++++++++++++-------------------- test/test_meshmode.py | 40 ++++++++++++++++------------- 3 files changed, 73 insertions(+), 57 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 14b6896f..634b6ee1 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -428,6 +428,9 @@ class InterPartitionAdj(): 
``element_faces[i]`` is the face of ``elements[i]`` that has a neighbor. + .. attribute:: part_indices + ``part_indices[i]`` gives the partition index of the neighboring face. + .. attribute:: neighbors ``neighbors[i]`` gives the element number within the neighboring partiton @@ -449,38 +452,37 @@ class InterPartitionAdj(): self.element_faces = [] self.neighbors = [] self.neighbor_faces = [] - self.neighbor_groups = [] self.part_indices = [] - def add_connection(self, elem, face, part_idx, neighbor_group, neighbor_elem, neighbor_face): + def add_connection(self, elem, face, part_idx, neighbor_elem, neighbor_face): """ Adds a connection from ``elem`` and ``face`` within :class:`Mesh` to - ``neighbor_elem`` and ``neighbor_face`` of another neighboring partion - of type :class:`Mesh`. - :arg elem - :arg face - :arg part_idx - :arg neighbor_elem - :arg neighbor_face + ``neighbor_elem`` and ``neighbor_face`` of the neighboring partion + of type :class:`Mesh` given by `part_idx`. + :arg elem: + :arg face: + :arg part_idx: + :arg neighbor_elem: + :arg neighbor_face: """ self.elements.append(elem) self.element_faces.append(face) self.part_indices.append(part_idx) self.neighbors.append(neighbor_elem) - self.neighbor_groups.append(neighbor_group) self.neighbor_faces.append(neighbor_face) def get_neighbor(self, elem, face): """ :arg elem :arg face - :returns: A tuple ``(part_idx, neighbor_group, neighbor_elem, neighbor_face)`` of + :returns: A tuple ``(part_idx, neighbor_elem, neighbor_face)`` of neighboring elements within another :class:`Mesh`. 
""" for idx in range(len(self.elements)): if elem == self.elements[idx] and face == self.element_faces[idx]: - return (self.part_indices[idx], self.neighbor_groups[idx], - self.neighbors[idx], self.neighbor_faces[idx]) + return (self.part_indices[idx], + self.neighbors[idx], + self.neighbor_faces[idx]) raise RuntimeError("This face does not have a neighbor") # }}} @@ -855,6 +857,17 @@ class Mesh(Record): def __ne__(self, other): return not self.__eq__(other) + def find_igrp(self, elem): + """ + :arg elem: An element of the mesh. + :returns: The index of the group that `elem` belongs to. + """ + for igrp, grp in enumerate(self.groups): + if elem < grp.nelements: + return igrp + elem -= grp.nelements + raise RuntimeError("Could not find group with element ", elem) + # Design experience: Try not to add too many global data structures to the # mesh. Let the element groups be responsible for that at the mesh level. # diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 3612bd47..4c30edea 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -139,64 +139,61 @@ def partition_mesh(mesh, part_per_element, part_num): new_nodes[group_num], unit_nodes=mesh_group.unit_nodes)) from meshmode.mesh import BTAG_ALL, BTAG_PARTITION - boundary_tags = [BTAG_PARTITION(n) for n in range(np.max(part_per_element))] + boundary_tags = [BTAG_PARTITION(n) for n in np.unique(part_per_element)] from meshmode.mesh import Mesh part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None, boundary_tags=boundary_tags) - # FIXME I get errors when I try to copy part_mesh. 
from meshmode.mesh import InterPartitionAdj - part_mesh.interpart_adj_groups = [ - InterPartitionAdj() for _ in range(num_groups)] + interpart_grps = [InterPartitionAdj() for _ in range(len(part_mesh.groups))] - for igrp in range(num_groups): - elem_base = part_mesh.groups[igrp].element_nr_base + for igrp, grp in enumerate(part_mesh.groups): + elem_base = grp.element_nr_base boundary_adj = part_mesh.facial_adjacency_groups[igrp][None] boundary_elems = boundary_adj.elements boundary_faces = boundary_adj.element_faces - for elem_idx in range(len(boundary_elems)): - elem = boundary_elems[elem_idx] - face = boundary_faces[elem_idx] - tags = -boundary_adj.neighbors[elem_idx] + for adj_idx, elem in enumerate(boundary_elems): + face = boundary_faces[adj_idx] + tags = -boundary_adj.neighbors[adj_idx] assert tags >= 0, "Expected boundary tag in adjacency group." - parent_elem = queried_elems[elem] - parent_group_num = 0 - while parent_elem >= mesh.groups[parent_group_num].nelements: - parent_elem -= mesh.groups[parent_group_num].nelements - parent_group_num += 1 - assert parent_group_num < num_groups, "Unable to find neighbor." 
- parent_grp_elem_base = mesh.groups[parent_group_num].element_nr_base - parent_adj = mesh.facial_adjacency_groups[parent_group_num] - for n_grp_num, parent_facial_group in parent_adj.items(): + + parent_igrp = mesh.find_igrp(queried_elems[elem + elem_base]) + parent_elem_base = mesh.groups[parent_igrp].element_nr_base + parent_elem = queried_elems[elem + elem_base] - parent_elem_base + + parent_adj = mesh.facial_adjacency_groups[parent_igrp] + + for parent_facial_group in parent_adj.values(): for idx in np.where(parent_facial_group.elements == parent_elem)[0]: if parent_facial_group.neighbors[idx] >= 0 and \ - parent_facial_group.element_faces[idx] == face: + parent_facial_group.element_faces[idx] == face: rank_neighbor = (parent_facial_group.neighbors[idx] - + parent_grp_elem_base) + + parent_elem_base) rank_neighbor_face = parent_facial_group.neighbor_faces[idx] n_part_num = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) tags = tags | part_mesh.boundary_tag_bit( BTAG_PARTITION(n_part_num)) - boundary_adj.neighbors[elem_idx] = -tags + boundary_adj.neighbors[adj_idx] = -tags # Find the neighbor element from the other partition n_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) - # TODO Test if this works with multiple groups - # Do I need to add the element number base? 
- part_mesh.interpart_adj_groups[igrp].add_connection( + interpart_grps[igrp].add_connection( elem + elem_base, face, n_part_num, - n_grp_num, n_elem, rank_neighbor_face) - return (part_mesh, queried_elems) + mesh = part_mesh.copy() + mesh.interpart_adj_groups = interpart_grps + return mesh, queried_elems + +# }}} # {{{ orientations diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 35be627e..0f5f196d 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -96,12 +96,12 @@ def test_partition_mesh(): n = 5 num_parts = 7 from meshmode.mesh.generation import generate_regular_rect_mesh - mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) - #TODO facial_adjacency_groups is not available from merge_disjoint_meshes. - #mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) + mesh1 = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) + mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) + mesh3 = generate_regular_rect_mesh(a=(1, 2, 2), b=(2, 3, 3), n=(n, n, n)) - #from meshmode.mesh.processing import merge_disjoint_meshes - #mesh = merge_disjoint_meshes([mesh1, mesh2]) + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes([mesh1, mesh2, mesh3]) adjacency_list = np.zeros((mesh.nelements,), dtype=set) for elem in range(mesh.nelements): @@ -130,33 +130,39 @@ def test_partition_mesh(): num_tags = np.zeros((num_parts,)) for part_num in range(num_parts): - (part, part_to_global) = new_meshes[part_num] + part, part_to_global = new_meshes[part_num] for grp_num, f_groups in enumerate(part.facial_adjacency_groups): f_grp = f_groups[None] for idx in range(len(f_grp.elements)): tag = -f_grp.neighbors[idx] assert tag >= 0 - elem = f_grp.elements[idx] + elem = f_grp.elements[idx] + part.groups[grp_num].element_nr_base face = f_grp.element_faces[idx] for n_part_num in range(num_parts): - (n_part, n_part_to_global) = new_meshes[n_part_num] + n_part, 
n_part_to_global = new_meshes[n_part_num] if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: num_tags[n_part_num] += 1 - (n_part_idx, n_grp_num, n_elem, n_face) = part.\ + (i, n_elem, n_face) = part.\ interpart_adj_groups[grp_num].get_neighbor(elem, face) - assert n_part_idx == n_part_num - assert (part_num, grp_num, elem, face) == n_part.\ + assert i == n_part_num + n_grp_num = n_part.find_igrp(n_elem) + assert (part_num, elem, face) == n_part.\ interpart_adj_groups[n_grp_num].\ get_neighbor(n_elem, n_face),\ "InterpartitionAdj is not consistent" - p_elem = part_to_global[elem] + n_part_to_global = new_meshes[n_part_num][1] + p_elem = part_to_global[elem] p_n_elem = n_part_to_global[n_elem] - p_grp_num = 0 - while p_elem >= mesh.groups[p_grp_num].nelements: - p_elem -= mesh.groups[p_grp_num].nelements - p_grp_num += 1 - #p_elem_base = mesh.groups[p_grp_num].element_num_base + + p_grp_num = mesh.find_igrp(p_elem) + p_n_grp_num = mesh.find_igrp(p_n_elem) + + p_elem_base = mesh.groups[p_grp_num].element_nr_base + p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base + p_elem -= p_elem_base + p_n_elem -= p_n_elem_base + f_groups = mesh.facial_adjacency_groups[p_grp_num] for _, p_bnd_adj in f_groups.items(): for idx in range(len(p_bnd_adj.elements)): -- GitLab From d4e5324f19883808f38091233f0ccb062b7113cb Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 24 Mar 2017 09:55:53 -0500 Subject: [PATCH 153/266] Cleanup code --- meshmode/mesh/__init__.py | 9 +++++++-- meshmode/mesh/processing.py | 25 +++++++++++++------------ test/test_meshmode.py | 30 ++++++++++++++++++------------ 3 files changed, 38 insertions(+), 26 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 634b6ee1..c73e5dd2 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -434,7 +434,10 @@ class InterPartitionAdj(): .. 
attribute:: neighbors ``neighbors[i]`` gives the element number within the neighboring partiton - of the element connected to ``elements[i]``. + of the element connected to ``elements[i]``. This gives a mesh-wide element + numbering. Use ``Mesh.find_igrp()`` to find the group that the element + belongs to, then subtract ``element_nr_base`` to find the element of the + group. .. attribute:: neighbor_faces @@ -477,13 +480,15 @@ class InterPartitionAdj(): :arg face :returns: A tuple ``(part_idx, neighbor_elem, neighbor_face)`` of neighboring elements within another :class:`Mesh`. + Or (-1, -1, -1) if the face does not have a neighbor. """ for idx in range(len(self.elements)): if elem == self.elements[idx] and face == self.element_faces[idx]: return (self.part_indices[idx], self.neighbors[idx], self.neighbor_faces[idx]) - raise RuntimeError("This face does not have a neighbor") + #raise RuntimeError("This face does not have a neighbor") + return (-1, -1, -1) # }}} diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 4c30edea..3c9b15e9 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -146,7 +146,7 @@ def partition_mesh(mesh, part_per_element, part_num): facial_adjacency_groups=None, boundary_tags=boundary_tags) from meshmode.mesh import InterPartitionAdj - interpart_grps = [InterPartitionAdj() for _ in range(len(part_mesh.groups))] + adj_grps = [InterPartitionAdj() for _ in range(len(part_mesh.groups))] for igrp, grp in enumerate(part_mesh.groups): elem_base = grp.element_nr_base @@ -158,9 +158,10 @@ def partition_mesh(mesh, part_per_element, part_num): tags = -boundary_adj.neighbors[adj_idx] assert tags >= 0, "Expected boundary tag in adjacency group." 
- parent_igrp = mesh.find_igrp(queried_elems[elem + elem_base]) + p_meshwide_elem = queried_elems[elem + elem_base] + parent_igrp = mesh.find_igrp(p_meshwide_elem) parent_elem_base = mesh.groups[parent_igrp].element_nr_base - parent_elem = queried_elems[elem + elem_base] - parent_elem_base + parent_elem = p_meshwide_elem - parent_elem_base parent_adj = mesh.facial_adjacency_groups[parent_igrp] @@ -182,16 +183,16 @@ def partition_mesh(mesh, part_per_element, part_num): n_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) - interpart_grps[igrp].add_connection( - elem + elem_base, - face, - n_part_num, - n_elem, - rank_neighbor_face) + adj_grps[igrp].add_connection( + elem, + face, + n_part_num, + n_elem, + rank_neighbor_face) - mesh = part_mesh.copy() - mesh.interpart_adj_groups = interpart_grps - return mesh, queried_elems + connected_mesh = part_mesh.copy() + connected_mesh.interpart_adj_groups = adj_grps + return connected_mesh, queried_elems # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 0f5f196d..37502bfa 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -95,10 +95,13 @@ def test_partition_interpolation(ctx_getter): def test_partition_mesh(): n = 5 num_parts = 7 - from meshmode.mesh.generation import generate_regular_rect_mesh + order = 4 + dim = 3 + from meshmode.mesh.generation import (generate_regular_rect_mesh, + generate_warped_rect_mesh) mesh1 = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) - mesh3 = generate_regular_rect_mesh(a=(1, 2, 2), b=(2, 3, 3), n=(n, n, n)) + mesh3 = generate_warped_rect_mesh(dim, order=order, n=n) from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes([mesh1, mesh2, mesh3]) @@ -132,28 +135,31 @@ def test_partition_mesh(): for part_num in range(num_parts): part, part_to_global = new_meshes[part_num] for grp_num, f_groups in 
enumerate(part.facial_adjacency_groups): + adj = part.interpart_adj_groups[grp_num] f_grp = f_groups[None] - for idx in range(len(f_grp.elements)): + elem_base = part.groups[grp_num].element_nr_base + for idx, elem in enumerate(f_grp.elements): tag = -f_grp.neighbors[idx] assert tag >= 0 - elem = f_grp.elements[idx] + part.groups[grp_num].element_nr_base face = f_grp.element_faces[idx] for n_part_num in range(num_parts): n_part, n_part_to_global = new_meshes[n_part_num] if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: num_tags[n_part_num] += 1 - (i, n_elem, n_face) = part.\ - interpart_adj_groups[grp_num].get_neighbor(elem, face) + + (i, n_elem, n_face) = adj.get_neighbor(elem, face) assert i == n_part_num n_grp_num = n_part.find_igrp(n_elem) - assert (part_num, elem, face) == n_part.\ - interpart_adj_groups[n_grp_num].\ - get_neighbor(n_elem, n_face),\ + n_adj = n_part.interpart_adj_groups[n_grp_num] + n_elem_base = n_part.groups[n_grp_num].element_nr_base + n_elem = n_elem - n_elem_base + assert (part_num, elem + elem_base, face) ==\ + n_adj.get_neighbor(n_elem, n_face),\ "InterpartitionAdj is not consistent" n_part_to_global = new_meshes[n_part_num][1] - p_elem = part_to_global[elem] - p_n_elem = n_part_to_global[n_elem] + p_elem = part_to_global[elem + elem_base] + p_n_elem = n_part_to_global[n_elem + n_elem_base] p_grp_num = mesh.find_igrp(p_elem) p_n_grp_num = mesh.find_igrp(p_n_elem) @@ -164,7 +170,7 @@ def test_partition_mesh(): p_n_elem -= p_n_elem_base f_groups = mesh.facial_adjacency_groups[p_grp_num] - for _, p_bnd_adj in f_groups.items(): + for p_bnd_adj in f_groups.values(): for idx in range(len(p_bnd_adj.elements)): if (p_elem == p_bnd_adj.elements[idx] and face == p_bnd_adj.element_faces[idx]): -- GitLab From 40254532aa9a577d5bf8d344b8e0cd11cc354033 Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 24 Mar 2017 09:59:25 -0500 Subject: [PATCH 154/266] Comments --- meshmode/mesh/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 
deletion(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index c73e5dd2..5d3dc354 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -636,6 +636,7 @@ class Mesh(Record): .. automethod:: __eq__ .. automethod:: __ne__ + .. automethod:: find_igrp """ face_id_dtype = np.int8 @@ -864,7 +865,7 @@ class Mesh(Record): def find_igrp(self, elem): """ - :arg elem: An element of the mesh. + :arg elem: A mesh-wise element. Think of it as ``elem + element_nr_base``. :returns: The index of the group that `elem` belongs to. """ for igrp, grp in enumerate(self.groups): -- GitLab From 6c8efc3f8ff5998eabb4e75f612b64a49db71d0d Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 24 Mar 2017 10:30:23 -0500 Subject: [PATCH 155/266] Temporarily pass interpart_adj to make_partition_connection --- .../connection/opposite_face.py | 34 ++++++++++--------- test/test_meshmode.py | 15 ++++++-- 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index bfed445c..925bfe0d 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -393,6 +393,8 @@ def make_opposite_face_connection(volume_to_bdry_conn): # }}} +# {{{ partition_connection + def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face): """ @@ -555,7 +557,7 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, to_element_face=None) -def make_partition_connection(vol_to_bdry_conns): +def make_partition_connection(vol_to_bdry_conns, adj_parts): """ Given a list of boundary restriction connections *volume_to_bdry_conn*, return a :class:`DirectDiscretizationConnection` that performs data @@ -572,27 +574,24 @@ def make_partition_connection(vol_to_bdry_conns): from meshmode.discretization.connection import ( DirectDiscretizationConnection, 
DiscretizationConnectionElementGroup) - # My intuition tells me that this should not live inside a for loop. - # However, I need to grab a cl_context. I'll assume that each context from - # each partition is the same and I'll use the first one. - cl_context = vol_to_bdry_conns[0].from_discr.cl_context - with cl.CommandQueue(cl_context) as queue: - # Create a list of batches. Each batch contains interpolation - # data from one partition to another. - for i_tgt_part, tgt_vol_conn in enumerate(vol_to_bdry_conns): + # Create a list of batches. Each batch contains interpolation + # data from one partition to another. + for i_tgt_part, tgt_vol_conn in enumerate(vol_to_bdry_conns): + + # Is this ok in a for loop? + cl_context = tgt_vol_conn.from_discr.cl_context + with cl.CommandQueue(cl_context) as queue: + bdry_discr = tgt_vol_conn.to_discr - tgt_mesh = tgt_vol_conn.to_discr.mesh + tgt_mesh = bdry_discr.mesh ngroups = len(tgt_mesh.groups) part_batches = [[] for _ in range(ngroups)] - for tgt_group_num, adj in enumerate(tgt_mesh.interpart_adj_groups): + # Hack, I need to get InterPartitionAdj so I'll receive it directly + # as an argument. + for tgt_group_num, adj in enumerate(adj_parts[i_tgt_part]): for idx, tgt_elem in enumerate(adj.elements): tgt_face = adj.element_faces[idx] - # We need to create a batch using the - # neighboring face, element, and group - # I'm not sure how I would do this. 
- # My guess is that it would look - # something like _make_cross_face_batches part_batches[tgt_group_num].append( _make_cross_partition_batch( queue, @@ -614,4 +613,7 @@ def make_partition_connection(vol_to_bdry_conns): return disc_conns +# }}} + + # vim: foldmethod=marker diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 37502bfa..a49c5adc 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -49,6 +49,8 @@ import logging logger = logging.getLogger(__name__) +# {{{ partition_interpolation + def test_partition_interpolation(ctx_getter): cl_ctx = ctx_getter() order = 4 @@ -57,7 +59,11 @@ def test_partition_interpolation(ctx_getter): dim = 2 num_parts = 7 from meshmode.mesh.generation import generate_warped_rect_mesh - mesh = generate_warped_rect_mesh(dim, order=order, n=n) + mesh1 = generate_warped_rect_mesh(dim, order=order, n=n) + mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes([mesh1, mesh2]) adjacency_list = np.zeros((mesh.nelements,), dtype=set) for elem in range(mesh.nelements): @@ -74,6 +80,9 @@ def test_partition_interpolation(ctx_getter): part_meshes = [ partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] + # Hack, I get InterPartitionAdj here instead of from vol_discrs. 
+ adj_parts = [part_meshes[i].interpart_adj_groups for i in range(num_parts)] + from meshmode.discretization import Discretization vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) for i in range(num_parts)] @@ -83,12 +92,14 @@ def test_partition_interpolation(ctx_getter): FRESTR_INTERIOR_FACES) for i in range(num_parts)] from meshmode.discretization.connection import make_partition_connection - connections = make_partition_connection(bdry_connections) + connections = make_partition_connection(bdry_connections, adj_parts) from meshmode.discretization.connection import check_connection for conn in connections: check_connection(conn) +# }}} + # {{{ partition_mesh -- GitLab From 5e7860bd0f16f53c0f5ce3aeb9d21a83c653131c Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 25 Mar 2017 13:00:04 -0500 Subject: [PATCH 156/266] Comments and better errors --- meshmode/mesh/__init__.py | 2 +- meshmode/mesh/processing.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 5d3dc354..d6b4e8a7 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -872,7 +872,7 @@ class Mesh(Record): if elem < grp.nelements: return igrp elem -= grp.nelements - raise RuntimeError("Could not find group with element ", elem) + raise RuntimeError("Could not find group with element %d" % elem) # Design experience: Try not to add too many global data structures to the # mesh. Let the element groups be responsible for that at the mesh level. diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 3c9b15e9..b81702d6 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -183,6 +183,8 @@ def partition_mesh(mesh, part_per_element, part_num): n_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) + # I cannot compute the group because the other + # partitions have not been built yet. 
adj_grps[igrp].add_connection( elem, face, -- GitLab From 5de3149b59c8acdc0f73ba5ada6b9570554e65cf Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 25 Mar 2017 13:00:51 -0500 Subject: [PATCH 157/266] Slight progress on make_partition_connection --- .../connection/opposite_face.py | 42 ++++++++++--------- test/test_meshmode.py | 8 ++-- 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 925bfe0d..85696ed0 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -395,8 +395,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, - i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face): +def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, i_src_elem, i_tgt_part, i_tgt_grp, i_tgt_elem): """ Creates a batch that transfers data to a face from a face of another partition. @@ -411,9 +410,6 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, :returns: ??? 
""" - (i_src_part, i_src_grp, i_src_elem, i_src_face) =\ - adj.get_neighbor(i_tgt_elem, i_tgt_face) - src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr tgt_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr @@ -424,7 +420,8 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, tgt_bdry_discr.nodes().get(queue=queue)) [:, i_tgt_elem]) - ambient_dim, nelements, n_tgt_unit_nodes = tgt_bdry_nodes.shape + ambient_dim, n_tgt_unit_nodes = tgt_bdry_nodes.shape + nelements = 1 # (ambient_dim, nelements, nfrom_unit_nodes) src_bdry_nodes = ( @@ -557,7 +554,7 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, adj, to_element_face=None) -def make_partition_connection(vol_to_bdry_conns, adj_parts): +def make_partition_connection(vol_to_bdry_conns, part_meshes): """ Given a list of boundary restriction connections *volume_to_bdry_conn*, return a :class:`DirectDiscretizationConnection` that performs data @@ -583,24 +580,31 @@ def make_partition_connection(vol_to_bdry_conns, adj_parts): with cl.CommandQueue(cl_context) as queue: bdry_discr = tgt_vol_conn.to_discr - tgt_mesh = bdry_discr.mesh + #tgt_mesh = bdry_discr.mesh + tgt_mesh = part_meshes[i_tgt_part] ngroups = len(tgt_mesh.groups) part_batches = [[] for _ in range(ngroups)] - # Hack, I need to get InterPartitionAdj so I'll receive it directly - # as an argument. 
- for tgt_group_num, adj in enumerate(adj_parts[i_tgt_part]): - for idx, tgt_elem in enumerate(adj.elements): - tgt_face = adj.element_faces[idx] - - part_batches[tgt_group_num].append( + for i_tgt_grp, adj in enumerate(tgt_mesh.interpart_adj_groups): + for idx, i_tgt_elem in enumerate(adj.elements): + i_tgt_face = adj.element_faces[idx] + i_src_part = adj.part_indices[idx] + i_src_elem = adj.neighbors[idx] + i_src_face = adj.neighbor_faces[idx] + #src_mesh = vol_to_bdry_conns[i_src_part].to_discr.mesh + src_mesh = part_meshes[i_src_part] + i_src_grp = src_mesh.find_igrp(i_src_elem) + i_src_elem -= src_mesh.groups[i_src_grp].element_nr_base + + part_batches[i_tgt_grp].extend( _make_cross_partition_batch( queue, vol_to_bdry_conns, - adj, + i_src_part, + i_src_grp, + i_src_elem, i_tgt_part, - tgt_group_num, - tgt_elem, - tgt_face)) + i_tgt_grp, + i_tgt_elem)) # Make one Discr connection for each partition. disc_conns.append(DirectDiscretizationConnection( diff --git a/test/test_meshmode.py b/test/test_meshmode.py index a49c5adc..9edeb908 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -80,9 +80,6 @@ def test_partition_interpolation(ctx_getter): part_meshes = [ partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] - # Hack, I get InterPartitionAdj here instead of from vol_discrs. - adj_parts = [part_meshes[i].interpart_adj_groups for i in range(num_parts)] - from meshmode.discretization import Discretization vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) for i in range(num_parts)] @@ -91,11 +88,14 @@ def test_partition_interpolation(ctx_getter): bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, FRESTR_INTERIOR_FACES) for i in range(num_parts)] + # Hack, I probably shouldn't pass part_meshes directly. This is probably + # temporary. 
from meshmode.discretization.connection import make_partition_connection - connections = make_partition_connection(bdry_connections, adj_parts) + connections = make_partition_connection(bdry_connections, part_meshes) from meshmode.discretization.connection import check_connection for conn in connections: + print(conn) check_connection(conn) # }}} -- GitLab From ff04eeb9cdc82b8d85c875381f356574f538477e Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 25 Mar 2017 16:00:57 -0500 Subject: [PATCH 158/266] No proggress, intermediate commit --- .../discretization/connection/opposite_face.py | 18 ++++++++++++++---- test/test_meshmode.py | 2 +- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 85696ed0..c7ea09c0 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -418,7 +418,7 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, # non-contiguous transfers for now). tgt_bdry_discr.groups[i_tgt_grp].view( tgt_bdry_discr.nodes().get(queue=queue)) - [:, i_tgt_elem]) + [:, i_tgt_elem, :]) ambient_dim, n_tgt_unit_nodes = tgt_bdry_nodes.shape nelements = 1 @@ -429,7 +429,7 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, # non-contiguous transfers for now). src_bdry_discr.groups[i_src_grp].view( src_bdry_discr.nodes().get(queue=queue)) - [:, i_src_elem]) + ) tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps @@ -575,7 +575,7 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): # data from one partition to another. for i_tgt_part, tgt_vol_conn in enumerate(vol_to_bdry_conns): - # Is this ok in a for loop? + # Is this ok in a loop? 
cl_context = tgt_vol_conn.from_discr.cl_context with cl.CommandQueue(cl_context) as queue: @@ -583,8 +583,16 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): #tgt_mesh = bdry_discr.mesh tgt_mesh = part_meshes[i_tgt_part] ngroups = len(tgt_mesh.groups) - part_batches = [[] for _ in range(ngroups)] + #part_batches = [[] for _ in range(ngroups)] + part_batches = [] for i_tgt_grp, adj in enumerate(tgt_mesh.interpart_adj_groups): + part_batches.append(_make_cross_partition_batches( + queue, + vol_to_bdry_conns, + adj, + tgt_mesh, + i_tgt_grp)) + ''' for idx, i_tgt_elem in enumerate(adj.elements): i_tgt_face = adj.element_faces[idx] i_src_part = adj.part_indices[idx] @@ -605,9 +613,11 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): i_tgt_part, i_tgt_grp, i_tgt_elem)) + ''' # Make one Discr connection for each partition. disc_conns.append(DirectDiscretizationConnection( + # Is this ok? from_discr=bdry_discr, to_discr=bdry_discr, groups=[ diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 9edeb908..302801e4 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -163,7 +163,7 @@ def test_partition_mesh(): n_grp_num = n_part.find_igrp(n_elem) n_adj = n_part.interpart_adj_groups[n_grp_num] n_elem_base = n_part.groups[n_grp_num].element_nr_base - n_elem = n_elem - n_elem_base + n_elem -= n_elem_base assert (part_num, elem + elem_base, face) ==\ n_adj.get_neighbor(n_elem, n_face),\ "InterpartitionAdj is not consistent" -- GitLab From 04e5f37c53024ddfb926dc383addb92c699f8342 Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 25 Mar 2017 16:27:55 -0500 Subject: [PATCH 159/266] interpart_adj_groups is not a list of maps from partition numbers to InterPartitionAdj --- meshmode/mesh/__init__.py | 42 +++++++++++++------------------------ meshmode/mesh/processing.py | 19 +++++++++-------- test/test_meshmode.py | 12 +++++------ 3 files changed, 29 insertions(+), 44 deletions(-) diff --git a/meshmode/mesh/__init__.py 
b/meshmode/mesh/__init__.py index d6b4e8a7..791bc271 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -428,9 +428,6 @@ class InterPartitionAdj(): ``element_faces[i]`` is the face of ``elements[i]`` that has a neighbor. - .. attribute:: part_indices - ``part_indices[i]`` gives the partition index of the neighboring face. - .. attribute:: neighbors ``neighbors[i]`` gives the element number within the neighboring partiton @@ -444,7 +441,6 @@ class InterPartitionAdj(): ``neighbor_faces[i]`` gives face index within the neighboring partition of the face connected to ``elements[i]`` - .. automethod:: add_connection .. automethod:: get_neighbor .. versionadded:: 2017.1 @@ -455,40 +451,21 @@ class InterPartitionAdj(): self.element_faces = [] self.neighbors = [] self.neighbor_faces = [] - self.part_indices = [] - - def add_connection(self, elem, face, part_idx, neighbor_elem, neighbor_face): - """ - Adds a connection from ``elem`` and ``face`` within :class:`Mesh` to - ``neighbor_elem`` and ``neighbor_face`` of the neighboring partion - of type :class:`Mesh` given by `part_idx`. - :arg elem: - :arg face: - :arg part_idx: - :arg neighbor_elem: - :arg neighbor_face: - """ - self.elements.append(elem) - self.element_faces.append(face) - self.part_indices.append(part_idx) - self.neighbors.append(neighbor_elem) - self.neighbor_faces.append(neighbor_face) def get_neighbor(self, elem, face): """ :arg elem :arg face - :returns: A tuple ``(part_idx, neighbor_elem, neighbor_face)`` of + :returns: A tuple ``(neighbor_elem, neighbor_face)`` of neighboring elements within another :class:`Mesh`. - Or (-1, -1, -1) if the face does not have a neighbor. + Or (-1, -1) if the face does not have a neighbor. 
""" for idx in range(len(self.elements)): if elem == self.elements[idx] and face == self.element_faces[idx]: - return (self.part_indices[idx], - self.neighbors[idx], + return (self.neighbors[idx], self.neighbor_faces[idx]) #raise RuntimeError("This face does not have a neighbor") - return (-1, -1, -1) + return (-1, -1) # }}} @@ -620,6 +597,15 @@ class Mesh(Record): (Note that element groups are not necessarily contiguous like the figure may suggest.) + .. attribute:: interpart_adj_groups + + A list of mappings from neighbor partition numbers to instances of + :class:`InterPartitionAdj`. + + ``interpart_adj_gorups[igrp][ineighbor_part]`` gives + the set of facial adjacency relations between group *igrp* + and partition *ineighbor_part*. + .. attribute:: boundary_tags A tuple of boundary tag identifiers. :class:`BTAG_ALL` and @@ -872,7 +858,7 @@ class Mesh(Record): if elem < grp.nelements: return igrp elem -= grp.nelements - raise RuntimeError("Could not find group with element %d" % elem) + raise RuntimeError("Could not find group with element %d." % elem) # Design experience: Try not to add too many global data structures to the # mesh. Let the element groups be responsible for that at the mesh level. 
diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index b81702d6..d80fb2ed 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -146,7 +146,7 @@ def partition_mesh(mesh, part_per_element, part_num): facial_adjacency_groups=None, boundary_tags=boundary_tags) from meshmode.mesh import InterPartitionAdj - adj_grps = [InterPartitionAdj() for _ in range(len(part_mesh.groups))] + adj_grps = [{} for _ in range(len(part_mesh.groups))] for igrp, grp in enumerate(part_mesh.groups): elem_base = grp.element_nr_base @@ -171,7 +171,7 @@ def partition_mesh(mesh, part_per_element, part_num): parent_facial_group.element_faces[idx] == face: rank_neighbor = (parent_facial_group.neighbors[idx] + parent_elem_base) - rank_neighbor_face = parent_facial_group.neighbor_faces[idx] + n_face = parent_facial_group.neighbor_faces[idx] n_part_num = part_per_element[rank_neighbor] tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) @@ -183,14 +183,15 @@ def partition_mesh(mesh, part_per_element, part_num): n_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) + if n_part_num not in adj_grps[igrp]: + adj_grps[igrp][n_part_num] = InterPartitionAdj() + # I cannot compute the group because the other - # partitions have not been built yet. - adj_grps[igrp].add_connection( - elem, - face, - n_part_num, - n_elem, - rank_neighbor_face) + # partitions may not have been built yet. 
+ adj_grps[igrp][n_part_num].elements.append(elem) + adj_grps[igrp][n_part_num].element_faces.append(face) + adj_grps[igrp][n_part_num].neighbors.append(n_elem) + adj_grps[igrp][n_part_num].neighbor_faces.append(n_face) connected_mesh = part_mesh.copy() connected_mesh.interpart_adj_groups = adj_grps diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 302801e4..59984221 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -146,27 +146,25 @@ def test_partition_mesh(): for part_num in range(num_parts): part, part_to_global = new_meshes[part_num] for grp_num, f_groups in enumerate(part.facial_adjacency_groups): - adj = part.interpart_adj_groups[grp_num] f_grp = f_groups[None] elem_base = part.groups[grp_num].element_nr_base for idx, elem in enumerate(f_grp.elements): tag = -f_grp.neighbors[idx] assert tag >= 0 face = f_grp.element_faces[idx] - for n_part_num in range(num_parts): + for n_part_num, adj in part.interpart_adj_groups[grp_num].items(): n_part, n_part_to_global = new_meshes[n_part_num] if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: num_tags[n_part_num] += 1 - (i, n_elem, n_face) = adj.get_neighbor(elem, face) - assert i == n_part_num + (n_elem, n_face) = adj.get_neighbor(elem, face) n_grp_num = n_part.find_igrp(n_elem) - n_adj = n_part.interpart_adj_groups[n_grp_num] + n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem -= n_elem_base - assert (part_num, elem + elem_base, face) ==\ + assert (elem + elem_base, face) ==\ n_adj.get_neighbor(n_elem, n_face),\ - "InterpartitionAdj is not consistent" + "InterPartitionAdj is not consistent" n_part_to_global = new_meshes[n_part_num][1] p_elem = part_to_global[elem + elem_base] -- GitLab From 4c2b92fff2d818d61241c4110c661232ecb09455 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 28 Mar 2017 10:06:39 -0500 Subject: [PATCH 160/266] Cleaned up call to _make_cross_partiton_batch --- 
.../connection/opposite_face.py | 151 +++++++----------- 1 file changed, 57 insertions(+), 94 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index c7ea09c0..6779bbd9 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -395,41 +395,31 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, i_src_elem, i_tgt_part, i_tgt_grp, i_tgt_elem): +def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, + i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face, + i_src_part, i_src_grp, i_src_elem, i_src_face): """ Creates a batch that transfers data to a face from a face of another partition. :arg queue: - :arg vol_to_bdry_conns: A list of :class:`Direct` for each partition. - :arg adj: :class:`InterPartitionAdj` of partition `i_tgt_part`. - :arg i_tgt_part: The target partition number. - :arg i_tgt_grp: - :arg i_tgt_elem: - :arg i_tgt_face: + :arg vol_to_bdry_conns: A list of :class:`DirectDiscretizationConnection` + for each partition. :returns: ??? """ - src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr + src_mesh = part_meshes[i_src_part] + tgt_mesh = part_meshes[i_tgt_part] + + adj = tgt_mesh.interpart_adj_groups[i_tgt_grp][i_src_part] + tgt_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr + src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr - tgt_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). 
- tgt_bdry_discr.groups[i_tgt_grp].view( - tgt_bdry_discr.nodes().get(queue=queue)) - [:, i_tgt_elem, :]) + tgt_bdry_nodes = tgt_mesh.groups[i_tgt_grp].nodes[:, i_tgt_elem, :] + src_bdry_nodes = src_mesh.groups[i_src_grp].nodes[:, i_src_elem, :] ambient_dim, n_tgt_unit_nodes = tgt_bdry_nodes.shape - nelements = 1 - - # (ambient_dim, nelements, nfrom_unit_nodes) - src_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). - src_bdry_discr.groups[i_src_grp].view( - src_bdry_discr.nodes().get(queue=queue)) - ) tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps @@ -439,9 +429,8 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, dim = src_grp.dim initial_guess = np.mean(src_mesh_grp.vertex_unit_coordinates(), axis=0) - - src_unit_nodes = np.empty((dim, nelements, n_tgt_unit_nodes)) - src_unit_nodes[:] = initial_guess.reshape(-1, 1, 1) + src_unit_nodes = np.empty((dim, n_tgt_unit_nodes)) + src_unit_nodes[:] = initial_guess.reshape(-1, 1) import modepy as mp src_vdm = mp.vandermonde(src_grp.basis(), src_grp.unit_nodes) @@ -449,29 +438,29 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, src_nfuncs = len(src_grp.basis()) def apply_map(unit_nodes): - # unit_nodes: (dim, nelements, nto_unit_nodes) + # unit_nodes: (dim, nto_unit_nodes) # basis_at_unit_nodes - basis_at_unit_nodes = np.empty((src_nfuncs, nelements, n_tgt_unit_nodes)) + basis_at_unit_nodes = np.empty((src_nfuncs, n_tgt_unit_nodes)) for i, f in enumerate(src_grp.basis()): basis_at_unit_nodes[i] = ( f(unit_nodes.reshape(dim, -1)) - .reshape(nelements, n_tgt_unit_nodes)) - intp_coeffs = np.einsum("fj,jet->fet", src_inv_t_vdm, basis_at_unit_nodes) + .reshape(n_tgt_unit_nodes)) + intp_coeffs = np.einsum("fj,jt->ft", src_inv_t_vdm, basis_at_unit_nodes) # If we're interpolating 1, we had better get 1 back. 
- one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) - assert (one_deviation < tol).all(), np.max(one_deviation) - return np.einsum("fet,aef->aet", intp_coeffs, src_bdry_nodes) + #one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) + #assert (one_deviation < tol).all(), np.max(one_deviation) + return np.einsum("ft,af->at", intp_coeffs, src_bdry_nodes) def get_map_jacobian(unit_nodes): - # unit_nodes: (dim, nelements, nto_unit_nodes) + # unit_nodes: (dim, nto_unit_nodes) # basis_at_unit_nodes dbasis_at_unit_nodes = np.empty( - (dim, src_nfuncs, nelements, n_tgt_unit_nodes)) + (dim, src_nfuncs, n_tgt_unit_nodes)) for i, df in enumerate(src_grp.grad_basis()): df_result = df(unit_nodes.reshape(dim, -1)) for rst_axis, df_r in enumerate(df_result): dbasis_at_unit_nodes[rst_axis, i] = ( - df_r.reshape(nelements, n_tgt_unit_nodes)) + df_r.reshape(n_tgt_unit_nodes)) dintp_coeffs = np.einsum( "fj,rjet->rfet", src_inv_t_vdm, dbasis_at_unit_nodes) return np.einsum("rfet,aef->raet", dintp_coeffs, src_bdry_nodes) @@ -486,6 +475,7 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, # equations and Cramer's rule. If you're looking for high-end # numerics, look no further than meshmode. if dim == 1: + # TODO: Needs testing. # A is df.T ata = np.einsum("iket,jket->ijet", df, df) atb = np.einsum("iket,ket->iet", df, resid) @@ -505,10 +495,10 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, # This stinks, performance-wise, because it's not vectorized. # But we'll only hit it for boundaries of 4+D meshes, in which # case... good luck. :) - for e in range(nelements): - for t in range(n_tgt_unit_nodes): - df_inv_resid[:, e, t], _, _, _ = \ - la.lstsq(df[:, :, e, t].T, resid[:, e, t]) + # TODO: Needs testing. 
+ for t in range(n_tgt_unit_nodes): + df_inv_resid[:, t], _, _, _ = \ + la.lstsq(df[:, :, t].T, resid[:, t]) src_unit_nodes = src_unit_nodes - df_inv_resid max_resid = np.max(np.abs(resid)) logger.debug("gauss-newton residual: %g" % max_resid) @@ -524,33 +514,11 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_src_part, i_src_grp, def to_dev(ary): return cl.array.to_device(queue, ary, array_queue=None) - done_elements = np.zeros(nelements, dtype=np.bool) - - # TODO: Still need to figure out what's happening here. - while True: - todo_elements, = np.where(~done_elements) - if not len(todo_elements): - return - template_unit_nodes = src_unit_nodes[:, todo_elements[0], :] - unit_node_dist = np.max(np.max(np.abs( - src_unit_nodes[:, todo_elements, :] - - - template_unit_nodes.reshape(dim, 1, -1)), - axis=2), axis=0) - close_els = todo_elements[unit_node_dist < tol] - done_elements[close_els] = True - unit_node_dist = np.max(np.max(np.abs( - src_unit_nodes[:, todo_elements, :] - - - template_unit_nodes.reshape(dim, 1, -1)), - axis=2), axis=0) - - from meshmode.discretization.connection import InterpolationBatch - yield InterpolationBatch( + return InterpolationBatch( from_group_index=i_src_grp, from_element_indices=to_dev(from_bdry_element_indices[close_els]), to_element_indices=to_dev(to_bdry_element_indices[close_els]), - result_unit_nodes=template_unit_nodes, + result_unit_nodes=src_unit_nodes, to_element_face=None) @@ -583,37 +551,32 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): #tgt_mesh = bdry_discr.mesh tgt_mesh = part_meshes[i_tgt_part] ngroups = len(tgt_mesh.groups) - #part_batches = [[] for _ in range(ngroups)] - part_batches = [] - for i_tgt_grp, adj in enumerate(tgt_mesh.interpart_adj_groups): - part_batches.append(_make_cross_partition_batches( - queue, - vol_to_bdry_conns, - adj, - tgt_mesh, - i_tgt_grp)) - ''' - for idx, i_tgt_elem in enumerate(adj.elements): - i_tgt_face = adj.element_faces[idx] - i_src_part = 
adj.part_indices[idx] - i_src_elem = adj.neighbors[idx] - i_src_face = adj.neighbor_faces[idx] - #src_mesh = vol_to_bdry_conns[i_src_part].to_discr.mesh + part_batches = [[] for _ in range(ngroups)] + for i_tgt_grp, adj_parts in enumerate(tgt_mesh.interpart_adj_groups): + for i_src_part, adj in adj_parts.items(): + src_mesh = part_meshes[i_src_part] - i_src_grp = src_mesh.find_igrp(i_src_elem) - i_src_elem -= src_mesh.groups[i_src_grp].element_nr_base - - part_batches[i_tgt_grp].extend( - _make_cross_partition_batch( - queue, - vol_to_bdry_conns, - i_src_part, - i_src_grp, - i_src_elem, - i_tgt_part, - i_tgt_grp, - i_tgt_elem)) - ''' + + i_src_elems = adj.neighbors + i_src_faces = adj.neighbor_faces + i_src_grps = [src_mesh.find_igrp(e) for e in i_src_elems] + for i in range(len(i_src_elems)): + i_src_elems[i] -= src_mesh.groups[i_src_grps[i]].element_nr_base + + i_tgt_elems = adj.elements + i_tgt_faces = adj.element_faces + + for idx, i_tgt_elem in enumerate(i_tgt_elems): + i_tgt_face = i_tgt_faces[idx] + i_src_elem = i_src_elems[idx] + i_src_face = i_src_faces[idx] + i_src_grp = i_src_grps[idx] + + part_batches[i_tgt_grp].append( + _make_cross_partition_batch(queue, + vol_to_bdry_conns, part_meshes, + i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face, + i_src_part, i_src_grp, i_src_elem, i_src_face)) # Make one Discr connection for each partition. 
disc_conns.append(DirectDiscretizationConnection( -- GitLab From 8bf96c09768667ded3412fdfeacce80009fd53e3 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 29 Mar 2017 23:55:23 -0500 Subject: [PATCH 161/266] make_partition_connection works with no testing --- .../connection/opposite_face.py | 58 +++++++++++-------- test/test_meshmode.py | 17 +++--- 2 files changed, 43 insertions(+), 32 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 6779bbd9..8c127bba 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -395,7 +395,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, +def _make_cross_partition_batch(queue, vol_to_bdry_conns, i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face, i_src_part, i_src_grp, i_src_elem, i_src_face): """ @@ -408,16 +408,22 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, :returns: ??? """ - src_mesh = part_meshes[i_src_part] - tgt_mesh = part_meshes[i_tgt_part] - - adj = tgt_mesh.interpart_adj_groups[i_tgt_grp][i_src_part] - tgt_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr - tgt_bdry_nodes = tgt_mesh.groups[i_tgt_grp].nodes[:, i_tgt_elem, :] - src_bdry_nodes = src_mesh.groups[i_src_grp].nodes[:, i_src_elem, :] + tgt_bdry_nodes = ( + # FIXME: This should view-then-transfer (but PyOpenCL doesn't do + # non-contiguous transfers for now). + tgt_bdry_discr.groups[i_tgt_grp].view( + tgt_bdry_discr.nodes().get(queue=queue)) + [:, i_tgt_elem]) + + src_bdry_nodes = ( + # FIXME: This should view-then-transfer (but PyOpenCL doesn't do + # non-contiguous transfers for now). 
+ src_bdry_discr.groups[i_src_grp].view( + src_bdry_discr.nodes().get(queue=queue)) + [:, i_src_elem]) ambient_dim, n_tgt_unit_nodes = tgt_bdry_nodes.shape @@ -445,11 +451,13 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, basis_at_unit_nodes[i] = ( f(unit_nodes.reshape(dim, -1)) .reshape(n_tgt_unit_nodes)) - intp_coeffs = np.einsum("fj,jt->ft", src_inv_t_vdm, basis_at_unit_nodes) + #intp_coeffs = src_inv_t_vdm @ basis_at_unit_nodes + intp_coeffs = np.einsum("ij,jk->ik", src_inv_t_vdm, basis_at_unit_nodes) # If we're interpolating 1, we had better get 1 back. - #one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) - #assert (one_deviation < tol).all(), np.max(one_deviation) - return np.einsum("ft,af->at", intp_coeffs, src_bdry_nodes) + one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) + assert (one_deviation < tol).all(), np.max(one_deviation) + return np.einsum("ij,jk->ik", src_bdry_nodes, intp_coeffs) + #return src_bdry_nodes @ intp_coeffs.T def get_map_jacobian(unit_nodes): # unit_nodes: (dim, nto_unit_nodes) @@ -462,8 +470,8 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, dbasis_at_unit_nodes[rst_axis, i] = ( df_r.reshape(n_tgt_unit_nodes)) dintp_coeffs = np.einsum( - "fj,rjet->rfet", src_inv_t_vdm, dbasis_at_unit_nodes) - return np.einsum("rfet,aef->raet", dintp_coeffs, src_bdry_nodes) + "ij,rjk->rik", src_inv_t_vdm, dbasis_at_unit_nodes) + return np.einsum("ij,rjk->rik", src_bdry_nodes, dintp_coeffs) logger.info("make_partition_connection: begin gauss-newton") niter = 0 @@ -477,13 +485,13 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, if dim == 1: # TODO: Needs testing. 
# A is df.T - ata = np.einsum("iket,jket->ijet", df, df) - atb = np.einsum("iket,ket->iet", df, resid) + ata = np.einsum("ikt,jkt->ijt", df, df) + atb = np.einsum("ikt,kt->it", df, resid) df_inv_resid = atb / ata[0, 0] elif dim == 2: # A is df.T - ata = np.einsum("iket,jket->ijet", df, df) - atb = np.einsum("iket,ket->iet", df, resid) + ata = np.einsum("ikt,jkt->ijt", df, df) + atb = np.einsum("ikt,kt->it", df, resid) det = ata[0, 0]*ata[1, 1] - ata[0, 1]*ata[1, 0] df_inv_resid = np.empty_like(src_unit_nodes) df_inv_resid[0] = 1/det * (ata[1, 1] * atb[0] - ata[1, 0]*atb[1]) @@ -514,10 +522,12 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, part_meshes, def to_dev(ary): return cl.array.to_device(queue, ary, array_queue=None) + from meshmode.discretization.connection import InterpolationBatch return InterpolationBatch( + # This is not right. Need partition number information. from_group_index=i_src_grp, - from_element_indices=to_dev(from_bdry_element_indices[close_els]), - to_element_indices=to_dev(to_bdry_element_indices[close_els]), + from_element_indices=to_dev(np.array([i_src_elem])), + to_element_indices=to_dev(np.array([i_tgt_elem])), result_unit_nodes=src_unit_nodes, to_element_face=None) @@ -557,15 +567,15 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): src_mesh = part_meshes[i_src_part] + i_tgt_elems = adj.elements + i_tgt_faces = adj.element_faces + i_src_elems = adj.neighbors i_src_faces = adj.neighbor_faces i_src_grps = [src_mesh.find_igrp(e) for e in i_src_elems] for i in range(len(i_src_elems)): i_src_elems[i] -= src_mesh.groups[i_src_grps[i]].element_nr_base - i_tgt_elems = adj.elements - i_tgt_faces = adj.element_faces - for idx, i_tgt_elem in enumerate(i_tgt_elems): i_tgt_face = i_tgt_faces[idx] i_src_elem = i_src_elems[idx] @@ -574,7 +584,7 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): part_batches[i_tgt_grp].append( _make_cross_partition_batch(queue, - vol_to_bdry_conns, part_meshes, + 
vol_to_bdry_conns, i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face, i_src_part, i_src_grp, i_src_elem, i_src_face)) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 59984221..8850232e 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -58,12 +58,14 @@ def test_partition_interpolation(ctx_getter): n = 3 dim = 2 num_parts = 7 - from meshmode.mesh.generation import generate_warped_rect_mesh - mesh1 = generate_warped_rect_mesh(dim, order=order, n=n) - mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) + from meshmode.mesh.generation import generate_regular_rect_mesh + mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) + #from meshmode.mesh.generation import generate_warped_rect_mesh + #mesh = generate_warped_rect_mesh(dim, order=order, n=n) + #mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes([mesh1, mesh2]) + #from meshmode.mesh.processing import merge_disjoint_meshes + #mesh = merge_disjoint_meshes([mesh1, mesh2]) adjacency_list = np.zeros((mesh.nelements,), dtype=set) for elem in range(mesh.nelements): @@ -94,9 +96,8 @@ def test_partition_interpolation(ctx_getter): connections = make_partition_connection(bdry_connections, part_meshes) from meshmode.discretization.connection import check_connection - for conn in connections: - print(conn) - check_connection(conn) + #for conn in connections: + #check_connection(conn) # }}} -- GitLab From 5e547da38f5e8fca10dd6e0a77a811e3f01fdffb Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 3 Apr 2017 13:09:27 -0500 Subject: [PATCH 162/266] Small changes --- meshmode/discretization/connection/opposite_face.py | 10 ++++------ test/test_meshmode.py | 6 +++--- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 8c127bba..98bee5c4 100644 --- 
a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -451,13 +451,13 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, basis_at_unit_nodes[i] = ( f(unit_nodes.reshape(dim, -1)) .reshape(n_tgt_unit_nodes)) - #intp_coeffs = src_inv_t_vdm @ basis_at_unit_nodes - intp_coeffs = np.einsum("ij,jk->ik", src_inv_t_vdm, basis_at_unit_nodes) + intp_coeffs = src_inv_t_vdm @ basis_at_unit_nodes + #intp_coeffs = np.einsum("ij,jk->ik", src_inv_t_vdm, basis_at_unit_nodes) # If we're interpolating 1, we had better get 1 back. one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) assert (one_deviation < tol).all(), np.max(one_deviation) - return np.einsum("ij,jk->ik", src_bdry_nodes, intp_coeffs) - #return src_bdry_nodes @ intp_coeffs.T + #return np.einsum("ij,jk,ik", src_bdry_nodes, intp_coeffs) + return src_bdry_nodes @ intp_coeffs def get_map_jacobian(unit_nodes): # unit_nodes: (dim, nto_unit_nodes) @@ -549,8 +549,6 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - # Create a list of batches. Each batch contains interpolation - # data from one partition to another. for i_tgt_part, tgt_vol_conn in enumerate(vol_to_bdry_conns): # Is this ok in a loop? 
diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 8850232e..c320c4c2 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -61,7 +61,7 @@ def test_partition_interpolation(ctx_getter): from meshmode.mesh.generation import generate_regular_rect_mesh mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) #from meshmode.mesh.generation import generate_warped_rect_mesh - #mesh = generate_warped_rect_mesh(dim, order=order, n=n) + #mesh1 = generate_warped_rect_mesh(dim, order=order, n=n) #mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) #from meshmode.mesh.processing import merge_disjoint_meshes @@ -96,8 +96,8 @@ def test_partition_interpolation(ctx_getter): connections = make_partition_connection(bdry_connections, part_meshes) from meshmode.discretization.connection import check_connection - #for conn in connections: - #check_connection(conn) + for conn in connections: + check_connection(conn) # }}} -- GitLab From a074c82aa6540f44e185f16d3f32b85086c9227b Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 3 Apr 2017 17:09:31 -0500 Subject: [PATCH 163/266] Add adjacency_list method. 
--- .../connection/opposite_face.py | 85 ++++++++++++++----- meshmode/mesh/__init__.py | 16 ++++ test/test_meshmode.py | 29 ++----- 3 files changed, 87 insertions(+), 43 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 98bee5c4..1a69f624 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -439,40 +439,63 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, src_unit_nodes[:] = initial_guess.reshape(-1, 1) import modepy as mp - src_vdm = mp.vandermonde(src_grp.basis(), src_grp.unit_nodes) - src_inv_t_vdm = la.inv(src_vdm.T) - src_nfuncs = len(src_grp.basis()) + vdm = mp.vandermonde(src_grp.basis(), src_grp.unit_nodes) + inv_t_vdm = la.inv(vdm.T) + n_src_funcs = len(src_grp.basis()) def apply_map(unit_nodes): - # unit_nodes: (dim, nto_unit_nodes) - # basis_at_unit_nodes - basis_at_unit_nodes = np.empty((src_nfuncs, n_tgt_unit_nodes)) - for i, f in enumerate(src_grp.basis()): - basis_at_unit_nodes[i] = ( - f(unit_nodes.reshape(dim, -1)) - .reshape(n_tgt_unit_nodes)) - intp_coeffs = src_inv_t_vdm @ basis_at_unit_nodes - #intp_coeffs = np.einsum("ij,jk->ik", src_inv_t_vdm, basis_at_unit_nodes) - # If we're interpolating 1, we had better get 1 back. 
- one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) - assert (one_deviation < tol).all(), np.max(one_deviation) - #return np.einsum("ij,jk,ik", src_bdry_nodes, intp_coeffs) - return src_bdry_nodes @ intp_coeffs + basis_at_unit_nodes = np.array([f(unit_nodes) for f in src_grp.basis()]) + + return src_bdry_nodes @ inv_t_vdm @ basis_at_unit_nodes def get_map_jacobian(unit_nodes): - # unit_nodes: (dim, nto_unit_nodes) - # basis_at_unit_nodes - dbasis_at_unit_nodes = np.empty( - (dim, src_nfuncs, n_tgt_unit_nodes)) + dbasis_at_unit_nodes = np.empty((dim, n_src_funcs, n_tgt_unit_nodes)) + for i, df in enumerate(src_grp.grad_basis()): df_result = df(unit_nodes.reshape(dim, -1)) for rst_axis, df_r in enumerate(df_result): dbasis_at_unit_nodes[rst_axis, i] = ( df_r.reshape(n_tgt_unit_nodes)) + #dbasis_at_unit_nodes = np.array([df(unit_nodes) for df in src_grp.grad_basis()]) dintp_coeffs = np.einsum( - "ij,rjk->rik", src_inv_t_vdm, dbasis_at_unit_nodes) + "ij,rjk->rik", inv_t_vdm, dbasis_at_unit_nodes) return np.einsum("ij,rjk->rik", src_bdry_nodes, dintp_coeffs) + # {{{ test map applier and jacobian + if 0: + u = src_unit_nodes + f = apply_map(u) + for h in [1e-1, 1e-2]: + du = h*np.random.randn(*u.shape) + + f_2 = apply_map(u+du) + + jf = get_map_jacobian(u) + + f2_2 = f + np.einsum("rat,rt->at", jf, du) + + print(h, la.norm((f_2-f2_2).ravel())) + # }}} + + # {{{ visualize initial guess + + if 0: + import matplotlib.pyplot as pt + guess = apply_map(src_unit_nodes) + goals = tgt_bdry_nodes + + from meshmode.discretization.visualization import draw_curve + draw_curve(src_bdry_discr) + + pt.plot(guess[0].reshape(-1), guess[1].reshape(-1), "or") + pt.plot(goals[0].reshape(-1), goals[1].reshape(-1), "og") + pt.plot(src_bdry_nodes[0].reshape(-1), src_bdry_nodes[1].reshape(-1), "o", + color="purple") + pt.show() + + # }}} + + logger.info("make_partition_connection: begin gauss-newton") niter = 0 while True: @@ -507,8 +530,26 @@ def _make_cross_partition_batch(queue, 
vol_to_bdry_conns, for t in range(n_tgt_unit_nodes): df_inv_resid[:, t], _, _, _ = \ la.lstsq(df[:, :, t].T, resid[:, t]) + + # {{{ visualize next guess + if 0: + import matplotlib.pyplot as pt + guess = apply_map(src_unit_nodes) + goals = tgt_bdry_nodes + + from meshmode.discretization.visualization import draw_curve + + pt.plot(guess[0].reshape(-1), guess[2].reshape(-1), "r^") + pt.plot(goals[0].reshape(-1), goals[2].reshape(-1), "xg") + pt.plot(src_bdry_nodes[0].reshape(-1), src_bdry_nodes[2].reshape(-1), "o", + color="purple") + #pt.plot(src_unit_nodes[0].reshape(-1), src_unit_nodes[1].reshape(-1), "ob") + pt.show() + # }}} + src_unit_nodes = src_unit_nodes - df_inv_resid max_resid = np.max(np.abs(resid)) + #print(resid[0, :]) logger.debug("gauss-newton residual: %g" % max_resid) if max_resid < tol: logger.info("make_partition_connection: gauss-newton: done, " diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 791bc271..ce77e668 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -623,6 +623,7 @@ class Mesh(Record): .. automethod:: __eq__ .. automethod:: __ne__ .. automethod:: find_igrp + .. automethos:: adjacency_list """ face_id_dtype = np.int8 @@ -860,6 +861,21 @@ class Mesh(Record): elem -= grp.nelements raise RuntimeError("Could not find group with element %d." % elem) + def adjacency_list(self): + """ + :returns: An :class:`np.array` with dtype `set`. `adjacency[i]` is the set + of all elements that are adjacent to element `i`. + Useful for `pymetis.part_graph`. + """ + adjacency_list = np.zeros((self.nelements,), dtype=set) + nodal_adj = self.nodal_adjacency + for elem in range(self.nelements): + adjacency_list[elem] = set() + starts = nodal_adj.neighbors_starts + for n in range(starts[elem], starts[elem + 1]): + adjacency_list[elem].add(nodal_adj.neighbors[n]) + return adjacency_list + # Design experience: Try not to add too many global data structures to the # mesh. 
Let the element groups be responsible for that at the mesh level. # diff --git a/test/test_meshmode.py b/test/test_meshmode.py index c320c4c2..1c6c2c5b 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -57,25 +57,18 @@ def test_partition_interpolation(ctx_getter): group_factory = PolynomialWarpAndBlendGroupFactory(order) n = 3 dim = 2 - num_parts = 7 + num_parts = 3 from meshmode.mesh.generation import generate_regular_rect_mesh mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) #from meshmode.mesh.generation import generate_warped_rect_mesh - #mesh1 = generate_warped_rect_mesh(dim, order=order, n=n) + #mesh = generate_warped_rect_mesh(dim, order=order, n=n) #mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) #from meshmode.mesh.processing import merge_disjoint_meshes #mesh = merge_disjoint_meshes([mesh1, mesh2]) - adjacency_list = np.zeros((mesh.nelements,), dtype=set) - for elem in range(mesh.nelements): - adjacency_list[elem] = set() - starts = mesh.nodal_adjacency.neighbors_starts - for n in range(starts[elem], starts[elem + 1]): - adjacency_list[elem].add(mesh.nodal_adjacency.neighbors[n]) - from pymetis import part_graph - (_, p) = part_graph(num_parts, adjacency=adjacency_list) + (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) part_per_element = np.array(p) from meshmode.mesh.processing import partition_mesh @@ -95,9 +88,10 @@ def test_partition_interpolation(ctx_getter): from meshmode.discretization.connection import make_partition_connection connections = make_partition_connection(bdry_connections, part_meshes) - from meshmode.discretization.connection import check_connection - for conn in connections: - check_connection(conn) + # We can't use check_connection because I don't think it works with partitions. 
+ #from meshmode.discretization.connection import check_connection + #for conn in connections: + # check_connection(conn) # }}} @@ -118,15 +112,8 @@ def test_partition_mesh(): from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes([mesh1, mesh2, mesh3]) - adjacency_list = np.zeros((mesh.nelements,), dtype=set) - for elem in range(mesh.nelements): - adjacency_list[elem] = set() - starts = mesh.nodal_adjacency.neighbors_starts - for n in range(starts[elem], starts[elem + 1]): - adjacency_list[elem].add(mesh.nodal_adjacency.neighbors[n]) - from pymetis import part_graph - (_, p) = part_graph(num_parts, adjacency=adjacency_list) + (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) part_per_element = np.array(p) from meshmode.mesh.processing import partition_mesh -- GitLab From b3c36a1839604ad862837932bcb6172a00c9ba05 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 3 Apr 2017 17:18:04 -0500 Subject: [PATCH 164/266] working --- .../discretization/connection/opposite_face.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 1a69f624..b224cb32 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -456,7 +456,8 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, for rst_axis, df_r in enumerate(df_result): dbasis_at_unit_nodes[rst_axis, i] = ( df_r.reshape(n_tgt_unit_nodes)) - #dbasis_at_unit_nodes = np.array([df(unit_nodes) for df in src_grp.grad_basis()]) + #dbasis_at_unit_nodes = np.array( + # [df(unit_nodes) for df in src_grp.grad_basis()]) dintp_coeffs = np.einsum( "ij,rjk->rik", inv_t_vdm, dbasis_at_unit_nodes) return np.einsum("ij,rjk->rik", src_bdry_nodes, dintp_coeffs) @@ -495,7 +496,6 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, # }}} - logger.info("make_partition_connection: 
begin gauss-newton") niter = 0 while True: @@ -532,18 +532,17 @@ def _make_cross_partition_batch(queue, vol_to_bdry_conns, la.lstsq(df[:, :, t].T, resid[:, t]) # {{{ visualize next guess - if 0: + if 1: import matplotlib.pyplot as pt guess = apply_map(src_unit_nodes) goals = tgt_bdry_nodes from meshmode.discretization.visualization import draw_curve - pt.plot(guess[0].reshape(-1), guess[2].reshape(-1), "r^") - pt.plot(goals[0].reshape(-1), goals[2].reshape(-1), "xg") - pt.plot(src_bdry_nodes[0].reshape(-1), src_bdry_nodes[2].reshape(-1), "o", - color="purple") - #pt.plot(src_unit_nodes[0].reshape(-1), src_unit_nodes[1].reshape(-1), "ob") + pt.plot(guess[0], guess[1], "r^") + pt.plot(goals[0], goals[1], "xg") + pt.plot(src_bdry_nodes[0], src_bdry_nodes[1], "o", color="purple") + pt.plot(src_unit_nodes[0], src_unit_nodes[1], "ob") pt.show() # }}} @@ -613,7 +612,8 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): i_src_faces = adj.neighbor_faces i_src_grps = [src_mesh.find_igrp(e) for e in i_src_elems] for i in range(len(i_src_elems)): - i_src_elems[i] -= src_mesh.groups[i_src_grps[i]].element_nr_base + elem_base = src_mesh.groups[i_src_grps[i]].element_nr_base + i_src_elems[i] -= elem_base for idx, i_tgt_elem in enumerate(i_tgt_elems): i_tgt_face = i_tgt_faces[idx] -- GitLab From eb167a3cbaae7de3305205f1a9b02fdae3b66362 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 6 Apr 2017 10:51:21 -0500 Subject: [PATCH 165/266] Changed _make_cross_face_batches to handle two different meshes. 
--- .../connection/opposite_face.py | 460 ++++++------------ meshmode/mesh/__init__.py | 8 +- meshmode/mesh/processing.py | 11 +- test/test_meshmode.py | 23 +- 4 files changed, 168 insertions(+), 334 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index b224cb32..5b5c03dd 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -35,143 +35,82 @@ logger = logging.getLogger(__name__) # {{{ opposite-face connection -def _make_cross_face_batches( - queue, vol_discr, bdry_discr, - i_tgt_grp, i_src_grp, - i_face_tgt, - adj_grp, - vbc_tgt_grp_face_batch, src_grp_el_lookup): +def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, + i_tgt_grp, i_src_grp, + tgt_bdry_element_indices, + src_bdry_element_indices): - # {{{ index wrangling + # FIXME: This should view-then-transfer + # (but PyOpenCL doesn't do non-contiguous transfers for now). + tgt_bdry_nodes = (tgt_bdry_discr.groups[i_tgt_grp].view(tgt_bdry_discr.nodes(). + get(queue=queue))[:, tgt_bdry_element_indices]) - # Assert that the adjacency group and the restriction - # interpolation batch and the adjacency group have the same - # element ordering. + # FIXME: This should view-then-transfer + # (but PyOpenCL doesn't do non-contiguous transfers for now). + src_bdry_nodes = (tgt_bdry_discr.groups[i_src_grp].view(tgt_bdry_discr.nodes(). 
+ get(queue=queue))[:, src_bdry_element_indices]) - adj_grp_tgt_flags = adj_grp.element_faces == i_face_tgt - - assert ( - np.array_equal( - adj_grp.elements[adj_grp_tgt_flags], - vbc_tgt_grp_face_batch.from_element_indices - .get(queue=queue))) - - # find to_element_indices - - to_bdry_element_indices = ( - vbc_tgt_grp_face_batch.to_element_indices - .get(queue=queue)) - - # find from_element_indices - - from_vol_element_indices = adj_grp.neighbors[adj_grp_tgt_flags] - from_element_faces = adj_grp.neighbor_faces[adj_grp_tgt_flags] - - from_bdry_element_indices = src_grp_el_lookup[ - from_vol_element_indices, from_element_faces] - - # }}} - - # {{{ visualization (for debugging) + tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps - if 0: - print("TVE", adj_grp.elements[adj_grp_tgt_flags]) - print("TBE", to_bdry_element_indices) - print("FVE", from_vol_element_indices) - from meshmode.mesh.visualization import draw_2d_mesh - import matplotlib.pyplot as pt - draw_2d_mesh(vol_discr.mesh, draw_element_numbers=True, - set_bounding_box=True, - draw_vertex_numbers=False, - draw_face_numbers=True, - fill=None) - pt.figure() - - draw_2d_mesh(bdry_discr.mesh, draw_element_numbers=True, - set_bounding_box=True, - draw_vertex_numbers=False, - draw_face_numbers=True, - fill=None) + src_mesh_grp = src_bdry_discr.mesh.groups[i_src_grp] + src_grp = src_bdry_discr.groups[i_src_grp] - pt.show() - # }}} + dim = src_grp.dim + ambient_dim, nelements, ntgt_unit_nodes = tgt_bdry_nodes.shape # {{{ invert face map (using Gauss-Newton) - to_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). 
- bdry_discr.groups[i_tgt_grp].view( - bdry_discr.nodes().get(queue=queue)) - [:, to_bdry_element_indices]) - - tol = 1e4 * np.finfo(to_bdry_nodes.dtype).eps - - from_mesh_grp = bdry_discr.mesh.groups[i_src_grp] - from_grp = bdry_discr.groups[i_src_grp] - - dim = from_grp.dim - ambient_dim, nelements, nto_unit_nodes = to_bdry_nodes.shape - - initial_guess = np.mean(from_mesh_grp.vertex_unit_coordinates(), axis=0) - from_unit_nodes = np.empty((dim, nelements, nto_unit_nodes)) - from_unit_nodes[:] = initial_guess.reshape(-1, 1, 1) + initial_guess = np.mean(src_mesh_grp.vertex_unit_coordinates(), axis=0) + src_unit_nodes = np.empty((dim, nelements, ntgt_unit_nodes)) + src_unit_nodes[:] = initial_guess.reshape(-1, 1, 1) import modepy as mp - from_vdm = mp.vandermonde(from_grp.basis(), from_grp.unit_nodes) - from_inv_t_vdm = la.inv(from_vdm.T) - from_nfuncs = len(from_grp.basis()) - - # (ambient_dim, nelements, nfrom_unit_nodes) - from_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). 
- bdry_discr.groups[i_src_grp].view( - bdry_discr.nodes().get(queue=queue)) - [:, from_bdry_element_indices]) + vdm = mp.vandermonde(src_grp.basis(), src_grp.unit_nodes) + inv_t_vdm = la.inv(vdm.T) + nsrc_funcs = len(src_grp.basis()) def apply_map(unit_nodes): - # unit_nodes: (dim, nelements, nto_unit_nodes) + # unit_nodes: (dim, nelements, ntgt_unit_nodes) # basis_at_unit_nodes - basis_at_unit_nodes = np.empty((from_nfuncs, nelements, nto_unit_nodes)) + basis_at_unit_nodes = np.empty((nsrc_funcs, nelements, ntgt_unit_nodes)) - for i, f in enumerate(from_grp.basis()): + for i, f in enumerate(src_grp.basis()): basis_at_unit_nodes[i] = ( f(unit_nodes.reshape(dim, -1)) - .reshape(nelements, nto_unit_nodes)) + .reshape(nelements, ntgt_unit_nodes)) - intp_coeffs = np.einsum("fj,jet->fet", from_inv_t_vdm, basis_at_unit_nodes) + intp_coeffs = np.einsum("fj,jet->fet", inv_t_vdm, basis_at_unit_nodes) # If we're interpolating 1, we had better get 1 back. one_deviation = np.abs(np.sum(intp_coeffs, axis=0) - 1) assert (one_deviation < tol).all(), np.max(one_deviation) - return np.einsum("fet,aef->aet", intp_coeffs, from_bdry_nodes) + return np.einsum("fet,aef->aet", intp_coeffs, src_bdry_nodes) def get_map_jacobian(unit_nodes): - # unit_nodes: (dim, nelements, nto_unit_nodes) + # unit_nodes: (dim, nelements, ntgt_unit_nodes) # basis_at_unit_nodes dbasis_at_unit_nodes = np.empty( - (dim, from_nfuncs, nelements, nto_unit_nodes)) + (dim, nsrc_funcs, nelements, ntgt_unit_nodes)) - for i, df in enumerate(from_grp.grad_basis()): + for i, df in enumerate(src_grp.grad_basis()): df_result = df(unit_nodes.reshape(dim, -1)) for rst_axis, df_r in enumerate(df_result): dbasis_at_unit_nodes[rst_axis, i] = ( - df_r.reshape(nelements, nto_unit_nodes)) + df_r.reshape(nelements, ntgt_unit_nodes)) dintp_coeffs = np.einsum( - "fj,rjet->rfet", from_inv_t_vdm, dbasis_at_unit_nodes) + "fj,rjet->rfet", inv_t_vdm, dbasis_at_unit_nodes) - return np.einsum("rfet,aef->raet", dintp_coeffs, 
from_bdry_nodes) + return np.einsum("rfet,aef->raet", dintp_coeffs, src_bdry_nodes) # {{{ test map applier and jacobian if 0: - u = from_unit_nodes + u = src_unit_nodes f = apply_map(u) for h in [1e-1, 1e-2]: du = h*np.random.randn(*u.shape) @@ -190,16 +129,16 @@ def _make_cross_face_batches( if 0: import matplotlib.pyplot as pt - guess = apply_map(from_unit_nodes) - goals = to_bdry_nodes + guess = apply_map(src_unit_nodes) + goals = tgt_bdry_nodes from meshmode.discretization.visualization import draw_curve - draw_curve(bdry_discr) + draw_curve(tgt_bdry_discr) + draw_curve(src_bdry_discr) pt.plot(guess[0].reshape(-1), guess[1].reshape(-1), "or") pt.plot(goals[0].reshape(-1), goals[1].reshape(-1), "og") - pt.plot(from_bdry_nodes[0].reshape(-1), from_bdry_nodes[1].reshape(-1), "o", - color="purple") + pt.plot(src_bdry_nodes[0].reshape(-1), src_bdry_nodes[1].reshape(-1), "xb") pt.show() # }}} @@ -208,10 +147,10 @@ def _make_cross_face_batches( niter = 0 while True: - resid = apply_map(from_unit_nodes) - to_bdry_nodes + resid = apply_map(src_unit_nodes) - tgt_bdry_nodes - df = get_map_jacobian(from_unit_nodes) - df_inv_resid = np.empty_like(from_unit_nodes) + df = get_map_jacobian(src_unit_nodes) + df_inv_resid = np.empty_like(src_unit_nodes) # For the 1D/2D accelerated versions, we'll use the normal # equations and Cramer's rule. If you're looking for high-end @@ -231,7 +170,7 @@ def _make_cross_face_batches( det = ata[0, 0]*ata[1, 1] - ata[0, 1]*ata[1, 0] - df_inv_resid = np.empty_like(from_unit_nodes) + df_inv_resid = np.empty_like(src_unit_nodes) df_inv_resid[0] = 1/det * (ata[1, 1] * atb[0] - ata[1, 0]*atb[1]) df_inv_resid[1] = 1/det * (-ata[0, 1] * atb[0] + ata[0, 0]*atb[1]) @@ -243,11 +182,11 @@ def _make_cross_face_batches( # But we'll only hit it for boundaries of 4+D meshes, in which # case... good luck. 
:) for e in range(nelements): - for t in range(nto_unit_nodes): + for t in range(ntgt_unit_nodes): df_inv_resid[:, e, t], _, _, _ = \ la.lstsq(df[:, :, e, t].T, resid[:, e, t]) - from_unit_nodes = from_unit_nodes - df_inv_resid + src_unit_nodes = src_unit_nodes - df_inv_resid max_resid = np.max(np.abs(resid)) logger.debug("gauss-newton residual: %g" % max_resid) @@ -264,7 +203,7 @@ def _make_cross_face_batches( # }}} - # {{{ find groups of from_unit_nodes + # {{{ find groups of src_unit_nodes def to_dev(ary): return cl.array.to_device(queue, ary, array_queue=None) @@ -275,10 +214,10 @@ def _make_cross_face_batches( if not len(todo_elements): return - template_unit_nodes = from_unit_nodes[:, todo_elements[0], :] + template_unit_nodes = src_unit_nodes[:, todo_elements[0], :] unit_node_dist = np.max(np.max(np.abs( - from_unit_nodes[:, todo_elements, :] + src_unit_nodes[:, todo_elements, :] - template_unit_nodes.reshape(dim, 1, -1)), axis=2), axis=0) @@ -287,7 +226,7 @@ def _make_cross_face_batches( done_elements[close_els] = True unit_node_dist = np.max(np.max(np.abs( - from_unit_nodes[:, todo_elements, :] + src_unit_nodes[:, todo_elements, :] - template_unit_nodes.reshape(dim, 1, -1)), axis=2), axis=0) @@ -295,8 +234,8 @@ def _make_cross_face_batches( from meshmode.discretization.connection import InterpolationBatch yield InterpolationBatch( from_group_index=i_src_grp, - from_element_indices=to_dev(from_bdry_element_indices[close_els]), - to_element_indices=to_dev(to_bdry_element_indices[close_els]), + from_element_indices=to_dev(src_bdry_element_indices[close_els]), + to_element_indices=to_dev(tgt_bdry_element_indices[close_els]), result_unit_nodes=template_unit_nodes, to_element_face=None) @@ -366,211 +305,86 @@ def make_opposite_face_connection(volume_to_bdry_conn): for i_tgt_grp in range(ngrps): vbc_tgt_grp_batches = volume_to_bdry_conn.groups[i_tgt_grp].batches - adj_grp = vol_mesh.facial_adjacency_groups[i_tgt_grp][i_src_grp] + adj = 
vol_mesh.facial_adjacency_groups[i_tgt_grp][i_src_grp] for i_face_tgt in range(vol_mesh.groups[i_tgt_grp].nfaces): vbc_tgt_grp_face_batch = _find_ibatch_for_face( vbc_tgt_grp_batches, i_face_tgt) - groups[i_tgt_grp].extend( - _make_cross_face_batches( - queue, vol_discr, bdry_discr, - i_tgt_grp, i_src_grp, - i_face_tgt, - adj_grp, - vbc_tgt_grp_face_batch, src_grp_el_lookup)) - - from meshmode.discretization.connection import ( - DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - return DirectDiscretizationConnection( - from_discr=bdry_discr, - to_discr=bdry_discr, - groups=[ - DiscretizationConnectionElementGroup(batches=batches) - for batches in groups], - is_surjective=True) - -# }}} + # {{{ index wrangling + # Assert that the adjacency group and the restriction + # interpolation batch and the adjacency group have the same + # element ordering. -# {{{ partition_connection - -def _make_cross_partition_batch(queue, vol_to_bdry_conns, - i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face, - i_src_part, i_src_grp, i_src_elem, i_src_face): - """ - Creates a batch that transfers data to a face from a face of another partition. + adj_tgt_flags = adj.element_faces == i_face_tgt - :arg queue: - :arg vol_to_bdry_conns: A list of :class:`DirectDiscretizationConnection` - for each partition. + assert (np.array_equal( + adj.elements[adj_tgt_flags], + vbc_tgt_grp_face_batch.from_element_indices + .get(queue=queue))) - :returns: ??? - """ + # find to_element_indices - tgt_bdry_discr = vol_to_bdry_conns[i_tgt_part].to_discr - src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr + tgt_bdry_element_indices = ( + vbc_tgt_grp_face_batch.to_element_indices + .get(queue=queue)) - tgt_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). 
- tgt_bdry_discr.groups[i_tgt_grp].view( - tgt_bdry_discr.nodes().get(queue=queue)) - [:, i_tgt_elem]) + # find from_element_indices - src_bdry_nodes = ( - # FIXME: This should view-then-transfer (but PyOpenCL doesn't do - # non-contiguous transfers for now). - src_bdry_discr.groups[i_src_grp].view( - src_bdry_discr.nodes().get(queue=queue)) - [:, i_src_elem]) + src_vol_element_indices = adj.neighbors[adj_tgt_flags] + src_element_faces = adj.neighbor_faces[adj_tgt_flags] - ambient_dim, n_tgt_unit_nodes = tgt_bdry_nodes.shape + src_bdry_element_indices = src_grp_el_lookup[ + src_vol_element_indices, src_element_faces] - tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps + # }}} - src_mesh_grp = src_bdry_discr.mesh.groups[i_src_grp] - src_grp = src_bdry_discr.groups[i_src_grp] + # {{{ visualization (for debugging) - dim = src_grp.dim + if 0: + print("TVE", adj.elements[adj_tgt_flags]) + print("TBE", tgt_bdry_element_indices) + print("FVE", src_vol_element_indices) + from meshmode.mesh.visualization import draw_2d_mesh + import matplotlib.pyplot as pt + draw_2d_mesh(vol_discr.mesh, draw_element_numbers=True, + set_bounding_box=True, + draw_vertex_numbers=False, + draw_face_numbers=True, + fill=None) + pt.figure() - initial_guess = np.mean(src_mesh_grp.vertex_unit_coordinates(), axis=0) - src_unit_nodes = np.empty((dim, n_tgt_unit_nodes)) - src_unit_nodes[:] = initial_guess.reshape(-1, 1) + draw_2d_mesh(bdry_discr.mesh, draw_element_numbers=True, + set_bounding_box=True, + draw_vertex_numbers=False, + draw_face_numbers=True, + fill=None) - import modepy as mp - vdm = mp.vandermonde(src_grp.basis(), src_grp.unit_nodes) - inv_t_vdm = la.inv(vdm.T) - n_src_funcs = len(src_grp.basis()) + pt.show() - def apply_map(unit_nodes): - basis_at_unit_nodes = np.array([f(unit_nodes) for f in src_grp.basis()]) + # }}} - return src_bdry_nodes @ inv_t_vdm @ basis_at_unit_nodes - - def get_map_jacobian(unit_nodes): - dbasis_at_unit_nodes = np.empty((dim, n_src_funcs, n_tgt_unit_nodes)) - - 
for i, df in enumerate(src_grp.grad_basis()): - df_result = df(unit_nodes.reshape(dim, -1)) - for rst_axis, df_r in enumerate(df_result): - dbasis_at_unit_nodes[rst_axis, i] = ( - df_r.reshape(n_tgt_unit_nodes)) - #dbasis_at_unit_nodes = np.array( - # [df(unit_nodes) for df in src_grp.grad_basis()]) - dintp_coeffs = np.einsum( - "ij,rjk->rik", inv_t_vdm, dbasis_at_unit_nodes) - return np.einsum("ij,rjk->rik", src_bdry_nodes, dintp_coeffs) - - # {{{ test map applier and jacobian - if 0: - u = src_unit_nodes - f = apply_map(u) - for h in [1e-1, 1e-2]: - du = h*np.random.randn(*u.shape) - - f_2 = apply_map(u+du) - - jf = get_map_jacobian(u) - - f2_2 = f + np.einsum("rat,rt->at", jf, du) - - print(h, la.norm((f_2-f2_2).ravel())) - # }}} - - # {{{ visualize initial guess - - if 0: - import matplotlib.pyplot as pt - guess = apply_map(src_unit_nodes) - goals = tgt_bdry_nodes - - from meshmode.discretization.visualization import draw_curve - draw_curve(src_bdry_discr) - - pt.plot(guess[0].reshape(-1), guess[1].reshape(-1), "or") - pt.plot(goals[0].reshape(-1), goals[1].reshape(-1), "og") - pt.plot(src_bdry_nodes[0].reshape(-1), src_bdry_nodes[1].reshape(-1), "o", - color="purple") - pt.show() - - # }}} - - logger.info("make_partition_connection: begin gauss-newton") - niter = 0 - while True: - resid = apply_map(src_unit_nodes) - tgt_bdry_nodes - df = get_map_jacobian(src_unit_nodes) - df_inv_resid = np.empty_like(src_unit_nodes) - # For the 1D/2D accelerated versions, we'll use the normal - # equations and Cramer's rule. If you're looking for high-end - # numerics, look no further than meshmode. - if dim == 1: - # TODO: Needs testing. 
- # A is df.T - ata = np.einsum("ikt,jkt->ijt", df, df) - atb = np.einsum("ikt,kt->it", df, resid) - df_inv_resid = atb / ata[0, 0] - elif dim == 2: - # A is df.T - ata = np.einsum("ikt,jkt->ijt", df, df) - atb = np.einsum("ikt,kt->it", df, resid) - det = ata[0, 0]*ata[1, 1] - ata[0, 1]*ata[1, 0] - df_inv_resid = np.empty_like(src_unit_nodes) - df_inv_resid[0] = 1/det * (ata[1, 1] * atb[0] - ata[1, 0]*atb[1]) - df_inv_resid[1] = 1/det * (-ata[0, 1] * atb[0] + ata[0, 0]*atb[1]) - else: - # The boundary of a 3D mesh is 2D, so that's the - # highest-dimensional case we genuinely care about. - # - # This stinks, performance-wise, because it's not vectorized. - # But we'll only hit it for boundaries of 4+D meshes, in which - # case... good luck. :) - # TODO: Needs testing. - for t in range(n_tgt_unit_nodes): - df_inv_resid[:, t], _, _, _ = \ - la.lstsq(df[:, :, t].T, resid[:, t]) - - # {{{ visualize next guess - if 1: - import matplotlib.pyplot as pt - guess = apply_map(src_unit_nodes) - goals = tgt_bdry_nodes - - from meshmode.discretization.visualization import draw_curve - - pt.plot(guess[0], guess[1], "r^") - pt.plot(goals[0], goals[1], "xg") - pt.plot(src_bdry_nodes[0], src_bdry_nodes[1], "o", color="purple") - pt.plot(src_unit_nodes[0], src_unit_nodes[1], "ob") - pt.show() - # }}} + groups[i_tgt_grp].extend(_make_cross_face_batches(queue, + bdry_discr, bdry_discr, + i_tgt_grp, i_src_grp, + tgt_bdry_element_indices, + src_bdry_element_indices)) - src_unit_nodes = src_unit_nodes - df_inv_resid - max_resid = np.max(np.abs(resid)) - #print(resid[0, :]) - logger.debug("gauss-newton residual: %g" % max_resid) - if max_resid < tol: - logger.info("make_partition_connection: gauss-newton: done, " - "final residual: %g" % max_resid) - break - niter += 1 - if niter > 10: - raise RuntimeError("Gauss-Newton (for finding partition_connection " - "reference coordinates) did not converge") + from meshmode.discretization.connection import ( + DirectDiscretizationConnection, 
DiscretizationConnectionElementGroup) + return DirectDiscretizationConnection( + from_discr=bdry_discr, + to_discr=bdry_discr, + groups=[ + DiscretizationConnectionElementGroup(batches=batches) + for batches in groups], + is_surjective=True) - def to_dev(ary): - return cl.array.to_device(queue, ary, array_queue=None) +# }}} - from meshmode.discretization.connection import InterpolationBatch - return InterpolationBatch( - # This is not right. Need partition number information. - from_group_index=i_src_grp, - from_element_indices=to_dev(np.array([i_src_elem])), - to_element_indices=to_dev(np.array([i_tgt_elem])), - result_unit_nodes=src_unit_nodes, - to_element_face=None) +# {{{ partition_connection def make_partition_connection(vol_to_bdry_conns, part_meshes): """ @@ -595,7 +409,7 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): cl_context = tgt_vol_conn.from_discr.cl_context with cl.CommandQueue(cl_context) as queue: - bdry_discr = tgt_vol_conn.to_discr + tgt_bdry_discr = tgt_vol_conn.to_discr #tgt_mesh = bdry_discr.mesh tgt_mesh = part_meshes[i_tgt_part] ngroups = len(tgt_mesh.groups) @@ -603,35 +417,55 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): for i_tgt_grp, adj_parts in enumerate(tgt_mesh.interpart_adj_groups): for i_src_part, adj in adj_parts.items(): - src_mesh = part_meshes[i_src_part] + src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr - i_tgt_elems = adj.elements - i_tgt_faces = adj.element_faces + src_mesh = part_meshes[i_src_part] i_src_elems = adj.neighbors - i_src_faces = adj.neighbor_faces - i_src_grps = [src_mesh.find_igrp(e) for e in i_src_elems] + i_src_grps = np.array([src_mesh.find_igrp(e) + for e in i_src_elems]) for i in range(len(i_src_elems)): elem_base = src_mesh.groups[i_src_grps[i]].element_nr_base i_src_elems[i] -= elem_base - for idx, i_tgt_elem in enumerate(i_tgt_elems): - i_tgt_face = i_tgt_faces[idx] - i_src_elem = i_src_elems[idx] - i_src_face = i_src_faces[idx] - i_src_grp = 
i_src_grps[idx] + for i_src_grp in range(ngroups): + + src_grp_el_lookup = _make_el_lookup_table(queue, + vol_to_bdry_conns[i_src_part], i_src_grp) + + for i_tgt_face in adj.element_faces: + + index_flags = np.logical_and((i_src_grps == i_src_grp), + (adj.element_faces == i_tgt_face)) + + vbc_tgt_grp_face_batch = _find_ibatch_for_face( + tgt_vol_conn.groups[i_tgt_grp].batches, i_tgt_face) + + tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ + to_element_indices.get(queue=queue) + + i_src_elems = adj.neighbors[index_flags] + i_src_faces = adj.neighbor_faces[index_flags] + src_bdry_element_indices =\ + src_grp_el_lookup[i_src_elems, i_src_faces] + src_bdry_element_indices = i_src_elems + + print(index_flags) + print(tgt_bdry_element_indices) + print(src_bdry_element_indices) - part_batches[i_tgt_grp].append( - _make_cross_partition_batch(queue, - vol_to_bdry_conns, - i_tgt_part, i_tgt_grp, i_tgt_elem, i_tgt_face, - i_src_part, i_src_grp, i_src_elem, i_src_face)) + part_batches[i_tgt_grp].extend( + _make_cross_face_batches(queue, + tgt_bdry_discr, src_bdry_discr, + i_tgt_grp, i_src_grp, + tgt_bdry_element_indices, + src_bdry_element_indices)) # Make one Discr connection for each partition. disc_conns.append(DirectDiscretizationConnection( # Is this ok? 
- from_discr=bdry_discr, - to_discr=bdry_discr, + from_discr=src_bdry_discr, + to_discr=tgt_bdry_discr, groups=[ DiscretizationConnectionElementGroup(batches=batches) for batches in part_batches], diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index ce77e668..4d174d49 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -447,10 +447,10 @@ class InterPartitionAdj(): """ def __init__(self): - self.elements = [] - self.element_faces = [] - self.neighbors = [] - self.neighbor_faces = [] + self.elements = np.array([], dtype=int) + self.element_faces = np.array([], dtype=int) + self.neighbors = np.array([], dtype=int) + self.neighbor_faces = np.array([], dtype=int) def get_neighbor(self, elem, face): """ diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index d80fb2ed..9ffdb0b1 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -186,12 +186,13 @@ def partition_mesh(mesh, part_per_element, part_num): if n_part_num not in adj_grps[igrp]: adj_grps[igrp][n_part_num] = InterPartitionAdj() - # I cannot compute the group because the other + # I cannot compute the neighbor group because the other # partitions may not have been built yet. 
- adj_grps[igrp][n_part_num].elements.append(elem) - adj_grps[igrp][n_part_num].element_faces.append(face) - adj_grps[igrp][n_part_num].neighbors.append(n_elem) - adj_grps[igrp][n_part_num].neighbor_faces.append(n_face) + adj = adj_grps[igrp][n_part_num] + adj.elements = np.append(adj.elements, elem) + adj.element_faces = np.append(adj.element_faces, face) + adj.neighbors = np.append(adj.neighbors, n_elem) + adj.neighbor_faces = np.append(adj.neighbor_faces, n_face) connected_mesh = part_mesh.copy() connected_mesh.interpart_adj_groups = adj_grps diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 1c6c2c5b..922d996b 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -53,15 +53,14 @@ logger = logging.getLogger(__name__) def test_partition_interpolation(ctx_getter): cl_ctx = ctx_getter() - order = 4 + order = 2 group_factory = PolynomialWarpAndBlendGroupFactory(order) - n = 3 + #group_factory = InterpolatoryQuadratureSimplexGroupFactory(order) + n = 5 dim = 2 - num_parts = 3 - from meshmode.mesh.generation import generate_regular_rect_mesh - mesh = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) - #from meshmode.mesh.generation import generate_warped_rect_mesh - #mesh = generate_warped_rect_mesh(dim, order=order, n=n) + num_parts = 2 + from meshmode.mesh.generation import generate_warped_rect_mesh + mesh = generate_warped_rect_mesh(dim, order=order, n=n) #mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) #from meshmode.mesh.processing import merge_disjoint_meshes @@ -98,15 +97,15 @@ def test_partition_interpolation(ctx_getter): # {{{ partition_mesh -def test_partition_mesh(): +@pytest.mark.parametrize("dim", [2, 3]) +@pytest.mark.parametrize("num_parts", [1, 2, 7]) +def test_partition_mesh(num_parts, dim): n = 5 - num_parts = 7 order = 4 - dim = 3 from meshmode.mesh.generation import (generate_regular_rect_mesh, generate_warped_rect_mesh) - mesh1 = generate_regular_rect_mesh(a=(0, 0, 0), b=(1, 1, 1), n=(n, n, n)) - 
mesh2 = generate_regular_rect_mesh(a=(2, 2, 2), b=(3, 3, 3), n=(n, n, n)) + mesh1 = generate_regular_rect_mesh(a=(0,) * dim, b=(1,) * dim, n=(n,) * dim) + mesh2 = generate_regular_rect_mesh(a=(2,) * dim, b=(3,) * dim, n=(n,) * dim) mesh3 = generate_warped_rect_mesh(dim, order=order, n=n) from meshmode.mesh.processing import merge_disjoint_meshes -- GitLab From 21647b0cebfb5a7598fd3a4a6fb5ea16c972f4b9 Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 7 Apr 2017 00:04:38 -0500 Subject: [PATCH 166/266] Partition Interpolation works for dim=2 --- .../connection/opposite_face.py | 114 ++++++++++++------ test/test_meshmode.py | 41 ++++--- 2 files changed, 98 insertions(+), 57 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 5b5c03dd..797b36ac 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -47,7 +47,7 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, # FIXME: This should view-then-transfer # (but PyOpenCL doesn't do non-contiguous transfers for now). - src_bdry_nodes = (tgt_bdry_discr.groups[i_src_grp].view(tgt_bdry_discr.nodes(). + src_bdry_nodes = (src_bdry_discr.groups[i_src_grp].view(tgt_bdry_discr.nodes(). 
get(queue=queue))[:, src_bdry_element_indices]) tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps @@ -57,6 +57,7 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, dim = src_grp.dim ambient_dim, nelements, ntgt_unit_nodes = tgt_bdry_nodes.shape + #assert tgt_bdry_nodes.shape == src_bdry_nodes.shape # {{{ invert face map (using Gauss-Newton) @@ -132,9 +133,9 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, guess = apply_map(src_unit_nodes) goals = tgt_bdry_nodes - from meshmode.discretization.visualization import draw_curve - draw_curve(tgt_bdry_discr) - draw_curve(src_bdry_discr) + #from meshmode.discretization.visualization import draw_curve + #draw_curve(tgt_bdry_discr) + #draw_curve(src_bdry_discr) pt.plot(guess[0].reshape(-1), guess[1].reshape(-1), "or") pt.plot(goals[0].reshape(-1), goals[1].reshape(-1), "og") @@ -188,6 +189,19 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, src_unit_nodes = src_unit_nodes - df_inv_resid + # {{{ visualize next guess + + if 0: + import matplotlib.pyplot as pt + guess = apply_map(src_unit_nodes) + goals = tgt_bdry_nodes + + pt.plot(guess[0].reshape(-1), guess[1].reshape(-1), "rx") + pt.plot(goals[0].reshape(-1), goals[1].reshape(-1), "go") + pt.show() + + # }}} + max_resid = np.max(np.abs(resid)) logger.debug("gauss-newton residual: %g" % max_resid) @@ -386,7 +400,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def make_partition_connection(vol_to_bdry_conns, part_meshes): +def make_partition_connection(bdry_conns, part_meshes): """ Given a list of boundary restriction connections *volume_to_bdry_conn*, return a :class:`DirectDiscretizationConnection` that performs data @@ -403,60 +417,80 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - for i_tgt_part, tgt_vol_conn in 
enumerate(vol_to_bdry_conns): - - # Is this ok in a loop? - cl_context = tgt_vol_conn.from_discr.cl_context - with cl.CommandQueue(cl_context) as queue: - - tgt_bdry_discr = tgt_vol_conn.to_discr - #tgt_mesh = bdry_discr.mesh - tgt_mesh = part_meshes[i_tgt_part] - ngroups = len(tgt_mesh.groups) - part_batches = [[] for _ in range(ngroups)] - for i_tgt_grp, adj_parts in enumerate(tgt_mesh.interpart_adj_groups): - for i_src_part, adj in adj_parts.items(): - - src_bdry_discr = vol_to_bdry_conns[i_src_part].to_discr - - src_mesh = part_meshes[i_src_part] - + nparts = len(bdry_conns) + for i_tgt_part in range(nparts): + for i_src_part in range(nparts): + + tgt_conn = bdry_conns[i_tgt_part][i_src_part] + src_conn = bdry_conns[i_src_part][i_tgt_part] + tgt_vol = tgt_conn.from_discr + src_vol = src_conn.from_discr + tgt_bdry = tgt_conn.to_discr + src_bdry = src_conn.to_discr + tgt_mesh = tgt_vol.mesh + src_mesh = src_vol.mesh + #tgt_mesh = part_meshes[i_tgt_part] + #src_mesh = part_meshes[i_src_part] + + # Is this ok in a loop? 
+ cl_context = tgt_vol.cl_context + with cl.CommandQueue(cl_context) as queue: + + adj_grps = part_meshes[i_tgt_part].interpart_adj_groups + + ntgt_groups = len(tgt_mesh.groups) + nsrc_groups = len(src_mesh.groups) + part_batches = ntgt_groups * [[]] + for i_tgt_grp, adj_parts in enumerate(adj_grps): + if i_src_part not in adj_parts: + continue + + adj = adj_parts[i_src_part] + + i_tgt_faces = adj.element_faces i_src_elems = adj.neighbors + i_src_faces = adj.neighbor_faces i_src_grps = np.array([src_mesh.find_igrp(e) for e in i_src_elems]) for i in range(len(i_src_elems)): + #elem_base = part_meshes[i_src_part].groups[i_src_grps[i]].element_nr_base elem_base = src_mesh.groups[i_src_grps[i]].element_nr_base i_src_elems[i] -= elem_base - for i_src_grp in range(ngroups): + for i_src_grp in range(nsrc_groups): - src_grp_el_lookup = _make_el_lookup_table(queue, - vol_to_bdry_conns[i_src_part], i_src_grp) + src_el_lookup = _make_el_lookup_table(queue, + src_conn, i_src_grp) - for i_tgt_face in adj.element_faces: + for i_tgt_face in i_tgt_faces: - index_flags = np.logical_and((i_src_grps == i_src_grp), - (adj.element_faces == i_tgt_face)) + index_flags = np.logical_and(i_src_grps == i_src_grp, + i_tgt_faces == i_tgt_face) + + if True not in index_flags: + continue vbc_tgt_grp_face_batch = _find_ibatch_for_face( - tgt_vol_conn.groups[i_tgt_grp].batches, i_tgt_face) + tgt_conn.groups[i_tgt_grp].batches, i_tgt_face) tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ to_element_indices.get(queue=queue) - i_src_elems = adj.neighbors[index_flags] - i_src_faces = adj.neighbor_faces[index_flags] + src_bdry_element_indices = src_el_lookup[ + i_src_elems[index_flags], + i_src_faces[index_flags]] + + # FIXME: I honestly have no idea why this helps. 
src_bdry_element_indices =\ - src_grp_el_lookup[i_src_elems, i_src_faces] - src_bdry_element_indices = i_src_elems + np.sort(src_bdry_element_indices) - print(index_flags) - print(tgt_bdry_element_indices) - print(src_bdry_element_indices) + print("tgt", i_tgt_part, tgt_bdry_element_indices) + print("src", i_src_part, src_bdry_element_indices) + print("-------------------") part_batches[i_tgt_grp].extend( _make_cross_face_batches(queue, - tgt_bdry_discr, src_bdry_discr, + tgt_bdry, src_bdry, i_tgt_grp, i_src_grp, tgt_bdry_element_indices, src_bdry_element_indices)) @@ -464,8 +498,8 @@ def make_partition_connection(vol_to_bdry_conns, part_meshes): # Make one Discr connection for each partition. disc_conns.append(DirectDiscretizationConnection( # Is this ok? - from_discr=src_bdry_discr, - to_discr=tgt_bdry_discr, + from_discr=src_bdry, + to_discr=tgt_bdry, groups=[ DiscretizationConnectionElementGroup(batches=batches) for batches in part_batches], diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 922d996b..a46d6826 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -51,20 +51,24 @@ logger = logging.getLogger(__name__) # {{{ partition_interpolation -def test_partition_interpolation(ctx_getter): +@pytest.mark.parametrize("group_factory", [ + PolynomialWarpAndBlendGroupFactory, + InterpolatoryQuadratureSimplexGroupFactory + ]) +@pytest.mark.parametrize(("num_parts"), [2, 4]) +#@pytest.mark.parametrize("dim", [2, 3, 4]) +def test_partition_interpolation(ctx_getter, group_factory, num_parts): cl_ctx = ctx_getter() - order = 2 - group_factory = PolynomialWarpAndBlendGroupFactory(order) - #group_factory = InterpolatoryQuadratureSimplexGroupFactory(order) - n = 5 + order = 4 dim = 2 - num_parts = 2 + n = 5 + from meshmode.mesh.generation import generate_warped_rect_mesh - mesh = generate_warped_rect_mesh(dim, order=order, n=n) - #mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) + mesh1 = generate_warped_rect_mesh(dim, order=order, n=n) + 
mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) - #from meshmode.mesh.processing import merge_disjoint_meshes - #mesh = merge_disjoint_meshes([mesh1, mesh2]) + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes([mesh1, mesh2]) from pymetis import part_graph (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) @@ -75,22 +79,25 @@ def test_partition_interpolation(ctx_getter): partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] from meshmode.discretization import Discretization - vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory) + vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) for i in range(num_parts)] from meshmode.discretization.connection import make_face_restriction - bdry_connections = [make_face_restriction(vol_discrs[i], group_factory, - FRESTR_INTERIOR_FACES) for i in range(num_parts)] + from meshmode.mesh import BTAG_PARTITION + bdry_conns = [[make_face_restriction(vol_discrs[tgt], group_factory(order), + BTAG_PARTITION(src)) + for src in range(num_parts)] + for tgt in range(num_parts)] # Hack, I probably shouldn't pass part_meshes directly. This is probably # temporary. from meshmode.discretization.connection import make_partition_connection - connections = make_partition_connection(bdry_connections, part_meshes) + connections = make_partition_connection(bdry_conns, part_meshes) # We can't use check_connection because I don't think it works with partitions. 
- #from meshmode.discretization.connection import check_connection - #for conn in connections: - # check_connection(conn) + from meshmode.discretization.connection import check_connection + for conn in connections: + check_connection(conn) # }}} -- GitLab From 828157cf9780959aa7b20f8fdd1cbba84f40fe44 Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 7 Apr 2017 23:10:25 -0500 Subject: [PATCH 167/266] Almost finished make_partition_connection --- .../connection/opposite_face.py | 152 ++++++++---------- test/test_meshmode.py | 71 ++++---- 2 files changed, 110 insertions(+), 113 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 797b36ac..fbb0643d 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -57,6 +57,7 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, dim = src_grp.dim ambient_dim, nelements, ntgt_unit_nodes = tgt_bdry_nodes.shape + # FIXME: Not sure if this is a valid assertion. #assert tgt_bdry_nodes.shape == src_bdry_nodes.shape # {{{ invert face map (using Gauss-Newton) @@ -400,112 +401,99 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def make_partition_connection(bdry_conns, part_meshes): +def make_partition_connection(tgt_conn, src_conn, i_src_part): """ - Given a list of boundary restriction connections *volume_to_bdry_conn*, + Given a two boundary restriction connections *tgt_conn* and *src_conn*, return a :class:`DirectDiscretizationConnection` that performs data exchange across adjacent faces of different partitions. - :arg vol_to_bdry_conns: A list of *volume_to_bdry_conn* corresponding to - a partition of a parent mesh. + :arg tgt_conn: A :class:`Discretization` for the target partition. + :arg src_conn: A :class:`Discretization` for the source partition. + :arg i_src_part: The partition number corresponding to *src_conn*. 
- :returns: A list of :class:`DirectDiscretizationConnection` corresponding to - each partition. + :returns: A :class:`DirectDiscretizationConnection` that performs data + exchange across faces in different partitions. + + .. versionadded:: 2017.1 + + .. warning:: Interface is not final. Doesn't even work yet...........:( """ - disc_conns = [] from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - nparts = len(bdry_conns) - for i_tgt_part in range(nparts): - for i_src_part in range(nparts): - - tgt_conn = bdry_conns[i_tgt_part][i_src_part] - src_conn = bdry_conns[i_src_part][i_tgt_part] - tgt_vol = tgt_conn.from_discr - src_vol = src_conn.from_discr - tgt_bdry = tgt_conn.to_discr - src_bdry = src_conn.to_discr - tgt_mesh = tgt_vol.mesh - src_mesh = src_vol.mesh - #tgt_mesh = part_meshes[i_tgt_part] - #src_mesh = part_meshes[i_src_part] - - # Is this ok in a loop? - cl_context = tgt_vol.cl_context - with cl.CommandQueue(cl_context) as queue: - - adj_grps = part_meshes[i_tgt_part].interpart_adj_groups - - ntgt_groups = len(tgt_mesh.groups) - nsrc_groups = len(src_mesh.groups) - part_batches = ntgt_groups * [[]] - for i_tgt_grp, adj_parts in enumerate(adj_grps): - if i_src_part not in adj_parts: - continue + tgt_vol = tgt_conn.from_discr + src_vol = src_conn.from_discr + tgt_bdry = tgt_conn.to_discr + src_bdry = src_conn.to_discr + tgt_mesh = tgt_vol.mesh + src_mesh = src_vol.mesh + + adj_grps = tgt_mesh.interpart_adj_groups - adj = adj_parts[i_src_part] + ntgt_groups = len(tgt_mesh.groups) + nsrc_groups = len(src_mesh.groups) + part_batches = ntgt_groups * [[]] - i_tgt_faces = adj.element_faces - i_src_elems = adj.neighbors - i_src_faces = adj.neighbor_faces - i_src_grps = np.array([src_mesh.find_igrp(e) - for e in i_src_elems]) - for i in range(len(i_src_elems)): - #elem_base = part_meshes[i_src_part].groups[i_src_grps[i]].element_nr_base - elem_base = src_mesh.groups[i_src_grps[i]].element_nr_base - 
i_src_elems[i] -= elem_base + with cl.CommandQueue(tgt_vol.cl_context) as queue: - for i_src_grp in range(nsrc_groups): + for i_tgt_grp, adj_parts in enumerate(adj_grps): + if i_src_part not in adj_parts: + # Skip because i_tgt_grp is not connected to i_src_part. + continue - src_el_lookup = _make_el_lookup_table(queue, - src_conn, i_src_grp) + adj = adj_parts[i_src_part] - for i_tgt_face in i_tgt_faces: + i_tgt_faces = adj.element_faces + i_src_elems = adj.neighbors + i_src_faces = adj.neighbor_faces + i_src_grps = np.array([src_mesh.find_igrp(e) + for e in i_src_elems]) + for i in range(len(i_src_elems)): + elem_base = src_mesh.groups[i_src_grps[i]].element_nr_base + i_src_elems[i] -= elem_base - index_flags = np.logical_and(i_src_grps == i_src_grp, - i_tgt_faces == i_tgt_face) + for i_src_grp in range(nsrc_groups): - if True not in index_flags: - continue + src_el_lookup = _make_el_lookup_table(queue, src_conn, i_src_grp) - vbc_tgt_grp_face_batch = _find_ibatch_for_face( - tgt_conn.groups[i_tgt_grp].batches, i_tgt_face) + for i_tgt_face in i_tgt_faces: - tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ - to_element_indices.get(queue=queue) + index_flags = np.logical_and(i_src_grps == i_src_grp, + i_tgt_faces == i_tgt_face) + + if True not in index_flags: + continue + + vbc_tgt_grp_face_batch = _find_ibatch_for_face( + tgt_conn.groups[i_tgt_grp].batches, i_tgt_face) - src_bdry_element_indices = src_el_lookup[ + tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ + to_element_indices.get(queue=queue) + + src_bdry_element_indices = src_el_lookup[ i_src_elems[index_flags], i_src_faces[index_flags]] - # FIXME: I honestly have no idea why this helps. 
- src_bdry_element_indices =\ - np.sort(src_bdry_element_indices) - - print("tgt", i_tgt_part, tgt_bdry_element_indices) - print("src", i_src_part, src_bdry_element_indices) - print("-------------------") - - part_batches[i_tgt_grp].extend( - _make_cross_face_batches(queue, - tgt_bdry, src_bdry, - i_tgt_grp, i_src_grp, - tgt_bdry_element_indices, - src_bdry_element_indices)) - - # Make one Discr connection for each partition. - disc_conns.append(DirectDiscretizationConnection( - # Is this ok? - from_discr=src_bdry, - to_discr=tgt_bdry, - groups=[ - DiscretizationConnectionElementGroup(batches=batches) - for batches in part_batches], - is_surjective=True)) + # FIXME: I honestly have no idea why this helps. + src_bdry_element_indices = np.sort(src_bdry_element_indices) + + print("Attempting to connect elements") + print(tgt_bdry_element_indices) + print(src_bdry_element_indices) + + part_batches[i_tgt_grp].extend(_make_cross_face_batches(queue, + tgt_bdry, src_bdry, + i_tgt_grp, i_src_grp, + tgt_bdry_element_indices, + src_bdry_element_indices)) - return disc_conns + return DirectDiscretizationConnection( + from_discr=src_bdry, + to_discr=tgt_bdry, + groups=[DiscretizationConnectionElementGroup(batches=batches) + for batches in part_batches], + is_surjective=True) # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index a46d6826..db3befff 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -55,20 +55,26 @@ logger = logging.getLogger(__name__) PolynomialWarpAndBlendGroupFactory, InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize(("num_parts"), [2, 4]) -#@pytest.mark.parametrize("dim", [2, 3, 4]) -def test_partition_interpolation(ctx_getter, group_factory, num_parts): +@pytest.mark.parametrize(("num_parts"), [2, 3, 7]) +# FIXME: Mostly fails for dim = 3. +@pytest.mark.parametrize("dim", [2]) +# FIXME: Mostly fails for multiple groups. 
+@pytest.mark.parametrize("num_meshes", [1]) +def test_partition_interpolation(ctx_getter, group_factory, dim, + num_parts, num_meshes): cl_ctx = ctx_getter() order = 4 - dim = 2 n = 5 from meshmode.mesh.generation import generate_warped_rect_mesh - mesh1 = generate_warped_rect_mesh(dim, order=order, n=n) - mesh2 = generate_warped_rect_mesh(dim, order=order, n=n) + meshes = [generate_warped_rect_mesh(dim, order=order, n=n) + for _ in range(num_meshes)] - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes([mesh1, mesh2]) + if num_meshes > 1: + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + else: + mesh = meshes[0] from pymetis import part_graph (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) @@ -82,22 +88,27 @@ def test_partition_interpolation(ctx_getter, group_factory, num_parts): vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) for i in range(num_parts)] - from meshmode.discretization.connection import make_face_restriction from meshmode.mesh import BTAG_PARTITION - bdry_conns = [[make_face_restriction(vol_discrs[tgt], group_factory(order), - BTAG_PARTITION(src)) - for src in range(num_parts)] - for tgt in range(num_parts)] + from meshmode.discretization.connection import (make_face_restriction, + make_partition_connection, + check_connection) + + for i_tgt_part in range(num_parts): + for i_src_part in range(num_parts): + if i_tgt_part == i_src_part: + continue + + tgt_conn = make_face_restriction(vol_discrs[i_tgt_part], + group_factory(order), + BTAG_PARTITION(i_src_part)) - # Hack, I probably shouldn't pass part_meshes directly. This is probably - # temporary. 
- from meshmode.discretization.connection import make_partition_connection - connections = make_partition_connection(bdry_conns, part_meshes) + src_conn = make_face_restriction(vol_discrs[i_src_part], + group_factory(order), + BTAG_PARTITION(i_tgt_part)) - # We can't use check_connection because I don't think it works with partitions. - from meshmode.discretization.connection import check_connection - for conn in connections: - check_connection(conn) + connection = make_partition_connection(tgt_conn, src_conn, i_src_part) + + check_connection(connection) # }}} @@ -105,18 +116,16 @@ def test_partition_interpolation(ctx_getter, group_factory, num_parts): # {{{ partition_mesh @pytest.mark.parametrize("dim", [2, 3]) -@pytest.mark.parametrize("num_parts", [1, 2, 7]) -def test_partition_mesh(num_parts, dim): - n = 5 - order = 4 - from meshmode.mesh.generation import (generate_regular_rect_mesh, - generate_warped_rect_mesh) - mesh1 = generate_regular_rect_mesh(a=(0,) * dim, b=(1,) * dim, n=(n,) * dim) - mesh2 = generate_regular_rect_mesh(a=(2,) * dim, b=(3,) * dim, n=(n,) * dim) - mesh3 = generate_warped_rect_mesh(dim, order=order, n=n) +@pytest.mark.parametrize("num_parts", [4, 5, 7]) +@pytest.mark.parametrize("num_meshes", [2, 3]) +def test_partition_mesh(num_parts, num_meshes, dim): + n = (5,) * dim + from meshmode.mesh.generation import generate_regular_rect_mesh + meshes = [generate_regular_rect_mesh(a=(0 + i,) * dim, b=(1 + i,) * dim, n=n) + for i in range(num_meshes)] from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes([mesh1, mesh2, mesh3]) + mesh = merge_disjoint_meshes(meshes) from pymetis import part_graph (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) -- GitLab From 71b14103eeb10463d7647d24c768ddb0a45b1ce0 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 10 Apr 2017 16:48:36 -0500 Subject: [PATCH 168/266] More tests in test_partition_interpolation --- test/test_meshmode.py | 21 +++++++++++++++++---- 1 file 
changed, 17 insertions(+), 4 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index db3befff..8d13cdd7 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -59,18 +59,22 @@ logger = logging.getLogger(__name__) # FIXME: Mostly fails for dim = 3. @pytest.mark.parametrize("dim", [2]) # FIXME: Mostly fails for multiple groups. -@pytest.mark.parametrize("num_meshes", [1]) +@pytest.mark.parametrize("num_groups", [1]) def test_partition_interpolation(ctx_getter, group_factory, dim, - num_parts, num_meshes): + num_parts, num_groups): cl_ctx = ctx_getter() + queue = cl.CommandQueue(cl_ctx) order = 4 n = 5 + def f(x): + return 0.1*cl.clmath.sin(30*x) + from meshmode.mesh.generation import generate_warped_rect_mesh meshes = [generate_warped_rect_mesh(dim, order=order, n=n) - for _ in range(num_meshes)] + for _ in range(num_groups)] - if num_meshes > 1: + if num_groups > 1: from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes(meshes) else: @@ -110,6 +114,15 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, check_connection(connection) + bdry_x = src_conn.to_discr.nodes()[0].with_queue(queue) + if bdry_x.size != 0: + bdry_f = f(bdry_x) + + bdry_f_2 = connection(queue, bdry_f) + + err = la.norm((bdry_f-bdry_f_2).get(), np.inf) + print(err) + assert err < 1e-13 # }}} -- GitLab From 61773895ad8b33240416e7010f2a157e3d1a0533 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 11 Apr 2017 19:06:47 -0500 Subject: [PATCH 169/266] More testing --- .../connection/opposite_face.py | 10 +++--- test/test_meshmode.py | 31 +++++++++++++------ 2 files changed, 26 insertions(+), 15 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index fbb0643d..90f88c88 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -416,7 +416,7 @@ def 
make_partition_connection(tgt_conn, src_conn, i_src_part): .. versionadded:: 2017.1 - .. warning:: Interface is not final. Doesn't even work yet...........:( + .. warning:: Interface is not final. Doesn't even work yet...:( """ from meshmode.discretization.connection import ( @@ -478,9 +478,9 @@ def make_partition_connection(tgt_conn, src_conn, i_src_part): # FIXME: I honestly have no idea why this helps. src_bdry_element_indices = np.sort(src_bdry_element_indices) - print("Attempting to connect elements") - print(tgt_bdry_element_indices) - print(src_bdry_element_indices) + #print("Attempting to connect elements") + #print(tgt_bdry_element_indices) + #print(src_bdry_element_indices) part_batches[i_tgt_grp].extend(_make_cross_face_batches(queue, tgt_bdry, src_bdry, @@ -489,7 +489,7 @@ def make_partition_connection(tgt_conn, src_conn, i_src_part): src_bdry_element_indices)) return DirectDiscretizationConnection( - from_discr=src_bdry, + from_discr=src_bdry, # Is this right? to_discr=tgt_bdry, groups=[DiscretizationConnectionElementGroup(batches=batches) for batches in part_batches], diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 8d13cdd7..8fac6461 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -65,13 +65,15 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) order = 4 - n = 5 + + from pytools.convergence import EOCRecorder + eoc_rec = EOCRecorder() def f(x): return 0.1*cl.clmath.sin(30*x) from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(dim, order=order, n=n) + meshes = [generate_warped_rect_mesh(dim, order=order, n=5) for _ in range(num_groups)] if num_groups > 1: @@ -102,27 +104,36 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, if i_tgt_part == i_src_part: continue + # Connections within i_tgt_part to i_src_part tgt_conn = make_face_restriction(vol_discrs[i_tgt_part], - 
group_factory(order), - BTAG_PARTITION(i_src_part)) + group_factory(order), + BTAG_PARTITION(i_src_part)) + # Connections within i_src_part to i_tgt_part src_conn = make_face_restriction(vol_discrs[i_src_part], - group_factory(order), - BTAG_PARTITION(i_tgt_part)) + group_factory(order), + BTAG_PARTITION(i_tgt_part)) + # Connect tgt_conn to src_conn connection = make_partition_connection(tgt_conn, src_conn, i_src_part) check_connection(connection) - bdry_x = src_conn.to_discr.nodes()[0].with_queue(queue) + # Should this be src_conn? + bdry_x = tgt_conn.to_discr.nodes()[0].with_queue(queue) if bdry_x.size != 0: bdry_f = f(bdry_x) bdry_f_2 = connection(queue, bdry_f) err = la.norm((bdry_f-bdry_f_2).get(), np.inf) - print(err) - assert err < 1e-13 + abscissa = i_tgt_part + num_parts * i_src_part + eoc_rec.add_data_point(abscissa, err) + + print(eoc_rec) + assert (eoc_rec.order_estimate() >= order-0.5 + or eoc_rec.max_error() < 1e-13) + # }}} @@ -130,7 +141,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, @pytest.mark.parametrize("dim", [2, 3]) @pytest.mark.parametrize("num_parts", [4, 5, 7]) -@pytest.mark.parametrize("num_meshes", [2, 3]) +@pytest.mark.parametrize("num_meshes", [2, 7]) def test_partition_mesh(num_parts, num_meshes, dim): n = (5,) * dim from meshmode.mesh.generation import generate_regular_rect_mesh -- GitLab From d055f575177695c32ad9946d7df768fd4b9e26d8 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 12 Apr 2017 15:00:43 -0500 Subject: [PATCH 170/266] Working --- .../connection/opposite_face.py | 22 +++++++++------- test/test_meshmode.py | 25 ++++++++++--------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 90f88c88..5dd5c335 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -58,7 +58,7 @@ def _make_cross_face_batches(queue, 
tgt_bdry_discr, src_bdry_discr, dim = src_grp.dim ambient_dim, nelements, ntgt_unit_nodes = tgt_bdry_nodes.shape # FIXME: Not sure if this is a valid assertion. - #assert tgt_bdry_nodes.shape == src_bdry_nodes.shape + assert tgt_bdry_nodes.shape == src_bdry_nodes.shape # {{{ invert face map (using Gauss-Newton) @@ -130,6 +130,9 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, # {{{ visualize initial guess if 0: + # FIXME: When dim=3 it looks like sometimes src_bdry_nodes + # have the wrong coordinate system. They need to + # be reflected about some plane. import matplotlib.pyplot as pt guess = apply_map(src_unit_nodes) goals = tgt_bdry_nodes @@ -401,7 +404,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def make_partition_connection(tgt_conn, src_conn, i_src_part): +def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): """ Given a two boundary restriction connections *tgt_conn* and *src_conn*, return a :class:`DirectDiscretizationConnection` that performs data @@ -416,16 +419,16 @@ def make_partition_connection(tgt_conn, src_conn, i_src_part): .. versionadded:: 2017.1 - .. warning:: Interface is not final. Doesn't even work yet...:( + .. warning:: Interface is not final. 
It doesn't even work yet...:( """ from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - tgt_vol = tgt_conn.from_discr - src_vol = src_conn.from_discr - tgt_bdry = tgt_conn.to_discr - src_bdry = src_conn.to_discr + tgt_vol = tgt_to_src_conn.from_discr + src_vol = src_to_tgt_conn.from_discr + tgt_bdry = tgt_to_src_conn.to_discr + src_bdry = src_to_tgt_conn.to_discr tgt_mesh = tgt_vol.mesh src_mesh = src_vol.mesh @@ -455,7 +458,8 @@ def make_partition_connection(tgt_conn, src_conn, i_src_part): for i_src_grp in range(nsrc_groups): - src_el_lookup = _make_el_lookup_table(queue, src_conn, i_src_grp) + src_el_lookup =\ + _make_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) for i_tgt_face in i_tgt_faces: @@ -466,7 +470,7 @@ def make_partition_connection(tgt_conn, src_conn, i_src_part): continue vbc_tgt_grp_face_batch = _find_ibatch_for_face( - tgt_conn.groups[i_tgt_grp].batches, i_tgt_face) + tgt_to_src_conn.groups[i_tgt_grp].batches, i_tgt_face) tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ to_element_indices.get(queue=queue) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 8fac6461..205928ce 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -104,23 +104,24 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, if i_tgt_part == i_src_part: continue - # Connections within i_tgt_part to i_src_part - tgt_conn = make_face_restriction(vol_discrs[i_tgt_part], - group_factory(order), - BTAG_PARTITION(i_src_part)) + # Connections within tgt_mesh to src_mesh + tgt_to_src_conn = make_face_restriction(vol_discrs[i_tgt_part], + group_factory(order), + BTAG_PARTITION(i_src_part)) - # Connections within i_src_part to i_tgt_part - src_conn = make_face_restriction(vol_discrs[i_src_part], - group_factory(order), - BTAG_PARTITION(i_tgt_part)) + # Connections within src_mesh to tgt_mesh + src_to_tgt_conn = make_face_restriction(vol_discrs[i_src_part], + 
group_factory(order), + BTAG_PARTITION(i_tgt_part)) - # Connect tgt_conn to src_conn - connection = make_partition_connection(tgt_conn, src_conn, i_src_part) + # Connect tgt_mesh to src_mesh + connection = make_partition_connection(tgt_to_src_conn, + src_to_tgt_conn, i_src_part) check_connection(connection) - # Should this be src_conn? - bdry_x = tgt_conn.to_discr.nodes()[0].with_queue(queue) + # Should this be src_to_tgt_conn? + bdry_x = tgt_to_src_conn.to_discr.nodes()[0].with_queue(queue) if bdry_x.size != 0: bdry_f = f(bdry_x) -- GitLab From 667c9d40379abebb8c402e42a38254108bf9f70a Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 14 Apr 2017 13:13:50 -0500 Subject: [PATCH 171/266] partition interpolation works with dim=3 --- .../connection/opposite_face.py | 32 ++--- test/test_meshmode.py | 112 +++++++++--------- 2 files changed, 70 insertions(+), 74 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 5dd5c335..c8bf2a93 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -47,7 +47,7 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, # FIXME: This should view-then-transfer # (but PyOpenCL doesn't do non-contiguous transfers for now). - src_bdry_nodes = (src_bdry_discr.groups[i_src_grp].view(tgt_bdry_discr.nodes(). + src_bdry_nodes = (src_bdry_discr.groups[i_src_grp].view(src_bdry_discr.nodes(). get(queue=queue))[:, src_bdry_element_indices]) tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps @@ -130,16 +130,16 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, # {{{ visualize initial guess if 0: - # FIXME: When dim=3 it looks like sometimes src_bdry_nodes - # have the wrong coordinate system. They need to - # be reflected about some plane. 
import matplotlib.pyplot as pt guess = apply_map(src_unit_nodes) goals = tgt_bdry_nodes - #from meshmode.discretization.visualization import draw_curve - #draw_curve(tgt_bdry_discr) - #draw_curve(src_bdry_discr) + from meshmode.discretization.visualization import draw_curve + pt.figure(0) + draw_curve(tgt_bdry_discr) + pt.figure(1) + draw_curve(src_bdry_discr) + pt.figure(2) pt.plot(guess[0].reshape(-1), guess[1].reshape(-1), "or") pt.plot(goals[0].reshape(-1), goals[1].reshape(-1), "og") @@ -435,7 +435,6 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): adj_grps = tgt_mesh.interpart_adj_groups ntgt_groups = len(tgt_mesh.groups) - nsrc_groups = len(src_mesh.groups) part_batches = ntgt_groups * [[]] with cl.CommandQueue(tgt_vol.cl_context) as queue: @@ -450,13 +449,13 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): i_tgt_faces = adj.element_faces i_src_elems = adj.neighbors i_src_faces = adj.neighbor_faces - i_src_grps = np.array([src_mesh.find_igrp(e) - for e in i_src_elems]) - for i in range(len(i_src_elems)): - elem_base = src_mesh.groups[i_src_grps[i]].element_nr_base + i_src_grps = np.array([src_mesh.find_igrp(e) for e in i_src_elems]) + + for i, i_grp in enumerate(i_src_grps): + elem_base = src_mesh.groups[i_grp].element_nr_base i_src_elems[i] -= elem_base - for i_src_grp in range(nsrc_groups): + for i_src_grp in np.unique(i_src_grps): src_el_lookup =\ _make_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) @@ -479,13 +478,6 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): i_src_elems[index_flags], i_src_faces[index_flags]] - # FIXME: I honestly have no idea why this helps. 
- src_bdry_element_indices = np.sort(src_bdry_element_indices) - - #print("Attempting to connect elements") - #print(tgt_bdry_element_indices) - #print(src_bdry_element_indices) - part_batches[i_tgt_grp].extend(_make_cross_face_batches(queue, tgt_bdry, src_bdry, i_tgt_grp, i_src_grp, diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 205928ce..5811565e 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -55,16 +55,18 @@ logger = logging.getLogger(__name__) PolynomialWarpAndBlendGroupFactory, InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize(("num_parts"), [2, 3, 7]) -# FIXME: Mostly fails for dim = 3. -@pytest.mark.parametrize("dim", [2]) +@pytest.mark.parametrize("num_parts", [2, 3]) # FIXME: Mostly fails for multiple groups. @pytest.mark.parametrize("num_groups", [1]) -def test_partition_interpolation(ctx_getter, group_factory, dim, +@pytest.mark.parametrize(("dim", "mesh_pars"), [ + (2, [3, 5, 7]), + (3, [3, 5]) + ]) +def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) - order = 4 + order = 3 from pytools.convergence import EOCRecorder eoc_rec = EOCRecorder() @@ -72,64 +74,64 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, def f(x): return 0.1*cl.clmath.sin(30*x) - from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(dim, order=order, n=5) - for _ in range(num_groups)] + for n in mesh_pars: + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(dim, order=order, n=n) + for _ in range(num_groups)] - if num_groups > 1: - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - else: - mesh = meshes[0] + if num_groups > 1: + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + else: + mesh = meshes[0] - from 
pymetis import part_graph - (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) - part_per_element = np.array(p) + from pymetis import part_graph + (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) + part_per_element = np.array(p) - from meshmode.mesh.processing import partition_mesh - part_meshes = [ - partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] + from meshmode.mesh.processing import partition_mesh + part_meshes = [ + partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] - from meshmode.discretization import Discretization - vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) - for i in range(num_parts)] + from meshmode.discretization import Discretization + vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) + for i in range(num_parts)] - from meshmode.mesh import BTAG_PARTITION - from meshmode.discretization.connection import (make_face_restriction, - make_partition_connection, - check_connection) + from meshmode.mesh import BTAG_PARTITION + from meshmode.discretization.connection import (make_face_restriction, + make_partition_connection, + check_connection) - for i_tgt_part in range(num_parts): - for i_src_part in range(num_parts): - if i_tgt_part == i_src_part: - continue + for i_tgt_part in range(num_parts): + for i_src_part in range(num_parts): + if i_tgt_part == i_src_part: + continue - # Connections within tgt_mesh to src_mesh - tgt_to_src_conn = make_face_restriction(vol_discrs[i_tgt_part], - group_factory(order), - BTAG_PARTITION(i_src_part)) + # Connections within tgt_mesh to src_mesh + tgt_to_src_conn = make_face_restriction(vol_discrs[i_tgt_part], + group_factory(order), + BTAG_PARTITION(i_src_part)) - # Connections within src_mesh to tgt_mesh - src_to_tgt_conn = make_face_restriction(vol_discrs[i_src_part], - group_factory(order), - BTAG_PARTITION(i_tgt_part)) + # Connections within src_mesh to tgt_mesh + src_to_tgt_conn = 
make_face_restriction(vol_discrs[i_src_part], + group_factory(order), + BTAG_PARTITION(i_tgt_part)) - # Connect tgt_mesh to src_mesh - connection = make_partition_connection(tgt_to_src_conn, - src_to_tgt_conn, i_src_part) + # Connect tgt_mesh to src_mesh + connection = make_partition_connection(tgt_to_src_conn, + src_to_tgt_conn, i_src_part) - check_connection(connection) + check_connection(connection) - # Should this be src_to_tgt_conn? - bdry_x = tgt_to_src_conn.to_discr.nodes()[0].with_queue(queue) - if bdry_x.size != 0: - bdry_f = f(bdry_x) + # Should this be src_to_tgt_conn? + bdry_x = tgt_to_src_conn.to_discr.nodes()[0].with_queue(queue) + if bdry_x.size != 0: + bdry_f = f(bdry_x) - bdry_f_2 = connection(queue, bdry_f) + bdry_f_2 = connection(queue, bdry_f) - err = la.norm((bdry_f-bdry_f_2).get(), np.inf) - abscissa = i_tgt_part + num_parts * i_src_part - eoc_rec.add_data_point(abscissa, err) + err = la.norm((bdry_f-bdry_f_2).get(), np.inf) + eoc_rec.add_data_point(1./n, err) print(eoc_rec) assert (eoc_rec.order_estimate() >= order-0.5 @@ -142,7 +144,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, @pytest.mark.parametrize("dim", [2, 3]) @pytest.mark.parametrize("num_parts", [4, 5, 7]) -@pytest.mark.parametrize("num_meshes", [2, 7]) +@pytest.mark.parametrize("num_meshes", [1, 2, 7]) def test_partition_mesh(num_parts, num_meshes, dim): n = (5,) * dim from meshmode.mesh.generation import generate_regular_rect_mesh @@ -157,6 +159,8 @@ def test_partition_mesh(num_parts, num_meshes, dim): part_per_element = np.array(p) from meshmode.mesh.processing import partition_mesh + # TODO: The same part_per_element array must be used to partition each mesh. + # Maybe the interface should be changed to guarantee this. 
new_meshes = [ partition_mesh(mesh, part_per_element, i) for i in range(num_parts)] @@ -474,12 +478,12 @@ def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars, @pytest.mark.parametrize("group_factory", [ InterpolatoryQuadratureSimplexGroupFactory, - PolynomialWarpAndBlendGroupFactory + #PolynomialWarpAndBlendGroupFactory ]) @pytest.mark.parametrize(("mesh_name", "dim", "mesh_pars"), [ - ("blob", 2, [1e-1, 8e-2, 5e-2]), + #("blob", 2, [1e-1, 8e-2, 5e-2]), ("warp", 2, [3, 5, 7]), - ("warp", 3, [3, 5]), + #("warp", 3, [3, 5]), ]) def test_opposite_face_interpolation(ctx_getter, group_factory, mesh_name, dim, mesh_pars): -- GitLab From 59b594fc246924cf98f84c8ae6aaeb24582325d9 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 17 Apr 2017 21:12:31 -0500 Subject: [PATCH 172/266] Working --- test/test_meshmode.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 5811565e..81dd8c36 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -66,7 +66,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) - order = 3 + order = 5 from pytools.convergence import EOCRecorder eoc_rec = EOCRecorder() @@ -478,12 +478,12 @@ def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars, @pytest.mark.parametrize("group_factory", [ InterpolatoryQuadratureSimplexGroupFactory, - #PolynomialWarpAndBlendGroupFactory + PolynomialWarpAndBlendGroupFactory ]) @pytest.mark.parametrize(("mesh_name", "dim", "mesh_pars"), [ - #("blob", 2, [1e-1, 8e-2, 5e-2]), + ("blob", 2, [1e-1, 8e-2, 5e-2]), ("warp", 2, [3, 5, 7]), - #("warp", 3, [3, 5]), + ("warp", 3, [3, 5]), ]) def test_opposite_face_interpolation(ctx_getter, group_factory, mesh_name, dim, mesh_pars): -- GitLab From 3138b67824e9ab29fb7b9100c4f7cf4537c843aa Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 19 Apr 2017 00:13:04 -0500 
Subject: [PATCH 173/266] Small changes --- meshmode/discretization/connection/opposite_face.py | 1 + test/test_meshmode.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index c8bf2a93..12dcb0dc 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -437,6 +437,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): ntgt_groups = len(tgt_mesh.groups) part_batches = ntgt_groups * [[]] + # FIXME: Is this an ok way to grab a queue? with cl.CommandQueue(tgt_vol.cl_context) as queue: for i_tgt_grp, adj_parts in enumerate(adj_grps): diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 81dd8c36..6b2f10b4 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -53,13 +53,13 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("group_factory", [ PolynomialWarpAndBlendGroupFactory, - InterpolatoryQuadratureSimplexGroupFactory + #InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize("num_parts", [2, 3]) +@pytest.mark.parametrize("num_parts", [3])#, 3]) # FIXME: Mostly fails for multiple groups. 
@pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ - (2, [3, 5, 7]), + #(2, [3, 5, 7]), (3, [3, 5]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, -- GitLab From 552f69bce673d182105a9d55d7f18763280737a0 Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 20 May 2017 12:11:25 -0500 Subject: [PATCH 174/266] Working --- meshmode/mesh/__init__.py | 18 +++++++------ meshmode/mesh/processing.py | 20 +++++++-------- test/test_meshmode.py | 51 +++++++++++++++++++++---------------- 3 files changed, 48 insertions(+), 41 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 4d174d49..6696ec82 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -457,8 +457,10 @@ class InterPartitionAdj(): :arg elem :arg face :returns: A tuple ``(neighbor_elem, neighbor_face)`` of - neighboring elements within another :class:`Mesh`. - Or (-1, -1) if the face does not have a neighbor. + neighboring elements within another :class:`Mesh` + or (-1, -1) if the face does not have a neighbor. + Note that ``neighbor_elem`` is mesh-wide and includes + its ``element_nr_base``. """ for idx in range(len(self.elements)): if elem == self.elements[idx] and face == self.element_faces[idx]: @@ -850,16 +852,16 @@ class Mesh(Record): def __ne__(self, other): return not self.__eq__(other) - def find_igrp(self, elem): + def find_igrp(self, meshwide_elem): """ - :arg elem: A mesh-wise element. Think of it as ``elem + element_nr_base``. - :returns: The index of the group that `elem` belongs to. + :arg meshwide_elem: Think of it as ``elem + element_nr_base``. + :returns: The index of the group that `meshwide_elem` belongs to. """ for igrp, grp in enumerate(self.groups): - if elem < grp.nelements: + if meshwide_elem < grp.nelements: return igrp - elem -= grp.nelements - raise RuntimeError("Could not find group with element %d." 
% elem) + meshwide_elem -= grp.nelements + raise RuntimeError("Could not find group with element %d." % meshwide_elem) def adjacency_list(self): """ diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 9ffdb0b1..b995a940 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -55,9 +55,6 @@ def partition_mesh(mesh, part_per_element, part_num): numbers on *part_mesh* to ones in *mesh*. .. versionadded:: 2017.1 - - .. warning:: Interface is not final. Connectivity between elements - across groups needs to be added. """ assert len(part_per_element) == mesh.nelements, ( "part_per_element must have shape (mesh.nelements,)") @@ -81,7 +78,7 @@ def partition_mesh(mesh, part_per_element, part_num): for group_num in range(num_groups): mesh_group = mesh.groups[group_num] - # Find the index of first element in the next group + # Find the index of first element in the next group. end_idx = len(queried_elems) for idx in range(start_idx, len(queried_elems)): if queried_elems[idx] - num_prev_elems >= mesh_group.nelements: @@ -166,9 +163,10 @@ def partition_mesh(mesh, part_per_element, part_num): parent_adj = mesh.facial_adjacency_groups[parent_igrp] for parent_facial_group in parent_adj.values(): - for idx in np.where(parent_facial_group.elements == parent_elem)[0]: - if parent_facial_group.neighbors[idx] >= 0 and \ - parent_facial_group.element_faces[idx] == face: + indices, = np.nonzero(parent_facial_group.elements == parent_elem) + for idx in indices: + if (parent_facial_group.neighbors[idx] >= 0 and + parent_facial_group.element_faces[idx] == face): rank_neighbor = (parent_facial_group.neighbors[idx] + parent_elem_base) n_face = parent_facial_group.neighbor_faces[idx] @@ -179,19 +177,19 @@ def partition_mesh(mesh, part_per_element, part_num): BTAG_PARTITION(n_part_num)) boundary_adj.neighbors[adj_idx] = -tags - # Find the neighbor element from the other partition - n_elem = np.count_nonzero( + # Find the neighbor element from the 
other partition. + n_meshwide_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) if n_part_num not in adj_grps[igrp]: adj_grps[igrp][n_part_num] = InterPartitionAdj() - # I cannot compute the neighbor group because the other + # We cannot compute the neighbor group because the other # partitions may not have been built yet. adj = adj_grps[igrp][n_part_num] adj.elements = np.append(adj.elements, elem) adj.element_faces = np.append(adj.element_faces, face) - adj.neighbors = np.append(adj.neighbors, n_elem) + adj.neighbors = np.append(adj.neighbors, n_meshwide_elem) adj.neighbor_faces = np.append(adj.neighbor_faces, n_face) connected_mesh = part_mesh.copy() diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 6b2f10b4..1ae19783 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -55,24 +55,30 @@ logger = logging.getLogger(__name__) PolynomialWarpAndBlendGroupFactory, #InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize("num_parts", [3])#, 3]) +@pytest.mark.parametrize("num_parts", [2])#, 3]) # FIXME: Mostly fails for multiple groups. 
@pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ - #(2, [3, 5, 7]), - (3, [3, 5]) + (2, [10, 20, 30]), + #(3, [3, 5]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) - order = 5 + order = 3 from pytools.convergence import EOCRecorder - eoc_rec = EOCRecorder() + eoc_rec = dict() + for i in range(num_parts): + for j in range(num_parts): + if i == j: + continue + eoc_rec[(i, j)] = EOCRecorder() def f(x): - return 0.1*cl.clmath.sin(30*x) + return x + #return 0.1*cl.clmath.sin(30*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -131,11 +137,12 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, bdry_f_2 = connection(queue, bdry_f) err = la.norm((bdry_f-bdry_f_2).get(), np.inf) - eoc_rec.add_data_point(1./n, err) + eoc_rec[(i_tgt_part, i_src_part)].add_data_point(1./n, err) - print(eoc_rec) - assert (eoc_rec.order_estimate() >= order-0.5 - or eoc_rec.max_error() < 1e-13) + print(eoc_rec[(0, 1)]) + + assert (eoc_rec[(0, 1)].order_estimate() >= order-0.5 + or eoc_rec[(0, 1)].max_error() < 1e-13) # }}} @@ -189,26 +196,26 @@ def test_partition_mesh(num_parts, num_meshes, dim): if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: num_tags[n_part_num] += 1 - (n_elem, n_face) = adj.get_neighbor(elem, face) - n_grp_num = n_part.find_igrp(n_elem) + (n_meshwide_elem, n_face) = adj.get_neighbor(elem, face) + n_grp_num = n_part.find_igrp(n_meshwide_elem) n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] n_elem_base = n_part.groups[n_grp_num].element_nr_base - n_elem -= n_elem_base + n_elem = n_meshwide_elem - n_elem_base assert (elem + elem_base, face) ==\ n_adj.get_neighbor(n_elem, n_face),\ "InterPartitionAdj is not consistent" n_part_to_global = new_meshes[n_part_num][1] - p_elem = part_to_global[elem + elem_base] - p_n_elem = 
n_part_to_global[n_elem + n_elem_base] + p_meshwide_elem = part_to_global[elem + elem_base] + p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] - p_grp_num = mesh.find_igrp(p_elem) - p_n_grp_num = mesh.find_igrp(p_n_elem) + p_grp_num = mesh.find_igrp(p_meshwide_elem) + p_n_grp_num = mesh.find_igrp(p_meshwide_n_elem) p_elem_base = mesh.groups[p_grp_num].element_nr_base p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base - p_elem -= p_elem_base - p_n_elem -= p_n_elem_base + p_elem = p_meshwide_elem - p_elem_base + p_n_elem = p_meshwide_n_elem - p_n_elem_base f_groups = mesh.facial_adjacency_groups[p_grp_num] for p_bnd_adj in f_groups.values(): @@ -220,11 +227,11 @@ def test_partition_mesh(num_parts, num_meshes, dim): assert n_face == p_bnd_adj.neighbor_faces[idx],\ "Tag does not give correct neighbor" - for tag_num in range(num_parts): + for i_tag in range(num_parts): tag_sum = 0 for mesh, _ in new_meshes: - tag_sum += count_tags(mesh, BTAG_PARTITION(tag_num)) - assert num_tags[tag_num] == tag_sum,\ + tag_sum += count_tags(mesh, BTAG_PARTITION(i_tag)) + assert num_tags[i_tag] == tag_sum,\ "part_mesh has the wrong number of BTAG_PARTITION boundaries" -- GitLab From 2c530fdd4986e1c0cbf07e4f7c15a38f1f642871 Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 20 May 2017 12:45:37 -0500 Subject: [PATCH 175/266] Working
--- meshmode/discretization/connection/opposite_face.py | 8 +++++--- test/test_meshmode.py | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 12dcb0dc..76c2b404 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -448,13 +448,15 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): adj = adj_parts[i_src_part] i_tgt_faces = adj.element_faces - i_src_elems = adj.neighbors + i_src_meshwide_elems = adj.neighbors i_src_faces = adj.neighbor_faces - i_src_grps = np.array([src_mesh.find_igrp(e) for e in i_src_elems]) + i_src_grps = np.array([src_mesh.find_igrp(e) + for e in i_src_meshwide_elems]) + i_src_elems = np.empty_like(i_src_meshwide_elems) for i, i_grp in enumerate(i_src_grps): elem_base = src_mesh.groups[i_grp].element_nr_base - i_src_elems[i] -= elem_base + i_src_elems[i] = i_src_meshwide_elems[i] - elem_base for i_src_grp in np.unique(i_src_grps): diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 1ae19783..a63ac505 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -55,12 +55,12 @@ logger = logging.getLogger(__name__) PolynomialWarpAndBlendGroupFactory, #InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize("num_parts", [2])#, 3]) +@pytest.mark.parametrize("num_parts", [2]) # , 3]) # FIXME: Mostly fails for multiple groups. 
@pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [10, 20, 30]), - #(3, [3, 5]) + #(3, [10, 20]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): -- GitLab From 832343efee1e17fac1a1ca7890251e1a7f5713eb Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 21 May 2017 19:56:28 -0500 Subject: [PATCH 176/266] Testing --- test/test_meshmode.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index a63ac505..e991055b 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -55,12 +55,12 @@ logger = logging.getLogger(__name__) PolynomialWarpAndBlendGroupFactory, #InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize("num_parts", [2]) # , 3]) +@pytest.mark.parametrize("num_parts", [2]) # FIXME: Mostly fails for multiple groups. -@pytest.mark.parametrize("num_groups", [1]) +@pytest.mark.parametrize("num_groups", [3]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [10, 20, 30]), - #(3, [10, 20]) + #(3, [3, 5]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -139,10 +139,12 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, err = la.norm((bdry_f-bdry_f_2).get(), np.inf) eoc_rec[(i_tgt_part, i_src_part)].add_data_point(1./n, err) - print(eoc_rec[(0, 1)]) - - assert (eoc_rec[(0, 1)].order_estimate() >= order-0.5 - or eoc_rec[(0, 1)].max_error() < 1e-13) + for i in range(num_parts): + for j in range(num_parts): + if i != j: + print(eoc_rec[(i, j)]) + #assert(eoc_rec[(i, j)].order_estimate() >= order - 0.5 + # or eoc_rec[(i, j)].max_error() < 1e-13) # }}} @@ -488,7 +490,7 @@ def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars, PolynomialWarpAndBlendGroupFactory ]) @pytest.mark.parametrize(("mesh_name", "dim", "mesh_pars"), [ - ("blob", 2, [1e-1, 8e-2, 5e-2]), + 
#("blob", 2, [1e-1, 8e-2, 5e-2]), ("warp", 2, [3, 5, 7]), ("warp", 3, [3, 5]), ]) -- GitLab From ca2a3faac9fde277d2c69a77dc241623a8da8f59 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 21 May 2017 19:56:42 -0500 Subject: [PATCH 177/266] Fixed bug when there is a partition of multiple groups. If one partition does not contain all groups then its connection was left with an empty batch. --- meshmode/discretization/connection/opposite_face.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 76c2b404..9a69ee5c 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -434,8 +434,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): adj_grps = tgt_mesh.interpart_adj_groups - ntgt_groups = len(tgt_mesh.groups) - part_batches = ntgt_groups * [[]] + part_batches = dict() # FIXME: Is this an ok way to grab a queue? with cl.CommandQueue(tgt_vol.cl_context) as queue: @@ -445,6 +444,8 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): # Skip because i_tgt_grp is not connected to i_src_part. continue + part_batches[i_tgt_grp] = [] + adj = adj_parts[i_src_part] i_tgt_faces = adj.element_faces @@ -491,7 +492,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): from_discr=src_bdry, # Is this right?
to_discr=tgt_bdry, groups=[DiscretizationConnectionElementGroup(batches=batches) - for batches in part_batches], + for batches in part_batches.values()], is_surjective=True) # }}} -- GitLab From 1bebfb2a22b0cfd4707a19b8d82c4b9ebd762f21 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 21 May 2017 20:26:54 -0500 Subject: [PATCH 178/266] Added comments --- meshmode/discretization/connection/opposite_face.py | 2 ++ test/test_meshmode.py | 12 +++++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 9a69ee5c..1c3542bb 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -444,6 +444,8 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): # Skip because i_tgt_grp is not connected to i_src_part. continue + # FIXME: Here we avoid creating empty batches. But now the + # number of batches does not match the number of groups. part_batches[i_tgt_grp] = [] adj = adj_parts[i_src_part] diff --git a/test/test_meshmode.py b/test/test_meshmode.py index e991055b..f16286f3 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -57,7 +57,13 @@ logger = logging.getLogger(__name__) ]) @pytest.mark.parametrize("num_parts", [2]) # FIXME: Mostly fails for multiple groups. -@pytest.mark.parametrize("num_groups", [3]) +# The problem is that when multiple groups are partitioned +# some partitions may not contain all groups. In that case +# there will be a connection between two partitions with +# empty batches because there will be a group that doesn't +# connect to the other partition. I need to deal with these +# empty batches. 
+@pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [10, 20, 30]), #(3, [3, 5]) @@ -143,8 +149,8 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for j in range(num_parts): if i != j: print(eoc_rec[(i, j)]) - #assert(eoc_rec[(i, j)].order_estimate() >= order - 0.5 - # or eoc_rec[(i, j)].max_error() < 1e-13) + assert(eoc_rec[(i, j)].order_estimate() >= order - 0.5 + or eoc_rec[(i, j)].max_error() < 1e-13) # }}} -- GitLab From 60d5fbed25bd2b522cb8c3aaa64e48729c42e98f Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 22 May 2017 21:23:42 -0500 Subject: [PATCH 179/266] Interpartition interpolation works as expected with one group --- .../connection/opposite_face.py | 11 ++-- test/test_meshmode.py | 63 +++++++++++-------- 2 files changed, 40 insertions(+), 34 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 1c3542bb..0b1ae69b 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -434,20 +434,17 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): adj_grps = tgt_mesh.interpart_adj_groups - part_batches = dict() + part_batches = [] # FIXME: Is this an ok way to grab a queue? with cl.CommandQueue(tgt_vol.cl_context) as queue: for i_tgt_grp, adj_parts in enumerate(adj_grps): + part_batches.append([]) if i_src_part not in adj_parts: # Skip because i_tgt_grp is not connected to i_src_part. continue - # FIXME: Here we avoid creating empty batches. But now the - # number of batches does not match the number of groups. 
- part_batches[i_tgt_grp] = [] - adj = adj_parts[i_src_part] i_tgt_faces = adj.element_faces @@ -471,7 +468,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): index_flags = np.logical_and(i_src_grps == i_src_grp, i_tgt_faces == i_tgt_face) - if True not in index_flags: + if not np.any(index_flags): continue vbc_tgt_grp_face_batch = _find_ibatch_for_face( @@ -494,7 +491,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): from_discr=src_bdry, # Is this right? to_discr=tgt_bdry, groups=[DiscretizationConnectionElementGroup(batches=batches) - for batches in part_batches.values()], + for batches in part_batches], is_surjective=True) # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index f16286f3..c829e9fe 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -53,9 +53,9 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("group_factory", [ PolynomialWarpAndBlendGroupFactory, - #InterpolatoryQuadratureSimplexGroupFactory + InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize("num_parts", [2]) +@pytest.mark.parametrize("num_parts", [2, 3, 4]) # FIXME: Mostly fails for multiple groups. # The problem is that when multiple groups are partitioned # some partitions may not contain all groups. In that case @@ -65,14 +65,14 @@ logger = logging.getLogger(__name__) # empty batches. 
@pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ - (2, [10, 20, 30]), - #(3, [3, 5]) + (2, [3, 5, 7]), + (3, [3, 5, 7]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) - order = 3 + order = 5 from pytools.convergence import EOCRecorder eoc_rec = dict() @@ -83,8 +83,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, eoc_rec[(i, j)] = EOCRecorder() def f(x): - return x - #return 0.1*cl.clmath.sin(30*x) + return 0.1*cl.clmath.sin(30*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -116,41 +115,51 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for i_tgt_part in range(num_parts): for i_src_part in range(num_parts): - if i_tgt_part == i_src_part: + if (i_tgt_part == i_src_part + or eoc_rec[(i_tgt_part, i_src_part)] == None): + eoc_rec[(i_tgt_part, i_src_part)] = None continue - # Connections within tgt_mesh to src_mesh + # Mark faces within tgt_mesh that are connected to src_mesh tgt_to_src_conn = make_face_restriction(vol_discrs[i_tgt_part], group_factory(order), BTAG_PARTITION(i_src_part)) - # Connections within src_mesh to tgt_mesh + # If these parts are not connected, don't bother checking the error + bdry_nodes = tgt_to_src_conn.to_discr.nodes()[0].with_queue(queue) + if bdry_nodes.size == 0: + eoc_rec[(i_tgt_part, i_src_part)] = None + continue + + # Mark faces within src_mesh that are connected to tgt_mesh src_to_tgt_conn = make_face_restriction(vol_discrs[i_src_part], group_factory(order), BTAG_PARTITION(i_tgt_part)) # Connect tgt_mesh to src_mesh - connection = make_partition_connection(tgt_to_src_conn, - src_to_tgt_conn, i_src_part) + tgt_conn = make_partition_connection(tgt_to_src_conn, + src_to_tgt_conn, i_src_part) - check_connection(connection) + # Connect src_mesh to tgt_mesh + src_conn = 
make_partition_connection(src_to_tgt_conn, + tgt_to_src_conn, i_tgt_part) - # Should this be src_to_tgt_conn? - bdry_x = tgt_to_src_conn.to_discr.nodes()[0].with_queue(queue) - if bdry_x.size != 0: - bdry_f = f(bdry_x) + check_connection(tgt_conn) + check_connection(src_conn) - bdry_f_2 = connection(queue, bdry_f) + bdry_t = f(tgt_conn.to_discr.nodes()[0].with_queue(queue)) + bdry_s = tgt_conn(queue, bdry_t) + bdry_t_2 = src_conn(queue, bdry_s) - err = la.norm((bdry_f-bdry_f_2).get(), np.inf) - eoc_rec[(i_tgt_part, i_src_part)].add_data_point(1./n, err) + err = la.norm((bdry_t - bdry_t_2).get(), np.inf) + eoc_rec[(i_tgt_part, i_src_part)].add_data_point(1./n, err) - for i in range(num_parts): - for j in range(num_parts): - if i != j: - print(eoc_rec[(i, j)]) - assert(eoc_rec[(i, j)].order_estimate() >= order - 0.5 - or eoc_rec[(i, j)].max_error() < 1e-13) + for (i, j), e in eoc_rec.items(): + if e != None: + print("Error of connection from part %i to part %i." % (i, j)) + print(e) + assert(e.order_estimate() >= order - 0.5 + or e.max_error() < 1e-12) # }}} @@ -496,7 +505,7 @@ def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars, PolynomialWarpAndBlendGroupFactory ]) @pytest.mark.parametrize(("mesh_name", "dim", "mesh_pars"), [ - #("blob", 2, [1e-1, 8e-2, 5e-2]), + ("blob", 2, [1e-1, 8e-2, 5e-2]), ("warp", 2, [3, 5, 7]), ("warp", 3, [3, 5]), ]) -- GitLab From 4ca35db2f899749bc01aa52dabce12357cc98d00 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 22 May 2017 21:26:11 -0500 Subject: [PATCH 180/266] Format fix --- test/test_meshmode.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index c829e9fe..74464a95 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -116,7 +116,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for i_tgt_part in range(num_parts): for i_src_part in range(num_parts): if (i_tgt_part == i_src_part - or 
eoc_rec[(i_tgt_part, i_src_part)] == None): + or eoc_rec[(i_tgt_part, i_src_part)] is None): eoc_rec[(i_tgt_part, i_src_part)] = None continue @@ -155,7 +155,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, eoc_rec[(i_tgt_part, i_src_part)].add_data_point(1./n, err) for (i, j), e in eoc_rec.items(): - if e != None: + if e is not None: print("Error of connection from part %i to part %i." % (i, j)) print(e) assert(e.order_estimate() >= order - 0.5 -- GitLab From 79c7e3d08c6aec6f888c392fe746ed7cc5403b02 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 23 May 2017 11:10:20 -0500 Subject: [PATCH 181/266] Small changes --- meshmode/discretization/connection/opposite_face.py | 3 +-- test/test_meshmode.py | 9 ++++----- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 0b1ae69b..169a5b2a 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -57,7 +57,6 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr, dim = src_grp.dim ambient_dim, nelements, ntgt_unit_nodes = tgt_bdry_nodes.shape - # FIXME: Not sure if this is a valid assertion. assert tgt_bdry_nodes.shape == src_bdry_nodes.shape # {{{ invert face map (using Gauss-Newton) @@ -419,7 +418,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): .. versionadded:: 2017.1 - .. warning:: Interface is not final. It doesn't even work yet...:( + .. warning:: Interface is not final. """ from meshmode.discretization.connection import ( diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 74464a95..de842833 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -65,14 +65,14 @@ logger = logging.getLogger(__name__) # empty batches. 
@pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ - (2, [3, 5, 7]), - (3, [3, 5, 7]) + (2, [10, 20, 30]), + (3, [3, 5]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) - order = 5 + order = 4 from pytools.convergence import EOCRecorder eoc_rec = dict() @@ -158,8 +158,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, if e is not None: print("Error of connection from part %i to part %i." % (i, j)) print(e) - assert(e.order_estimate() >= order - 0.5 - or e.max_error() < 1e-12) + assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-12) # }}} -- GitLab From 417e63b6f885104593cb2efecbb16cc2abaae869 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 29 May 2017 10:25:05 -0500 Subject: [PATCH 182/266] Small changes --- meshmode/mesh/processing.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index b995a940..e379a2a1 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -128,9 +128,8 @@ def partition_mesh(mesh, part_per_element, part_num): required_indices == original_index)[0] new_mesh_groups = [] - for group_num in range(num_groups): + for group_num, mesh_group in enumerate(mesh.groups): if group_num not in skip_groups: - mesh_group = mesh.groups[group_num] new_mesh_groups.append( type(mesh_group)(mesh_group.order, new_indices[group_num], new_nodes[group_num], unit_nodes=mesh_group.unit_nodes)) -- GitLab From 0c1bc1a63751e92d84be8fafad050acc1596df9f Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 14 Jul 2017 20:16:24 -0500 Subject: [PATCH 183/266] Easy fixes --- meshmode/discretization/connection/opposite_face.py | 1 - meshmode/mesh/__init__.py | 7 ++----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py 
b/meshmode/discretization/connection/opposite_face.py index 169a5b2a..3095d8d8 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -435,7 +435,6 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): part_batches = [] - # FIXME: Is this an ok way to grab a queue? with cl.CommandQueue(tgt_vol.cl_context) as queue: for i_tgt_grp, adj_parts in enumerate(adj_grps): diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 6696ec82..bc02b405 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -102,11 +102,8 @@ class BTAG_PARTITION(object): # noqa def __init__(self, part_nr): self.part_nr = int(part_nr) - # TODO is this acceptable? - # __eq__ is also defined so maybe the hash value isn't too important - # for dictionaries. def __hash__(self): - return self.part_nr + return hash((type(self), self.part_nr)) def __eq__(self, other): if isinstance(other, BTAG_PARTITION): @@ -114,7 +111,7 @@ class BTAG_PARTITION(object): # noqa else: return False - def __nq__(self, other): + def __ne__(self, other): return not self.__eq__(other) -- GitLab From d0631b089ed09970242fd41ac0c6ccda36f0feba Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 15 Jul 2017 14:18:26 -0500 Subject: [PATCH 184/266] InterpartitionAdj now includes a lookup table that maps elements and faces to their neighbors --- meshmode/mesh/__init__.py | 47 +++++++++++++++++++++++-------------- meshmode/mesh/processing.py | 11 +++++---- test/test_meshmode.py | 12 +++++----- 3 files changed, 41 insertions(+), 29 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index bc02b405..7772bca3 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -438,7 +438,13 @@ class InterPartitionAdj(): ``neighbor_faces[i]`` gives face index within the neighboring partition of the face connected to ``elements[i]`` - .. automethod:: get_neighbor + .. 
attribute:: neighbor_lookup_table + + A dictionary that maps the tuple ``(element, face)`` to the tuple + ``(neighbor_element, neighbor_face)``. May be ``None`` if it has not + been generated. + + .. automethod:: append_connection .. versionadded:: 2017.1 """ @@ -449,22 +455,26 @@ class InterPartitionAdj(): self.neighbors = np.array([], dtype=int) self.neighbor_faces = np.array([], dtype=int) - def get_neighbor(self, elem, face): + def append_connection(self, elem, face, nelem, nface): """ - :arg elem - :arg face - :returns: A tuple ``(neighbor_elem, neighbor_face)`` of - neighboring elements within another :class:`Mesh` - or (-1, -1) if the face does not have a neighbor. - Note that ``neighbor_elem`` is mesh-wide and includes - its ``element_nr_base``. + :arg elem: + :arg face: + :arg nelem: + :arg nface: + Connects element ``elem`` with face ``face`` to its neighboring element + ``nelem`` with face ``nface``. """ - for idx in range(len(self.elements)): - if elem == self.elements[idx] and face == self.element_faces[idx]: - return (self.neighbors[idx], - self.neighbor_faces[idx]) - #raise RuntimeError("This face does not have a neighbor") - return (-1, -1) + self.elements = np.append(self.elements, elem) + self.element_faces = np.append(self.element_faces, face) + self.neighbors = np.append(self.neighbors, nelem) + self.neighbor_faces = np.append(self.neighbor_faces, nface) + + def _generate_neighbor_lookup_table(self): + self.neighbor_lookup_table = dict() + for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): + nelem = self.neighbors[idx] + nface = self.neighbor_faces[idx] + self.neighbor_lookup_table[(elem, face)] = (nelem, nface) # }}} @@ -862,14 +872,15 @@ class Mesh(Record): def adjacency_list(self): """ - :returns: An :class:`np.array` with dtype `set`. `adjacency[i]` is the set + :returns: An list of sets. `adjacency[i]` is the set of all elements that are adjacent to element `i`. Useful for `pymetis.part_graph`. 
""" - adjacency_list = np.zeros((self.nelements,), dtype=set) + adjacency_list = [] + for _ in range(self.nelements): + adjacency_list.append(set()) nodal_adj = self.nodal_adjacency for elem in range(self.nelements): - adjacency_list[elem] = set() starts = nodal_adj.neighbors_starts for n in range(starts[elem], starts[elem + 1]): adjacency_list[elem].add(nodal_adj.neighbors[n]) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index e379a2a1..ed2d86ac 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -185,11 +185,12 @@ def partition_mesh(mesh, part_per_element, part_num): # We cannot compute the neighbor group because the other # partitions may not have been built yet. - adj = adj_grps[igrp][n_part_num] - adj.elements = np.append(adj.elements, elem) - adj.element_faces = np.append(adj.element_faces, face) - adj.neighbors = np.append(adj.neighbors, n_meshwide_elem) - adj.neighbor_faces = np.append(adj.neighbor_faces, n_face) + adj_grps[igrp][n_part_num].\ + append_connection(elem, face, n_meshwide_elem, n_face) + + for adj_dict in adj_grps: + for adj_grp in adj_dict.values(): + adj_grp._generate_neighbor_lookup_table() connected_mesh = part_mesh.copy() connected_mesh.interpart_adj_groups = adj_grps diff --git a/test/test_meshmode.py b/test/test_meshmode.py index de842833..fe67b7c3 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -53,7 +53,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("group_factory", [ PolynomialWarpAndBlendGroupFactory, - InterpolatoryQuadratureSimplexGroupFactory + #InterpolatoryQuadratureSimplexGroupFactory ]) @pytest.mark.parametrize("num_parts", [2, 3, 4]) # FIXME: Mostly fails for multiple groups. 
@@ -66,7 +66,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [10, 20, 30]), - (3, [3, 5]) + #(3, [3, 5]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -212,15 +212,15 @@ def test_partition_mesh(num_parts, num_meshes, dim): if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: num_tags[n_part_num] += 1 - (n_meshwide_elem, n_face) = adj.get_neighbor(elem, face) + (n_meshwide_elem, n_face) =\ + adj.neighbor_lookup_table[(elem, face)] n_grp_num = n_part.find_igrp(n_meshwide_elem) n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base assert (elem + elem_base, face) ==\ - n_adj.get_neighbor(n_elem, n_face),\ - "InterPartitionAdj is not consistent" - + n_adj.neighbor_lookup_table[(n_elem, n_face)],\ + "InterPartitionAdj is not consistent" n_part_to_global = new_meshes[n_part_num][1] p_meshwide_elem = part_to_global[elem + elem_base] p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] -- GitLab From 558ddaac4d0e7f8938b8bb330222e83224a826f3 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 16 Jul 2017 13:16:03 -0500 Subject: [PATCH 185/266] Partition testing no longer relies on pymetis.part_graph --- test/test_meshmode.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index fe67b7c3..9414c279 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -55,7 +55,7 @@ logger = logging.getLogger(__name__) PolynomialWarpAndBlendGroupFactory, #InterpolatoryQuadratureSimplexGroupFactory ]) -@pytest.mark.parametrize("num_parts", [2, 3, 4]) +@pytest.mark.parametrize("num_parts", [2, 3]) # FIXME: Mostly fails for multiple groups. # The problem is that when multiple groups are partitioned # some partitions may not contain all groups. 
In that case @@ -65,8 +65,8 @@ logger = logging.getLogger(__name__) # empty batches. @pytest.mark.parametrize("num_groups", [1]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ - (2, [10, 20, 30]), - #(3, [3, 5]) + (2, [3, 5, 10]), + (3, [3, 5]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -99,6 +99,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, from pymetis import part_graph (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) part_per_element = np.array(p) + #part_per_element = np.random.randint(num_parts, size=mesh.nelements) from meshmode.mesh.processing import partition_mesh part_meshes = [ @@ -177,9 +178,10 @@ def test_partition_mesh(num_parts, num_meshes, dim): from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes(meshes) - from pymetis import part_graph - (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) - part_per_element = np.array(p) + #from pymetis import part_graph + #(_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) + #part_per_element = np.array(p) + part_per_element = np.random.randint(num_parts, size=mesh.nelements) from meshmode.mesh.processing import partition_mesh # TODO: The same part_per_element array must be used to partition each mesh. 
-- GitLab From 38962882724b175fecacf98adad56bf67034fe97 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 16 Jul 2017 23:55:06 -0500 Subject: [PATCH 186/266] Fix FaceConnection: from_element_indices are group-local --- meshmode/discretization/connection/face.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/meshmode/discretization/connection/face.py b/meshmode/discretization/connection/face.py index 6ff6d0ca..7360b2fe 100644 --- a/meshmode/discretization/connection/face.py +++ b/meshmode/discretization/connection/face.py @@ -88,8 +88,7 @@ def _build_boundary_connection(queue, vol_discr, bdry_discr, connection_data, from_group_index=igrp, from_element_indices=cl.array.to_device( queue, - vol_grp.mesh_el_group.element_nr_base - + data.group_source_element_indices) + data.group_source_element_indices) .with_queue(None), to_element_indices=cl.array.to_device( queue, -- GitLab From 553f6859d3f69842b4a67252e81d74b68ec4cef2 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 16 Jul 2017 23:55:30 -0500 Subject: [PATCH 187/266] Opposite-face connection: better document _make_bdry_el_lookup_table --- meshmode/discretization/connection/opposite_face.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 3095d8d8..3cced8f7 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -272,7 +272,11 @@ def _find_ibatch_for_face(vbc_tgt_grp_batches, iface): return vbc_tgt_grp_face_batch -def _make_el_lookup_table(queue, connection, igrp): +def _make_bdry_el_lookup_table(queue, connection, igrp): + """Given a voluem-to-boundary connection as *connection*, return + a table of shape ``(from_nelements, nfaces)`` to look up the + element number of the boundary element for that face. 
+ """ from_nelements = connection.from_discr.groups[igrp].nelements from_nfaces = connection.from_discr.mesh.groups[igrp].nfaces @@ -316,7 +320,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): groups = [[] for i_tgt_grp in range(ngrps)] for i_src_grp in range(ngrps): - src_grp_el_lookup = _make_el_lookup_table( + src_grp_el_lookup = _make_bdry_el_lookup_table( queue, volume_to_bdry_conn, i_src_grp) for i_tgt_grp in range(ngrps): @@ -459,7 +463,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): for i_src_grp in np.unique(i_src_grps): src_el_lookup =\ - _make_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) + _make_bdry_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) for i_tgt_face in i_tgt_faces: -- GitLab From 2819f19e62ec80024d08b68e85863d6bc6c11b6f Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 17 Jul 2017 11:39:34 -0500 Subject: [PATCH 188/266] Test partition interpolation for multiple groups --- test/test_meshmode.py | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 9414c279..d002534f 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,20 +56,14 @@ logger = logging.getLogger(__name__) #InterpolatoryQuadratureSimplexGroupFactory ]) @pytest.mark.parametrize("num_parts", [2, 3]) -# FIXME: Mostly fails for multiple groups. -# The problem is that when multiple groups are partitioned -# some partitions may not contain all groups. In that case -# there will be a connection between two partitions with -# empty batches because there will be a group that doesn't -# connect to the other partition. I need to deal with these -# empty batches. 
-@pytest.mark.parametrize("num_groups", [1]) +@pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ - (2, [3, 5, 10]), - (3, [3, 5]) + (2, [3, 4, 7]), + (3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): + np.random.seed(42) cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) order = 4 @@ -96,10 +90,10 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, else: mesh = meshes[0] - from pymetis import part_graph - (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) - part_per_element = np.array(p) - #part_per_element = np.random.randint(num_parts, size=mesh.nelements) + #from pymetis import part_graph + #(_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) + #part_per_element = np.array(p) + part_per_element = np.random.randint(num_parts, size=mesh.nelements) from meshmode.mesh.processing import partition_mesh part_meshes = [ @@ -170,6 +164,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, @pytest.mark.parametrize("num_parts", [4, 5, 7]) @pytest.mark.parametrize("num_meshes", [1, 2, 7]) def test_partition_mesh(num_parts, num_meshes, dim): + np.random.seed(42) n = (5,) * dim from meshmode.mesh.generation import generate_regular_rect_mesh meshes = [generate_regular_rect_mesh(a=(0 + i,) * dim, b=(1 + i,) * dim, n=n) -- GitLab From 711a34bbb375b49c0e62a43167485d8451bb0e52 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 17 Jul 2017 21:57:24 -0500 Subject: [PATCH 189/266] Make InterPartitionAdjacency better --- .../connection/opposite_face.py | 2 +- meshmode/mesh/__init__.py | 61 ++++++++----------- meshmode/mesh/processing.py | 20 +++--- 3 files changed, 38 insertions(+), 45 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 3cced8f7..9dca9cab 100644 --- 
a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -490,7 +490,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): src_bdry_element_indices)) return DirectDiscretizationConnection( - from_discr=src_bdry, # Is this right? + from_discr=src_bdry, to_discr=tgt_bdry, groups=[DiscretizationConnectionElementGroup(batches=batches) for batches in part_batches], diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 7772bca3..f069d899 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -411,7 +411,7 @@ class NodalAdjacency(Record): # {{{ partition adjacency -class InterPartitionAdj(): +class InterPartitionAdjacency(object): """ Describes facial adjacency information of elements in one :class:`Mesh` to elements in another :class:`Mesh`. The element's boundary tag gives the @@ -419,55 +419,45 @@ class InterPartitionAdj(): .. attribute:: elements - `:class:Mesh`-local element numbers that have neighbors. + Group-local element numbers. + Element ``element_id_dtype elements[i]`` and face + ``face_id_dtype element_faces[i]`` is connected to neighbor element + ``element_id_dtype neighbors[i]`` with face + ``face_id_dtype neighbor_faces[i]``. .. attribute:: element_faces - ``element_faces[i]`` is the face of ``elements[i]`` that has a neighbor. + ``face_id_dtype element_faces[i]`` gives the face of + ``element_id_dtype elements[i]`` that is connected to ``neighbors[i]``. .. attribute:: neighbors - ``neighbors[i]`` gives the element number within the neighboring partiton - of the element connected to ``elements[i]``. This gives a mesh-wide element - numbering. Use ``Mesh.find_igrp()`` to find the group that the element - belongs to, then subtract ``element_nr_base`` to find the element of the - group. + Mesh-wide element numbers. 
+ ``element_id_dtype neighbors[i]`` gives the element number within the + neighboring partiton of the element connected to + ``element_id_dtype elements[i]``. Use ``Mesh.find_igrp()`` to find the group + that the element belongs to, then subtract ``element_nr_base`` to find the + element of the group. .. attribute:: neighbor_faces - ``neighbor_faces[i]`` gives face index within the neighboring partition - of the face connected to ``elements[i]`` + ``face_id_dtype neighbor_faces[i]`` gives face index within the neighboring + partition of the face connected to ``element_id_dtype elements[i]`` .. attribute:: neighbor_lookup_table A dictionary that maps the tuple ``(element, face)`` to the tuple - ``(neighbor_element, neighbor_face)``. May be ``None`` if it has not - been generated. - - .. automethod:: append_connection + ``(neighbor_element, neighbor_face)``. .. versionadded:: 2017.1 """ - def __init__(self): - self.elements = np.array([], dtype=int) - self.element_faces = np.array([], dtype=int) - self.neighbors = np.array([], dtype=int) - self.neighbor_faces = np.array([], dtype=int) - - def append_connection(self, elem, face, nelem, nface): - """ - :arg elem: - :arg face: - :arg nelem: - :arg nface: - Connects element ``elem`` with face ``face`` to its neighboring element - ``nelem`` with face ``nface``. 
- """ - self.elements = np.append(self.elements, elem) - self.element_faces = np.append(self.element_faces, face) - self.neighbors = np.append(self.neighbors, nelem) - self.neighbor_faces = np.append(self.neighbor_faces, nface) + def __init__(self, elements, element_faces, neighbors, neighbor_faces): + self.elements = np.array(elements, dtype=Mesh.element_id_dtype) + self.element_faces = np.array(element_faces, dtype=Mesh.face_id_dtype) + self.neighbors = np.array(neighbors, dtype=Mesh.element_id_dtype) + self.neighbor_faces = np.array(neighbor_faces, dtype=Mesh.face_id_dtype) + self._generate_neighbor_lookup_table() def _generate_neighbor_lookup_table(self): self.neighbor_lookup_table = dict() @@ -629,13 +619,16 @@ class Mesh(Record): .. attribute:: element_id_dtype + .. attribute:: face_id_dtype + .. automethod:: __eq__ .. automethod:: __ne__ .. automethod:: find_igrp .. automethos:: adjacency_list """ - face_id_dtype = np.int8 + face_id_dtype=np.int8 + element_id_dtype=np.int32 def __init__(self, vertices, groups, skip_tests=False, node_vertex_consistency_tolerance=None, diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index ed2d86ac..c647a2fd 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -141,8 +141,7 @@ def partition_mesh(mesh, part_per_element, part_num): part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None, boundary_tags=boundary_tags) - from meshmode.mesh import InterPartitionAdj - adj_grps = [{} for _ in range(len(part_mesh.groups))] + adj_grps = [dict() for _ in range(len(part_mesh.groups))] for igrp, grp in enumerate(part_mesh.groups): elem_base = grp.element_nr_base @@ -181,16 +180,17 @@ def partition_mesh(mesh, part_per_element, part_num): part_per_element[:rank_neighbor] == n_part_num) if n_part_num not in adj_grps[igrp]: - adj_grps[igrp][n_part_num] = InterPartitionAdj() + adj_grps[igrp][n_part_num] = [] - # We cannot compute the neighbor group because the other - # 
partitions may not have been built yet. adj_grps[igrp][n_part_num].\ - append_connection(elem, face, n_meshwide_elem, n_face) - - for adj_dict in adj_grps: - for adj_grp in adj_dict.values(): - adj_grp._generate_neighbor_lookup_table() + append((elem, face, n_meshwide_elem, n_face)) + + from meshmode.mesh import InterPartitionAdjacency + for igrp, adj_dict in enumerate(adj_grps): + for n_part_num, adj_data in adj_dict.items(): + elems, faces, n_elems, n_faces = np.array(adj_data).T + adj_grps[igrp][n_part_num] =\ + InterPartitionAdjacency(elems, faces, n_elems, n_faces) connected_mesh = part_mesh.copy() connected_mesh.interpart_adj_groups = adj_grps -- GitLab From 80bb6780b65a9e8bd3f8cdf53df82a41f35f2784 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 17 Jul 2017 22:00:50 -0500 Subject: [PATCH 190/266] Fix typo --- meshmode/discretization/connection/opposite_face.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 9dca9cab..2adbc8b7 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -273,7 +273,7 @@ def _find_ibatch_for_face(vbc_tgt_grp_batches, iface): def _make_bdry_el_lookup_table(queue, connection, igrp): - """Given a voluem-to-boundary connection as *connection*, return + """Given a volume-to-boundary connection as *connection*, return a table of shape ``(from_nelements, nfaces)`` to look up the element number of the boundary element for that face. 
""" -- GitLab From 568cbe1f1d2c7c12fa8949305a3e9bbb99e63940 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 17 Jul 2017 22:03:59 -0500 Subject: [PATCH 191/266] Fix typo --- meshmode/mesh/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index f069d899..5c81a5c8 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -601,7 +601,7 @@ class Mesh(Record): A list of mappings from neighbor partition numbers to instances of :class:`InterPartitionAdj`. - ``interpart_adj_gorups[igrp][ineighbor_part]`` gives + ``interpart_adj_groups[igrp][ineighbor_part]`` gives the set of facial adjacency relations between group *igrp* and partition *ineighbor_part*. -- GitLab From ed8e2290292013506bae4fce74b18a79fed2fc9c Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 18 Jul 2017 18:25:26 -0500 Subject: [PATCH 192/266] Fix whitespace --- meshmode/mesh/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 5c81a5c8..3c0a9a14 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -627,8 +627,8 @@ class Mesh(Record): .. 
automethos:: adjacency_list """ - face_id_dtype=np.int8 - element_id_dtype=np.int32 + face_id_dtype = np.int8 + element_id_dtype = np.int32 def __init__(self, vertices, groups, skip_tests=False, node_vertex_consistency_tolerance=None, -- GitLab From 3b4b7ffb04deeb6bab6fba73321fef9905f14b8e Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 18 Jul 2017 19:05:37 -0500 Subject: [PATCH 193/266] Replace find_igrp with a batched version --- .../connection/opposite_face.py | 3 +-- meshmode/mesh/__init__.py | 19 +++++++++++-------- meshmode/mesh/processing.py | 7 ++++--- test/test_meshmode.py | 11 +++++++---- 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 2adbc8b7..addaa267 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -452,8 +452,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): i_tgt_faces = adj.element_faces i_src_meshwide_elems = adj.neighbors i_src_faces = adj.neighbor_faces - i_src_grps = np.array([src_mesh.find_igrp(e) - for e in i_src_meshwide_elems]) + i_src_grps = src_mesh.find_igrps(i_src_meshwide_elems) i_src_elems = np.empty_like(i_src_meshwide_elems) for i, i_grp in enumerate(i_src_grps): diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 3c0a9a14..1eece780 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -623,7 +623,7 @@ class Mesh(Record): .. automethod:: __eq__ .. automethod:: __ne__ - .. automethod:: find_igrp + .. automethod:: find_igrps .. automethos:: adjacency_list """ @@ -852,16 +852,19 @@ class Mesh(Record): def __ne__(self, other): return not self.__eq__(other) - def find_igrp(self, meshwide_elem): + def find_igrps(self, meshwide_elems): """ - :arg meshwide_elem: Think of it as ``elem + element_nr_base``. 
- :returns: The index of the group that `meshwide_elem` belongs to. + :arg meshwide_elems: A :class:``numpy.ndarray`` of mesh-wide element numbers + Usually computed by ``elem + element_nr_base``. + :returns: A :class:``numpy.ndarray`` of group numbers that ``meshwide_elem`` + belongs to. """ + grps = np.zeros_like(meshwide_elems) + next_grp_boundary = 0 for igrp, grp in enumerate(self.groups): - if meshwide_elem < grp.nelements: - return igrp - meshwide_elem -= grp.nelements - raise RuntimeError("Could not find group with element %d." % meshwide_elem) + next_grp_boundary += grp.nelements + grps += meshwide_elems >= next_grp_boundary + return grps def adjacency_list(self): """ diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index c647a2fd..47595963 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -148,15 +148,16 @@ def partition_mesh(mesh, part_per_element, part_num): boundary_adj = part_mesh.facial_adjacency_groups[igrp][None] boundary_elems = boundary_adj.elements boundary_faces = boundary_adj.element_faces + p_meshwide_elems = queried_elems[boundary_elems + elem_base] + parent_igrps = mesh.find_igrps(p_meshwide_elems) for adj_idx, elem in enumerate(boundary_elems): face = boundary_faces[adj_idx] tags = -boundary_adj.neighbors[adj_idx] assert tags >= 0, "Expected boundary tag in adjacency group." 
- p_meshwide_elem = queried_elems[elem + elem_base] - parent_igrp = mesh.find_igrp(p_meshwide_elem) + parent_igrp = parent_igrps[adj_idx] parent_elem_base = mesh.groups[parent_igrp].element_nr_base - parent_elem = p_meshwide_elem - parent_elem_base + parent_elem = p_meshwide_elems[adj_idx] - parent_elem_base parent_adj = mesh.facial_adjacency_groups[parent_igrp] diff --git a/test/test_meshmode.py b/test/test_meshmode.py index d002534f..83ce9c86 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -53,7 +53,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("group_factory", [ PolynomialWarpAndBlendGroupFactory, - #InterpolatoryQuadratureSimplexGroupFactory + InterpolatoryQuadratureSimplexGroupFactory ]) @pytest.mark.parametrize("num_parts", [2, 3]) @pytest.mark.parametrize("num_groups", [1, 2]) @@ -211,7 +211,10 @@ def test_partition_mesh(num_parts, num_meshes, dim): (n_meshwide_elem, n_face) =\ adj.neighbor_lookup_table[(elem, face)] - n_grp_num = n_part.find_igrp(n_meshwide_elem) + # Hack: find_igrps expects a numpy.ndarray and returns + # a numpy.ndarray. But if a single integer is fed + # into find_igrps, an integer is returned. 
+ n_grp_num = n_part.find_igrps(n_meshwide_elem) n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base @@ -222,8 +225,8 @@ def test_partition_mesh(num_parts, num_meshes, dim): p_meshwide_elem = part_to_global[elem + elem_base] p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] - p_grp_num = mesh.find_igrp(p_meshwide_elem) - p_n_grp_num = mesh.find_igrp(p_meshwide_n_elem) + p_grp_num = mesh.find_igrps(p_meshwide_elem) + p_n_grp_num = mesh.find_igrps(p_meshwide_n_elem) p_elem_base = mesh.groups[p_grp_num].element_nr_base p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base -- GitLab From 5cf6864da245295a0b9843eec7ec345569781acd Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 22 Jul 2017 18:19:26 -0500 Subject: [PATCH 194/266] Mesh.adjacency_list now returns a list of numpy arrays --- meshmode/mesh/__init__.py | 13 ++++--------- test/test_meshmode.py | 8 ++++---- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 1eece780..6258c8df 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -868,18 +868,13 @@ class Mesh(Record): def adjacency_list(self): """ - :returns: An list of sets. `adjacency[i]` is the set - of all elements that are adjacent to element `i`. - Useful for `pymetis.part_graph`. + :returns: `adjacency[i]` is a list of all elements that are adjacent to + element `i`. Useful for `pymetis.part_graph`. 
""" adjacency_list = [] - for _ in range(self.nelements): - adjacency_list.append(set()) - nodal_adj = self.nodal_adjacency for elem in range(self.nelements): - starts = nodal_adj.neighbors_starts - for n in range(starts[elem], starts[elem + 1]): - adjacency_list[elem].add(nodal_adj.neighbors[n]) + start, end = self.nodal_adjacency.neighbors_starts[elem:elem+2] + adjacency_list.append(self.nodal_adjacency.neighbors[start:end]) return adjacency_list # Design experience: Try not to add too many global data structures to the diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 83ce9c86..fe18140a 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -173,10 +173,10 @@ def test_partition_mesh(num_parts, num_meshes, dim): from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes(meshes) - #from pymetis import part_graph - #(_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) - #part_per_element = np.array(p) - part_per_element = np.random.randint(num_parts, size=mesh.nelements) + from pymetis import part_graph + (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) + part_per_element = np.array(p) + #part_per_element = np.random.randint(num_parts, size=mesh.nelements) from meshmode.mesh.processing import partition_mesh # TODO: The same part_per_element array must be used to partition each mesh. 
-- GitLab From 49aff90940a2c7d3e2f5cc39298e7fe9254bd21f Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 22 Jul 2017 19:08:40 -0500 Subject: [PATCH 195/266] Clean up test_partition_mesh --- test/test_meshmode.py | 87 ++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 42 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index fe18140a..6f15cf15 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -200,48 +200,51 @@ def test_partition_mesh(num_parts, num_meshes, dim): for grp_num, f_groups in enumerate(part.facial_adjacency_groups): f_grp = f_groups[None] elem_base = part.groups[grp_num].element_nr_base - for idx, elem in enumerate(f_grp.elements): - tag = -f_grp.neighbors[idx] - assert tag >= 0 - face = f_grp.element_faces[idx] - for n_part_num, adj in part.interpart_adj_groups[grp_num].items(): - n_part, n_part_to_global = new_meshes[n_part_num] - if tag & part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) != 0: - num_tags[n_part_num] += 1 - - (n_meshwide_elem, n_face) =\ - adj.neighbor_lookup_table[(elem, face)] - # Hack: find_igrps expects a numpy.ndarray and returns - # a numpy.ndarray. But if a single integer is fed - # into find_igrps, an integer is returned. 
- n_grp_num = n_part.find_igrps(n_meshwide_elem) - n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] - n_elem_base = n_part.groups[n_grp_num].element_nr_base - n_elem = n_meshwide_elem - n_elem_base - assert (elem + elem_base, face) ==\ - n_adj.neighbor_lookup_table[(n_elem, n_face)],\ - "InterPartitionAdj is not consistent" - n_part_to_global = new_meshes[n_part_num][1] - p_meshwide_elem = part_to_global[elem + elem_base] - p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] - - p_grp_num = mesh.find_igrps(p_meshwide_elem) - p_n_grp_num = mesh.find_igrps(p_meshwide_n_elem) - - p_elem_base = mesh.groups[p_grp_num].element_nr_base - p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base - p_elem = p_meshwide_elem - p_elem_base - p_n_elem = p_meshwide_n_elem - p_n_elem_base - - f_groups = mesh.facial_adjacency_groups[p_grp_num] - for p_bnd_adj in f_groups.values(): - for idx in range(len(p_bnd_adj.elements)): - if (p_elem == p_bnd_adj.elements[idx] and - face == p_bnd_adj.element_faces[idx]): - assert p_n_elem == p_bnd_adj.neighbors[idx],\ - "Tag does not give correct neighbor" - assert n_face == p_bnd_adj.neighbor_faces[idx],\ - "Tag does not give correct neighbor" + for n_part_num, adj in part.interpart_adj_groups[grp_num].items(): + n_part, n_part_to_global = new_meshes[n_part_num] + tags = -f_grp.neighbors + assert np.all(tags >= 0) + def is_connected_to_part(i): + return (part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) + & tags[i]) + for idx in filter(is_connected_to_part, range(len(tags))): + elem = f_grp.elements[idx] + face = f_grp.element_faces[idx] + num_tags[n_part_num] += 1 + + (n_meshwide_elem, n_face) =\ + adj.neighbor_lookup_table[(elem, face)] + # Hack: find_igrps expects a numpy.ndarray and returns + # a numpy.ndarray. But if a single integer is fed + # into find_igrps, an integer is returned. 
+ n_grp_num = n_part.find_igrps(n_meshwide_elem) + n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] + n_elem_base = n_part.groups[n_grp_num].element_nr_base + n_elem = n_meshwide_elem - n_elem_base + assert (elem + elem_base, face) ==\ + n_adj.neighbor_lookup_table[(n_elem, n_face)],\ + "InterPartitionAdj is not consistent" + n_part_to_global = new_meshes[n_part_num][1] + p_meshwide_elem = part_to_global[elem + elem_base] + p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] + + p_grp_num = mesh.find_igrps(p_meshwide_elem) + p_n_grp_num = mesh.find_igrps(p_meshwide_n_elem) + + p_elem_base = mesh.groups[p_grp_num].element_nr_base + p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base + p_elem = p_meshwide_elem - p_elem_base + p_n_elem = p_meshwide_n_elem - p_n_elem_base + + f_groups = mesh.facial_adjacency_groups[p_grp_num] + for p_bnd_adj in f_groups.values(): + for idx in range(len(p_bnd_adj.elements)): + if (p_elem == p_bnd_adj.elements[idx] and + face == p_bnd_adj.element_faces[idx]): + assert p_n_elem == p_bnd_adj.neighbors[idx],\ + "Tag does not give correct neighbor" + assert n_face == p_bnd_adj.neighbor_faces[idx],\ + "Tag does not give correct neighbor" for i_tag in range(num_parts): tag_sum = 0 -- GitLab From 477f62ad1a3e10e0a89eb20b384e19971e630e32 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 23 Jul 2017 01:31:04 -0500 Subject: [PATCH 196/266] Fix whitespace --- test/test_meshmode.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 6f15cf15..d4d51297 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -204,6 +204,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): n_part, n_part_to_global = new_meshes[n_part_num] tags = -f_grp.neighbors assert np.all(tags >= 0) + def is_connected_to_part(i): return (part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) & tags[i]) -- GitLab From a03e5fc7a5dd54be6dacc6689cb699de82a7842d Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 
26 Jul 2017 18:18:48 -0500 Subject: [PATCH 197/266] Small change to make_partition_connection --- .../discretization/connection/opposite_face.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index addaa267..c7ef3a49 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -454,15 +454,11 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): i_src_faces = adj.neighbor_faces i_src_grps = src_mesh.find_igrps(i_src_meshwide_elems) - i_src_elems = np.empty_like(i_src_meshwide_elems) - for i, i_grp in enumerate(i_src_grps): - elem_base = src_mesh.groups[i_grp].element_nr_base - i_src_elems[i] = i_src_meshwide_elems[i] - elem_base - for i_src_grp in np.unique(i_src_grps): + elem_base = src_mesh.groups[i_src_grp].element_nr_base src_el_lookup =\ - _make_bdry_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) + _make_bdry_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) for i_tgt_face in i_tgt_faces: @@ -478,9 +474,9 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ to_element_indices.get(queue=queue) - src_bdry_element_indices = src_el_lookup[ - i_src_elems[index_flags], - i_src_faces[index_flags]] + elems = i_src_meshwide_elems[index_flags] - elem_base + faces = i_src_faces[index_flags] + src_bdry_element_indices = src_el_lookup[elems, faces] part_batches[i_tgt_grp].extend(_make_cross_face_batches(queue, tgt_bdry, src_bdry, -- GitLab From e3c8afead057452c136229d717c93faab4e37f7b Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 26 Jul 2017 18:18:56 -0500 Subject: [PATCH 198/266] Mesh can now be initialized with face_id_dtype --- meshmode/mesh/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py 
index 6258c8df..8bf8b27c 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -637,6 +637,7 @@ class Mesh(Record): interpart_adj_groups=False, boundary_tags=None, vertex_id_dtype=np.int32, + face_id_dtype=np.int8, element_id_dtype=np.int32): """ The following are keyword-only: @@ -720,6 +721,7 @@ class Mesh(Record): boundary_tags=boundary_tags, btag_to_index=btag_to_index, vertex_id_dtype=np.dtype(vertex_id_dtype), + face_id_dtype=np.dtype(face_id_dtype), element_id_dtype=np.dtype(element_id_dtype), ) -- GitLab From 25a2f6550fd3eff4d9becfb7d5613809839984b0 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 30 Jul 2017 18:07:31 -0500 Subject: [PATCH 199/266] Change test_partition_interpolation inputs --- test/test_meshmode.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index d4d51297..8c052a70 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -51,10 +51,7 @@ logger = logging.getLogger(__name__) # {{{ partition_interpolation -@pytest.mark.parametrize("group_factory", [ - PolynomialWarpAndBlendGroupFactory, - InterpolatoryQuadratureSimplexGroupFactory - ]) +@pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) @pytest.mark.parametrize("num_parts", [2, 3]) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ -- GitLab From d4eaa2105012f0007d66e7ae7762141f66208af4 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 30 Jul 2017 18:49:20 -0500 Subject: [PATCH 200/266] Update docs --- .../discretization/connection/opposite_face.py | 15 ++++++++------- test/test_meshmode.py | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index c7ef3a49..5540096b 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -409,16 
+409,17 @@ def make_opposite_face_connection(volume_to_bdry_conn): def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): """ - Given a two boundary restriction connections *tgt_conn* and *src_conn*, - return a :class:`DirectDiscretizationConnection` that performs data - exchange across adjacent faces of different partitions. + Given a two boundary restriction connections *tgt_to_src_conn* and + *src_to_tgt_conn*, return a :class:`DirectDiscretizationConnection` that + performs data exchange across adjacent faces of different partitions. - :arg tgt_conn: A :class:`Discretization` for the target partition. - :arg src_conn: A :class:`Discretization` for the source partition. - :arg i_src_part: The partition number corresponding to *src_conn*. + :arg tgt_to_src_conn: A :class:`Discretization` of the target partition. + :arg src_to_tgt_conn: A :class:`Discretization` of the source partition. + :arg i_src_part: The partition number of the src partition. :returns: A :class:`DirectDiscretizationConnection` that performs data - exchange across faces in different partitions. + exchange across faces from partition `src_to_tgt_conn` to + `tgt_to_src_conn`. .. 
versionadded:: 2017.1 diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 8c052a70..2941b1d6 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -118,7 +118,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, BTAG_PARTITION(i_src_part)) # If these parts are not connected, don't bother checking the error - bdry_nodes = tgt_to_src_conn.to_discr.nodes()[0].with_queue(queue) + bdry_nodes = tgt_to_src_conn.to_discr.nodes() if bdry_nodes.size == 0: eoc_rec[(i_tgt_part, i_src_part)] = None continue -- GitLab From a5403b50d9ff9b3d0daa733768cc7b5e22af3e0a Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 30 Jul 2017 19:57:01 -0500 Subject: [PATCH 201/266] Working --- .../connection/opposite_face.py | 7 ++- meshmode/mesh/__init__.py | 63 +++++++------------ meshmode/mesh/processing.py | 20 +++++- test/test_meshmode.py | 9 +-- 4 files changed, 50 insertions(+), 49 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 5540096b..e6f3be62 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -426,6 +426,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): .. warning:: Interface is not final. 
""" + from meshmode.mesh.processing import find_group_indices from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) @@ -451,9 +452,9 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): adj = adj_parts[i_src_part] i_tgt_faces = adj.element_faces - i_src_meshwide_elems = adj.neighbors - i_src_faces = adj.neighbor_faces - i_src_grps = src_mesh.find_igrps(i_src_meshwide_elems) + i_src_meshwide_elems = adj.global_neighbors + i_src_faces = adj.global_neighbor_faces + i_src_grps = find_group_indices(src_mesh.groups, i_src_meshwide_elems) for i_src_grp in np.unique(i_src_grps): diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 8bf8b27c..cbb22af6 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -422,48 +422,51 @@ class InterPartitionAdjacency(object): Group-local element numbers. Element ``element_id_dtype elements[i]`` and face ``face_id_dtype element_faces[i]`` is connected to neighbor element - ``element_id_dtype neighbors[i]`` with face - ``face_id_dtype neighbor_faces[i]``. + ``element_id_dtype global_neighbors[i]`` with face + ``face_id_dtype global_neighbor_faces[i]``. .. attribute:: element_faces ``face_id_dtype element_faces[i]`` gives the face of - ``element_id_dtype elements[i]`` that is connected to ``neighbors[i]``. + ``element_id_dtype elements[i]`` that is connected to + ``globla_neighbors[i]``. - .. attribute:: neighbors + .. attribute:: global_neighbors Mesh-wide element numbers. - ``element_id_dtype neighbors[i]`` gives the element number within the + ``element_id_dtype global_neighbors[i]`` gives the element number within the neighboring partiton of the element connected to - ``element_id_dtype elements[i]``. Use ``Mesh.find_igrp()`` to find the group - that the element belongs to, then subtract ``element_nr_base`` to find the - element of the group. + ``element_id_dtype elements[i]``. 
Use ``find_group_instances()`` to find the + group that the element belongs to, then subtract ``element_nr_base`` to find + the element of the group. - .. attribute:: neighbor_faces + .. attribute:: global_neighbor_faces - ``face_id_dtype neighbor_faces[i]`` gives face index within the neighboring - partition of the face connected to ``element_id_dtype elements[i]`` + ``face_id_dtype global_neighbor_faces[i]`` gives face index within the + neighboring partition of the face connected to + ``element_id_dtype elements[i]`` .. attribute:: neighbor_lookup_table A dictionary that maps the tuple ``(element, face)`` to the tuple - ``(neighbor_element, neighbor_face)``. + ``(global_neighbor_element, global_neighbor_face)``. .. versionadded:: 2017.1 """ - def __init__(self, elements, element_faces, neighbors, neighbor_faces): - self.elements = np.array(elements, dtype=Mesh.element_id_dtype) - self.element_faces = np.array(element_faces, dtype=Mesh.face_id_dtype) - self.neighbors = np.array(neighbors, dtype=Mesh.element_id_dtype) - self.neighbor_faces = np.array(neighbor_faces, dtype=Mesh.face_id_dtype) + def __init__(self, elements, element_faces, + global_neighbors, global_neighbor_faces): + self.elements = elements + self.element_faces = element_faces + self.global_neighbors = global_neighbors + self.global_neighbor_faces = global_neighbor_faces self._generate_neighbor_lookup_table() def _generate_neighbor_lookup_table(self): self.neighbor_lookup_table = dict() for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): - nelem = self.neighbors[idx] - nface = self.neighbor_faces[idx] + nelem = self.global_neighbors[idx] + nface = self.global_neighbor_faces[idx] self.neighbor_lookup_table[(elem, face)] = (nelem, nface) # }}} @@ -619,16 +622,12 @@ class Mesh(Record): .. attribute:: element_id_dtype - .. attribute:: face_id_dtype - .. automethod:: __eq__ .. automethod:: __ne__ - .. automethod:: find_igrps .. 
automethos:: adjacency_list """ face_id_dtype = np.int8 - element_id_dtype = np.int32 def __init__(self, vertices, groups, skip_tests=False, node_vertex_consistency_tolerance=None, @@ -637,7 +636,6 @@ class Mesh(Record): interpart_adj_groups=False, boundary_tags=None, vertex_id_dtype=np.int32, - face_id_dtype=np.int8, element_id_dtype=np.int32): """ The following are keyword-only: @@ -721,8 +719,7 @@ class Mesh(Record): boundary_tags=boundary_tags, btag_to_index=btag_to_index, vertex_id_dtype=np.dtype(vertex_id_dtype), - face_id_dtype=np.dtype(face_id_dtype), - element_id_dtype=np.dtype(element_id_dtype), + element_id_dtype=np.dtype(element_id_dtype) ) if not skip_tests: @@ -854,20 +851,6 @@ class Mesh(Record): def __ne__(self, other): return not self.__eq__(other) - def find_igrps(self, meshwide_elems): - """ - :arg meshwide_elems: A :class:``numpy.ndarray`` of mesh-wide element numbers - Usually computed by ``elem + element_nr_base``. - :returns: A :class:``numpy.ndarray`` of group numbers that ``meshwide_elem`` - belongs to. - """ - grps = np.zeros_like(meshwide_elems) - next_grp_boundary = 0 - for igrp, grp in enumerate(self.groups): - next_grp_boundary += grp.nelements - grps += meshwide_elems >= next_grp_boundary - return grps - def adjacency_list(self): """ :returns: `adjacency[i]` is a list of all elements that are adjacent to diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 47595963..7445326a 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -31,6 +31,7 @@ import modepy as mp __doc__ = """ +.. autofunction:: find_group_indices .. autofunction:: partition_mesh .. autofunction:: find_volume_mesh_element_orientations .. autofunction:: perform_flips @@ -40,6 +41,21 @@ __doc__ = """ .. autofunction:: affine_map """ +def find_group_indices(groups, meshwide_elems): + """ + :arg groups: A list of :class:``MeshElementGroup`` instances that contain + ``meshwide_elems``. 
+ :arg meshwide_elems: A :class:``numpy.ndarray`` of mesh-wide element numbers + Usually computed by ``elem + element_nr_base``. + :returns: A :class:``numpy.ndarray`` of group numbers that ``meshwide_elem`` + belongs to. + """ + grps = np.zeros_like(meshwide_elems) + next_grp_boundary = 0 + for igrp, grp in enumerate(groups): + next_grp_boundary += grp.nelements + grps += meshwide_elems >= next_grp_boundary + return grps def partition_mesh(mesh, part_per_element, part_num): """ @@ -149,7 +165,7 @@ def partition_mesh(mesh, part_per_element, part_num): boundary_elems = boundary_adj.elements boundary_faces = boundary_adj.element_faces p_meshwide_elems = queried_elems[boundary_elems + elem_base] - parent_igrps = mesh.find_igrps(p_meshwide_elems) + parent_igrps = find_group_indices(mesh.groups, p_meshwide_elems) for adj_idx, elem in enumerate(boundary_elems): face = boundary_faces[adj_idx] tags = -boundary_adj.neighbors[adj_idx] @@ -191,7 +207,7 @@ def partition_mesh(mesh, part_per_element, part_num): for n_part_num, adj_data in adj_dict.items(): elems, faces, n_elems, n_faces = np.array(adj_data).T adj_grps[igrp][n_part_num] =\ - InterPartitionAdjacency(elems, faces, n_elems, n_faces) + InterPartitionAdjacency(elems, faces, n_elems, n_faces) connected_mesh = part_mesh.copy() connected_mesh.interpart_adj_groups = adj_grps diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 2941b1d6..da018349 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), - (3, [3, 4]) + #(3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -190,6 +190,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): "part_mesh has the wrong number of BTAG_ALL boundaries" from meshmode.mesh import BTAG_PARTITION + from meshmode.mesh.processing import 
find_group_indices num_tags = np.zeros((num_parts,)) for part_num in range(num_parts): @@ -215,7 +216,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): # Hack: find_igrps expects a numpy.ndarray and returns # a numpy.ndarray. But if a single integer is fed # into find_igrps, an integer is returned. - n_grp_num = n_part.find_igrps(n_meshwide_elem) + n_grp_num = find_group_indices(n_part.groups, n_meshwide_elem) n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base @@ -226,8 +227,8 @@ def test_partition_mesh(num_parts, num_meshes, dim): p_meshwide_elem = part_to_global[elem + elem_base] p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] - p_grp_num = mesh.find_igrps(p_meshwide_elem) - p_n_grp_num = mesh.find_igrps(p_meshwide_n_elem) + p_grp_num = find_group_indices(mesh.groups, p_meshwide_elem) + p_n_grp_num = find_group_indices(mesh.groups, p_meshwide_n_elem) p_elem_base = mesh.groups[p_grp_num].element_nr_base p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base -- GitLab From b6f0f49659f23c7fa2e30ffb1023690001c48bfc Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 30 Jul 2017 20:02:20 -0500 Subject: [PATCH 202/266] Fix whitespace --- meshmode/mesh/processing.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 7445326a..867b9aea 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -41,9 +41,10 @@ __doc__ = """ .. autofunction:: affine_map """ + def find_group_indices(groups, meshwide_elems): """ - :arg groups: A list of :class:``MeshElementGroup`` instances that contain + :arg groups: A list of :class:``MeshElementGroup`` instances that contain ``meshwide_elems``. :arg meshwide_elems: A :class:``numpy.ndarray`` of mesh-wide element numbers Usually computed by ``elem + element_nr_base``. 
@@ -57,7 +58,10 @@ def find_group_indices(groups, meshwide_elems): grps += meshwide_elems >= next_grp_boundary return grps -def partition_mesh(mesh, part_per_element, part_num): + +# {{{ partition_mesh + +def partition_mesh(mesh, part_per_element, part_nr): """ :arg mesh: A :class:`meshmode.mesh.Mesh` to be partitioned. :arg part_per_element: A :class:`numpy.ndarray` containing one -- GitLab From e7dc583fd9ade502d45fcab1bb6b5c24d46dcfc0 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 31 Jul 2017 22:30:43 -0500 Subject: [PATCH 203/266] Working --- .../connection/opposite_face.py | 25 +++--- meshmode/mesh/__init__.py | 21 ++++- meshmode/mesh/processing.py | 20 ++--- test/test_meshmode.py | 87 +++++++++---------- 4 files changed, 82 insertions(+), 71 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index e6f3be62..a384e8ff 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -437,23 +437,28 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): tgt_mesh = tgt_vol.mesh src_mesh = src_vol.mesh - adj_grps = tgt_mesh.interpart_adj_groups - part_batches = [] with cl.CommandQueue(tgt_vol.cl_context) as queue: - for i_tgt_grp, adj_parts in enumerate(adj_grps): + for i_tgt_grp, adj in tgt_mesh.interpart_adj_groups.items(): part_batches.append([]) - if i_src_part not in adj_parts: - # Skip because i_tgt_grp is not connected to i_src_part. - continue + #if i_src_part not in adj_parts: + # # Skip because i_tgt_grp is not connected to i_src_part. 
+ # continue - adj = adj_parts[i_src_part] + #adj = adj_parts[i_src_part] - i_tgt_faces = adj.element_faces - i_src_meshwide_elems = adj.global_neighbors - i_src_faces = adj.global_neighbor_faces + idxes = i_src_part == adj.neighbor_parts + if not np.any(idxes): + continue + i_tgt_faces = adj.element_faces[idxes] + i_src_meshwide_elems = adj.global_neighbors[idxes] + i_src_faces = adj.global_neighbor_faces[idxes] + + #i_tgt_faces = adj.element_faces + #i_src_meshwide_elems = adj.global_neighbors + #i_src_faces = adj.global_neighbor_faces i_src_grps = find_group_indices(src_mesh.groups, i_src_meshwide_elems) for i_src_grp in np.unique(i_src_grps): diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index cbb22af6..e9feb743 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -454,20 +454,35 @@ class InterPartitionAdjacency(object): .. versionadded:: 2017.1 """ - def __init__(self, elements, element_faces, - global_neighbors, global_neighbor_faces): + def __init__(self, elements, + element_faces, + neighbor_parts, + global_neighbors, + global_neighbor_faces): self.elements = elements self.element_faces = element_faces + self.neighbor_parts = neighbor_parts self.global_neighbors = global_neighbors self.global_neighbor_faces = global_neighbor_faces self._generate_neighbor_lookup_table() + def __eq__(self, other): + return ( + type(self) == type(other) + and np.array_equal(self.elements, other.elements) + and np.array_equal(self.element_faces, other.element_faces) + and np.array_equal(self.neighbors, other.neighbors) + and np.array_equal(self.neighbor_faces, other.neighbor_faces) + and np.array_equal(self.neighbor_part, other.neighbor_part) + ) + def _generate_neighbor_lookup_table(self): self.neighbor_lookup_table = dict() for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): nelem = self.global_neighbors[idx] nface = self.global_neighbor_faces[idx] - self.neighbor_lookup_table[(elem, face)] = (nelem, nface) + 
npart = self.neighbor_parts[idx] + self.neighbor_lookup_table[(elem, face)] = (npart, nelem, nface) # }}} diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 867b9aea..f41e616b 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -161,7 +161,7 @@ def partition_mesh(mesh, part_per_element, part_nr): part_mesh = Mesh(new_vertices, new_mesh_groups, facial_adjacency_groups=None, boundary_tags=boundary_tags) - adj_grps = [dict() for _ in range(len(part_mesh.groups))] + adj_data = [[] for _ in range(len(part_mesh.groups))] for igrp, grp in enumerate(part_mesh.groups): elem_base = grp.element_nr_base @@ -200,18 +200,16 @@ def partition_mesh(mesh, part_per_element, part_nr): n_meshwide_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) - if n_part_num not in adj_grps[igrp]: - adj_grps[igrp][n_part_num] = [] - - adj_grps[igrp][n_part_num].\ - append((elem, face, n_meshwide_elem, n_face)) + adj_data[igrp].append((elem, face, + n_part_num, n_meshwide_elem, n_face)) from meshmode.mesh import InterPartitionAdjacency - for igrp, adj_dict in enumerate(adj_grps): - for n_part_num, adj_data in adj_dict.items(): - elems, faces, n_elems, n_faces = np.array(adj_data).T - adj_grps[igrp][n_part_num] =\ - InterPartitionAdjacency(elems, faces, n_elems, n_faces) + adj_grps = dict() + for igrp, connection in enumerate(adj_data): + if connection: + elems, faces, n_parts, n_elems, n_faces = np.array(connection).T + adj_grps[igrp] =\ + InterPartitionAdjacency(elems, faces, n_parts, n_elems, n_faces) connected_mesh = part_mesh.copy() connected_mesh.interpart_adj_groups = adj_grps diff --git a/test/test_meshmode.py b/test/test_meshmode.py index da018349..bfbaf1f7 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -195,55 +195,48 @@ def test_partition_mesh(num_parts, num_meshes, dim): for part_num in range(num_parts): part, part_to_global = new_meshes[part_num] - for grp_num, f_groups in 
enumerate(part.facial_adjacency_groups): - f_grp = f_groups[None] + for grp_num, adj in part.interpart_adj_groups.items(): + f_grp = part.facial_adjacency_groups[grp_num][None] + tags = -f_grp.neighbors + assert np.all(tags >= 0) elem_base = part.groups[grp_num].element_nr_base - for n_part_num, adj in part.interpart_adj_groups[grp_num].items(): + for elem, face, n_part_num, n_meshwide_elem, n_face in\ + zip(adj.elements, adj.element_faces, + adj.neighbor_parts, adj.global_neighbors, + adj.global_neighbor_faces): + num_tags[n_part_num] += 1 n_part, n_part_to_global = new_meshes[n_part_num] - tags = -f_grp.neighbors - assert np.all(tags >= 0) - - def is_connected_to_part(i): - return (part.boundary_tag_bit(BTAG_PARTITION(n_part_num)) - & tags[i]) - for idx in filter(is_connected_to_part, range(len(tags))): - elem = f_grp.elements[idx] - face = f_grp.element_faces[idx] - num_tags[n_part_num] += 1 - - (n_meshwide_elem, n_face) =\ - adj.neighbor_lookup_table[(elem, face)] - # Hack: find_igrps expects a numpy.ndarray and returns - # a numpy.ndarray. But if a single integer is fed - # into find_igrps, an integer is returned. 
- n_grp_num = find_group_indices(n_part.groups, n_meshwide_elem) - n_adj = n_part.interpart_adj_groups[n_grp_num][part_num] - n_elem_base = n_part.groups[n_grp_num].element_nr_base - n_elem = n_meshwide_elem - n_elem_base - assert (elem + elem_base, face) ==\ - n_adj.neighbor_lookup_table[(n_elem, n_face)],\ - "InterPartitionAdj is not consistent" - n_part_to_global = new_meshes[n_part_num][1] - p_meshwide_elem = part_to_global[elem + elem_base] - p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] - - p_grp_num = find_group_indices(mesh.groups, p_meshwide_elem) - p_n_grp_num = find_group_indices(mesh.groups, p_meshwide_n_elem) - - p_elem_base = mesh.groups[p_grp_num].element_nr_base - p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base - p_elem = p_meshwide_elem - p_elem_base - p_n_elem = p_meshwide_n_elem - p_n_elem_base - - f_groups = mesh.facial_adjacency_groups[p_grp_num] - for p_bnd_adj in f_groups.values(): - for idx in range(len(p_bnd_adj.elements)): - if (p_elem == p_bnd_adj.elements[idx] and - face == p_bnd_adj.element_faces[idx]): - assert p_n_elem == p_bnd_adj.neighbors[idx],\ - "Tag does not give correct neighbor" - assert n_face == p_bnd_adj.neighbor_faces[idx],\ - "Tag does not give correct neighbor" + # Hack: find_igrps expects a numpy.ndarray and returns + # a numpy.ndarray. But if a single integer is fed + # into find_igrps, an integer is returned. 
+ n_grp_num = find_group_indices(n_part.groups, n_meshwide_elem) + n_adj = n_part.interpart_adj_groups[int(n_grp_num)] + n_elem_base = n_part.groups[n_grp_num].element_nr_base + n_elem = n_meshwide_elem - n_elem_base + assert (part_num, elem + elem_base, face) ==\ + n_adj.neighbor_lookup_table[(n_elem, n_face)],\ + "InterPartitionAdj is not consistent" + n_part_to_global = new_meshes[n_part_num][1] + p_meshwide_elem = part_to_global[elem + elem_base] + p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] + + p_grp_num = find_group_indices(mesh.groups, p_meshwide_elem) + p_n_grp_num = find_group_indices(mesh.groups, p_meshwide_n_elem) + + p_elem_base = mesh.groups[p_grp_num].element_nr_base + p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base + p_elem = p_meshwide_elem - p_elem_base + p_n_elem = p_meshwide_n_elem - p_n_elem_base + + f_groups = mesh.facial_adjacency_groups[p_grp_num] + for p_bnd_adj in f_groups.values(): + for idx in range(len(p_bnd_adj.elements)): + if (p_elem == p_bnd_adj.elements[idx] and + face == p_bnd_adj.element_faces[idx]): + assert p_n_elem == p_bnd_adj.neighbors[idx],\ + "Tag does not give correct neighbor" + assert n_face == p_bnd_adj.neighbor_faces[idx],\ + "Tag does not give correct neighbor" for i_tag in range(num_parts): tag_sum = 0 -- GitLab From e1a4b9996471e139498966ef92ce129c5ff80691 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 1 Aug 2017 02:49:55 -0500 Subject: [PATCH 204/266] Working --- .../connection/opposite_face.py | 22 +-- meshmode/mesh/__init__.py | 165 ++++++++---------- meshmode/mesh/processing.py | 16 +- test/test_meshmode.py | 28 +-- 4 files changed, 108 insertions(+), 123 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index a384e8ff..f5461b77 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -441,24 +441,18 @@ def 
make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): with cl.CommandQueue(tgt_vol.cl_context) as queue: - for i_tgt_grp, adj in tgt_mesh.interpart_adj_groups.items(): + for i_tgt_grp in range(len(tgt_mesh.groups)): part_batches.append([]) - #if i_src_part not in adj_parts: - # # Skip because i_tgt_grp is not connected to i_src_part. - # continue + adj = tgt_mesh.facial_adjacency_groups[i_tgt_grp]['part'] - #adj = adj_parts[i_src_part] - - idxes = i_src_part == adj.neighbor_parts - if not np.any(idxes): + indices = i_src_part == adj.neighbor_parts + if not np.any(indices): + # Skip because i_tgt_grp is not connected to i_src_part. continue - i_tgt_faces = adj.element_faces[idxes] - i_src_meshwide_elems = adj.global_neighbors[idxes] - i_src_faces = adj.global_neighbor_faces[idxes] + i_tgt_faces = adj.element_faces[indices] + i_src_meshwide_elems = adj.global_neighbors[indices] + i_src_faces = adj.neighbor_faces[indices] - #i_tgt_faces = adj.element_faces - #i_src_meshwide_elems = adj.global_neighbors - #i_src_faces = adj.global_neighbor_faces i_src_grps = find_group_indices(src_mesh.groups, i_src_meshwide_elems) for i_src_grp in np.unique(i_src_grps): diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index e9feb743..f1fe9b5a 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -409,84 +409,6 @@ class NodalAdjacency(Record): # }}} -# {{{ partition adjacency - -class InterPartitionAdjacency(object): - """ - Describes facial adjacency information of elements in one :class:`Mesh` to - elements in another :class:`Mesh`. The element's boundary tag gives the - partition that it is connected to. - - .. attribute:: elements - - Group-local element numbers. - Element ``element_id_dtype elements[i]`` and face - ``face_id_dtype element_faces[i]`` is connected to neighbor element - ``element_id_dtype global_neighbors[i]`` with face - ``face_id_dtype global_neighbor_faces[i]``. - - .. 
attribute:: element_faces - - ``face_id_dtype element_faces[i]`` gives the face of - ``element_id_dtype elements[i]`` that is connected to - ``globla_neighbors[i]``. - - .. attribute:: global_neighbors - - Mesh-wide element numbers. - ``element_id_dtype global_neighbors[i]`` gives the element number within the - neighboring partiton of the element connected to - ``element_id_dtype elements[i]``. Use ``find_group_instances()`` to find the - group that the element belongs to, then subtract ``element_nr_base`` to find - the element of the group. - - .. attribute:: global_neighbor_faces - - ``face_id_dtype global_neighbor_faces[i]`` gives face index within the - neighboring partition of the face connected to - ``element_id_dtype elements[i]`` - - .. attribute:: neighbor_lookup_table - - A dictionary that maps the tuple ``(element, face)`` to the tuple - ``(global_neighbor_element, global_neighbor_face)``. - - .. versionadded:: 2017.1 - """ - - def __init__(self, elements, - element_faces, - neighbor_parts, - global_neighbors, - global_neighbor_faces): - self.elements = elements - self.element_faces = element_faces - self.neighbor_parts = neighbor_parts - self.global_neighbors = global_neighbors - self.global_neighbor_faces = global_neighbor_faces - self._generate_neighbor_lookup_table() - - def __eq__(self, other): - return ( - type(self) == type(other) - and np.array_equal(self.elements, other.elements) - and np.array_equal(self.element_faces, other.element_faces) - and np.array_equal(self.neighbors, other.neighbors) - and np.array_equal(self.neighbor_faces, other.neighbor_faces) - and np.array_equal(self.neighbor_part, other.neighbor_part) - ) - - def _generate_neighbor_lookup_table(self): - self.neighbor_lookup_table = dict() - for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): - nelem = self.global_neighbors[idx] - nface = self.global_neighbor_faces[idx] - npart = self.neighbor_parts[idx] - self.neighbor_lookup_table[(elem, face)] = (npart, 
nelem, nface) - -# }}} - - # {{{ facial adjacency class FacialAdjacencyGroup(Record): @@ -561,6 +483,82 @@ class FacialAdjacencyGroup(Record): # }}} +# {{{ partition adjacency + +class InterPartitionAdjacency(FacialAdjacencyGroup): + """ + Describes facial adjacency information of elements in one :class:`Mesh` to + elements in another :class:`Mesh`. The element's boundary tag gives the + partition that it is connected to. + + .. attribute:: elements + + Group-local element numbers. + Element ``element_id_dtype elements[i]`` and face + ``face_id_dtype element_faces[i]`` is connected to neighbor element + ``element_id_dtype global_neighbors[i]`` with face + ``face_id_dtype global_neighbor_faces[i]``. + + .. attribute:: element_faces + + ``face_id_dtype element_faces[i]`` gives the face of + ``element_id_dtype elements[i]`` that is connected to + ``globla_neighbors[i]``. + + .. attribute:: global_neighbors + + Mesh-wide element numbers. + ``element_id_dtype global_neighbors[i]`` gives the element number within the + neighboring partiton of the element connected to + ``element_id_dtype elements[i]``. Use ``find_group_instances()`` to find the + group that the element belongs to, then subtract ``element_nr_base`` to find + the element of the group. + + .. attribute:: global_neighbor_faces + + ``face_id_dtype global_neighbor_faces[i]`` gives face index within the + neighboring partition of the face connected to + ``element_id_dtype elements[i]`` + + .. attribute:: neighbor_lookup_table + + A dictionary that maps the tuple ``(element, face)`` to the tuple + ``(global_neighbor_element, global_neighbor_face)``. + + .. 
versionadded:: 2017.1 + """ + + def __init__(self, elements, + element_faces, + neighbor_parts, + global_neighbors, + neighbor_faces): + self.elements = elements + self.element_faces = element_faces + self.neighbor_parts = neighbor_parts + self.global_neighbors = global_neighbors + self.neighbor_faces = neighbor_faces + self._generate_neighbor_lookup_table() + + def __eq__(self, other): + return (type(self) == type(other) + and np.array_equal(self.elements, other.elements) + and np.array_equal(self.element_faces, other.element_faces) + and np.array_equal(self.global_neighbors, other.global_neighbors) + and np.array_equal(self.neighbor_faces, other.neighbor_faces) + and np.array_equal(self.neighbor_part, other.neighbor_part)) + + def _generate_neighbor_lookup_table(self): + self.neighbor_lookup_table = dict() + for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): + nelem = self.global_neighbors[idx] + nface = self.neighbor_faces[idx] + npart = self.neighbor_parts[idx] + self.neighbor_lookup_table[(elem, face)] = (npart, nelem, nface) + +# }}} + + # {{{ mesh class Mesh(Record): @@ -614,15 +612,6 @@ class Mesh(Record): (Note that element groups are not necessarily contiguous like the figure may suggest.) - .. attribute:: interpart_adj_groups - - A list of mappings from neighbor partition numbers to instances of - :class:`InterPartitionAdj`. - - ``interpart_adj_groups[igrp][ineighbor_part]`` gives - the set of facial adjacency relations between group *igrp* - and partition *ineighbor_part*. - .. attribute:: boundary_tags A tuple of boundary tag identifiers. 
:class:`BTAG_ALL` and @@ -648,7 +637,6 @@ class Mesh(Record): node_vertex_consistency_tolerance=None, nodal_adjacency=False, facial_adjacency_groups=False, - interpart_adj_groups=False, boundary_tags=None, vertex_id_dtype=np.int32, element_id_dtype=np.int32): @@ -730,7 +718,6 @@ class Mesh(Record): self, vertices=vertices, groups=new_groups, _nodal_adjacency=nodal_adjacency, _facial_adjacency_groups=facial_adjacency_groups, - interpart_adj_groups=interpart_adj_groups, boundary_tags=boundary_tags, btag_to_index=btag_to_index, vertex_id_dtype=np.dtype(vertex_id_dtype), diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index f41e616b..4b9fcb67 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -203,16 +203,16 @@ def partition_mesh(mesh, part_per_element, part_nr): adj_data[igrp].append((elem, face, n_part_num, n_meshwide_elem, n_face)) + connected_mesh = part_mesh.copy() + from meshmode.mesh import InterPartitionAdjacency - adj_grps = dict() - for igrp, connection in enumerate(adj_data): - if connection: - elems, faces, n_parts, n_elems, n_faces = np.array(connection).T - adj_grps[igrp] =\ - InterPartitionAdjacency(elems, faces, n_parts, n_elems, n_faces) + for igrp, adj in enumerate(adj_data): + if adj: + elems, faces, n_parts, n_elems, n_faces = np.array(adj).T + connected_mesh.facial_adjacency_groups[igrp]['part'] =\ + InterPartitionAdjacency(elems, faces, + n_parts, n_elems, n_faces) - connected_mesh = part_mesh.copy() - connected_mesh.interpart_adj_groups = adj_grps return connected_mesh, queried_elems # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index bfbaf1f7..9df1ae8e 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -189,28 +189,33 @@ def test_partition_mesh(num_parts, num_meshes, dim): [count_tags(new_meshes[i][0], BTAG_ALL) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" - from meshmode.mesh import BTAG_PARTITION + from meshmode.mesh 
import BTAG_PARTITION, InterPartitionAdjacency from meshmode.mesh.processing import find_group_indices num_tags = np.zeros((num_parts,)) for part_num in range(num_parts): part, part_to_global = new_meshes[part_num] - for grp_num, adj in part.interpart_adj_groups.items(): - f_grp = part.facial_adjacency_groups[grp_num][None] - tags = -f_grp.neighbors - assert np.all(tags >= 0) + for grp_num in range(len(part.groups)): + #f_grp = part.facial_adjacency_groups[grp_num][None] + #tags = -f_grp.neighbors + #assert np.all(tags >= 0) + if not 'part' in part.facial_adjacency_groups[grp_num]: + continue + adj = part.facial_adjacency_groups[grp_num]['part'] + if not isinstance(adj, InterPartitionAdjacency): + continue elem_base = part.groups[grp_num].element_nr_base for elem, face, n_part_num, n_meshwide_elem, n_face in\ zip(adj.elements, adj.element_faces, adj.neighbor_parts, adj.global_neighbors, - adj.global_neighbor_faces): + adj.neighbor_faces): num_tags[n_part_num] += 1 n_part, n_part_to_global = new_meshes[n_part_num] # Hack: find_igrps expects a numpy.ndarray and returns # a numpy.ndarray. But if a single integer is fed # into find_igrps, an integer is returned. 
n_grp_num = find_group_indices(n_part.groups, n_meshwide_elem) - n_adj = n_part.interpart_adj_groups[int(n_grp_num)] + n_adj = n_part.facial_adjacency_groups[int(n_grp_num)]['part'] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base assert (part_num, elem + elem_base, face) ==\ @@ -249,11 +254,10 @@ def test_partition_mesh(num_parts, num_meshes, dim): def count_tags(mesh, tag): num_bnds = 0 for adj_dict in mesh.facial_adjacency_groups: - for _, bdry_group in adj_dict.items(): - for neighbors in bdry_group.neighbors: - if neighbors < 0: - if -neighbors & mesh.boundary_tag_bit(tag) != 0: - num_bnds += 1 + for neighbors in adj_dict[None].neighbors: + if neighbors < 0: + if -neighbors & mesh.boundary_tag_bit(tag) != 0: + num_bnds += 1 return num_bnds # }}} -- GitLab From a160f891a0e92897d39e66d8dbc46b07a9be48cc Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 1 Aug 2017 20:59:28 -0500 Subject: [PATCH 205/266] InterPartitionAdjacency inherits FacialAdjacencyGroup --- meshmode/mesh/__init__.py | 35 +++++++++++++++++++++-------------- meshmode/mesh/processing.py | 36 +++++++++++++++++++++++++++--------- test/test_meshmode.py | 31 +++++++++++++++++-------------- 3 files changed, 65 insertions(+), 37 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index f1fe9b5a..354021fb 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -514,13 +514,13 @@ class InterPartitionAdjacency(FacialAdjacencyGroup): group that the element belongs to, then subtract ``element_nr_base`` to find the element of the group. - .. attribute:: global_neighbor_faces + .. attribute:: neighbor_faces ``face_id_dtype global_neighbor_faces[i]`` gives face index within the neighboring partition of the face connected to ``element_id_dtype elements[i]`` - .. attribute:: neighbor_lookup_table + .. 
attribute:: index_lookup_table A dictionary that maps the tuple ``(element, face)`` to the tuple ``(global_neighbor_element, global_neighbor_face)``. @@ -528,33 +528,41 @@ class InterPartitionAdjacency(FacialAdjacencyGroup): .. versionadded:: 2017.1 """ + ''' + I don't like the idea of having InterPartitionAdjacency replace the boundary + group for FacialAdjacencyGroup. A boundary may be a real boundary or it may + have a partition adjacent to it. FacialAdjacency and InterPartitionAdjacecy + will not have the same elements. They should be separate. facial_adjacency_groups + should have groups for real boundaries and for 'fake' boundaries. + ''' + def __init__(self, elements, element_faces, + neighbors, + igroup, + i_neighbor_group, neighbor_parts, global_neighbors, neighbor_faces): self.elements = elements self.element_faces = element_faces + self.neighbors = neighbors + self.igroup = igroup + self.i_neighbor_group = i_neighbor_group self.neighbor_parts = neighbor_parts self.global_neighbors = global_neighbors self.neighbor_faces = neighbor_faces - self._generate_neighbor_lookup_table() + self._generate_index_lookup_table() def __eq__(self, other): - return (type(self) == type(other) - and np.array_equal(self.elements, other.elements) - and np.array_equal(self.element_faces, other.element_faces) + return (super.__eq__(self, other) and np.array_equal(self.global_neighbors, other.global_neighbors) - and np.array_equal(self.neighbor_faces, other.neighbor_faces) and np.array_equal(self.neighbor_part, other.neighbor_part)) - def _generate_neighbor_lookup_table(self): - self.neighbor_lookup_table = dict() + def _generate_index_lookup_table(self): + self.index_lookup_table = dict() for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): - nelem = self.global_neighbors[idx] - nface = self.neighbor_faces[idx] - npart = self.neighbor_parts[idx] - self.neighbor_lookup_table[(elem, face)] = (npart, nelem, nface) + self.index_lookup_table[(elem, face)] = idx 
# }}} @@ -847,7 +855,6 @@ class Mesh(Record): == other._nodal_adjacency) and (self._facial_adjacency_groups == other._facial_adjacency_groups) - and self.interpart_adj_groups == other.interpart_adj_groups and self.boundary_tags == other.boundary_tags) def __ne__(self, other): diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 4b9fcb67..29588c0b 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -172,8 +172,8 @@ def partition_mesh(mesh, part_per_element, part_nr): parent_igrps = find_group_indices(mesh.groups, p_meshwide_elems) for adj_idx, elem in enumerate(boundary_elems): face = boundary_faces[adj_idx] - tags = -boundary_adj.neighbors[adj_idx] - assert tags >= 0, "Expected boundary tag in adjacency group." + tag = -boundary_adj.neighbors[adj_idx] + assert tag >= 0, "Expected boundary tag in adjacency group." parent_igrp = parent_igrps[adj_idx] parent_elem_base = mesh.groups[parent_igrp].element_nr_base @@ -191,16 +191,16 @@ def partition_mesh(mesh, part_per_element, part_nr): n_face = parent_facial_group.neighbor_faces[idx] n_part_num = part_per_element[rank_neighbor] - tags = tags & ~part_mesh.boundary_tag_bit(BTAG_ALL) - tags = tags | part_mesh.boundary_tag_bit( + tag = tag & ~part_mesh.boundary_tag_bit(BTAG_ALL) + tag = tag | part_mesh.boundary_tag_bit( BTAG_PARTITION(n_part_num)) - boundary_adj.neighbors[adj_idx] = -tags + boundary_adj.neighbors[adj_idx] = -tag # Find the neighbor element from the other partition. 
n_meshwide_elem = np.count_nonzero( part_per_element[:rank_neighbor] == n_part_num) - adj_data[igrp].append((elem, face, + adj_data[igrp].append((elem, face, n_part_num, n_meshwide_elem, n_face)) connected_mesh = part_mesh.copy() @@ -208,9 +208,27 @@ def partition_mesh(mesh, part_per_element, part_nr): from meshmode.mesh import InterPartitionAdjacency for igrp, adj in enumerate(adj_data): if adj: - elems, faces, n_parts, n_elems, n_faces = np.array(adj).T - connected_mesh.facial_adjacency_groups[igrp]['part'] =\ - InterPartitionAdjacency(elems, faces, + boundary_adj = connected_mesh.facial_adjacency_groups[igrp][None] + n_parts = np.zeros_like(boundary_adj.elements) + n_parts.fill(-1) + n_elems = np.copy(n_parts) + n_faces = np.copy(n_parts) + for elem, face, n_part, n_elem, n_face in adj: + idx = np.where(np.logical_and(elem == boundary_adj.elements, + face == boundary_adj.element_faces))[0] + n_parts[idx] = n_part + n_elems[idx] = n_elem + n_faces[idx] = n_face + #bdry_perm = np.argsort(boundary_adj.elements) + #bdry_elems = boundary_adj.elements[perm] + #bdry_faces = boundary_adj.element_faces[perm] + #elems, faces, n_parts, n_elems, n_faces = np.array(adj).T + connected_mesh.facial_adjacency_groups[igrp][None] =\ + InterPartitionAdjacency(boundary_adj.elements, + boundary_adj.element_faces, + boundary_adj.neighbors, + boundary_adj.igroup, + boundary_adj.ineighbor_group, n_parts, n_elems, n_faces) return connected_mesh, queried_elems diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 9df1ae8e..d793d943 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -196,32 +196,35 @@ def test_partition_mesh(num_parts, num_meshes, dim): for part_num in range(num_parts): part, part_to_global = new_meshes[part_num] for grp_num in range(len(part.groups)): - #f_grp = part.facial_adjacency_groups[grp_num][None] - #tags = -f_grp.neighbors - #assert np.all(tags >= 0) - if not 'part' in part.facial_adjacency_groups[grp_num]: - continue - adj = 
part.facial_adjacency_groups[grp_num]['part'] + adj = part.facial_adjacency_groups[grp_num][None] + tags = -part.facial_adjacency_groups[grp_num][None].neighbors + assert np.all(tags >= 0) if not isinstance(adj, InterPartitionAdjacency): continue elem_base = part.groups[grp_num].element_nr_base - for elem, face, n_part_num, n_meshwide_elem, n_face in\ - zip(adj.elements, adj.element_faces, - adj.neighbor_parts, adj.global_neighbors, - adj.neighbor_faces): + for idx in range(len(adj.elements)): + if adj.global_neighbors[idx] == -1: + continue + elem = adj.elements[idx] + face = adj.element_faces[idx] + n_part_num = adj.neighbor_parts[idx] + n_meshwide_elem = adj.global_neighbors[idx] + n_face = adj.neighbor_faces[idx] num_tags[n_part_num] += 1 n_part, n_part_to_global = new_meshes[n_part_num] # Hack: find_igrps expects a numpy.ndarray and returns # a numpy.ndarray. But if a single integer is fed # into find_igrps, an integer is returned. n_grp_num = find_group_indices(n_part.groups, n_meshwide_elem) - n_adj = n_part.facial_adjacency_groups[int(n_grp_num)]['part'] + n_adj = n_part.facial_adjacency_groups[int(n_grp_num)][None] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base - assert (part_num, elem + elem_base, face) ==\ - n_adj.neighbor_lookup_table[(n_elem, n_face)],\ + n_idx = n_adj.index_lookup_table[(n_elem, n_face)] + assert (part_num == n_adj.neighbor_parts[n_idx] + and elem + elem_base == n_adj.global_neighbors[n_idx] + and face == n_adj.neighbor_faces[n_idx]),\ "InterPartitionAdj is not consistent" - n_part_to_global = new_meshes[n_part_num][1] + _, n_part_to_global = new_meshes[n_part_num] p_meshwide_elem = part_to_global[elem + elem_base] p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] -- GitLab From 6b29e97c21991b3397b963911cc5d970620fbfa5 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 7 Aug 2017 15:12:18 -0500 Subject: [PATCH 206/266] Working --- .../connection/opposite_face.py | 2 +- 
meshmode/mesh/processing.py | 42 ++++++++++++------- test/test_meshmode.py | 2 +- 3 files changed, 29 insertions(+), 17 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index f5461b77..61834402 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -443,7 +443,7 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): for i_tgt_grp in range(len(tgt_mesh.groups)): part_batches.append([]) - adj = tgt_mesh.facial_adjacency_groups[i_tgt_grp]['part'] + adj = tgt_mesh.facial_adjacency_groups[i_tgt_grp][None] indices = i_src_part == adj.neighbor_parts if not np.any(indices): diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 29588c0b..cb2e36e6 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -211,25 +211,37 @@ def partition_mesh(mesh, part_per_element, part_nr): boundary_adj = connected_mesh.facial_adjacency_groups[igrp][None] n_parts = np.zeros_like(boundary_adj.elements) n_parts.fill(-1) - n_elems = np.copy(n_parts) + global_n_elems = np.copy(n_parts) n_faces = np.copy(n_parts) - for elem, face, n_part, n_elem, n_face in adj: - idx = np.where(np.logical_and(elem == boundary_adj.elements, - face == boundary_adj.element_faces))[0] - n_parts[idx] = n_part - n_elems[idx] = n_elem - n_faces[idx] = n_face - #bdry_perm = np.argsort(boundary_adj.elements) - #bdry_elems = boundary_adj.elements[perm] - #bdry_faces = boundary_adj.element_faces[perm] - #elems, faces, n_parts, n_elems, n_faces = np.array(adj).T + bdry_perm = np.lexsort([boundary_adj.element_faces, boundary_adj.elements]) + bdry_elems = boundary_adj.elements[bdry_perm] + bdry_faces = boundary_adj.element_faces[bdry_perm] + bdry_neighbors = boundary_adj.neighbors[bdry_perm] + adj_elems, adj_faces, adj_n_parts, adj_gl_n_elems, adj_n_faces = np.array(adj).T + adj_perm = np.lexsort([adj_faces, 
adj_elems]) + adj_elems = adj_elems[adj_perm] + adj_faces = adj_faces[adj_perm] + adj_n_parts = adj_n_parts[adj_perm] + adj_gl_n_elems = adj_gl_n_elems[adj_perm] + adj_n_faces = adj_n_faces[adj_perm] + adj_idx = 0 + for bdry_idx in range(len(bdry_elems)): + if adj_idx >= len(adj_elems): + break + if (adj_elems[adj_idx] == bdry_elems[bdry_idx] + and adj_faces[adj_idx] == bdry_faces[bdry_idx]): + n_parts[bdry_idx] = adj_n_parts[adj_idx] + global_n_elems[bdry_idx] = adj_gl_n_elems[adj_idx] + n_faces[bdry_idx] = adj_n_faces[adj_idx] + adj_idx += 1 + connected_mesh.facial_adjacency_groups[igrp][None] =\ - InterPartitionAdjacency(boundary_adj.elements, - boundary_adj.element_faces, - boundary_adj.neighbors, + InterPartitionAdjacency(bdry_elems, + bdry_faces, + bdry_neighbors, boundary_adj.igroup, boundary_adj.ineighbor_group, - n_parts, n_elems, n_faces) + n_parts, global_n_elems, n_faces) return connected_mesh, queried_elems diff --git a/test/test_meshmode.py b/test/test_meshmode.py index d793d943..66e85813 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), - #(3, [3, 4]) + (3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): -- GitLab From 434d93e2412547bb9dadfbe2daa16cd6dd1eaed1 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 7 Aug 2017 15:51:15 -0500 Subject: [PATCH 207/266] Merge InterPartitionAdjacency with FacialAdjacencyGroups --- .../connection/opposite_face.py | 4 +- meshmode/mesh/__init__.py | 56 +++++++++++-------- meshmode/mesh/processing.py | 34 ++++++----- test/test_meshmode.py | 4 +- 4 files changed, 56 insertions(+), 42 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 61834402..c79b72a8 100644 --- 
a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -445,14 +445,14 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): part_batches.append([]) adj = tgt_mesh.facial_adjacency_groups[i_tgt_grp][None] - indices = i_src_part == adj.neighbor_parts + indices = (i_src_part == adj.neighbor_partitions) if not np.any(indices): # Skip because i_tgt_grp is not connected to i_src_part. continue i_tgt_faces = adj.element_faces[indices] i_src_meshwide_elems = adj.global_neighbors[indices] i_src_faces = adj.neighbor_faces[indices] - + i_src_grps = find_group_indices(src_mesh.groups, i_src_meshwide_elems) for i_src_grp in np.unique(i_src_grps): diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 354021fb..ca267daa 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -487,9 +487,12 @@ class FacialAdjacencyGroup(Record): class InterPartitionAdjacency(FacialAdjacencyGroup): """ - Describes facial adjacency information of elements in one :class:`Mesh` to - elements in another :class:`Mesh`. The element's boundary tag gives the - partition that it is connected to. + Describes boundary adjacency information of elements in + :class:`MeshElementGroup`. + + .. attribute:: igroup + + The group number of this group. .. attribute:: elements @@ -497,19 +500,26 @@ class InterPartitionAdjacency(FacialAdjacencyGroup): Element ``element_id_dtype elements[i]`` and face ``face_id_dtype element_faces[i]`` is connected to neighbor element ``element_id_dtype global_neighbors[i]`` with face - ``face_id_dtype global_neighbor_faces[i]``. + ``face_id_dtype global_neighbor_faces[i]``. The partition number it connects + to is ``neighbor_partitions[i]``. .. attribute:: element_faces ``face_id_dtype element_faces[i]`` gives the face of ``element_id_dtype elements[i]`` that is connected to - ``globla_neighbors[i]``. + ``global_neighbors[i]``. + + .. 
attribute:: neighbors + + Since this is a boundary, ``element_id_dtype neighbors[i]`` is interpreted + as a boundary tag. ``-neighbors[i]`` should be interpreted according to + :class:``Mesh.boundary_tags``. .. attribute:: global_neighbors Mesh-wide element numbers. ``element_id_dtype global_neighbors[i]`` gives the element number within the - neighboring partiton of the element connected to + neighboring partition of the element connected to ``element_id_dtype elements[i]``. Use ``find_group_instances()`` to find the group that the element belongs to, then subtract ``element_nr_base`` to find the element of the group. @@ -520,44 +530,43 @@ class InterPartitionAdjacency(FacialAdjacencyGroup): neighboring partition of the face connected to ``element_id_dtype elements[i]`` + .. attribute:: neighbor_partitions + + ``neighbor_partitions[i]`` gives the partition number that ``elements[i]`` + is connected to. + + If ``neighbor_partitions[i]`` is negative, ``elements[i]`` is on a true + boundary and is not connected to any other :class:``Mesh``. + .. attribute:: index_lookup_table - A dictionary that maps the tuple ``(element, face)`` to the tuple - ``(global_neighbor_element, global_neighbor_face)``. + A dictionary that maps the tuple ``(element, face)`` to an index ``i`` such + that ``elements[i] == element and element_faces[i] == face``. .. versionadded:: 2017.1 """ - ''' - I don't like the idea of having InterPartitionAdjacency replace the boundary - group for FacialAdjacencyGroup. A boundary may be a real boundary or it may - have a partition adjacent to it. FacialAdjacency and InterPartitionAdjacecy - will not have the same elements. They should be separate. facial_adjacency_groups - should have groups for real boundaries and for 'fake' boundaries. 
- ''' - def __init__(self, elements, element_faces, neighbors, igroup, - i_neighbor_group, - neighbor_parts, + neighbor_partitions, global_neighbors, neighbor_faces): self.elements = elements self.element_faces = element_faces self.neighbors = neighbors self.igroup = igroup - self.i_neighbor_group = i_neighbor_group - self.neighbor_parts = neighbor_parts + self.ineighbor_group = None + self.neighbor_partitions = neighbor_partitions self.global_neighbors = global_neighbors self.neighbor_faces = neighbor_faces self._generate_index_lookup_table() def __eq__(self, other): return (super.__eq__(self, other) - and np.array_equal(self.global_neighbors, other.global_neighbors) - and np.array_equal(self.neighbor_part, other.neighbor_part)) + and np.array_equal(self.global_neighbors, other.global_neighbors) + and np.array_equal(self.neighbor_partitions, other.neighbor_partitions)) def _generate_index_lookup_table(self): self.index_lookup_table = dict() @@ -596,7 +605,8 @@ class Mesh(Record): the set of facial adjacency relations between group *igrp* and *ineighbor_group*. *ineighbor_group* and *igrp* may be identical, or *ineighbor_group* may be *None*, in which case - a group containing boundary faces is returned. + an :class:``InterPartitionAdjacency`` group containing boundary + faces is returned. Referencing this attribute may raise :exc:`meshmode.DataUnavailable`. 
diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index cb2e36e6..ded738af 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -208,39 +208,43 @@ def partition_mesh(mesh, part_per_element, part_nr): from meshmode.mesh import InterPartitionAdjacency for igrp, adj in enumerate(adj_data): if adj: - boundary_adj = connected_mesh.facial_adjacency_groups[igrp][None] - n_parts = np.zeros_like(boundary_adj.elements) + bdry = connected_mesh.facial_adjacency_groups[igrp][None] + # Initialize connections + n_parts = np.zeros_like(bdry.elements) n_parts.fill(-1) global_n_elems = np.copy(n_parts) n_faces = np.copy(n_parts) - bdry_perm = np.lexsort([boundary_adj.element_faces, boundary_adj.elements]) - bdry_elems = boundary_adj.elements[bdry_perm] - bdry_faces = boundary_adj.element_faces[bdry_perm] - bdry_neighbors = boundary_adj.neighbors[bdry_perm] - adj_elems, adj_faces, adj_n_parts, adj_gl_n_elems, adj_n_faces = np.array(adj).T + + # Sort both sets of elements so that we can quickly merge + # the two data structures + bdry_perm = np.lexsort([bdry.element_faces, bdry.elements]) + elems = bdry.elements[bdry_perm] + faces = bdry.element_faces[bdry_perm] + neighbors = bdry.neighbors[bdry_perm] + adj_elems, adj_faces, adj_n_parts, adj_gl_n_elems, adj_n_faces =\ + np.array(adj).T adj_perm = np.lexsort([adj_faces, adj_elems]) adj_elems = adj_elems[adj_perm] adj_faces = adj_faces[adj_perm] adj_n_parts = adj_n_parts[adj_perm] adj_gl_n_elems = adj_gl_n_elems[adj_perm] adj_n_faces = adj_n_faces[adj_perm] + + # Merge interpartition adjacency data with FacialAdjacencyGroup adj_idx = 0 - for bdry_idx in range(len(bdry_elems)): + for bdry_idx in range(len(elems)): if adj_idx >= len(adj_elems): break - if (adj_elems[adj_idx] == bdry_elems[bdry_idx] - and adj_faces[adj_idx] == bdry_faces[bdry_idx]): + if (adj_elems[adj_idx] == elems[bdry_idx] + and adj_faces[adj_idx] == faces[bdry_idx]): n_parts[bdry_idx] = adj_n_parts[adj_idx] 
global_n_elems[bdry_idx] = adj_gl_n_elems[adj_idx] n_faces[bdry_idx] = adj_n_faces[adj_idx] adj_idx += 1 connected_mesh.facial_adjacency_groups[igrp][None] =\ - InterPartitionAdjacency(bdry_elems, - bdry_faces, - bdry_neighbors, - boundary_adj.igroup, - boundary_adj.ineighbor_group, + InterPartitionAdjacency(elems, faces, neighbors, + bdry.igroup, n_parts, global_n_elems, n_faces) return connected_mesh, queried_elems diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 66e85813..fc62dd34 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -207,7 +207,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): continue elem = adj.elements[idx] face = adj.element_faces[idx] - n_part_num = adj.neighbor_parts[idx] + n_part_num = adj.neighbor_partitions[idx] n_meshwide_elem = adj.global_neighbors[idx] n_face = adj.neighbor_faces[idx] num_tags[n_part_num] += 1 @@ -220,7 +220,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base n_idx = n_adj.index_lookup_table[(n_elem, n_face)] - assert (part_num == n_adj.neighbor_parts[n_idx] + assert (part_num == n_adj.neighbor_partitions[n_idx] and elem + elem_base == n_adj.global_neighbors[n_idx] and face == n_adj.neighbor_faces[n_idx]),\ "InterPartitionAdj is not consistent" -- GitLab From 869a8312505249aeb1a0e5f12c34674904899a26 Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 7 Aug 2017 16:00:04 -0500 Subject: [PATCH 208/266] Improve documentation --- meshmode/mesh/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index ca267daa..8f7161f7 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -524,12 +524,18 @@ class InterPartitionAdjacency(FacialAdjacencyGroup): group that the element belongs to, then subtract ``element_nr_base`` to find the element of the group. 
+ If ``global_neighbors[i]`` is negative, ``elements[i]`` is on a true + boundary and is not connected to any other :class:``Mesh``. + .. attribute:: neighbor_faces ``face_id_dtype global_neighbor_faces[i]`` gives face index within the neighboring partition of the face connected to ``element_id_dtype elements[i]`` + If ``neighbor_partitions[i]`` is negative, ``elements[i]`` is on a true + boundary and is not connected to any other :class:``Mesh``. + .. attribute:: neighbor_partitions ``neighbor_partitions[i]`` gives the partition number that ``elements[i]`` -- GitLab From 1cccf86587fb5eac53dacde283ea415b1c58b7cc Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 7 Aug 2017 16:45:09 -0500 Subject: [PATCH 209/266] Send less data to make_partition_connection --- .../connection/opposite_face.py | 38 ++++++++----------- test/test_meshmode.py | 24 +++++++++--- 2 files changed, 35 insertions(+), 27 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index c79b72a8..d96cd5e8 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -407,19 +407,21 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): +def make_partition_connection(src_to_tgt_conn, i_src_part, + tgt_bdry, tgt_adj_groups, tgt_batches): """ - Given a two boundary restriction connections *tgt_to_src_conn* and - *src_to_tgt_conn*, return a :class:`DirectDiscretizationConnection` that - performs data exchange across adjacent faces of different partitions. + Connects ``src_to_tgt_conn`` to a neighboring partition. - :arg tgt_to_src_conn: A :class:`Discretization` of the target partition. :arg src_to_tgt_conn: A :class:`Discretization` of the source partition. :arg i_src_part: The partition number of the src partition. 
+ :arg tgt_adj_groups: A list of :class:`InterPartitionAdjacency`` of the target + partition. + :arg tgt_bdry: A :class:`Discretization` of the boundary of the + target partition. + :arg tgt_batches: A list of batches of the target partition. :returns: A :class:`DirectDiscretizationConnection` that performs data - exchange across faces from partition `src_to_tgt_conn` to - `tgt_to_src_conn`. + exchange across faces from partition `i_src_part` to the target partition. .. versionadded:: 2017.1 @@ -430,21 +432,14 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - tgt_vol = tgt_to_src_conn.from_discr - src_vol = src_to_tgt_conn.from_discr - tgt_bdry = tgt_to_src_conn.to_discr src_bdry = src_to_tgt_conn.to_discr - tgt_mesh = tgt_vol.mesh - src_mesh = src_vol.mesh + src_groups = src_to_tgt_conn.from_discr.mesh.groups - part_batches = [] + part_batches = [[] for _ in tgt_adj_groups] - with cl.CommandQueue(tgt_vol.cl_context) as queue: - - for i_tgt_grp in range(len(tgt_mesh.groups)): - part_batches.append([]) - adj = tgt_mesh.facial_adjacency_groups[i_tgt_grp][None] + with cl.CommandQueue(src_to_tgt_conn.cl_context) as queue: + for i_tgt_grp, adj in enumerate(tgt_adj_groups): indices = (i_src_part == adj.neighbor_partitions) if not np.any(indices): # Skip because i_tgt_grp is not connected to i_src_part. 
@@ -453,11 +448,11 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): i_src_meshwide_elems = adj.global_neighbors[indices] i_src_faces = adj.neighbor_faces[indices] - i_src_grps = find_group_indices(src_mesh.groups, i_src_meshwide_elems) + i_src_grps = find_group_indices(src_groups, i_src_meshwide_elems) for i_src_grp in np.unique(i_src_grps): - elem_base = src_mesh.groups[i_src_grp].element_nr_base + elem_base = src_groups[i_src_grp].element_nr_base src_el_lookup =\ _make_bdry_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) @@ -465,12 +460,11 @@ def make_partition_connection(tgt_to_src_conn, src_to_tgt_conn, i_src_part): index_flags = np.logical_and(i_src_grps == i_src_grp, i_tgt_faces == i_tgt_face) - if not np.any(index_flags): continue vbc_tgt_grp_face_batch = _find_ibatch_for_face( - tgt_to_src_conn.groups[i_tgt_grp].batches, i_tgt_face) + tgt_batches[i_tgt_grp], i_tgt_face) tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ to_element_indices.get(queue=queue) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index fc62dd34..0665129a 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -128,14 +128,28 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, group_factory(order), BTAG_PARTITION(i_tgt_part)) + # Gather just enough information for the connection + tgt_bdry = tgt_to_src_conn.to_discr + tgt_mesh = tgt_to_src_conn.from_discr.mesh + tgt_adj_groups = [tgt_mesh.facial_adjacency_groups[i][None] + for i in range(len(tgt_mesh.groups))] + tgt_batches = [tgt_to_src_conn.groups[i].batches + for i in range(len(tgt_mesh.groups))] + + src_bdry = src_to_tgt_conn.to_discr + src_mesh = src_to_tgt_conn.from_discr.mesh + src_adj_groups = [src_mesh.facial_adjacency_groups[i][None] + for i in range(len(src_mesh.groups))] + src_batches = [src_to_tgt_conn.groups[i].batches + for i in range(len(src_mesh.groups))] + # Connect tgt_mesh to src_mesh - tgt_conn = 
make_partition_connection(tgt_to_src_conn, - src_to_tgt_conn, i_src_part) + tgt_conn = make_partition_connection(src_to_tgt_conn, i_src_part, + tgt_bdry, tgt_adj_groups, tgt_batches) # Connect src_mesh to tgt_mesh - src_conn = make_partition_connection(src_to_tgt_conn, - tgt_to_src_conn, i_tgt_part) - + src_conn = make_partition_connection(tgt_to_src_conn, i_tgt_part, + src_bdry, src_adj_groups, src_batches) check_connection(tgt_conn) check_connection(src_conn) -- GitLab From f1734e5cf2d1515ed072e7b760b2cdcf6b200f25 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 9 Aug 2017 16:29:37 -0500 Subject: [PATCH 210/266] Change name --- meshmode/mesh/__init__.py | 2 +- meshmode/mesh/processing.py | 8 ++++---- test/test_meshmode.py | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 8f7161f7..cae5f058 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -485,7 +485,7 @@ class FacialAdjacencyGroup(Record): # {{{ partition adjacency -class InterPartitionAdjacency(FacialAdjacencyGroup): +class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): """ Describes boundary adjacency information of elements in :class:`MeshElementGroup`. 
diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index ded738af..758163fc 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -205,7 +205,7 @@ def partition_mesh(mesh, part_per_element, part_nr): connected_mesh = part_mesh.copy() - from meshmode.mesh import InterPartitionAdjacency + from meshmode.mesh import InterPartitionAdjacencyGroup for igrp, adj in enumerate(adj_data): if adj: bdry = connected_mesh.facial_adjacency_groups[igrp][None] @@ -243,9 +243,9 @@ def partition_mesh(mesh, part_per_element, part_nr): adj_idx += 1 connected_mesh.facial_adjacency_groups[igrp][None] =\ - InterPartitionAdjacency(elems, faces, neighbors, - bdry.igroup, - n_parts, global_n_elems, n_faces) + InterPartitionAdjacencyGroup(elems, faces, neighbors, + bdry.igroup, + n_parts, global_n_elems, n_faces) return connected_mesh, queried_elems diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 0665129a..4f34ddb7 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), - (3, [3, 4]) + #(3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -203,7 +203,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): [count_tags(new_meshes[i][0], BTAG_ALL) for i in range(num_parts)]), \ "part_mesh has the wrong number of BTAG_ALL boundaries" - from meshmode.mesh import BTAG_PARTITION, InterPartitionAdjacency + from meshmode.mesh import BTAG_PARTITION, InterPartitionAdjacencyGroup from meshmode.mesh.processing import find_group_indices num_tags = np.zeros((num_parts,)) @@ -213,7 +213,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): adj = part.facial_adjacency_groups[grp_num][None] tags = -part.facial_adjacency_groups[grp_num][None].neighbors assert np.all(tags >= 0) - if not isinstance(adj, 
InterPartitionAdjacency): + if not isinstance(adj, InterPartitionAdjacencyGroup): continue elem_base = part.groups[grp_num].element_nr_base for idx in range(len(adj.elements)): @@ -237,7 +237,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): assert (part_num == n_adj.neighbor_partitions[n_idx] and elem + elem_base == n_adj.global_neighbors[n_idx] and face == n_adj.neighbor_faces[n_idx]),\ - "InterPartitionAdj is not consistent" + "InterPartitionAdjacencyGroup is not consistent" _, n_part_to_global = new_meshes[n_part_num] p_meshwide_elem = part_to_global[elem + elem_base] p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] -- GitLab From 3ac7a6709de65e86fb49c41e72ca96ee7a8dc42e Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 9 Aug 2017 16:55:23 -0500 Subject: [PATCH 211/266] Remove Mesh.adjacency_list --- meshmode/mesh/__init__.py | 11 ----------- test/test_meshmode.py | 12 ++++++++---- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index cae5f058..7120f14b 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -876,17 +876,6 @@ class Mesh(Record): def __ne__(self, other): return not self.__eq__(other) - def adjacency_list(self): - """ - :returns: `adjacency[i]` is a list of all elements that are adjacent to - element `i`. Useful for `pymetis.part_graph`. - """ - adjacency_list = [] - for elem in range(self.nelements): - start, end = self.nodal_adjacency.neighbors_starts[elem:elem+2] - adjacency_list.append(self.nodal_adjacency.neighbors[start:end]) - return adjacency_list - # Design experience: Try not to add too many global data structures to the # mesh. Let the element groups be responsible for that at the mesh level. 
# diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 4f34ddb7..248ed9e5 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), - #(3, [3, 4]) + (3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -88,7 +88,9 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, mesh = meshes[0] #from pymetis import part_graph - #(_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) + #_, p = part_graph(num_parts, + # xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), + # adjncy=mesh.nodal_adjacency.neighbors.tolist()) #part_per_element = np.array(p) part_per_element = np.random.randint(num_parts, size=mesh.nelements) @@ -158,7 +160,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, bdry_t_2 = src_conn(queue, bdry_s) err = la.norm((bdry_t - bdry_t_2).get(), np.inf) - eoc_rec[(i_tgt_part, i_src_part)].add_data_point(1./n, err) + eoc_rec[i_tgt_part, i_src_part].add_data_point(1./n, err) for (i, j), e in eoc_rec.items(): if e is not None: @@ -185,7 +187,9 @@ def test_partition_mesh(num_parts, num_meshes, dim): mesh = merge_disjoint_meshes(meshes) from pymetis import part_graph - (_, p) = part_graph(num_parts, adjacency=mesh.adjacency_list()) + _, p = part_graph(num_parts, + xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), + adjncy=mesh.nodal_adjacency.neighbors.tolist()) part_per_element = np.array(p) #part_per_element = np.random.randint(num_parts, size=mesh.nelements) -- GitLab From 4431d6995d7eabb750a4fe863aefa2d1fb7770da Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 9 Aug 2017 17:19:15 -0500 Subject: [PATCH 212/266] Improve InterPartitionAdjacency constructor --- meshmode/mesh/__init__.py | 14 +++++++------- test/test_meshmode.py | 6 +++--- 2 files changed, 10 
insertions(+), 10 deletions(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 7120f14b..347a5068 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -559,14 +559,14 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): neighbor_partitions, global_neighbors, neighbor_faces): - self.elements = elements - self.element_faces = element_faces - self.neighbors = neighbors - self.igroup = igroup - self.ineighbor_group = None + FacialAdjacencyGroup.__init__(self, elements=elements, + element_faces=element_faces, + neighbors=neighbors, + neighbor_faces=neighbor_faces, + igroup=igroup, + ineighbor_group=None) self.neighbor_partitions = neighbor_partitions self.global_neighbors = global_neighbors - self.neighbor_faces = neighbor_faces self._generate_index_lookup_table() def __eq__(self, other): @@ -577,7 +577,7 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): def _generate_index_lookup_table(self): self.index_lookup_table = dict() for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): - self.index_lookup_table[(elem, face)] = idx + self.index_lookup_table[elem, face] = idx # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 248ed9e5..c94e7428 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -233,11 +233,11 @@ def test_partition_mesh(num_parts, num_meshes, dim): # Hack: find_igrps expects a numpy.ndarray and returns # a numpy.ndarray. But if a single integer is fed # into find_igrps, an integer is returned. 
- n_grp_num = find_group_indices(n_part.groups, n_meshwide_elem) - n_adj = n_part.facial_adjacency_groups[int(n_grp_num)][None] + n_grp_num = int(find_group_indices(n_part.groups, n_meshwide_elem)) + n_adj = n_part.facial_adjacency_groups[n_grp_num][None] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base - n_idx = n_adj.index_lookup_table[(n_elem, n_face)] + n_idx = n_adj.index_lookup_table[n_elem, n_face] assert (part_num == n_adj.neighbor_partitions[n_idx] and elem + elem_base == n_adj.global_neighbors[n_idx] and face == n_adj.neighbor_faces[n_idx]),\ -- GitLab From a4e31691a9a20943dd21e594802ec81fc5daa6f5 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 9 Aug 2017 19:57:11 -0500 Subject: [PATCH 213/266] Remove index_lookup_table from InterPartitionAdjacencyGroup --- .../connection/opposite_face.py | 10 ++-- meshmode/mesh/__init__.py | 11 ---- test/test_meshmode.py | 59 +++++++++++-------- 3 files changed, 40 insertions(+), 40 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index d96cd5e8..1d7efb63 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -407,7 +407,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def make_partition_connection(src_to_tgt_conn, i_src_part, +def make_partition_connection(src_bdry_discr, i_src_part, tgt_bdry, tgt_adj_groups, tgt_batches): """ Connects ``src_to_tgt_conn`` to a neighboring partition. 
@@ -432,12 +432,12 @@ def make_partition_connection(src_to_tgt_conn, i_src_part, from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - src_bdry = src_to_tgt_conn.to_discr - src_groups = src_to_tgt_conn.from_discr.mesh.groups + src_bdry = src_bdry_discr.to_discr + src_groups = src_bdry_discr.from_discr.mesh.groups part_batches = [[] for _ in tgt_adj_groups] - with cl.CommandQueue(src_to_tgt_conn.cl_context) as queue: + with cl.CommandQueue(src_bdry_discr.cl_context) as queue: for i_tgt_grp, adj in enumerate(tgt_adj_groups): indices = (i_src_part == adj.neighbor_partitions) @@ -454,7 +454,7 @@ def make_partition_connection(src_to_tgt_conn, i_src_part, elem_base = src_groups[i_src_grp].element_nr_base src_el_lookup =\ - _make_bdry_el_lookup_table(queue, src_to_tgt_conn, i_src_grp) + _make_bdry_el_lookup_table(queue, src_bdry_discr, i_src_grp) for i_tgt_face in i_tgt_faces: diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 347a5068..f07f9431 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -544,11 +544,6 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): If ``neighbor_partitions[i]`` is negative, ``elements[i]`` is on a true boundary and is not connected to any other :class:``Mesh``. - .. attribute:: index_lookup_table - - A dictionary that maps the tuple ``(element, face)`` to an index ``i`` such - that ``elements[i] == element and element_faces[i] == face``. - .. 
versionadded:: 2017.1 """ @@ -567,18 +562,12 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): ineighbor_group=None) self.neighbor_partitions = neighbor_partitions self.global_neighbors = global_neighbors - self._generate_index_lookup_table() def __eq__(self, other): return (super.__eq__(self, other) and np.array_equal(self.global_neighbors, other.global_neighbors) and np.array_equal(self.neighbor_partitions, other.neighbor_partitions)) - def _generate_index_lookup_table(self): - self.index_lookup_table = dict() - for idx, (elem, face) in enumerate(zip(self.elements, self.element_faces)): - self.index_lookup_table[elem, face] = idx - # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index c94e7428..407c2627 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), - (3, [3, 4]) + #(3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -71,7 +71,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for j in range(num_parts): if i == j: continue - eoc_rec[(i, j)] = EOCRecorder() + eoc_rec[i, j] = EOCRecorder() def f(x): return 0.1*cl.clmath.sin(30*x) @@ -110,47 +110,47 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for i_tgt_part in range(num_parts): for i_src_part in range(num_parts): if (i_tgt_part == i_src_part - or eoc_rec[(i_tgt_part, i_src_part)] is None): - eoc_rec[(i_tgt_part, i_src_part)] = None + or eoc_rec[i_tgt_part, i_src_part] is None): + eoc_rec[i_tgt_part, i_src_part] = None continue # Mark faces within tgt_mesh that are connected to src_mesh - tgt_to_src_conn = make_face_restriction(vol_discrs[i_tgt_part], - group_factory(order), - BTAG_PARTITION(i_src_part)) + tgt_bdry_discr = make_face_restriction(vol_discrs[i_tgt_part], + 
group_factory(order), + BTAG_PARTITION(i_src_part)) # If these parts are not connected, don't bother checking the error - bdry_nodes = tgt_to_src_conn.to_discr.nodes() + bdry_nodes = tgt_bdry_discr.to_discr.nodes() if bdry_nodes.size == 0: - eoc_rec[(i_tgt_part, i_src_part)] = None + eoc_rec[i_tgt_part, i_src_part] = None continue # Mark faces within src_mesh that are connected to tgt_mesh - src_to_tgt_conn = make_face_restriction(vol_discrs[i_src_part], - group_factory(order), - BTAG_PARTITION(i_tgt_part)) + src_bdry_discr = make_face_restriction(vol_discrs[i_src_part], + group_factory(order), + BTAG_PARTITION(i_tgt_part)) # Gather just enough information for the connection - tgt_bdry = tgt_to_src_conn.to_discr - tgt_mesh = tgt_to_src_conn.from_discr.mesh + tgt_bdry = tgt_bdry_discr.to_discr + tgt_mesh = tgt_bdry_discr.from_discr.mesh tgt_adj_groups = [tgt_mesh.facial_adjacency_groups[i][None] for i in range(len(tgt_mesh.groups))] - tgt_batches = [tgt_to_src_conn.groups[i].batches + tgt_batches = [tgt_bdry_discr.groups[i].batches for i in range(len(tgt_mesh.groups))] - src_bdry = src_to_tgt_conn.to_discr - src_mesh = src_to_tgt_conn.from_discr.mesh + src_bdry = src_bdry_discr.to_discr + src_mesh = src_bdry_discr.from_discr.mesh src_adj_groups = [src_mesh.facial_adjacency_groups[i][None] for i in range(len(src_mesh.groups))] - src_batches = [src_to_tgt_conn.groups[i].batches + src_batches = [src_bdry_discr.groups[i].batches for i in range(len(src_mesh.groups))] - # Connect tgt_mesh to src_mesh - tgt_conn = make_partition_connection(src_to_tgt_conn, i_src_part, + # Connect src_mesh to tgt_mesh + src_conn = make_partition_connection(src_bdry_discr, i_src_part, tgt_bdry, tgt_adj_groups, tgt_batches) - # Connect src_mesh to tgt_mesh - src_conn = make_partition_connection(tgt_to_src_conn, i_tgt_part, + # Connect tgt_mesh to src_mesh + tgt_conn = make_partition_connection(tgt_bdry_discr, i_tgt_part, src_bdry, src_adj_groups, src_batches) check_connection(tgt_conn) 
check_connection(src_conn) @@ -166,7 +166,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, if e is not None: print("Error of connection from part %i to part %i." % (i, j)) print(e) - assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-12) + assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-14) # }}} @@ -211,6 +211,15 @@ def test_partition_mesh(num_parts, num_meshes, dim): from meshmode.mesh.processing import find_group_indices num_tags = np.zeros((num_parts,)) + index_lookup_table = dict() + for ipart, (m, _) in enumerate(new_meshes): + for igrp in range(len(m.groups)): + adj = m.facial_adjacency_groups[igrp][None] + if not isinstance(adj, InterPartitionAdjacencyGroup): + continue + for i, (elem, face) in enumerate(zip(adj.elements, adj.element_faces)): + index_lookup_table[ipart, igrp, elem, face] = i + for part_num in range(num_parts): part, part_to_global = new_meshes[part_num] for grp_num in range(len(part.groups)): @@ -218,6 +227,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): tags = -part.facial_adjacency_groups[grp_num][None].neighbors assert np.all(tags >= 0) if not isinstance(adj, InterPartitionAdjacencyGroup): + # This group is not connected to another partition. 
continue elem_base = part.groups[grp_num].element_nr_base for idx in range(len(adj.elements)): @@ -237,7 +247,8 @@ def test_partition_mesh(num_parts, num_meshes, dim): n_adj = n_part.facial_adjacency_groups[n_grp_num][None] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base - n_idx = n_adj.index_lookup_table[n_elem, n_face] + #n_idx = n_adj.index_lookup_table[n_elem, n_face] + n_idx = index_lookup_table[n_part_num, n_grp_num, n_elem, n_face] assert (part_num == n_adj.neighbor_partitions[n_idx] and elem + elem_base == n_adj.global_neighbors[n_idx] and face == n_adj.neighbor_faces[n_idx]),\ -- GitLab From 72d00fc9fc8f16648f8a6862d35961dcd8e0d4be Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 9 Aug 2017 20:03:28 -0500 Subject: [PATCH 214/266] Small changes --- test/test_meshmode.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 407c2627..4afde201 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), - #(3, [3, 4]) + (3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, num_parts, num_groups): @@ -216,6 +216,7 @@ def test_partition_mesh(num_parts, num_meshes, dim): for igrp in range(len(m.groups)): adj = m.facial_adjacency_groups[igrp][None] if not isinstance(adj, InterPartitionAdjacencyGroup): + # This group is not connected to another partition. 
continue for i, (elem, face) in enumerate(zip(adj.elements, adj.element_faces)): index_lookup_table[ipart, igrp, elem, face] = i @@ -247,7 +248,6 @@ def test_partition_mesh(num_parts, num_meshes, dim): n_adj = n_part.facial_adjacency_groups[n_grp_num][None] n_elem_base = n_part.groups[n_grp_num].element_nr_base n_elem = n_meshwide_elem - n_elem_base - #n_idx = n_adj.index_lookup_table[n_elem, n_face] n_idx = index_lookup_table[n_part_num, n_grp_num, n_elem, n_face] assert (part_num == n_adj.neighbor_partitions[n_idx] and elem + elem_base == n_adj.global_neighbors[n_idx] -- GitLab From ede1b7aafbbcca176908ad36ddb49549fc08bef0 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 9 Aug 2017 21:06:14 -0500 Subject: [PATCH 215/266] Small fix --- test/test_meshmode.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 4afde201..0140caf2 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -74,7 +74,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, eoc_rec[i, j] = EOCRecorder() def f(x): - return 0.1*cl.clmath.sin(30*x) + return 0.5*cl.clmath.sin(30*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -166,7 +166,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, if e is not None: print("Error of connection from part %i to part %i." 
% (i, j)) print(e) - assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-14) + assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-13) # }}} -- GitLab From 7926ba7309f08e7ebac2e43079a98ec9729e6b65 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 10 Aug 2017 21:04:52 -0500 Subject: [PATCH 216/266] Clean up names --- .../connection/opposite_face.py | 86 ++++++------ test/test_meshmode.py | 132 ++++++++++-------- 2 files changed, 117 insertions(+), 101 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 1d7efb63..181e059e 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -407,21 +407,22 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection -def make_partition_connection(src_bdry_discr, i_src_part, - tgt_bdry, tgt_adj_groups, tgt_batches): +def make_partition_connection(local_bdry_conn, i_local_part, + remote_bdry, remote_adj_groups, remote_batches): """ - Connects ``src_to_tgt_conn`` to a neighboring partition. + Connects ``local_bdry_conn`` to a neighboring partition. - :arg src_to_tgt_conn: A :class:`Discretization` of the source partition. - :arg i_src_part: The partition number of the src partition. - :arg tgt_adj_groups: A list of :class:`InterPartitionAdjacency`` of the target + :arg local_bdry_conn: A :class:`DirectDiscretizationConnection` of the local partition. - :arg tgt_bdry: A :class:`Discretization` of the boundary of the - target partition. - :arg tgt_batches: A list of batches of the target partition. + :arg i_local_part: The partition number of the local partition. + :arg remote_adj_groups: A list of :class:`InterPartitionAdjacency`` of the + remote partition. + :arg remote_bdry: A :class:`Discretization` of the boundary of the + remote partition. + :arg remote_batches: A list of batches of the remote partition. 
:returns: A :class:`DirectDiscretizationConnection` that performs data - exchange across faces from partition `i_src_part` to the target partition. + exchange across faces from partition `i_local_part` to the remote partition. .. versionadded:: 2017.1 @@ -432,56 +433,59 @@ def make_partition_connection(src_bdry_discr, i_src_part, from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) - src_bdry = src_bdry_discr.to_discr - src_groups = src_bdry_discr.from_discr.mesh.groups + local_bdry = local_bdry_conn.to_discr + local_groups = local_bdry_conn.from_discr.mesh.groups - part_batches = [[] for _ in tgt_adj_groups] + part_batches = [[] for _ in remote_adj_groups] - with cl.CommandQueue(src_bdry_discr.cl_context) as queue: + with cl.CommandQueue(local_bdry_conn.cl_context) as queue: - for i_tgt_grp, adj in enumerate(tgt_adj_groups): - indices = (i_src_part == adj.neighbor_partitions) + for i_remote_grp, adj in enumerate(remote_adj_groups): + indices = (i_local_part == adj.neighbor_partitions) if not np.any(indices): - # Skip because i_tgt_grp is not connected to i_src_part. + # Skip because i_remote_grp is not connected to i_local_part. 
continue - i_tgt_faces = adj.element_faces[indices] - i_src_meshwide_elems = adj.global_neighbors[indices] - i_src_faces = adj.neighbor_faces[indices] + i_remote_faces = adj.element_faces[indices] + i_local_meshwide_elems = adj.global_neighbors[indices] + i_local_faces = adj.neighbor_faces[indices] - i_src_grps = find_group_indices(src_groups, i_src_meshwide_elems) + i_local_grps = find_group_indices(local_groups, i_local_meshwide_elems) - for i_src_grp in np.unique(i_src_grps): + for i_local_grp in np.unique(i_local_grps): - elem_base = src_groups[i_src_grp].element_nr_base - src_el_lookup =\ - _make_bdry_el_lookup_table(queue, src_bdry_discr, i_src_grp) + elem_base = local_groups[i_local_grp].element_nr_base + local_el_lookup = _make_bdry_el_lookup_table(queue, + local_bdry_conn, + i_local_grp) - for i_tgt_face in i_tgt_faces: + for i_remote_face in i_remote_faces: - index_flags = np.logical_and(i_src_grps == i_src_grp, - i_tgt_faces == i_tgt_face) + index_flags = np.logical_and(i_local_grps == i_local_grp, + i_remote_faces == i_remote_face) if not np.any(index_flags): continue - vbc_tgt_grp_face_batch = _find_ibatch_for_face( - tgt_batches[i_tgt_grp], i_tgt_face) + vbc_remote_grp_face_batch = _find_ibatch_for_face( + remote_batches[i_remote_grp], i_remote_face) - tgt_bdry_element_indices = vbc_tgt_grp_face_batch.\ + remote_bdry_element_indices = vbc_remote_grp_face_batch.\ to_element_indices.get(queue=queue) - elems = i_src_meshwide_elems[index_flags] - elem_base - faces = i_src_faces[index_flags] - src_bdry_element_indices = src_el_lookup[elems, faces] + elems = i_local_meshwide_elems[index_flags] - elem_base + faces = i_local_faces[index_flags] + local_bdry_element_indices = local_el_lookup[elems, faces] + + batches = _make_cross_face_batches(queue, + remote_bdry, local_bdry, + i_remote_grp, i_local_grp, + remote_bdry_element_indices, + local_bdry_element_indices) - part_batches[i_tgt_grp].extend(_make_cross_face_batches(queue, - tgt_bdry, src_bdry, - 
i_tgt_grp, i_src_grp, - tgt_bdry_element_indices, - src_bdry_element_indices)) + part_batches[i_remote_grp].extend(batches) return DirectDiscretizationConnection( - from_discr=src_bdry, - to_discr=tgt_bdry, + from_discr=local_bdry, + to_discr=remote_bdry, groups=[DiscretizationConnectionElementGroup(batches=batches) for batches in part_batches], is_surjective=True) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 0140caf2..cd729833 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -59,7 +59,7 @@ logger = logging.getLogger(__name__) (3, [3, 4]) ]) def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, - num_parts, num_groups): + num_parts, num_groups, scramble_partitions=True): np.random.seed(42) cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) @@ -74,7 +74,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, eoc_rec[i, j] = EOCRecorder() def f(x): - return 0.5*cl.clmath.sin(30*x) + return 0.5*cl.clmath.sin(30.*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -87,12 +87,14 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, else: mesh = meshes[0] - #from pymetis import part_graph - #_, p = part_graph(num_parts, - # xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), - # adjncy=mesh.nodal_adjacency.neighbors.tolist()) - #part_per_element = np.array(p) - part_per_element = np.random.randint(num_parts, size=mesh.nelements) + if scramble_partitions: + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + else: + from pymetis import part_graph + _, p = part_graph(num_parts, + xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), + adjncy=mesh.nodal_adjacency.neighbors.tolist()) + part_per_element = np.array(p) from meshmode.mesh.processing import partition_mesh part_meshes = [ @@ -107,60 +109,68 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, make_partition_connection, 
check_connection) - for i_tgt_part in range(num_parts): - for i_src_part in range(num_parts): - if (i_tgt_part == i_src_part - or eoc_rec[i_tgt_part, i_src_part] is None): - eoc_rec[i_tgt_part, i_src_part] = None + for i_local_part in range(num_parts): + for i_remote_part in range(num_parts): + if (i_local_part == i_remote_part + or eoc_rec[i_local_part, i_remote_part] is None): + eoc_rec[i_local_part, i_remote_part] = None continue - # Mark faces within tgt_mesh that are connected to src_mesh - tgt_bdry_discr = make_face_restriction(vol_discrs[i_tgt_part], - group_factory(order), - BTAG_PARTITION(i_src_part)) + # Mark faces within local_mesh that are connected to remote_mesh + local_bdry_conn = make_face_restriction(vol_discrs[i_local_part], + group_factory(order), + BTAG_PARTITION(i_remote_part)) # If these parts are not connected, don't bother checking the error - bdry_nodes = tgt_bdry_discr.to_discr.nodes() + bdry_nodes = local_bdry_conn.to_discr.nodes() if bdry_nodes.size == 0: - eoc_rec[i_tgt_part, i_src_part] = None + eoc_rec[i_local_part, i_remote_part] = None continue - # Mark faces within src_mesh that are connected to tgt_mesh - src_bdry_discr = make_face_restriction(vol_discrs[i_src_part], - group_factory(order), - BTAG_PARTITION(i_tgt_part)) + # Mark faces within remote_mesh that are connected to local_mesh + remote_bdry_conn = make_face_restriction(vol_discrs[i_remote_part], + group_factory(order), + BTAG_PARTITION(i_local_part)) # Gather just enough information for the connection - tgt_bdry = tgt_bdry_discr.to_discr - tgt_mesh = tgt_bdry_discr.from_discr.mesh - tgt_adj_groups = [tgt_mesh.facial_adjacency_groups[i][None] - for i in range(len(tgt_mesh.groups))] - tgt_batches = [tgt_bdry_discr.groups[i].batches - for i in range(len(tgt_mesh.groups))] - - src_bdry = src_bdry_discr.to_discr - src_mesh = src_bdry_discr.from_discr.mesh - src_adj_groups = [src_mesh.facial_adjacency_groups[i][None] - for i in range(len(src_mesh.groups))] - src_batches = 
[src_bdry_discr.groups[i].batches - for i in range(len(src_mesh.groups))] - - # Connect src_mesh to tgt_mesh - src_conn = make_partition_connection(src_bdry_discr, i_src_part, - tgt_bdry, tgt_adj_groups, tgt_batches) - - # Connect tgt_mesh to src_mesh - tgt_conn = make_partition_connection(tgt_bdry_discr, i_tgt_part, - src_bdry, src_adj_groups, src_batches) - check_connection(tgt_conn) - check_connection(src_conn) - - bdry_t = f(tgt_conn.to_discr.nodes()[0].with_queue(queue)) - bdry_s = tgt_conn(queue, bdry_t) - bdry_t_2 = src_conn(queue, bdry_s) - - err = la.norm((bdry_t - bdry_t_2).get(), np.inf) - eoc_rec[i_tgt_part, i_src_part].add_data_point(1./n, err) + local_bdry = local_bdry_conn.to_discr + local_mesh = local_bdry_conn.from_discr.mesh + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conn.groups[i].batches + for i in range(len(local_mesh.groups))] + + remote_bdry = remote_bdry_conn.to_discr + remote_mesh = remote_bdry_conn.from_discr.mesh + remote_adj_groups = [remote_mesh.facial_adjacency_groups[i][None] + for i in range(len(remote_mesh.groups))] + remote_batches = [remote_bdry_conn.groups[i].batches + for i in range(len(remote_mesh.groups))] + + # Connect local_mesh to remote_mesh + local_part_conn = make_partition_connection(local_bdry_conn, + i_local_part, + remote_bdry, + remote_adj_groups, + remote_batches) + + # Connect remote mesh to local mesh + remote_part_conn = make_partition_connection(remote_bdry_conn, + i_remote_part, + local_bdry, + local_adj_groups, + local_batches) + + check_connection(local_part_conn) + check_connection(remote_part_conn) + + true_local_points = f(local_part_conn.to_discr.nodes()[0] + .with_queue(queue)) + remote_points = local_part_conn(queue, true_local_points) + local_points = remote_part_conn(queue, remote_points) + + err = la.norm((true_local_points - local_points).get(), np.inf) + eoc_rec[i_local_part, 
i_remote_part].add_data_point(1./n, err) for (i, j), e in eoc_rec.items(): if e is not None: @@ -176,7 +186,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, @pytest.mark.parametrize("dim", [2, 3]) @pytest.mark.parametrize("num_parts", [4, 5, 7]) @pytest.mark.parametrize("num_meshes", [1, 2, 7]) -def test_partition_mesh(num_parts, num_meshes, dim): +def test_partition_mesh(num_parts, num_meshes, dim, scramble_partitions=False): np.random.seed(42) n = (5,) * dim from meshmode.mesh.generation import generate_regular_rect_mesh @@ -186,12 +196,14 @@ def test_partition_mesh(num_parts, num_meshes, dim): from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes(meshes) - from pymetis import part_graph - _, p = part_graph(num_parts, - xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), - adjncy=mesh.nodal_adjacency.neighbors.tolist()) - part_per_element = np.array(p) - #part_per_element = np.random.randint(num_parts, size=mesh.nelements) + if scramble_partitions: + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + else: + from pymetis import part_graph + _, p = part_graph(num_parts, + xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), + adjncy=mesh.nodal_adjacency.neighbors.tolist()) + part_per_element = np.array(p) from meshmode.mesh.processing import partition_mesh # TODO: The same part_per_element array must be used to partition each mesh. 
-- GitLab From dd10832c91db0030e39a2a38534b6a519aa52cbf Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 22 Aug 2017 18:31:17 -0500 Subject: [PATCH 217/266] Add to system tags --- meshmode/mesh/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index f07f9431..b9f8e9fe 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -115,7 +115,8 @@ class BTAG_PARTITION(object): # noqa return not self.__eq__(other) -SYSTEM_TAGS = set([BTAG_NONE, BTAG_ALL, BTAG_REALLY_ALL, BTAG_NO_BOUNDARY]) +SYSTEM_TAGS = set([BTAG_NONE, BTAG_ALL, BTAG_REALLY_ALL, BTAG_NO_BOUNDARY, + BTAG_PARTITION]) # }}} -- GitLab From 6c143b60dd7ed97b2ca5ce15024f99b98ec1b179 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 22 Aug 2017 19:03:09 -0500 Subject: [PATCH 218/266] Relax error check --- test/test_meshmode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index cd729833..1d2a954a 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -176,7 +176,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, if e is not None: print("Error of connection from part %i to part %i." 
% (i, j)) print(e) - assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-13) + assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-12) # }}} -- GitLab From ecc34a696bc9d7127edacc78a3537da0fb1463b2 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 22 Aug 2017 20:08:02 -0500 Subject: [PATCH 219/266] Add MPI test file --- testmpi.py | 107 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 testmpi.py diff --git a/testmpi.py b/testmpi.py new file mode 100644 index 00000000..fbc13883 --- /dev/null +++ b/testmpi.py @@ -0,0 +1,107 @@ +from mpi4py import MPI +import numpy as np +import pyopencl + +comm = MPI.COMM_WORLD +rank = comm.Get_rank() + +num_parts = 3 +if rank == 0: + np.random.seed(42) + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + + from meshmode.mesh.processing import partition_mesh + parts = [partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] + + reqs = [] + for r in range(num_parts): + reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) + print('Sent all mesh parts.') + for req in reqs: + req.wait() + +elif (rank - 1) in range(num_parts): + mesh = comm.recv(source=0, tag=1) + print('Recieved mesh') + + cl_ctx = pyopencl.create_some_context() + + from meshmode.discretization.poly_element\ + import PolynomialWarpAndBlendGroupFactory + group_factory = PolynomialWarpAndBlendGroupFactory(4) + + from meshmode.discretization import Discretization + vol_discr = Discretization(cl_ctx, mesh, group_factory) + + send_reqs = [] + i_local_part = rank - 1 + local_bdry_conns = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + # Mark faces within local_mesh that are connected 
to remote_mesh + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION + local_bdry_conns[i_remote_part] =\ + make_face_restriction(vol_discr, group_factory, + BTAG_PARTITION(i_remote_part)) + + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() + if bdry_nodes.size == 0: + # local_mesh is not connected to remote_mesh, send None + send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) + continue + + # Gather information to send to other ranks + local_bdry = local_bdry_conns[i_remote_part].to_discr + local_mesh = local_bdry_conns[i_remote_part].from_discr.mesh + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_data = {'bdry': local_bdry, + 'adj': local_adj_groups, + 'batches': local_batches} + send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) + + recv_reqs = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + recv_reqs[i_remote_part] = comm.irecv(source=i_remote_part+1, tag=2) + + remote_data = {} + for i_part, req in recv_reqs.items(): + remote_data[i_part] = req.wait() + for req in send_reqs: + req.wait() + + + connection = {} + for i_remote_part, data in remote_data.items(): + if data is None: + # Local mesh is not connected to remote mesh + continue + remote_bdry = data['bdry'] + remote_adj_groups =data['adj'] + remote_batches = data['batches'] + # Connect local_mesh to remote_mesh + from meshmode.discretization.connection import make_partition_connection + connection[i_remote_part] =\ + make_partition_connection(local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_batches) + from meshmode.discretization.connection import 
check_connection + check_connection(connection[i_remote_part]) + -- GitLab From 432fa88841251fec271e470d03df66f516a82b3d Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 23 Aug 2017 10:08:54 -0500 Subject: [PATCH 220/266] Reduce arguments to `make_partition_connection`. --- .../connection/opposite_face.py | 27 +++++++++++-------- test/test_meshmode.py | 18 +++++++++++-- testmpi.py | 23 +++++++++++----- 3 files changed, 49 insertions(+), 19 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 181e059e..cccae33c 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -408,7 +408,8 @@ def make_opposite_face_connection(volume_to_bdry_conn): # {{{ partition_connection def make_partition_connection(local_bdry_conn, i_local_part, - remote_bdry, remote_adj_groups, remote_batches): + remote_bdry, remote_adj_groups, + remote_to_elem_faces, remote_to_elem_indices): """ Connects ``local_bdry_conn`` to a neighboring partition. @@ -419,7 +420,11 @@ def make_partition_connection(local_bdry_conn, i_local_part, remote partition. :arg remote_bdry: A :class:`Discretization` of the boundary of the remote partition. - :arg remote_batches: A list of batches of the remote partition. + :arg remote_to_elem_faces: `remote_to_elem_faces[igrp][idx]` gives the face + that batch `idx` interpolates from group `igrp`. + :arg remote_to_elem_indices: `remote_to_elem_indices[igrp][idx]` gives a + :class:`np.array` of element indices that batch `idx` interpolates from + group `igrp`. :returns: A :class:`DirectDiscretizationConnection` that performs data exchange across faces from partition `i_local_part` to the remote partition. 
@@ -465,29 +470,29 @@ def make_partition_connection(local_bdry_conn, i_local_part, if not np.any(index_flags): continue - vbc_remote_grp_face_batch = _find_ibatch_for_face( - remote_batches[i_remote_grp], i_remote_face) + batch_idx = np.where(remote_to_elem_faces[i_remote_grp] + == i_remote_face)[0] - remote_bdry_element_indices = vbc_remote_grp_face_batch.\ - to_element_indices.get(queue=queue) + remote_bdry_indices =\ + remote_to_elem_indices[i_remote_grp][batch_idx] elems = i_local_meshwide_elems[index_flags] - elem_base faces = i_local_faces[index_flags] - local_bdry_element_indices = local_el_lookup[elems, faces] + local_bdry_indices = local_el_lookup[elems, faces] batches = _make_cross_face_batches(queue, remote_bdry, local_bdry, i_remote_grp, i_local_grp, - remote_bdry_element_indices, - local_bdry_element_indices) + remote_bdry_indices, + local_bdry_indices) part_batches[i_remote_grp].extend(batches) return DirectDiscretizationConnection( from_discr=local_bdry, to_discr=remote_bdry, - groups=[DiscretizationConnectionElementGroup(batches=batches) - for batches in part_batches], + groups=[DiscretizationConnectionElementGroup(batches=grp_batches) + for grp_batches in part_batches], is_surjective=True) # }}} diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 1d2a954a..f0009aa7 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -139,6 +139,12 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for i in range(len(local_mesh.groups))] local_batches = [local_bdry_conn.groups[i].batches for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face + for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] remote_bdry = remote_bdry_conn.to_discr remote_mesh = remote_bdry_conn.from_discr.mesh @@ -146,20 +152,28 @@ def 
test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for i in range(len(remote_mesh.groups))] remote_batches = [remote_bdry_conn.groups[i].batches for i in range(len(remote_mesh.groups))] + remote_to_elem_faces = [[batch.to_element_face + for batch in grp_batches] + for grp_batches in remote_batches] + remote_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in remote_batches] # Connect local_mesh to remote_mesh local_part_conn = make_partition_connection(local_bdry_conn, i_local_part, remote_bdry, remote_adj_groups, - remote_batches) + remote_to_elem_faces, + remote_to_elem_indices) # Connect remote mesh to local mesh remote_part_conn = make_partition_connection(remote_bdry_conn, i_remote_part, local_bdry, local_adj_groups, - local_batches) + local_to_elem_faces, + local_to_elem_indices) check_connection(local_part_conn) check_connection(remote_part_conn) diff --git a/testmpi.py b/testmpi.py index fbc13883..bb3c1978 100644 --- a/testmpi.py +++ b/testmpi.py @@ -1,6 +1,6 @@ from mpi4py import MPI import numpy as np -import pyopencl +import pyopencl as cl comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -30,8 +30,9 @@ elif (rank - 1) in range(num_parts): mesh = comm.recv(source=0, tag=1) print('Recieved mesh') - cl_ctx = pyopencl.create_some_context() - + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + from meshmode.discretization.poly_element\ import PolynomialWarpAndBlendGroupFactory group_factory = PolynomialWarpAndBlendGroupFactory(4) @@ -68,9 +69,17 @@ elif (rank - 1) in range(num_parts): for i in range(len(local_mesh.groups))] local_batches = [local_bdry_conns[i_remote_part].groups[i].batches for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in 
local_batches] + + print(local_bdry.groups) local_data = {'bdry': local_bdry, 'adj': local_adj_groups, - 'batches': local_batches} + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) recv_reqs = {} @@ -93,7 +102,8 @@ elif (rank - 1) in range(num_parts): continue remote_bdry = data['bdry'] remote_adj_groups =data['adj'] - remote_batches = data['batches'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] # Connect local_mesh to remote_mesh from meshmode.discretization.connection import make_partition_connection connection[i_remote_part] =\ @@ -101,7 +111,8 @@ elif (rank - 1) in range(num_parts): i_local_part, remote_bdry, remote_adj_groups, - remote_batches) + remote_to_elem_faces, + remote_to_elem_indices) from meshmode.discretization.connection import check_connection check_connection(connection[i_remote_part]) -- GitLab From 94af5e25b3898a50a916bde6dd9ef8d90a740d9e Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 23 Aug 2017 11:59:47 -0500 Subject: [PATCH 221/266] working --- meshmode/mesh/__init__.py | 10 +++ test/test_meshmode.py | 11 +++- test/testmpi.py | 130 ++++++++++++++++++++++++++++++++++++++ testmpi.py | 118 ---------------------------------- 4 files changed, 149 insertions(+), 120 deletions(-) create mode 100644 test/testmpi.py delete mode 100644 testmpi.py diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index b9f8e9fe..f5147001 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -548,6 +548,16 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): .. versionadded:: 2017.1 """ + #FIXME + ''' + This is a weird error. When we try to pickle and unpickle a mesh, + neighbor_partitions does not exist anymore in + mesh.facial_adjacency_groups[i][None]. My guess was that pickle did not know + that property existed, so I created it. 
+ ''' + neighbor_partitions = None + global_neighbors = None + def __init__(self, elements, element_faces, neighbors, diff --git a/test/test_meshmode.py b/test/test_meshmode.py index f0009aa7..c4944132 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -49,6 +49,13 @@ import logging logger = logging.getLogger(__name__) +@pytest.mark.parametrize("num_parts", [3]) +def test_interpartition_comm(num_parts): + from pytools.mpi import run_with_mpi_ranks + run_with_mpi_ranks("testmpi.py", num_parts + 1, interpartition_communication, + (num_parts,)) + + # {{{ partition_interpolation @pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) @@ -134,7 +141,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, # Gather just enough information for the connection local_bdry = local_bdry_conn.to_discr - local_mesh = local_bdry_conn.from_discr.mesh + local_mesh = part_meshes[i_local_part] local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] for i in range(len(local_mesh.groups))] local_batches = [local_bdry_conn.groups[i].batches @@ -147,7 +154,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for grp_batches in local_batches] remote_bdry = remote_bdry_conn.to_discr - remote_mesh = remote_bdry_conn.from_discr.mesh + remote_mesh = part_meshes[i_remote_mesh] remote_adj_groups = [remote_mesh.facial_adjacency_groups[i][None] for i in range(len(remote_mesh.groups))] remote_batches = [remote_bdry_conn.groups[i].batches diff --git a/test/testmpi.py b/test/testmpi.py new file mode 100644 index 00000000..5c142c10 --- /dev/null +++ b/test/testmpi.py @@ -0,0 +1,130 @@ +import numpy as np +import pyopencl as cl +import pytest + +def interpartition_communication(num_parts): + from mpi4py import MPI + comm = MPI.COMM_WORLD + rank = comm.Get_rank() + + if rank == 0: + np.random.seed(42) + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(3, 
order=4, n=5) for _ in range(2)] + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + + from meshmode.mesh.processing import partition_mesh + parts = [partition_mesh(mesh, part_per_element, i)[0] + for i in range(num_parts)] + + reqs = [] + for r in range(num_parts): + reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) + print('Sent all mesh partitions.') + for req in reqs: + req.wait() + + elif (rank - 1) in range(num_parts): + status = MPI.Status() + local_mesh = comm.recv(source=0, tag=1, status=status) + print('Recieved mesh (size = {0})'.format(status.count)) + + from meshmode.discretization.poly_element\ + import PolynomialWarpAndBlendGroupFactory + group_factory = PolynomialWarpAndBlendGroupFactory(4) + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + + from meshmode.discretization import Discretization + vol_discr = Discretization(cl_ctx, local_mesh, group_factory) + + send_reqs = [] + i_local_part = rank - 1 + local_bdry_conns = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + # Mark faces within local_mesh that are connected to remote_mesh + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION + local_bdry_conns[i_remote_part] =\ + make_face_restriction(vol_discr, group_factory, + BTAG_PARTITION(i_remote_part)) + + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() + if bdry_nodes.size == 0: + # local_mesh is not connected to remote_mesh, send None + send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) + continue + + # Gather information to send to other ranks + local_bdry = local_bdry_conns[i_remote_part].to_discr + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in 
range(len(local_mesh.groups))] + local_batches = [local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) + + recv_reqs = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + status = MPI.Status() + #TODO: Send size of data before to allocate a buffer. + recv_reqs[i_remote_part] = comm.irecv(buf=1000000, + source=i_remote_part+1, + tag=2) + remote_data = {} + for i_part, req in recv_reqs.items(): + remote_data[i_part] = req.wait(status=status) + print('Received remote data (size = {0})'.format(status.count)) + for req in send_reqs: + req.wait() + + connection = {} + for i_remote_part, data in remote_data.items(): + if data is None: + # Local mesh is not connected to remote mesh + continue + remote_bdry_mesh = data['bdry_mesh'] + remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) + remote_adj_groups = data['adj'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] + # Connect local_mesh to remote_mesh + from meshmode.discretization.connection import make_partition_connection + connection[i_remote_part] =\ + make_partition_connection(local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + from meshmode.discretization.connection import check_connection + check_connection(connection[i_remote_part]) + +if __name__ == "__main__": + import sys + from pytools.mpi import 
check_for_mpi_relaunch + check_for_mpi_relaunch(sys.argv) + + if len(sys.argv) > 1: + exec sys.argv[1] + diff --git a/testmpi.py b/testmpi.py deleted file mode 100644 index bb3c1978..00000000 --- a/testmpi.py +++ /dev/null @@ -1,118 +0,0 @@ -from mpi4py import MPI -import numpy as np -import pyopencl as cl - -comm = MPI.COMM_WORLD -rank = comm.Get_rank() - -num_parts = 3 -if rank == 0: - np.random.seed(42) - from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] - - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - - part_per_element = np.random.randint(num_parts, size=mesh.nelements) - - from meshmode.mesh.processing import partition_mesh - parts = [partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] - - reqs = [] - for r in range(num_parts): - reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) - print('Sent all mesh parts.') - for req in reqs: - req.wait() - -elif (rank - 1) in range(num_parts): - mesh = comm.recv(source=0, tag=1) - print('Recieved mesh') - - cl_ctx = cl.create_some_context() - queue = cl.CommandQueue(cl_ctx) - - from meshmode.discretization.poly_element\ - import PolynomialWarpAndBlendGroupFactory - group_factory = PolynomialWarpAndBlendGroupFactory(4) - - from meshmode.discretization import Discretization - vol_discr = Discretization(cl_ctx, mesh, group_factory) - - send_reqs = [] - i_local_part = rank - 1 - local_bdry_conns = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - # Mark faces within local_mesh that are connected to remote_mesh - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - local_bdry_conns[i_remote_part] =\ - make_face_restriction(vol_discr, group_factory, - BTAG_PARTITION(i_remote_part)) - - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - 
continue - bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() - if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh, send None - send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) - continue - - # Gather information to send to other ranks - local_bdry = local_bdry_conns[i_remote_part].to_discr - local_mesh = local_bdry_conns[i_remote_part].from_discr.mesh - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - print(local_bdry.groups) - local_data = {'bdry': local_bdry, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) - - recv_reqs = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - recv_reqs[i_remote_part] = comm.irecv(source=i_remote_part+1, tag=2) - - remote_data = {} - for i_part, req in recv_reqs.items(): - remote_data[i_part] = req.wait() - for req in send_reqs: - req.wait() - - - connection = {} - for i_remote_part, data in remote_data.items(): - if data is None: - # Local mesh is not connected to remote mesh - continue - remote_bdry = data['bdry'] - remote_adj_groups =data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - connection[i_remote_part] =\ - make_partition_connection(local_bdry_conns[i_remote_part], - i_local_part, - remote_bdry, - remote_adj_groups, - 
remote_to_elem_faces, - remote_to_elem_indices) - from meshmode.discretization.connection import check_connection - check_connection(connection[i_remote_part]) - -- GitLab From 2419309bde1eb9d7e5901d7a7e6c0e15218501bb Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 23 Aug 2017 12:15:49 -0500 Subject: [PATCH 222/266] Add MPI test --- test/test_meshmode.py | 7 --- test/testmpi.py | 130 ------------------------------------------ testmpi.py | 121 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 121 insertions(+), 137 deletions(-) delete mode 100644 test/testmpi.py create mode 100644 testmpi.py diff --git a/test/test_meshmode.py b/test/test_meshmode.py index c4944132..6c1292e7 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -49,13 +49,6 @@ import logging logger = logging.getLogger(__name__) -@pytest.mark.parametrize("num_parts", [3]) -def test_interpartition_comm(num_parts): - from pytools.mpi import run_with_mpi_ranks - run_with_mpi_ranks("testmpi.py", num_parts + 1, interpartition_communication, - (num_parts,)) - - # {{{ partition_interpolation @pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) diff --git a/test/testmpi.py b/test/testmpi.py deleted file mode 100644 index 5c142c10..00000000 --- a/test/testmpi.py +++ /dev/null @@ -1,130 +0,0 @@ -import numpy as np -import pyopencl as cl -import pytest - -def interpartition_communication(num_parts): - from mpi4py import MPI - comm = MPI.COMM_WORLD - rank = comm.Get_rank() - - if rank == 0: - np.random.seed(42) - from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] - - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - - part_per_element = np.random.randint(num_parts, size=mesh.nelements) - - from meshmode.mesh.processing import partition_mesh - parts = [partition_mesh(mesh, part_per_element, i)[0] - for i in range(num_parts)] - - reqs = 
[] - for r in range(num_parts): - reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) - print('Sent all mesh partitions.') - for req in reqs: - req.wait() - - elif (rank - 1) in range(num_parts): - status = MPI.Status() - local_mesh = comm.recv(source=0, tag=1, status=status) - print('Recieved mesh (size = {0})'.format(status.count)) - - from meshmode.discretization.poly_element\ - import PolynomialWarpAndBlendGroupFactory - group_factory = PolynomialWarpAndBlendGroupFactory(4) - cl_ctx = cl.create_some_context() - queue = cl.CommandQueue(cl_ctx) - - from meshmode.discretization import Discretization - vol_discr = Discretization(cl_ctx, local_mesh, group_factory) - - send_reqs = [] - i_local_part = rank - 1 - local_bdry_conns = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - # Mark faces within local_mesh that are connected to remote_mesh - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - local_bdry_conns[i_remote_part] =\ - make_face_restriction(vol_discr, group_factory, - BTAG_PARTITION(i_remote_part)) - - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() - if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh, send None - send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) - continue - - # Gather information to send to other ranks - local_bdry = local_bdry_conns[i_remote_part].to_discr - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in 
local_batches] - - local_data = {'bdry_mesh': local_bdry.mesh, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) - - recv_reqs = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - status = MPI.Status() - #TODO: Send size of data before to allocate a buffer. - recv_reqs[i_remote_part] = comm.irecv(buf=1000000, - source=i_remote_part+1, - tag=2) - remote_data = {} - for i_part, req in recv_reqs.items(): - remote_data[i_part] = req.wait(status=status) - print('Received remote data (size = {0})'.format(status.count)) - for req in send_reqs: - req.wait() - - connection = {} - for i_remote_part, data in remote_data.items(): - if data is None: - # Local mesh is not connected to remote mesh - continue - remote_bdry_mesh = data['bdry_mesh'] - remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) - remote_adj_groups = data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - connection[i_remote_part] =\ - make_partition_connection(local_bdry_conns[i_remote_part], - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - from meshmode.discretization.connection import check_connection - check_connection(connection[i_remote_part]) - -if __name__ == "__main__": - import sys - from pytools.mpi import check_for_mpi_relaunch - check_for_mpi_relaunch(sys.argv) - - if len(sys.argv) > 1: - exec sys.argv[1] - diff --git a/testmpi.py b/testmpi.py new file mode 100644 index 00000000..62a7aaef --- /dev/null +++ b/testmpi.py @@ -0,0 +1,121 @@ +import numpy as np + +num_parts = 3 + +from mpi4py import MPI +comm = MPI.COMM_WORLD +rank = comm.Get_rank() + +if rank == 0: + np.random.seed(42) + 
from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + + from meshmode.mesh.processing import partition_mesh + parts = [partition_mesh(mesh, part_per_element, i)[0] + for i in range(num_parts)] + + reqs = [] + for r in range(num_parts): + reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) + print('Sent all mesh partitions.') + for req in reqs: + req.wait() + +elif (rank - 1) in range(num_parts): + status = MPI.Status() + local_mesh = comm.recv(source=0, tag=1, status=status) + print('Recieved mesh (size = {0})'.format(status.count)) + + from meshmode.discretization.poly_element\ + import PolynomialWarpAndBlendGroupFactory + group_factory = PolynomialWarpAndBlendGroupFactory(4) + import pyopencl as cl + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + + from meshmode.discretization import Discretization + vol_discr = Discretization(cl_ctx, local_mesh, group_factory) + + send_reqs = [] + i_local_part = rank - 1 + local_bdry_conns = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + # Mark faces within local_mesh that are connected to remote_mesh + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION + local_bdry_conns[i_remote_part] =\ + make_face_restriction(vol_discr, group_factory, + BTAG_PARTITION(i_remote_part)) + + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() + if bdry_nodes.size == 0: + # local_mesh is not connected to remote_mesh, send None + send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) + continue + + # Gather information to send to other ranks + local_bdry = 
local_bdry_conns[i_remote_part].to_discr + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) + + recv_reqs = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + status = MPI.Status() + #TODO: Send size of data before to allocate a buffer. + recv_reqs[i_remote_part] = comm.irecv(buf=1000000, + source=i_remote_part+1, + tag=2) + remote_data = {} + for i_part, req in recv_reqs.items(): + remote_data[i_part] = req.wait(status=status) + print('Received remote data (size = {0})'.format(status.count)) + for req in send_reqs: + req.wait() + + connection = {} + for i_remote_part, data in remote_data.items(): + if data is None: + # Local mesh is not connected to remote mesh + continue + remote_bdry_mesh = data['bdry_mesh'] + remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) + remote_adj_groups = data['adj'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] + # Connect local_mesh to remote_mesh + from meshmode.discretization.connection import make_partition_connection + connection[i_remote_part] =\ + make_partition_connection(local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + from meshmode.discretization.connection import check_connection + 
check_connection(connection[i_remote_part]) -- GitLab From c9ec1ae9d264184b8c1d9834cc5712d490b7f267 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 23 Aug 2017 12:17:05 -0500 Subject: [PATCH 223/266] small fix --- test/test_meshmode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 6c1292e7..5bda531f 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -147,7 +147,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, for grp_batches in local_batches] remote_bdry = remote_bdry_conn.to_discr - remote_mesh = part_meshes[i_remote_mesh] + remote_mesh = part_meshes[i_remote_part] remote_adj_groups = [remote_mesh.facial_adjacency_groups[i][None] for i in range(len(remote_mesh.groups))] remote_batches = [remote_bdry_conn.groups[i].batches -- GitLab From d6967ca616391c945a96678f9f78821235c1baf3 Mon Sep 17 00:00:00 2001 From: ellis Date: Wed, 23 Aug 2017 14:21:15 -0500 Subject: [PATCH 224/266] Fix errors --- meshmode/discretization/connection/opposite_face.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index cccae33c..ab460da8 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -470,11 +470,11 @@ def make_partition_connection(local_bdry_conn, i_local_part, if not np.any(index_flags): continue - batch_idx = np.where(remote_to_elem_faces[i_remote_grp] - == i_remote_face)[0] - - remote_bdry_indices =\ - remote_to_elem_indices[i_remote_grp][batch_idx] + for idxs, to_face in zip(remote_to_elem_indices[i_remote_grp], + remote_to_elem_faces[i_remote_grp]): + if to_face == i_remote_face: + remote_bdry_indices = idxs + break elems = i_local_meshwide_elems[index_flags] - elem_base faces = i_local_faces[index_flags] -- GitLab From 
0322cffa9ff0188d816718c33945d897cea07d7d Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 24 Aug 2017 15:13:34 -0500 Subject: [PATCH 225/266] Make pytest for mpi communication --- test/test_meshmode.py | 17 ++- testmpi.py | 245 ++++++++++++++++++++++-------------------- 2 files changed, 141 insertions(+), 121 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 5bda531f..f25930ca 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -49,12 +49,24 @@ import logging logger = logging.getLogger(__name__) +@pytest.mark.parametrize("num_partitions", [3, 6]) +def test_mpi_communication(num_partitions): + num_ranks = num_partitions + 1 + import subprocess, sys, os + newenv = os.environ.copy() + newenv["PYTOOLS_RUN_WITHIN_MPI"] = "1" + subprocess.check_call(["mpirun", "-np", str(num_ranks), + sys.executable, "testmpi.py", str(num_partitions)], + env=newenv) + + # {{{ partition_interpolation @pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) @pytest.mark.parametrize("num_parts", [2, 3]) @pytest.mark.parametrize("num_groups", [1, 2]) -@pytest.mark.parametrize(("dim", "mesh_pars"), [ +@pytest.mark.parametrize(("dim", "mesh_pars"), + [ (2, [3, 4, 7]), (3, [3, 4]) ]) @@ -178,8 +190,7 @@ def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, check_connection(local_part_conn) check_connection(remote_part_conn) - true_local_points = f(local_part_conn.to_discr.nodes()[0] - .with_queue(queue)) + true_local_points = f(local_bdry.nodes()[0].with_queue(queue)) remote_points = local_part_conn(queue, true_local_points) local_points = remote_part_conn(queue, remote_points) diff --git a/testmpi.py b/testmpi.py index 62a7aaef..e24ce730 100644 --- a/testmpi.py +++ b/testmpi.py @@ -1,121 +1,130 @@ import numpy as np -num_parts = 3 - -from mpi4py import MPI -comm = MPI.COMM_WORLD -rank = comm.Get_rank() - -if rank == 0: - np.random.seed(42) - from meshmode.mesh.generation import generate_warped_rect_mesh - 
meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] - - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - - part_per_element = np.random.randint(num_parts, size=mesh.nelements) - - from meshmode.mesh.processing import partition_mesh - parts = [partition_mesh(mesh, part_per_element, i)[0] - for i in range(num_parts)] - - reqs = [] - for r in range(num_parts): - reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) - print('Sent all mesh partitions.') - for req in reqs: - req.wait() - -elif (rank - 1) in range(num_parts): - status = MPI.Status() - local_mesh = comm.recv(source=0, tag=1, status=status) - print('Recieved mesh (size = {0})'.format(status.count)) - - from meshmode.discretization.poly_element\ - import PolynomialWarpAndBlendGroupFactory - group_factory = PolynomialWarpAndBlendGroupFactory(4) - import pyopencl as cl - cl_ctx = cl.create_some_context() - queue = cl.CommandQueue(cl_ctx) - - from meshmode.discretization import Discretization - vol_discr = Discretization(cl_ctx, local_mesh, group_factory) - - send_reqs = [] - i_local_part = rank - 1 - local_bdry_conns = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - # Mark faces within local_mesh that are connected to remote_mesh - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - local_bdry_conns[i_remote_part] =\ - make_face_restriction(vol_discr, group_factory, - BTAG_PARTITION(i_remote_part)) - - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() - if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh, send None - send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) - continue - - # Gather information to send to other ranks - local_bdry = local_bdry_conns[i_remote_part].to_discr - local_adj_groups = 
[local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - local_data = {'bdry_mesh': local_bdry.mesh, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) - - recv_reqs = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue +def mpi_comm(num_parts): + + from mpi4py import MPI + comm = MPI.COMM_WORLD + rank = comm.Get_rank() + + # This rank only partitions a mesh and sends them to their respective ranks. + if rank == 0: + np.random.seed(42) + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + + from meshmode.mesh.processing import partition_mesh + parts = [partition_mesh(mesh, part_per_element, i)[0] + for i in range(num_parts)] + + reqs = [] + for r in range(num_parts): + reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) + print('Rank 0: Sent all mesh partitions.') + for req in reqs: + req.wait() + + # These ranks recieve a mesh and comunicates boundary data to the other ranks. + elif (rank - 1) in range(num_parts): status = MPI.Status() - #TODO: Send size of data before to allocate a buffer. 
- recv_reqs[i_remote_part] = comm.irecv(buf=1000000, - source=i_remote_part+1, - tag=2) - remote_data = {} - for i_part, req in recv_reqs.items(): - remote_data[i_part] = req.wait(status=status) - print('Received remote data (size = {0})'.format(status.count)) - for req in send_reqs: - req.wait() - - connection = {} - for i_remote_part, data in remote_data.items(): - if data is None: - # Local mesh is not connected to remote mesh - continue - remote_bdry_mesh = data['bdry_mesh'] - remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) - remote_adj_groups = data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - connection[i_remote_part] =\ - make_partition_connection(local_bdry_conns[i_remote_part], - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - from meshmode.discretization.connection import check_connection - check_connection(connection[i_remote_part]) + local_mesh = comm.recv(source=0, tag=1, status=status) + print('Rank {0}: Recieved full mesh (size = {1})'.format(rank, status.count)) + + from meshmode.discretization.poly_element\ + import PolynomialWarpAndBlendGroupFactory + group_factory = PolynomialWarpAndBlendGroupFactory(4) + import pyopencl as cl + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + + from meshmode.discretization import Discretization + vol_discr = Discretization(cl_ctx, local_mesh, group_factory) + + i_local_part = rank - 1 + local_bdry_conns = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + # Mark faces within local_mesh that are connected to remote_mesh + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION + local_bdry_conns[i_remote_part] =\ + make_face_restriction(vol_discr, 
group_factory, + BTAG_PARTITION(i_remote_part)) + + # Send boundary data + send_reqs = [] + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() + if bdry_nodes.size == 0: + # local_mesh is not connected to remote_mesh, send None + send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) + continue + + # Gather information to send to other ranks + local_bdry = local_bdry_conns[i_remote_part].to_discr + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) + + # Receive boundary data + remote_data = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + remote_rank = i_remote_part + 1 + status = MPI.Status() + remote_data[i_remote_part] = comm.recv(source=remote_rank, + tag=2, + status=status) + print('Rank {0}: Received rank {1} data (size = {2})' + .format(rank, remote_rank, status.count)) + + for req in send_reqs: + req.wait() + + for i_remote_part, data in remote_data.items(): + if data is None: + # Local mesh is not connected to remote mesh + continue + remote_bdry_mesh = data['bdry_mesh'] + remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) + remote_adj_groups = data['adj'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] + # 
Connect local_mesh to remote_mesh + from meshmode.discretization.connection import make_partition_connection + connection = make_partition_connection(local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + from meshmode.discretization.connection import check_connection + check_connection(connection) + +if __name__ == "__main__": + import sys + + assert(len(sys.argv) == 2, 'Invalid number of arguments') + + num_parts = int(sys.argv[1]) + mpi_comm(num_parts) -- GitLab From a89d6f3abb94e8f85235f1310f9cc28ed136e8d7 Mon Sep 17 00:00:00 2001 From: ellis Date: Thu, 24 Aug 2017 15:15:16 -0500 Subject: [PATCH 226/266] Fix whitespace --- testmpi.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/testmpi.py b/testmpi.py index e24ce730..87a51914 100644 --- a/testmpi.py +++ b/testmpi.py @@ -1,5 +1,6 @@ import numpy as np + def mpi_comm(num_parts): from mpi4py import MPI @@ -121,10 +122,11 @@ def mpi_comm(num_parts): from meshmode.discretization.connection import check_connection check_connection(connection) + if __name__ == "__main__": import sys - assert(len(sys.argv) == 2, 'Invalid number of arguments') + assert len(sys.argv) == 2, 'Invalid number of arguments' num_parts = int(sys.argv[1]) mpi_comm(num_parts) -- GitLab From 202fd48fc4357dfa3cdf101d5f7f4b6860776d65 Mon Sep 17 00:00:00 2001 From: ellis Date: Fri, 25 Aug 2017 10:02:56 -0500 Subject: [PATCH 227/266] Fix errors --- test/test_meshmode.py | 10 ++++++---- testmpi.py => test/testmpi.py | 1 + 2 files changed, 7 insertions(+), 4 deletions(-) rename testmpi.py => test/testmpi.py (98%) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index f25930ca..be66f28d 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -52,12 +52,14 @@ logger = logging.getLogger(__name__) @pytest.mark.parametrize("num_partitions", [3, 6]) def test_mpi_communication(num_partitions): num_ranks = num_partitions + 1 - import 
subprocess, sys, os + from subprocess import check_call + import sys + import os newenv = os.environ.copy() newenv["PYTOOLS_RUN_WITHIN_MPI"] = "1" - subprocess.check_call(["mpirun", "-np", str(num_ranks), - sys.executable, "testmpi.py", str(num_partitions)], - env=newenv) + check_call(["mpirun", "-np", str(num_ranks), + sys.executable, "test/testmpi.py", str(num_partitions)], + env=newenv) # {{{ partition_interpolation diff --git a/testmpi.py b/test/testmpi.py similarity index 98% rename from testmpi.py rename to test/testmpi.py index 87a51914..7f84e501 100644 --- a/testmpi.py +++ b/test/testmpi.py @@ -1,3 +1,4 @@ +from __future__ import division, absolute_import, print_function import numpy as np -- GitLab From e00ff50d12e6111d757e628e3d4eda6bee6d1cd4 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 5 Sep 2017 17:07:57 -0500 Subject: [PATCH 228/266] Fix testmpi error --- test/test_meshmode.py | 3 +-- test/testmpi.py | 10 +++------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index be66f28d..6b6c7b2e 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -57,8 +57,7 @@ def test_mpi_communication(num_partitions): import os newenv = os.environ.copy() newenv["PYTOOLS_RUN_WITHIN_MPI"] = "1" - check_call(["mpirun", "-np", str(num_ranks), - sys.executable, "test/testmpi.py", str(num_partitions)], + check_call(["mpirun", "-np", str(num_ranks), sys.executable, "testmpi.py"], env=newenv) diff --git a/test/testmpi.py b/test/testmpi.py index 7f84e501..511b1fe9 100644 --- a/test/testmpi.py +++ b/test/testmpi.py @@ -2,11 +2,12 @@ from __future__ import division, absolute_import, print_function import numpy as np -def mpi_comm(num_parts): +def mpi_comm(): from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() + num_parts = comm.Get_size() - 1 # This rank only partitions a mesh and sends them to their respective ranks. 
if rank == 0: @@ -125,9 +126,4 @@ def mpi_comm(num_parts): if __name__ == "__main__": - import sys - - assert len(sys.argv) == 2, 'Invalid number of arguments' - - num_parts = int(sys.argv[1]) - mpi_comm(num_parts) + mpi_comm() -- GitLab From 3e6092bb29417fde811aeeee5adc374007d073f9 Mon Sep 17 00:00:00 2001 From: ellis Date: Tue, 5 Sep 2017 17:37:34 -0500 Subject: [PATCH 229/266] Install mpi4py --- .gitlab-ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4668a2b0..1cfab6cb 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -2,7 +2,7 @@ Python 2.7 AMD CPU: script: - export PY_EXE=python2.7 - export PYOPENCL_TEST=amd:pu - - export EXTRA_INSTALL="numpy mako" + - export EXTRA_INSTALL="numpy mako mpi4py" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh - ". ./build-and-test-py-project.sh" tags: @@ -15,7 +15,7 @@ Python 2.7 POCL: script: - export PY_EXE=python2.7 - export PYOPENCL_TEST=portable - - export EXTRA_INSTALL="numpy mako" + - export EXTRA_INSTALL="numpy mako mpi4py" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh - ". ./build-and-test-py-project.sh" tags: @@ -28,7 +28,7 @@ Python 3.5 POCL: script: - export PY_EXE=python3.5 - export PYOPENCL_TEST=portable - - export EXTRA_INSTALL="numpy mako" + - export EXTRA_INSTALL="numpy mako mpi4py" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh - ". 
./build-and-test-py-project.sh" tags: -- GitLab From 174ff810c969a66e1016ce34023b05d5fa38efb5 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 11 Sep 2017 10:18:06 -0500 Subject: [PATCH 230/266] Rename MPI test helper to avoid confusion with test entrypoints --- test/{testmpi.py => mpi_test_helper.py} | 0 test/test_meshmode.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename test/{testmpi.py => mpi_test_helper.py} (100%) diff --git a/test/testmpi.py b/test/mpi_test_helper.py similarity index 100% rename from test/testmpi.py rename to test/mpi_test_helper.py diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 6b6c7b2e..43d61d73 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -57,7 +57,7 @@ def test_mpi_communication(num_partitions): import os newenv = os.environ.copy() newenv["PYTOOLS_RUN_WITHIN_MPI"] = "1" - check_call(["mpirun", "-np", str(num_ranks), sys.executable, "testmpi.py"], + check_call(["mpirun", "-np", str(num_ranks), sys.executable, "mpi_test_helper.py"], env=newenv) -- GitLab From 71a37f1130daf0d3b0bc74f7e29eca900c63035d Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 11 Sep 2017 10:20:40 -0500 Subject: [PATCH 231/266] Add 'mpi' tag to CI jobs to make sure MPI is available where tests run --- .gitlab-ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1cfab6cb..03d40314 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -8,6 +8,7 @@ Python 2.7 AMD CPU: tags: - python2.7 - amd-cl-cpu + - mpi except: - tags @@ -21,6 +22,7 @@ Python 2.7 POCL: tags: - python2.7 - pocl + - mpi except: - tags @@ -34,6 +36,7 @@ Python 3.5 POCL: tags: - python3.5 - pocl + - mpi except: - tags -- GitLab From bdf996b498c48c67c7d51b3cce86519a1c1732f7 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 11 Sep 2017 13:06:37 -0500 Subject: [PATCH 232/266] Deprecated ctx_getter -> ctx_factory --- test/test_meshmode.py | 328 
++++-------------------------------------- 1 file changed, 26 insertions(+), 302 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 43d61d73..afaf4203 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -49,290 +49,6 @@ import logging logger = logging.getLogger(__name__) -@pytest.mark.parametrize("num_partitions", [3, 6]) -def test_mpi_communication(num_partitions): - num_ranks = num_partitions + 1 - from subprocess import check_call - import sys - import os - newenv = os.environ.copy() - newenv["PYTOOLS_RUN_WITHIN_MPI"] = "1" - check_call(["mpirun", "-np", str(num_ranks), sys.executable, "mpi_test_helper.py"], - env=newenv) - - -# {{{ partition_interpolation - -@pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) -@pytest.mark.parametrize("num_parts", [2, 3]) -@pytest.mark.parametrize("num_groups", [1, 2]) -@pytest.mark.parametrize(("dim", "mesh_pars"), - [ - (2, [3, 4, 7]), - (3, [3, 4]) - ]) -def test_partition_interpolation(ctx_getter, group_factory, dim, mesh_pars, - num_parts, num_groups, scramble_partitions=True): - np.random.seed(42) - cl_ctx = ctx_getter() - queue = cl.CommandQueue(cl_ctx) - order = 4 - - from pytools.convergence import EOCRecorder - eoc_rec = dict() - for i in range(num_parts): - for j in range(num_parts): - if i == j: - continue - eoc_rec[i, j] = EOCRecorder() - - def f(x): - return 0.5*cl.clmath.sin(30.*x) - - for n in mesh_pars: - from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(dim, order=order, n=n) - for _ in range(num_groups)] - - if num_groups > 1: - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - else: - mesh = meshes[0] - - if scramble_partitions: - part_per_element = np.random.randint(num_parts, size=mesh.nelements) - else: - from pymetis import part_graph - _, p = part_graph(num_parts, - xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), - 
adjncy=mesh.nodal_adjacency.neighbors.tolist()) - part_per_element = np.array(p) - - from meshmode.mesh.processing import partition_mesh - part_meshes = [ - partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] - - from meshmode.discretization import Discretization - vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) - for i in range(num_parts)] - - from meshmode.mesh import BTAG_PARTITION - from meshmode.discretization.connection import (make_face_restriction, - make_partition_connection, - check_connection) - - for i_local_part in range(num_parts): - for i_remote_part in range(num_parts): - if (i_local_part == i_remote_part - or eoc_rec[i_local_part, i_remote_part] is None): - eoc_rec[i_local_part, i_remote_part] = None - continue - - # Mark faces within local_mesh that are connected to remote_mesh - local_bdry_conn = make_face_restriction(vol_discrs[i_local_part], - group_factory(order), - BTAG_PARTITION(i_remote_part)) - - # If these parts are not connected, don't bother checking the error - bdry_nodes = local_bdry_conn.to_discr.nodes() - if bdry_nodes.size == 0: - eoc_rec[i_local_part, i_remote_part] = None - continue - - # Mark faces within remote_mesh that are connected to local_mesh - remote_bdry_conn = make_face_restriction(vol_discrs[i_remote_part], - group_factory(order), - BTAG_PARTITION(i_local_part)) - - # Gather just enough information for the connection - local_bdry = local_bdry_conn.to_discr - local_mesh = part_meshes[i_local_part] - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conn.groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face - for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - remote_bdry = 
remote_bdry_conn.to_discr - remote_mesh = part_meshes[i_remote_part] - remote_adj_groups = [remote_mesh.facial_adjacency_groups[i][None] - for i in range(len(remote_mesh.groups))] - remote_batches = [remote_bdry_conn.groups[i].batches - for i in range(len(remote_mesh.groups))] - remote_to_elem_faces = [[batch.to_element_face - for batch in grp_batches] - for grp_batches in remote_batches] - remote_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in remote_batches] - - # Connect local_mesh to remote_mesh - local_part_conn = make_partition_connection(local_bdry_conn, - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - - # Connect remote mesh to local mesh - remote_part_conn = make_partition_connection(remote_bdry_conn, - i_remote_part, - local_bdry, - local_adj_groups, - local_to_elem_faces, - local_to_elem_indices) - - check_connection(local_part_conn) - check_connection(remote_part_conn) - - true_local_points = f(local_bdry.nodes()[0].with_queue(queue)) - remote_points = local_part_conn(queue, true_local_points) - local_points = remote_part_conn(queue, remote_points) - - err = la.norm((true_local_points - local_points).get(), np.inf) - eoc_rec[i_local_part, i_remote_part].add_data_point(1./n, err) - - for (i, j), e in eoc_rec.items(): - if e is not None: - print("Error of connection from part %i to part %i." 
% (i, j)) - print(e) - assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-12) - -# }}} - - -# {{{ partition_mesh - -@pytest.mark.parametrize("dim", [2, 3]) -@pytest.mark.parametrize("num_parts", [4, 5, 7]) -@pytest.mark.parametrize("num_meshes", [1, 2, 7]) -def test_partition_mesh(num_parts, num_meshes, dim, scramble_partitions=False): - np.random.seed(42) - n = (5,) * dim - from meshmode.mesh.generation import generate_regular_rect_mesh - meshes = [generate_regular_rect_mesh(a=(0 + i,) * dim, b=(1 + i,) * dim, n=n) - for i in range(num_meshes)] - - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - - if scramble_partitions: - part_per_element = np.random.randint(num_parts, size=mesh.nelements) - else: - from pymetis import part_graph - _, p = part_graph(num_parts, - xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), - adjncy=mesh.nodal_adjacency.neighbors.tolist()) - part_per_element = np.array(p) - - from meshmode.mesh.processing import partition_mesh - # TODO: The same part_per_element array must be used to partition each mesh. - # Maybe the interface should be changed to guarantee this. 
- new_meshes = [ - partition_mesh(mesh, part_per_element, i) for i in range(num_parts)] - - assert mesh.nelements == np.sum( - [new_meshes[i][0].nelements for i in range(num_parts)]), \ - "part_mesh has the wrong number of elements" - - assert count_tags(mesh, BTAG_ALL) == np.sum( - [count_tags(new_meshes[i][0], BTAG_ALL) for i in range(num_parts)]), \ - "part_mesh has the wrong number of BTAG_ALL boundaries" - - from meshmode.mesh import BTAG_PARTITION, InterPartitionAdjacencyGroup - from meshmode.mesh.processing import find_group_indices - num_tags = np.zeros((num_parts,)) - - index_lookup_table = dict() - for ipart, (m, _) in enumerate(new_meshes): - for igrp in range(len(m.groups)): - adj = m.facial_adjacency_groups[igrp][None] - if not isinstance(adj, InterPartitionAdjacencyGroup): - # This group is not connected to another partition. - continue - for i, (elem, face) in enumerate(zip(adj.elements, adj.element_faces)): - index_lookup_table[ipart, igrp, elem, face] = i - - for part_num in range(num_parts): - part, part_to_global = new_meshes[part_num] - for grp_num in range(len(part.groups)): - adj = part.facial_adjacency_groups[grp_num][None] - tags = -part.facial_adjacency_groups[grp_num][None].neighbors - assert np.all(tags >= 0) - if not isinstance(adj, InterPartitionAdjacencyGroup): - # This group is not connected to another partition. - continue - elem_base = part.groups[grp_num].element_nr_base - for idx in range(len(adj.elements)): - if adj.global_neighbors[idx] == -1: - continue - elem = adj.elements[idx] - face = adj.element_faces[idx] - n_part_num = adj.neighbor_partitions[idx] - n_meshwide_elem = adj.global_neighbors[idx] - n_face = adj.neighbor_faces[idx] - num_tags[n_part_num] += 1 - n_part, n_part_to_global = new_meshes[n_part_num] - # Hack: find_igrps expects a numpy.ndarray and returns - # a numpy.ndarray. But if a single integer is fed - # into find_igrps, an integer is returned. 
- n_grp_num = int(find_group_indices(n_part.groups, n_meshwide_elem)) - n_adj = n_part.facial_adjacency_groups[n_grp_num][None] - n_elem_base = n_part.groups[n_grp_num].element_nr_base - n_elem = n_meshwide_elem - n_elem_base - n_idx = index_lookup_table[n_part_num, n_grp_num, n_elem, n_face] - assert (part_num == n_adj.neighbor_partitions[n_idx] - and elem + elem_base == n_adj.global_neighbors[n_idx] - and face == n_adj.neighbor_faces[n_idx]),\ - "InterPartitionAdjacencyGroup is not consistent" - _, n_part_to_global = new_meshes[n_part_num] - p_meshwide_elem = part_to_global[elem + elem_base] - p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] - - p_grp_num = find_group_indices(mesh.groups, p_meshwide_elem) - p_n_grp_num = find_group_indices(mesh.groups, p_meshwide_n_elem) - - p_elem_base = mesh.groups[p_grp_num].element_nr_base - p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base - p_elem = p_meshwide_elem - p_elem_base - p_n_elem = p_meshwide_n_elem - p_n_elem_base - - f_groups = mesh.facial_adjacency_groups[p_grp_num] - for p_bnd_adj in f_groups.values(): - for idx in range(len(p_bnd_adj.elements)): - if (p_elem == p_bnd_adj.elements[idx] and - face == p_bnd_adj.element_faces[idx]): - assert p_n_elem == p_bnd_adj.neighbors[idx],\ - "Tag does not give correct neighbor" - assert n_face == p_bnd_adj.neighbor_faces[idx],\ - "Tag does not give correct neighbor" - - for i_tag in range(num_parts): - tag_sum = 0 - for mesh, _ in new_meshes: - tag_sum += count_tags(mesh, BTAG_PARTITION(i_tag)) - assert num_tags[i_tag] == tag_sum,\ - "part_mesh has the wrong number of BTAG_PARTITION boundaries" - - -def count_tags(mesh, tag): - num_bnds = 0 - for adj_dict in mesh.facial_adjacency_groups: - for neighbors in adj_dict[None].neighbors: - if neighbors < 0: - if -neighbors & mesh.boundary_tag_bit(tag) != 0: - num_bnds += 1 - return num_bnds - -# }}} - - # {{{ circle mesh def test_circle_mesh(do_plot=False): @@ -377,9 +93,9 @@ def test_circle_mesh(do_plot=False): 
("warp", 3, [10, 20, 30]), ]) @pytest.mark.parametrize("per_face_groups", [False, True]) -def test_boundary_interpolation(ctx_getter, group_factory, boundary_tag, +def test_boundary_interpolation(ctx_factory, group_factory, boundary_tag, mesh_name, dim, mesh_pars, per_face_groups): - cl_ctx = ctx_getter() + cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) from meshmode.discretization import Discretization @@ -465,9 +181,9 @@ def test_boundary_interpolation(ctx_getter, group_factory, boundary_tag, ("warp", 3, [10, 20, 30]), ]) @pytest.mark.parametrize("per_face_groups", [False, True]) -def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars, +def test_all_faces_interpolation(ctx_factory, mesh_name, dim, mesh_pars, per_face_groups): - cl_ctx = ctx_getter() + cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) from meshmode.discretization import Discretization @@ -577,11 +293,11 @@ def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars, ("warp", 2, [3, 5, 7]), ("warp", 3, [3, 5]), ]) -def test_opposite_face_interpolation(ctx_getter, group_factory, +def test_opposite_face_interpolation(ctx_factory, group_factory, mesh_name, dim, mesh_pars): logging.basicConfig(level=logging.INFO) - cl_ctx = ctx_getter() + cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) from meshmode.discretization import Discretization @@ -693,12 +409,12 @@ def test_element_orientation(): ("ball", lambda: mgen.generate_icosahedron(1, 1)), ("torus", lambda: mgen.generate_torus(5, 1)), ]) -def test_3d_orientation(ctx_getter, what, mesh_gen_func, visualize=False): +def test_3d_orientation(ctx_factory, what, mesh_gen_func, visualize=False): pytest.importorskip("pytential") logging.basicConfig(level=logging.INFO) - ctx = ctx_getter() + ctx = ctx_factory() queue = cl.CommandQueue(ctx) mesh = mesh_gen_func() @@ -748,7 +464,7 @@ def test_3d_orientation(ctx_getter, what, mesh_gen_func, visualize=False): # {{{ merge and map -def test_merge_and_map(ctx_getter, 
visualize=False): +def test_merge_and_map(ctx_factory, visualize=False): from meshmode.mesh.io import generate_gmsh, FileSource from meshmode.mesh.generation import generate_box_mesh from meshmode.mesh import TensorProductElementGroup @@ -789,7 +505,7 @@ def test_merge_and_map(ctx_getter, visualize=False): if visualize: from meshmode.discretization import Discretization - cl_ctx = ctx_getter() + cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) discr = Discretization(cl_ctx, mesh3, discr_grp_factory) @@ -805,10 +521,10 @@ def test_merge_and_map(ctx_getter, visualize=False): @pytest.mark.parametrize("dim", [2, 3]) @pytest.mark.parametrize("order", [1, 3]) -def test_sanity_single_element(ctx_getter, dim, order, visualize=False): +def test_sanity_single_element(ctx_factory, dim, order, visualize=False): pytest.importorskip("pytential") - cl_ctx = ctx_getter() + cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) from modepy.tools import unit_vertices @@ -892,12 +608,12 @@ def test_sanity_single_element(ctx_getter, dim, order, visualize=False): @pytest.mark.parametrize("dim", [2, 3, 4]) @pytest.mark.parametrize("order", [3]) -def test_sanity_qhull_nd(ctx_getter, dim, order): +def test_sanity_qhull_nd(ctx_factory, dim, order): pytest.importorskip("scipy") logging.basicConfig(level=logging.INFO) - ctx = ctx_getter() + ctx = ctx_factory() queue = cl.CommandQueue(ctx) from scipy.spatial import Delaunay @@ -946,13 +662,13 @@ def test_sanity_qhull_nd(ctx_getter, dim, order): ("ball-radius-1.step", 3), ]) @pytest.mark.parametrize("mesh_order", [1, 2]) -def test_sanity_balls(ctx_getter, src_file, dim, mesh_order, +def test_sanity_balls(ctx_factory, src_file, dim, mesh_order, visualize=False): pytest.importorskip("pytential") logging.basicConfig(level=logging.INFO) - ctx = ctx_getter() + ctx = ctx_factory() queue = cl.CommandQueue(ctx) from pytools.convergence import EOCRecorder @@ -1070,7 +786,7 @@ def test_rect_mesh(do_plot=False): pt.show() -def 
test_box_mesh(ctx_getter, visualize=False): +def test_box_mesh(ctx_factory, visualize=False): from meshmode.mesh.generation import generate_box_mesh mesh = generate_box_mesh(3*(np.linspace(0, 1, 5),)) @@ -1078,7 +794,7 @@ def test_box_mesh(ctx_getter, visualize=False): from meshmode.discretization import Discretization from meshmode.discretization.poly_element import \ PolynomialWarpAndBlendGroupFactory - cl_ctx = ctx_getter() + cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) discr = Discretization(cl_ctx, mesh, @@ -1227,6 +943,8 @@ def no_test_quad_mesh_3d(): # }}} +# {{{ test_quad_single_element + def test_quad_single_element(): from meshmode.mesh.generation import make_group_from_vertices from meshmode.mesh import Mesh, TensorProductElementGroup @@ -1250,6 +968,10 @@ def test_quad_single_element(): mg.nodes[1].reshape(-1), "o") plt.show() +# }}} + + +# {{{ test_quad_multi_element def test_quad_multi_element(): from meshmode.mesh.generation import generate_box_mesh @@ -1270,6 +992,8 @@ def test_quad_multi_element(): mg.nodes[1].reshape(-1), "o") plt.show() +# }}} + if __name__ == "__main__": import sys -- GitLab From af5a9c295f7ddd4584a1cf457ebf07a733f530ba Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 11 Sep 2017 13:07:03 -0500 Subject: [PATCH 233/266] Shuffle MPI/partition into single/independent file --- test/mpi_test_helper.py | 129 ----------- test/test_partition.py | 482 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 482 insertions(+), 129 deletions(-) delete mode 100644 test/mpi_test_helper.py create mode 100644 test/test_partition.py diff --git a/test/mpi_test_helper.py b/test/mpi_test_helper.py deleted file mode 100644 index 511b1fe9..00000000 --- a/test/mpi_test_helper.py +++ /dev/null @@ -1,129 +0,0 @@ -from __future__ import division, absolute_import, print_function -import numpy as np - - -def mpi_comm(): - - from mpi4py import MPI - comm = MPI.COMM_WORLD - rank = comm.Get_rank() - num_parts = comm.Get_size() - 1 - - # 
This rank only partitions a mesh and sends them to their respective ranks. - if rank == 0: - np.random.seed(42) - from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] - - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) - - part_per_element = np.random.randint(num_parts, size=mesh.nelements) - - from meshmode.mesh.processing import partition_mesh - parts = [partition_mesh(mesh, part_per_element, i)[0] - for i in range(num_parts)] - - reqs = [] - for r in range(num_parts): - reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) - print('Rank 0: Sent all mesh partitions.') - for req in reqs: - req.wait() - - # These ranks recieve a mesh and comunicates boundary data to the other ranks. - elif (rank - 1) in range(num_parts): - status = MPI.Status() - local_mesh = comm.recv(source=0, tag=1, status=status) - print('Rank {0}: Recieved full mesh (size = {1})'.format(rank, status.count)) - - from meshmode.discretization.poly_element\ - import PolynomialWarpAndBlendGroupFactory - group_factory = PolynomialWarpAndBlendGroupFactory(4) - import pyopencl as cl - cl_ctx = cl.create_some_context() - queue = cl.CommandQueue(cl_ctx) - - from meshmode.discretization import Discretization - vol_discr = Discretization(cl_ctx, local_mesh, group_factory) - - i_local_part = rank - 1 - local_bdry_conns = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - # Mark faces within local_mesh that are connected to remote_mesh - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - local_bdry_conns[i_remote_part] =\ - make_face_restriction(vol_discr, group_factory, - BTAG_PARTITION(i_remote_part)) - - # Send boundary data - send_reqs = [] - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - bdry_nodes = 
local_bdry_conns[i_remote_part].to_discr.nodes() - if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh, send None - send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) - continue - - # Gather information to send to other ranks - local_bdry = local_bdry_conns[i_remote_part].to_discr - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - local_data = {'bdry_mesh': local_bdry.mesh, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) - - # Receive boundary data - remote_data = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - remote_rank = i_remote_part + 1 - status = MPI.Status() - remote_data[i_remote_part] = comm.recv(source=remote_rank, - tag=2, - status=status) - print('Rank {0}: Received rank {1} data (size = {2})' - .format(rank, remote_rank, status.count)) - - for req in send_reqs: - req.wait() - - for i_remote_part, data in remote_data.items(): - if data is None: - # Local mesh is not connected to remote mesh - continue - remote_bdry_mesh = data['bdry_mesh'] - remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) - remote_adj_groups = data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - connection = make_partition_connection(local_bdry_conns[i_remote_part], - 
i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - from meshmode.discretization.connection import check_connection - check_connection(connection) - - -if __name__ == "__main__": - mpi_comm() diff --git a/test/test_partition.py b/test/test_partition.py new file mode 100644 index 00000000..9a8cfb60 --- /dev/null +++ b/test/test_partition.py @@ -0,0 +1,482 @@ +from __future__ import division, absolute_import, print_function + +__copyright__ = """ +Copyright (C) 2017 Ellis Hoag +Copyright (C) 2017 Andreas Kloeckner +""" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + +from six.moves import range +import numpy as np +import numpy.linalg as la +import pyopencl as cl +import pyopencl.array # noqa +import pyopencl.clmath # noqa + +from pyopencl.tools import ( # noqa + pytest_generate_tests_for_pyopencl + as pytest_generate_tests) + +from meshmode.discretization.poly_element import ( + PolynomialWarpAndBlendGroupFactory) +from meshmode.mesh import BTAG_ALL + +import pytest +import os + +import logging +logger = logging.getLogger(__name__) + + +# {{{ partition_interpolation + +@pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) +@pytest.mark.parametrize("num_parts", [2, 3]) +@pytest.mark.parametrize("num_groups", [1, 2]) +@pytest.mark.parametrize(("dim", "mesh_pars"), + [ + (2, [3, 4, 7]), + (3, [3, 4]) + ]) +def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, + num_parts, num_groups, scramble_partitions=True): + np.random.seed(42) + cl_ctx = ctx_factory() + queue = cl.CommandQueue(cl_ctx) + order = 4 + + from pytools.convergence import EOCRecorder + eoc_rec = dict() + for i in range(num_parts): + for j in range(num_parts): + if i == j: + continue + eoc_rec[i, j] = EOCRecorder() + + def f(x): + return 0.5*cl.clmath.sin(30.*x) + + for n in mesh_pars: + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(dim, order=order, n=n) + for _ in range(num_groups)] + + if num_groups > 1: + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + else: + mesh = meshes[0] + + if scramble_partitions: + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + else: + from pymetis import part_graph + _, p = part_graph(num_parts, + xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), + adjncy=mesh.nodal_adjacency.neighbors.tolist()) + part_per_element = np.array(p) + + from meshmode.mesh.processing import partition_mesh + part_meshes = [ + partition_mesh(mesh, part_per_element, i)[0] for 
i in range(num_parts)] + + from meshmode.discretization import Discretization + vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) + for i in range(num_parts)] + + from meshmode.mesh import BTAG_PARTITION + from meshmode.discretization.connection import (make_face_restriction, + make_partition_connection, + check_connection) + + for i_local_part in range(num_parts): + for i_remote_part in range(num_parts): + if (i_local_part == i_remote_part + or eoc_rec[i_local_part, i_remote_part] is None): + eoc_rec[i_local_part, i_remote_part] = None + continue + + # Mark faces within local_mesh that are connected to remote_mesh + local_bdry_conn = make_face_restriction(vol_discrs[i_local_part], + group_factory(order), + BTAG_PARTITION(i_remote_part)) + + # If these parts are not connected, don't bother checking the error + bdry_nodes = local_bdry_conn.to_discr.nodes() + if bdry_nodes.size == 0: + eoc_rec[i_local_part, i_remote_part] = None + continue + + # Mark faces within remote_mesh that are connected to local_mesh + remote_bdry_conn = make_face_restriction(vol_discrs[i_remote_part], + group_factory(order), + BTAG_PARTITION(i_local_part)) + + # Gather just enough information for the connection + local_bdry = local_bdry_conn.to_discr + local_mesh = part_meshes[i_local_part] + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conn.groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face + for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + remote_bdry = remote_bdry_conn.to_discr + remote_mesh = part_meshes[i_remote_part] + remote_adj_groups = [remote_mesh.facial_adjacency_groups[i][None] + for i in range(len(remote_mesh.groups))] + remote_batches = 
[remote_bdry_conn.groups[i].batches + for i in range(len(remote_mesh.groups))] + remote_to_elem_faces = [[batch.to_element_face + for batch in grp_batches] + for grp_batches in remote_batches] + remote_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in remote_batches] + + # Connect local_mesh to remote_mesh + local_part_conn = make_partition_connection(local_bdry_conn, + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + + # Connect remote mesh to local mesh + remote_part_conn = make_partition_connection(remote_bdry_conn, + i_remote_part, + local_bdry, + local_adj_groups, + local_to_elem_faces, + local_to_elem_indices) + + check_connection(local_part_conn) + check_connection(remote_part_conn) + + true_local_points = f(local_bdry.nodes()[0].with_queue(queue)) + remote_points = local_part_conn(queue, true_local_points) + local_points = remote_part_conn(queue, remote_points) + + err = la.norm((true_local_points - local_points).get(), np.inf) + eoc_rec[i_local_part, i_remote_part].add_data_point(1./n, err) + + for (i, j), e in eoc_rec.items(): + if e is not None: + print("Error of connection from part %i to part %i." 
% (i, j)) + print(e) + assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-12) + +# }}} + + +# {{{ partition_mesh + +@pytest.mark.parametrize("dim", [2, 3]) +@pytest.mark.parametrize("num_parts", [4, 5, 7]) +@pytest.mark.parametrize("num_meshes", [1, 2, 7]) +def test_partition_mesh(num_parts, num_meshes, dim, scramble_partitions=False): + np.random.seed(42) + n = (5,) * dim + from meshmode.mesh.generation import generate_regular_rect_mesh + meshes = [generate_regular_rect_mesh(a=(0 + i,) * dim, b=(1 + i,) * dim, n=n) + for i in range(num_meshes)] + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + if scramble_partitions: + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + else: + from pymetis import part_graph + _, p = part_graph(num_parts, + xadj=mesh.nodal_adjacency.neighbors_starts.tolist(), + adjncy=mesh.nodal_adjacency.neighbors.tolist()) + part_per_element = np.array(p) + + from meshmode.mesh.processing import partition_mesh + # TODO: The same part_per_element array must be used to partition each mesh. + # Maybe the interface should be changed to guarantee this. 
+ new_meshes = [ + partition_mesh(mesh, part_per_element, i) for i in range(num_parts)] + + assert mesh.nelements == np.sum( + [new_meshes[i][0].nelements for i in range(num_parts)]), \ + "part_mesh has the wrong number of elements" + + assert count_tags(mesh, BTAG_ALL) == np.sum( + [count_tags(new_meshes[i][0], BTAG_ALL) for i in range(num_parts)]), \ + "part_mesh has the wrong number of BTAG_ALL boundaries" + + from meshmode.mesh import BTAG_PARTITION, InterPartitionAdjacencyGroup + from meshmode.mesh.processing import find_group_indices + num_tags = np.zeros((num_parts,)) + + index_lookup_table = dict() + for ipart, (m, _) in enumerate(new_meshes): + for igrp in range(len(m.groups)): + adj = m.facial_adjacency_groups[igrp][None] + if not isinstance(adj, InterPartitionAdjacencyGroup): + # This group is not connected to another partition. + continue + for i, (elem, face) in enumerate(zip(adj.elements, adj.element_faces)): + index_lookup_table[ipart, igrp, elem, face] = i + + for part_num in range(num_parts): + part, part_to_global = new_meshes[part_num] + for grp_num in range(len(part.groups)): + adj = part.facial_adjacency_groups[grp_num][None] + tags = -part.facial_adjacency_groups[grp_num][None].neighbors + assert np.all(tags >= 0) + if not isinstance(adj, InterPartitionAdjacencyGroup): + # This group is not connected to another partition. + continue + elem_base = part.groups[grp_num].element_nr_base + for idx in range(len(adj.elements)): + if adj.global_neighbors[idx] == -1: + continue + elem = adj.elements[idx] + face = adj.element_faces[idx] + n_part_num = adj.neighbor_partitions[idx] + n_meshwide_elem = adj.global_neighbors[idx] + n_face = adj.neighbor_faces[idx] + num_tags[n_part_num] += 1 + n_part, n_part_to_global = new_meshes[n_part_num] + # Hack: find_igrps expects a numpy.ndarray and returns + # a numpy.ndarray. But if a single integer is fed + # into find_igrps, an integer is returned. 
+ n_grp_num = int(find_group_indices(n_part.groups, n_meshwide_elem)) + n_adj = n_part.facial_adjacency_groups[n_grp_num][None] + n_elem_base = n_part.groups[n_grp_num].element_nr_base + n_elem = n_meshwide_elem - n_elem_base + n_idx = index_lookup_table[n_part_num, n_grp_num, n_elem, n_face] + assert (part_num == n_adj.neighbor_partitions[n_idx] + and elem + elem_base == n_adj.global_neighbors[n_idx] + and face == n_adj.neighbor_faces[n_idx]),\ + "InterPartitionAdjacencyGroup is not consistent" + _, n_part_to_global = new_meshes[n_part_num] + p_meshwide_elem = part_to_global[elem + elem_base] + p_meshwide_n_elem = n_part_to_global[n_elem + n_elem_base] + + p_grp_num = find_group_indices(mesh.groups, p_meshwide_elem) + p_n_grp_num = find_group_indices(mesh.groups, p_meshwide_n_elem) + + p_elem_base = mesh.groups[p_grp_num].element_nr_base + p_n_elem_base = mesh.groups[p_n_grp_num].element_nr_base + p_elem = p_meshwide_elem - p_elem_base + p_n_elem = p_meshwide_n_elem - p_n_elem_base + + f_groups = mesh.facial_adjacency_groups[p_grp_num] + for p_bnd_adj in f_groups.values(): + for idx in range(len(p_bnd_adj.elements)): + if (p_elem == p_bnd_adj.elements[idx] and + face == p_bnd_adj.element_faces[idx]): + assert p_n_elem == p_bnd_adj.neighbors[idx],\ + "Tag does not give correct neighbor" + assert n_face == p_bnd_adj.neighbor_faces[idx],\ + "Tag does not give correct neighbor" + + for i_tag in range(num_parts): + tag_sum = 0 + for mesh, _ in new_meshes: + tag_sum += count_tags(mesh, BTAG_PARTITION(i_tag)) + assert num_tags[i_tag] == tag_sum,\ + "part_mesh has the wrong number of BTAG_PARTITION boundaries" + + +def count_tags(mesh, tag): + num_bnds = 0 + for adj_dict in mesh.facial_adjacency_groups: + for neighbors in adj_dict[None].neighbors: + if neighbors < 0: + if -neighbors & mesh.boundary_tag_bit(tag) != 0: + num_bnds += 1 + return num_bnds + +# }}} + + +# {{{ MPI test rank entrypoint + +def mpi_test_rank_entrypoint(): + from mpi4py import MPI + comm = 
MPI.COMM_WORLD + rank = comm.Get_rank() + num_parts = comm.Get_size() - 1 + + # This rank only partitions a mesh and sends them to their respective ranks. + if rank == 0: + np.random.seed(42) + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] + + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + + from meshmode.mesh.processing import partition_mesh + parts = [partition_mesh(mesh, part_per_element, i)[0] + for i in range(num_parts)] + + reqs = [] + for r in range(num_parts): + reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) + print('Rank 0: Sent all mesh partitions.') + for req in reqs: + req.wait() + + # These ranks recieve a mesh and comunicates boundary data to the other ranks. + elif (rank - 1) in range(num_parts): + status = MPI.Status() + local_mesh = comm.recv(source=0, tag=1, status=status) + print('Rank {0}: Recieved full mesh (size = {1})'.format(rank, status.count)) + + from meshmode.discretization.poly_element\ + import PolynomialWarpAndBlendGroupFactory + group_factory = PolynomialWarpAndBlendGroupFactory(4) + import pyopencl as cl + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + + from meshmode.discretization import Discretization + vol_discr = Discretization(cl_ctx, local_mesh, group_factory) + + i_local_part = rank - 1 + local_bdry_conns = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + # Mark faces within local_mesh that are connected to remote_mesh + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION + local_bdry_conns[i_remote_part] =\ + make_face_restriction(vol_discr, group_factory, + BTAG_PARTITION(i_remote_part)) + + print("Rank %d send begin" % rank) + + # Send boundary data + send_reqs = [] + for 
i_remote_part in range(num_parts): + print(i_remote_part) + if i_local_part == i_remote_part: + continue + bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() + if bdry_nodes.size == 0: + # local_mesh is not connected to remote_mesh, send None + send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) + continue + + # Gather information to send to other ranks + local_bdry = local_bdry_conns[i_remote_part].to_discr + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) + + # Receive boundary data + remote_data = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + remote_rank = i_remote_part + 1 + status = MPI.Status() + remote_data[i_remote_part] = comm.recv(source=remote_rank, + tag=2, + status=status) + print('Rank {0}: Received rank {1} data (size = {2})' + .format(rank, remote_rank, status.count)) + + for req in send_reqs: + req.wait() + + for i_remote_part, data in remote_data.items(): + if data is None: + # Local mesh is not connected to remote mesh + continue + remote_bdry_mesh = data['bdry_mesh'] + remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) + remote_adj_groups = data['adj'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] + # Connect local_mesh to remote_mesh + from meshmode.discretization.connection 
import make_partition_connection + connection = make_partition_connection(local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + from meshmode.discretization.connection import check_connection + check_connection(connection) + + print("Rank %d exiting" % rank) + +# }}} + + +# {{{ MPI test pytest entrypoint + +@pytest.mark.mpi +@pytest.mark.parametrize("num_partitions", [3, 6]) +def test_mpi_communication(num_partitions): + num_ranks = num_partitions + 1 + from subprocess import check_call + import sys + newenv = os.environ.copy() + newenv["RUN_WITHIN_MPI"] = "1" + check_call([ + "mpiexec", "-np", str(num_ranks), "-x", "RUN_WITHIN_MPI", + sys.executable, __file__], + env=newenv) + +# }}} + + +if __name__ == "__main__": + if "RUN_WITHIN_MPI" in os.environ: + mpi_test_rank_entrypoint() + else: + import sys + if len(sys.argv) > 1: + exec(sys.argv[1]) + else: + from py.test.cmdline import main + main([__file__]) + +# vim: fdm=marker -- GitLab From 7613398202e64eedf03aa47d066108a5f37de8c3 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 18 Sep 2017 14:11:09 -0500 Subject: [PATCH 234/266] Minor doc tweak --- meshmode/discretization/connection/opposite_face.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index ab460da8..106c6350 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -413,7 +413,7 @@ def make_partition_connection(local_bdry_conn, i_local_part, """ Connects ``local_bdry_conn`` to a neighboring partition. - :arg local_bdry_conn: A :class:`DirectDiscretizationConnection` of the local + :arg local_bdry_conn: A :class:`DiscretizationConnection` of the local partition. :arg i_local_part: The partition number of the local partition. 
:arg remote_adj_groups: A list of :class:`InterPartitionAdjacency`` of the -- GitLab From ab45c76266e4ef22562aa86013efdd313df417a0 Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 23 Sep 2017 20:19:41 -0500 Subject: [PATCH 235/266] Make MPI receives non-blocking --- test/test_partition.py | 43 ++++++++++++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 9a8cfb60..30d8354c 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -327,6 +327,9 @@ def mpi_test_rank_entrypoint(): rank = comm.Get_rank() num_parts = comm.Get_size() - 1 + TAG_DISTRIBUTE_MESHES = 1 + TAG_SEND_MESH = 2 + # This rank only partitions a mesh and sends them to their respective ranks. if rank == 0: np.random.seed(42) @@ -344,7 +347,7 @@ def mpi_test_rank_entrypoint(): reqs = [] for r in range(num_parts): - reqs.append(comm.isend(parts[r], dest=r+1, tag=1)) + reqs.append(comm.isend(parts[r], dest=r+1, tag=TAG_DISTRIBUTE_MESHES)) print('Rank 0: Sent all mesh partitions.') for req in reqs: req.wait() @@ -352,7 +355,7 @@ def mpi_test_rank_entrypoint(): # These ranks recieve a mesh and comunicates boundary data to the other ranks. 
elif (rank - 1) in range(num_parts): status = MPI.Status() - local_mesh = comm.recv(source=0, tag=1, status=status) + local_mesh = comm.recv(source=0, tag=TAG_DISTRIBUTE_MESHES, status=status) print('Rank {0}: Recieved full mesh (size = {1})'.format(rank, status.count)) from meshmode.discretization.poly_element\ @@ -382,13 +385,14 @@ def mpi_test_rank_entrypoint(): # Send boundary data send_reqs = [] for i_remote_part in range(num_parts): - print(i_remote_part) if i_local_part == i_remote_part: continue bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh, send None - send_reqs.append(comm.isend(None, dest=i_remote_part+1, tag=2)) + # local_mesh is not connected to remote_mesh; send None + send_reqs.append(comm.isend(None, + dest=i_remote_part+1, + tag=TAG_SEND_MESH)) continue # Gather information to send to other ranks @@ -407,20 +411,35 @@ def mpi_test_rank_entrypoint(): 'adj': local_adj_groups, 'to_elem_faces': local_to_elem_faces, 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, dest=i_remote_part+1, tag=2)) + send_reqs.append(comm.isend(local_data, + dest=i_remote_part+1, + tag=TAG_SEND_MESH)) # Receive boundary data - remote_data = {} + remote_buf = {} for i_remote_part in range(num_parts): if i_local_part == i_remote_part: continue remote_rank = i_remote_part + 1 status = MPI.Status() - remote_data[i_remote_part] = comm.recv(source=remote_rank, - tag=2, - status=status) - print('Rank {0}: Received rank {1} data (size = {2})' - .format(rank, remote_rank, status.count)) + comm.probe(source=remote_rank, tag=TAG_SEND_MESH, status=status) + remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in remote_buf.items(): + remote_rank = i_remote_part + 1 + recv_reqs[i_remote_part] = comm.irecv(buf=buf, + source=remote_rank, + tag=TAG_SEND_MESH) + + remote_data = {} + for i_remote_part, req in 
recv_reqs.items(): + status = MPI.Status() + remote_data[i_remote_part] = req.wait(status=status) + # Free the buffer + remote_buf[i_remote_part] = None # FIXME: Is this a good idea? + print('Rank {0}: Received rank {1} data ({2} bytes)' + .format(rank, i_remote_part + 1, status.count)) for req in send_reqs: req.wait() -- GitLab From e265f592a01ed85cf48e54c10909e7c5e63fd865 Mon Sep 17 00:00:00 2001 From: ellis Date: Sat, 23 Sep 2017 21:27:47 -0500 Subject: [PATCH 236/266] Make tag constants global --- test/test_partition.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 30d8354c..6809e523 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -320,6 +320,9 @@ def count_tags(mesh, tag): # {{{ MPI test rank entrypoint +TAG_DISTRIBUTE_MESHES = 1 +TAG_SEND_MESH = 2 + def mpi_test_rank_entrypoint(): from mpi4py import MPI @@ -327,9 +330,6 @@ def mpi_test_rank_entrypoint(): rank = comm.Get_rank() num_parts = comm.Get_size() - 1 - TAG_DISTRIBUTE_MESHES = 1 - TAG_SEND_MESH = 2 - # This rank only partitions a mesh and sends them to their respective ranks. 
if rank == 0: np.random.seed(42) -- GitLab From e0b38f15d7c057ec447b103e7111f15539db6c61 Mon Sep 17 00:00:00 2001 From: ellis Date: Sun, 24 Sep 2017 22:10:25 -0500 Subject: [PATCH 237/266] working --- test/test_partition.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_partition.py b/test/test_partition.py index 6809e523..c4323597 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -397,6 +397,7 @@ def mpi_test_rank_entrypoint(): # Gather information to send to other ranks local_bdry = local_bdry_conns[i_remote_part].to_discr + local_mesh = local_bdry_conns[i_remote_part].from_discr.mesh local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] for i in range(len(local_mesh.groups))] local_batches = [local_bdry_conns[i_remote_part].groups[i].batches -- GitLab From 4886f4304bcd5656f0744409a84ed0cee93ee8ca Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 25 Sep 2017 11:30:18 -0500 Subject: [PATCH 238/266] Add debugging info --- test/test_partition.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index c4323597..7c9b4217 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -348,7 +348,7 @@ def mpi_test_rank_entrypoint(): reqs = [] for r in range(num_parts): reqs.append(comm.isend(parts[r], dest=r+1, tag=TAG_DISTRIBUTE_MESHES)) - print('Rank 0: Sent all mesh partitions.') + print('Rank 0: Sent all mesh partitions') for req in reqs: req.wait() @@ -356,7 +356,7 @@ def mpi_test_rank_entrypoint(): elif (rank - 1) in range(num_parts): status = MPI.Status() local_mesh = comm.recv(source=0, tag=TAG_DISTRIBUTE_MESHES, status=status) - print('Rank {0}: Recieved full mesh (size = {1})'.format(rank, status.count)) + print('Rank {0}: Recieved local mesh (size = {1})'.format(rank, status.count)) from meshmode.discretization.poly_element\ import PolynomialWarpAndBlendGroupFactory @@ -434,6 +434,7 @@ def mpi_test_rank_entrypoint(): tag=TAG_SEND_MESH) 
remote_data = {} + total_bytes_recvd = 0 for i_remote_part, req in recv_reqs.items(): status = MPI.Status() remote_data[i_remote_part] = req.wait(status=status) @@ -441,6 +442,9 @@ def mpi_test_rank_entrypoint(): remote_buf[i_remote_part] = None # FIXME: Is this a good idea? print('Rank {0}: Received rank {1} data ({2} bytes)' .format(rank, i_remote_part + 1, status.count)) + total_bytes_recvd += status.count + + print('Rank {0}: Recieved {1} bytes in total'.format(rank, total_bytes_recvd)) for req in send_reqs: req.wait() -- GitLab From 9fc37ad44a6982558c436145944b09fba62112db Mon Sep 17 00:00:00 2001 From: ellis Date: Mon, 25 Sep 2017 13:27:02 -0500 Subject: [PATCH 239/266] Fix whitespace --- test/test_partition.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 7c9b4217..41f3888f 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -356,7 +356,8 @@ def mpi_test_rank_entrypoint(): elif (rank - 1) in range(num_parts): status = MPI.Status() local_mesh = comm.recv(source=0, tag=TAG_DISTRIBUTE_MESHES, status=status) - print('Rank {0}: Recieved local mesh (size = {1})'.format(rank, status.count)) + print('Rank {0}: Recieved local mesh (size = {1})' + .format(rank, status.count)) from meshmode.discretization.poly_element\ import PolynomialWarpAndBlendGroupFactory @@ -444,7 +445,8 @@ def mpi_test_rank_entrypoint(): .format(rank, i_remote_part + 1, status.count)) total_bytes_recvd += status.count - print('Rank {0}: Recieved {1} bytes in total'.format(rank, total_bytes_recvd)) + print('Rank {0}: Recieved {1} bytes in total' + .format(rank, total_bytes_recvd)) for req in send_reqs: req.wait() -- GitLab From 665eb16f97e78329cda7dcb5fa2047be5b7d081c Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 5 Oct 2017 19:44:32 -0500 Subject: [PATCH 240/266] partition test: build a mesh distributor utility, make all ranks do useful work --- test/test_partition.py | 298 
+++++++++++++++++++++++------------------ 1 file changed, 169 insertions(+), 129 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 41f3888f..005af2c9 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -320,156 +320,194 @@ def count_tags(mesh, tag): # {{{ MPI test rank entrypoint -TAG_DISTRIBUTE_MESHES = 1 -TAG_SEND_MESH = 2 +TAG_BASE = 83411 +TAG_DISTRIBUTE_MESHES = TAG_BASE + 1 +TAG_SEND_MESH = TAG_BASE + 2 -def mpi_test_rank_entrypoint(): - from mpi4py import MPI - comm = MPI.COMM_WORLD - rank = comm.Get_rank() - num_parts = comm.Get_size() - 1 - # This rank only partitions a mesh and sends them to their respective ranks. - if rank == 0: - np.random.seed(42) - from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] +class MPIMeshDistributor(object): + def __init__(self, mpi_comm, manager_rank=0): + self.mpi_comm = mpi_comm + self.manager_rank = manager_rank - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) + def is_mananger_rank(self): + return self.mpi_comm.Get_rank() == self.manager_rank - part_per_element = np.random.randint(num_parts, size=mesh.nelements) + def send_mesh_parts(self, mesh, part_per_element, num_parts): + mpi_comm = self.mpi_comm + rank = mpi_comm.Get_rank() + assert num_parts <= mpi_comm.Get_size() + + assert self.is_mananger_rank() from meshmode.mesh.processing import partition_mesh parts = [partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] + local_part = None + reqs = [] - for r in range(num_parts): - reqs.append(comm.isend(parts[r], dest=r+1, tag=TAG_DISTRIBUTE_MESHES)) - print('Rank 0: Sent all mesh partitions') + for r, part in enumerate(parts): + if r == self.manager_rank: + local_part = part + else: + reqs.append(mpi_comm.isend(part, dest=r, tag=TAG_DISTRIBUTE_MESHES)) + + logger.info('rank %d: sent all mesh partitions', rank) for req in 
reqs: req.wait() - # These ranks recieve a mesh and comunicates boundary data to the other ranks. - elif (rank - 1) in range(num_parts): + return local_part + + def receive_mesh_part(self): + from mpi4py import MPI + + mpi_comm = self.mpi_comm + rank = mpi_comm.Get_rank() + status = MPI.Status() - local_mesh = comm.recv(source=0, tag=TAG_DISTRIBUTE_MESHES, status=status) - print('Rank {0}: Recieved local mesh (size = {1})' - .format(rank, status.count)) + result = self.mpi_comm.recv( + source=self.manager_rank, tag=TAG_DISTRIBUTE_MESHES, + status=status) + logger.info('rank %d: recieved local mesh (size = %d)', rank, status.count) - from meshmode.discretization.poly_element\ - import PolynomialWarpAndBlendGroupFactory - group_factory = PolynomialWarpAndBlendGroupFactory(4) - import pyopencl as cl - cl_ctx = cl.create_some_context() - queue = cl.CommandQueue(cl_ctx) + return result - from meshmode.discretization import Discretization - vol_discr = Discretization(cl_ctx, local_mesh, group_factory) - i_local_part = rank - 1 - local_bdry_conns = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - # Mark faces within local_mesh that are connected to remote_mesh - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - local_bdry_conns[i_remote_part] =\ - make_face_restriction(vol_discr, group_factory, - BTAG_PARTITION(i_remote_part)) - - print("Rank %d send begin" % rank) - - # Send boundary data - send_reqs = [] - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() - if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh; send None - send_reqs.append(comm.isend(None, - dest=i_remote_part+1, - tag=TAG_SEND_MESH)) - continue +def mpi_test_rank_entrypoint(): + from mpi4py import MPI + comm = MPI.COMM_WORLD + rank = comm.Get_rank() + num_parts = comm.Get_size() - 
# Gather information to send to other ranks - local_bdry = local_bdry_conns[i_remote_part].to_discr - local_mesh = local_bdry_conns[i_remote_part].from_discr.mesh - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - local_data = {'bdry_mesh': local_bdry.mesh, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, - dest=i_remote_part+1, - tag=TAG_SEND_MESH)) + mesh_dist = MPIMeshDistributor(comm) - # Receive boundary data - remote_buf = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - remote_rank = i_remote_part + 1 - status = MPI.Status() - comm.probe(source=remote_rank, tag=TAG_SEND_MESH, status=status) - remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) - - recv_reqs = {} - for i_remote_part, buf in remote_buf.items(): - remote_rank = i_remote_part + 1 - recv_reqs[i_remote_part] = comm.irecv(buf=buf, - source=remote_rank, - tag=TAG_SEND_MESH) - - remote_data = {} - total_bytes_recvd = 0 - for i_remote_part, req in recv_reqs.items(): - status = MPI.Status() - remote_data[i_remote_part] = req.wait(status=status) - # Free the buffer - remote_buf[i_remote_part] = None # FIXME: Is this a good idea? 
- print('Rank {0}: Received rank {1} data ({2} bytes)' - .format(rank, i_remote_part + 1, status.count)) - total_bytes_recvd += status.count - - print('Rank {0}: Recieved {1} bytes in total' - .format(rank, total_bytes_recvd)) - - for req in send_reqs: - req.wait() + if mesh_dist.is_mananger_rank(): + np.random.seed(42) + from meshmode.mesh.generation import generate_warped_rect_mesh + meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] - for i_remote_part, data in remote_data.items(): - if data is None: - # Local mesh is not connected to remote mesh - continue - remote_bdry_mesh = data['bdry_mesh'] - remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) - remote_adj_groups = data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - connection = make_partition_connection(local_bdry_conns[i_remote_part], - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - from meshmode.discretization.connection import check_connection - check_connection(connection) + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + + part_per_element = np.random.randint(num_parts, size=mesh.nelements) + + local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element, num_parts) + else: + local_mesh = mesh_dist.receive_mesh_part() + + from meshmode.discretization.poly_element\ + import PolynomialWarpAndBlendGroupFactory + group_factory = PolynomialWarpAndBlendGroupFactory(4) + import pyopencl as cl + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + + from meshmode.discretization import Discretization + vol_discr = Discretization(cl_ctx, local_mesh, group_factory) + + i_local_part = rank + local_bdry_conns = {} + for i_remote_part in range(num_parts): + if i_local_part == 
i_remote_part: + continue + # Mark faces within local_mesh that are connected to remote_mesh + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION + local_bdry_conns[i_remote_part] =\ + make_face_restriction(vol_discr, group_factory, + BTAG_PARTITION(i_remote_part)) + + print("Rank %d send begin" % rank) + + # Send boundary data + send_reqs = [] + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() + if bdry_nodes.size == 0: + # local_mesh is not connected to remote_mesh; send None + send_reqs.append(comm.isend(None, + dest=i_remote_part, + tag=TAG_SEND_MESH)) + continue + + # Gather information to send to other ranks + local_bdry = local_bdry_conns[i_remote_part].to_discr + local_mesh = local_bdry_conns[i_remote_part].from_discr.mesh + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + send_reqs.append(comm.isend(local_data, + dest=i_remote_part, + tag=TAG_SEND_MESH)) + + # Receive boundary data + remote_buf = {} + for i_remote_part in range(num_parts): + if i_local_part == i_remote_part: + continue + remote_rank = i_remote_part + status = MPI.Status() + comm.probe(source=remote_rank, tag=TAG_SEND_MESH, status=status) + remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in remote_buf.items(): 
+ remote_rank = i_remote_part + recv_reqs[i_remote_part] = comm.irecv(buf=buf, + source=remote_rank, + tag=TAG_SEND_MESH) + + remote_data = {} + total_bytes_recvd = 0 + for i_remote_part, req in recv_reqs.items(): + status = MPI.Status() + remote_data[i_remote_part] = req.wait(status=status) + # Free the buffer + remote_buf[i_remote_part] = None # FIXME: Is this a good idea? + print('Rank {0}: Received rank {1} data ({2} bytes)' + .format(rank, i_remote_part, status.count)) + total_bytes_recvd += status.count + + print('Rank {0}: Recieved {1} bytes in total' + .format(rank, total_bytes_recvd)) + + for req in send_reqs: + req.wait() + + for i_remote_part, data in remote_data.items(): + if data is None: + # Local mesh is not connected to remote mesh + continue + remote_bdry_mesh = data['bdry_mesh'] + remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) + remote_adj_groups = data['adj'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] + # Connect local_mesh to remote_mesh + from meshmode.discretization.connection import make_partition_connection + connection = make_partition_connection(local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + from meshmode.discretization.connection import check_connection + check_connection(connection) print("Rank %d exiting" % rank) @@ -481,6 +519,8 @@ def mpi_test_rank_entrypoint(): @pytest.mark.mpi @pytest.mark.parametrize("num_partitions", [3, 6]) def test_mpi_communication(num_partitions): + pytest.importorskip("mpi4py") + num_ranks = num_partitions + 1 from subprocess import check_call import sys -- GitLab From 6efbe016b454d0155c824896bb7886537e488637 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 5 Oct 2017 23:08:34 -0500 Subject: [PATCH 241/266] Refactor mesh distribution and boundary communication into reusable components --- meshmode/distributed.py | 233 
++++++++++++++++++++++++++++++++++++++++ test/test_partition.py | 166 ++-------------------------- 2 files changed, 242 insertions(+), 157 deletions(-) create mode 100644 meshmode/distributed.py diff --git a/meshmode/distributed.py b/meshmode/distributed.py new file mode 100644 index 00000000..8fcc239c --- /dev/null +++ b/meshmode/distributed.py @@ -0,0 +1,233 @@ +from __future__ import division, absolute_import, print_function + +__copyright__ = """ +Copyright (C) 2017 Ellis Hoag +Copyright (C) 2017 Andreas Kloeckner +""" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + +import six + +import numpy as np + +import logging +logger = logging.getLogger(__name__) + +TAG_BASE = 83411 +TAG_DISTRIBUTE_MESHES = TAG_BASE + 1 +TAG_SEND_MESH = TAG_BASE + 2 + + +# {{{ mesh distributor + +class MPIMeshDistributor(object): + def __init__(self, mpi_comm, manager_rank=0): + self.mpi_comm = mpi_comm + self.manager_rank = manager_rank + + def is_mananger_rank(self): + return self.mpi_comm.Get_rank() == self.manager_rank + + def send_mesh_parts(self, mesh, part_per_element, num_parts): + mpi_comm = self.mpi_comm + rank = mpi_comm.Get_rank() + assert num_parts <= mpi_comm.Get_size() + + assert self.is_mananger_rank() + + from meshmode.mesh.processing import partition_mesh + parts = [partition_mesh(mesh, part_per_element, i)[0] + for i in range(num_parts)] + + local_part = None + + reqs = [] + for r, part in enumerate(parts): + if r == self.manager_rank: + local_part = part + else: + reqs.append(mpi_comm.isend(part, dest=r, tag=TAG_DISTRIBUTE_MESHES)) + + logger.info('rank %d: sent all mesh partitions', rank) + for req in reqs: + req.wait() + + return local_part + + def receive_mesh_part(self): + from mpi4py import MPI + + mpi_comm = self.mpi_comm + rank = mpi_comm.Get_rank() + + status = MPI.Status() + result = self.mpi_comm.recv( + source=self.manager_rank, tag=TAG_DISTRIBUTE_MESHES, + status=status) + logger.info('rank %d: recieved local mesh (size = %d)', rank, status.count) + + return result + +# }}} + + +# {{{ boundary communicator + +class MPIBoundaryCommunicator(object): + def __init__(self, mpi_comm, queue, part_discr, bdry_group_factory): + self.mpi_comm = mpi_comm + self.part_discr = part_discr + + self.i_local_part = mpi_comm.Get_rank() + + self.bdry_group_factory = bdry_group_factory + + # FIXME: boundary tags for unconnected parts should not exist + from meshmode.mesh import BTAG_PARTITION + self.connected_parts = set( + btag.part_nr + for btag in part_discr.mesh.boundary_tags + if isinstance(btag, BTAG_PARTITION)) + # /!\ Not 
final--mutated melow + + from meshmode.discretization.connection import make_face_restriction + + self.local_bdry_conns = {} + for i_remote_part in list(self.connected_parts): + bdry_conn = make_face_restriction(part_discr, bdry_group_factory, + BTAG_PARTITION(i_remote_part)) + + # FIXME This is a really inefficient way of figuring out that that + # part of the boundary is empty. + if bdry_conn.to_discr.nnodes: + self.local_bdry_conns[i_remote_part] = bdry_conn + else: + self.connected_parts.remove(i_remote_part) + + assert self.i_local_part not in self.connected_parts + + self._setup(queue) + + def _post_boundary_data_sends(self, queue): + send_reqs = [] + for i_remote_part in self.connected_parts: + local_bdry = self.local_bdry_conns[i_remote_part].to_discr + local_mesh = self.local_bdry_conns[i_remote_part].from_discr.mesh + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [self.local_bdry_conns[i_remote_part].groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + send_reqs.append(self.mpi_comm.isend( + local_data, dest=i_remote_part, tag=TAG_SEND_MESH)) + + return send_reqs + + def _receive_boundary_data(self, queue): + rank = self.mpi_comm.Get_rank() + i_local_part = rank + + from mpi4py import MPI + + remote_buf = {} + for i_remote_part in self.connected_parts: + status = MPI.Status() + self.mpi_comm.probe( + source=i_remote_part, tag=TAG_SEND_MESH, status=status) + remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in 
remote_buf.items(): + recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_MESH) + + remote_data = {} + total_bytes_recvd = 0 + for i_remote_part, req in recv_reqs.items(): + status = MPI.Status() + remote_data[i_remote_part] = req.wait(status=status) + + # Free the buffer + remote_buf[i_remote_part] = None + logger.debug('rank %d: Received rank %d data (%d bytes)', + rank, i_remote_part, status.count) + + total_bytes_recvd += status.count + + logger.debug('rank %d: recieved %d bytes in total', rank, total_bytes_recvd) + + self.remote_to_local_bdry_conns = {} + + from meshmode.discretization import Discretization + + for i_remote_part, data in remote_data.items(): + remote_bdry_mesh = data['bdry_mesh'] + remote_bdry = Discretization( + queue.context, + remote_bdry_mesh, + self.bdry_group_factory) + remote_adj_groups = data['adj'] + remote_to_elem_faces = data['to_elem_faces'] + remote_to_elem_indices = data['to_elem_indices'] + + # Connect local_mesh to remote_mesh + from meshmode.discretization.connection import make_partition_connection + self.remote_to_local_bdry_conns[i_remote_part] = \ + make_partition_connection( + self.local_bdry_conns[i_remote_part], + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + + def _setup(self, queue): + logger.info("bdry comm rank %d send begin", self.mpi_comm.Get_rank()) + + send_reqs = self._post_boundary_data_sends(queue) + self._receive_boundary_data(queue) + + for req in send_reqs: + req.wait() + + logger.info("bdry comm rank %d send completed", self.mpi_comm.Get_rank()) + + def check(self): + from meshmode.discretization.connection import check_connection + + for i, conn in six.iteritems(self.remote_to_local_bdry_conns): + check_connection(conn) + +# }}} + + +# vim: foldmethod=marker diff --git a/test/test_partition.py b/test/test_partition.py index 005af2c9..f464597f 100644 --- a/test/test_partition.py +++ b/test/test_partition.py 
@@ -25,6 +25,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +import six from six.moves import range import numpy as np import numpy.linalg as la @@ -321,61 +322,9 @@ def count_tags(mesh, tag): # {{{ MPI test rank entrypoint -TAG_BASE = 83411 -TAG_DISTRIBUTE_MESHES = TAG_BASE + 1 -TAG_SEND_MESH = TAG_BASE + 2 - - -class MPIMeshDistributor(object): - def __init__(self, mpi_comm, manager_rank=0): - self.mpi_comm = mpi_comm - self.manager_rank = manager_rank - - def is_mananger_rank(self): - return self.mpi_comm.Get_rank() == self.manager_rank - - def send_mesh_parts(self, mesh, part_per_element, num_parts): - mpi_comm = self.mpi_comm - rank = mpi_comm.Get_rank() - assert num_parts <= mpi_comm.Get_size() - - assert self.is_mananger_rank() - - from meshmode.mesh.processing import partition_mesh - parts = [partition_mesh(mesh, part_per_element, i)[0] - for i in range(num_parts)] - - local_part = None - - reqs = [] - for r, part in enumerate(parts): - if r == self.manager_rank: - local_part = part - else: - reqs.append(mpi_comm.isend(part, dest=r, tag=TAG_DISTRIBUTE_MESHES)) - - logger.info('rank %d: sent all mesh partitions', rank) - for req in reqs: - req.wait() - - return local_part - - def receive_mesh_part(self): - from mpi4py import MPI - - mpi_comm = self.mpi_comm - rank = mpi_comm.Get_rank() - - status = MPI.Status() - result = self.mpi_comm.recv( - source=self.manager_rank, tag=TAG_DISTRIBUTE_MESHES, - status=status) - logger.info('rank %d: recieved local mesh (size = %d)', rank, status.count) - - return result - - def mpi_test_rank_entrypoint(): + from meshmode.distributed import MPIMeshDistributor, MPIBoundaryCommunicator + from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -407,109 +356,12 @@ def mpi_test_rank_entrypoint(): from meshmode.discretization import Discretization vol_discr = Discretization(cl_ctx, local_mesh, group_factory) - i_local_part = rank - local_bdry_conns = {} - for 
i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - # Mark faces within local_mesh that are connected to remote_mesh - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - local_bdry_conns[i_remote_part] =\ - make_face_restriction(vol_discr, group_factory, - BTAG_PARTITION(i_remote_part)) - - print("Rank %d send begin" % rank) - - # Send boundary data - send_reqs = [] - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - bdry_nodes = local_bdry_conns[i_remote_part].to_discr.nodes() - if bdry_nodes.size == 0: - # local_mesh is not connected to remote_mesh; send None - send_reqs.append(comm.isend(None, - dest=i_remote_part, - tag=TAG_SEND_MESH)) - continue - - # Gather information to send to other ranks - local_bdry = local_bdry_conns[i_remote_part].to_discr - local_mesh = local_bdry_conns[i_remote_part].from_discr.mesh - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - local_data = {'bdry_mesh': local_bdry.mesh, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(comm.isend(local_data, - dest=i_remote_part, - tag=TAG_SEND_MESH)) - - # Receive boundary data - remote_buf = {} - for i_remote_part in range(num_parts): - if i_local_part == i_remote_part: - continue - remote_rank = i_remote_part - status = MPI.Status() - comm.probe(source=remote_rank, tag=TAG_SEND_MESH, status=status) - remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) - - 
recv_reqs = {} - for i_remote_part, buf in remote_buf.items(): - remote_rank = i_remote_part - recv_reqs[i_remote_part] = comm.irecv(buf=buf, - source=remote_rank, - tag=TAG_SEND_MESH) - - remote_data = {} - total_bytes_recvd = 0 - for i_remote_part, req in recv_reqs.items(): - status = MPI.Status() - remote_data[i_remote_part] = req.wait(status=status) - # Free the buffer - remote_buf[i_remote_part] = None # FIXME: Is this a good idea? - print('Rank {0}: Received rank {1} data ({2} bytes)' - .format(rank, i_remote_part, status.count)) - total_bytes_recvd += status.count - - print('Rank {0}: Recieved {1} bytes in total' - .format(rank, total_bytes_recvd)) - - for req in send_reqs: - req.wait() - - for i_remote_part, data in remote_data.items(): - if data is None: - # Local mesh is not connected to remote mesh - continue - remote_bdry_mesh = data['bdry_mesh'] - remote_bdry = Discretization(cl_ctx, remote_bdry_mesh, group_factory) - remote_adj_groups = data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - connection = make_partition_connection(local_bdry_conns[i_remote_part], - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - from meshmode.discretization.connection import check_connection - check_connection(connection) - - print("Rank %d exiting" % rank) + bdry_comm = MPIBoundaryCommunicator(comm, queue, vol_discr, group_factory) + bdry_comm.check() + + # FIXME: Actually test communicating data with this + + logger.debug("Rank %d exiting", rank) # }}} -- GitLab From 71c38c23a58a8ba3dc598417a9a86408d8b92e0c Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 5 Oct 2017 23:14:34 -0500 Subject: [PATCH 242/266] Remove unused six import --- test/test_partition.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/test_partition.py 
b/test/test_partition.py index f464597f..f422c61f 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -25,7 +25,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ -import six from six.moves import range import numpy as np import numpy.linalg as la -- GitLab From effcda9b4ef60db38b987a6de32a4a74e6379e1d Mon Sep 17 00:00:00 2001 From: Ellis Date: Fri, 13 Oct 2017 20:46:45 -0500 Subject: [PATCH 243/266] working --- meshmode/distributed.py | 17 ++++++++++++++++- meshmode/mesh/__init__.py | 26 -------------------------- meshmode/mesh/processing.py | 10 +++++++--- 3 files changed, 23 insertions(+), 30 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 8fcc239c..c2d93c23 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -107,7 +107,21 @@ class MPIBoundaryCommunicator(object): btag.part_nr for btag in part_discr.mesh.boundary_tags if isinstance(btag, BTAG_PARTITION)) - # /!\ Not final--mutated melow + # /!\ Not final--mutated below + # self.connected_parts = np.array([]) + # for adj in part_discr.mesh.facial_adjacency_groups: + # from meshmode.mesh import InterPartitionAdjacencyGroup + # print(adj[None]) + # if isinstance(adj[None], InterPartitionAdjacencyGroup): + # indices = adj[None].neighbor_partitions >= 0 + # self.connected_parts.append(np.unique(adj[None].neighbor_partitions[indices])) + # self.connected_parts = np.unique(self.connected_parts) + #self.connected_parts = set( + # part_nr + # for adj in part_discr.mesh.facial_adjacency_groups + # for part_nr in adj[None].neighbor_partitions + # if part_nr >= 0 + #) from meshmode.discretization.connection import make_face_restriction @@ -122,6 +136,7 @@ class MPIBoundaryCommunicator(object): self.local_bdry_conns[i_remote_part] = bdry_conn else: self.connected_parts.remove(i_remote_part) + # self.local_bdry_conns[i_remote_part] = bdry_conn assert self.i_local_part not in self.connected_parts diff --git 
a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index f5147001..8e6e18b9 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -548,32 +548,6 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): .. versionadded:: 2017.1 """ - #FIXME - ''' - This is a weird error. When we try to pickle and unpickle a mesh, - neighbor_partitions does not exist anymore in - mesh.facial_adjacency_groups[i][None]. My guess was that pickle did not know - that property existed, so I created it. - ''' - neighbor_partitions = None - global_neighbors = None - - def __init__(self, elements, - element_faces, - neighbors, - igroup, - neighbor_partitions, - global_neighbors, - neighbor_faces): - FacialAdjacencyGroup.__init__(self, elements=elements, - element_faces=element_faces, - neighbors=neighbors, - neighbor_faces=neighbor_faces, - igroup=igroup, - ineighbor_group=None) - self.neighbor_partitions = neighbor_partitions - self.global_neighbors = global_neighbors - def __eq__(self, other): return (super.__eq__(self, other) and np.array_equal(self.global_neighbors, other.global_neighbors) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 758163fc..d2d2b14c 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -243,9 +243,13 @@ def partition_mesh(mesh, part_per_element, part_nr): adj_idx += 1 connected_mesh.facial_adjacency_groups[igrp][None] =\ - InterPartitionAdjacencyGroup(elems, faces, neighbors, - bdry.igroup, - n_parts, global_n_elems, n_faces) + InterPartitionAdjacencyGroup(elements=elems, + element_faces=faces, + neighbors=neighbors, + igroup=bdry.igroup, + neighbor_partitions=n_parts, + global_neighbors=global_n_elems, + neighbor_faces=n_faces) return connected_mesh, queried_elems -- GitLab From 2b47ce9e04433dcfa9d76aaaac8fc3c54a71add4 Mon Sep 17 00:00:00 2001 From: Ellis Date: Mon, 16 Oct 2017 10:33:04 -0500 Subject: [PATCH 244/266] Make mpi communication only between connected partitions --- 
meshmode/distributed.py | 42 ++++++++++++----------------------------- test/test_partition.py | 17 ++++++++++++++--- 2 files changed, 26 insertions(+), 33 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index c2d93c23..21c09d4f 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -101,44 +101,26 @@ class MPIBoundaryCommunicator(object): self.bdry_group_factory = bdry_group_factory - # FIXME: boundary tags for unconnected parts should not exist - from meshmode.mesh import BTAG_PARTITION - self.connected_parts = set( - btag.part_nr - for btag in part_discr.mesh.boundary_tags - if isinstance(btag, BTAG_PARTITION)) - # /!\ Not final--mutated below - # self.connected_parts = np.array([]) - # for adj in part_discr.mesh.facial_adjacency_groups: - # from meshmode.mesh import InterPartitionAdjacencyGroup - # print(adj[None]) - # if isinstance(adj[None], InterPartitionAdjacencyGroup): - # indices = adj[None].neighbor_partitions >= 0 - # self.connected_parts.append(np.unique(adj[None].neighbor_partitions[indices])) - # self.connected_parts = np.unique(self.connected_parts) - #self.connected_parts = set( - # part_nr - # for adj in part_discr.mesh.facial_adjacency_groups - # for part_nr in adj[None].neighbor_partitions - # if part_nr >= 0 - #) + from meshmode.mesh import InterPartitionAdjacencyGroup + self.connected_parts = set() + for adj in part_discr.mesh.facial_adjacency_groups: + if isinstance(adj[None], InterPartitionAdjacencyGroup): + indices = adj[None].neighbor_partitions >= 0 + self.connected_parts = self.connected_parts.union( + adj[None].neighbor_partitions[indices]) + assert self.i_local_part not in self.connected_parts from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION self.local_bdry_conns = {} for i_remote_part in list(self.connected_parts): bdry_conn = make_face_restriction(part_discr, bdry_group_factory, BTAG_PARTITION(i_remote_part)) - # FIXME This is a 
really inefficient way of figuring out that that - # part of the boundary is empty. - if bdry_conn.to_discr.nnodes: - self.local_bdry_conns[i_remote_part] = bdry_conn - else: - self.connected_parts.remove(i_remote_part) - # self.local_bdry_conns[i_remote_part] = bdry_conn - - assert self.i_local_part not in self.connected_parts + # Assert that everything in self.connected_parts is truly connected + assert bdry_conn.to_discr.nnodes > 0 + self.local_bdry_conns[i_remote_part] = bdry_conn self._setup(queue) diff --git a/test/test_partition.py b/test/test_partition.py index f422c61f..987d9a93 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -223,6 +223,17 @@ def test_partition_mesh(num_parts, num_meshes, dim, scramble_partitions=False): new_meshes = [ partition_mesh(mesh, part_per_element, i) for i in range(num_parts)] + import pickle + for m, _ in new_meshes: + for adj in m.facial_adjacency_groups: + data = {'adj': adj[None]} + pickle.dump(data, open('tmp.p', 'wb')) + data2 = pickle.load(open('tmp.p', 'rb')) + assert data == data2 + from meshmode.mesh import InterPartitionAdjacencyGroup + if isinstance(data['adj'], InterPartitionAdjacencyGroup): + assert np.equal(data['adj'].neighbor_partitions, data2['adj'].neighbor_partitions).all() + assert mesh.nelements == np.sum( [new_meshes[i][0].nelements for i in range(num_parts)]), \ "part_mesh has the wrong number of elements" @@ -334,7 +345,7 @@ def mpi_test_rank_entrypoint(): if mesh_dist.is_mananger_rank(): np.random.seed(42) from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(3, order=4, n=5) for _ in range(2)] + meshes = [generate_warped_rect_mesh(3, order=4, n=4) for _ in range(2)] from meshmode.mesh.processing import merge_disjoint_meshes mesh = merge_disjoint_meshes(meshes) @@ -368,11 +379,11 @@ def mpi_test_rank_entrypoint(): # {{{ MPI test pytest entrypoint @pytest.mark.mpi -@pytest.mark.parametrize("num_partitions", [3, 6]) 
+@pytest.mark.parametrize("num_partitions", [3, 4]) def test_mpi_communication(num_partitions): pytest.importorskip("mpi4py") - num_ranks = num_partitions + 1 + num_ranks = num_partitions from subprocess import check_call import sys newenv = os.environ.copy() -- GitLab From 58972ac016c1f08b70f88f28abebded03b351971 Mon Sep 17 00:00:00 2001 From: Ellis Date: Mon, 16 Oct 2017 13:33:49 -0500 Subject: [PATCH 245/266] Improve testing for mpi communication --- meshmode/distributed.py | 4 ++- test/test_partition.py | 60 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 62 insertions(+), 2 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 21c09d4f..a71cf699 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -108,13 +108,14 @@ class MPIBoundaryCommunicator(object): indices = adj[None].neighbor_partitions >= 0 self.connected_parts = self.connected_parts.union( adj[None].neighbor_partitions[indices]) + self.connected_parts = list(self.connected_parts) assert self.i_local_part not in self.connected_parts from meshmode.discretization.connection import make_face_restriction from meshmode.mesh import BTAG_PARTITION self.local_bdry_conns = {} - for i_remote_part in list(self.connected_parts): + for i_remote_part in self.connected_parts: bdry_conn = make_face_restriction(part_discr, bdry_group_factory, BTAG_PARTITION(i_remote_part)) @@ -198,6 +199,7 @@ class MPIBoundaryCommunicator(object): # Connect local_mesh to remote_mesh from meshmode.discretization.connection import make_partition_connection + # FIXME: rename to local_to_remote_bdry_conns?? 
self.remote_to_local_bdry_conns[i_remote_part] = \ make_partition_connection( self.local_bdry_conns[i_remote_part], diff --git a/test/test_partition.py b/test/test_partition.py index 987d9a93..6e1782b3 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -178,6 +178,11 @@ def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, check_connection(remote_part_conn) true_local_points = f(local_bdry.nodes()[0].with_queue(queue)) + s = true_local_points.shape + d = true_local_points.dtype + a = cl.array.Array(queue, shape=s, dtype=d) + a[:] = true_local_points.get() + true_local_points = a remote_points = local_part_conn(queue, true_local_points) local_points = remote_part_conn(queue, remote_points) @@ -369,7 +374,60 @@ def mpi_test_rank_entrypoint(): bdry_comm = MPIBoundaryCommunicator(comm, queue, vol_discr, group_factory) bdry_comm.check() - # FIXME: Actually test communicating data with this + def f(x): + return 0.1*cl.clmath.sin(30.*x) + + TAG_A = 123 + TAG_B = 234 + send_reqs = [] + for i_remote_part in bdry_comm.connected_parts: + conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] + bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr + bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + + true_local_f = f(bdry_x) + remote_f = conn(queue, true_local_f) + + data = {'remote_f': remote_f.get(queue=queue), + 'shape': remote_f.shape, + 'dtype': remote_f.dtype} + send_reqs.append(comm.isend(data, dest=i_remote_part, tag=TAG_A)) + + remote_to_local_f_data = {} + for i_remote_part in bdry_comm.connected_parts: + remote_to_local_f_data[i_remote_part] = comm.recv(source=i_remote_part, tag=TAG_A) + + for req in send_reqs: + req.wait() + + send_reqs = [] + for i_remote_part in bdry_comm.connected_parts: + conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] + shape = remote_to_local_f_data[i_remote_part]['shape'] + dtype = remote_to_local_f_data[i_remote_part]['dtype'] + local_f_np = 
remote_to_local_f_data[i_remote_part]['remote_f'] + local_f_cl = cl.array.Array(queue, shape=shape, dtype=dtype) + local_f_cl[:] = local_f_np + remote_f = conn(queue, local_f_cl).get(queue=queue) + + send_reqs.append(comm.isend(remote_f, dest=i_remote_part, tag=TAG_B)) + + local_f_data = {} + for i_remote_part in bdry_comm.connected_parts: + local_f_data[i_remote_part] = comm.recv(source=i_remote_part, tag=TAG_B) + + for req in send_reqs: + req.wait() + + for i_remote_part in bdry_comm.connected_parts: + bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr + bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + + true_local_f = f(bdry_x).get(queue=queue) + local_f = local_f_data[i_remote_part] + + err = la.norm(true_local_f - local_f, np.inf) + assert err < 1e-13, "Error (%f) too large" % err logger.debug("Rank %d exiting", rank) -- GitLab From 020ce71bb1d106f0b5a649c65ff0a6c34925e852 Mon Sep 17 00:00:00 2001 From: Ellis Date: Mon, 16 Oct 2017 14:25:34 -0500 Subject: [PATCH 246/266] Clean up mpi communication tests --- meshmode/distributed.py | 72 ++++++++++++++++++++++++++++++++++++++--- test/test_partition.py | 67 +------------------------------------- 2 files changed, 68 insertions(+), 71 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index a71cf699..25b3a538 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -34,7 +34,9 @@ logger = logging.getLogger(__name__) TAG_BASE = 83411 TAG_DISTRIBUTE_MESHES = TAG_BASE + 1 -TAG_SEND_MESH = TAG_BASE + 2 +TAG_SEND_BOUNDARY = TAG_BASE + 2 +TAG_SEND_REMOTE_NODES = TAG_BASE + 3 +TAG_SEND_LOCAL_NODES = TAG_BASE + 4 # {{{ mesh distributor @@ -145,7 +147,7 @@ class MPIBoundaryCommunicator(object): 'to_elem_faces': local_to_elem_faces, 'to_elem_indices': local_to_elem_indices} send_reqs.append(self.mpi_comm.isend( - local_data, dest=i_remote_part, tag=TAG_SEND_MESH)) + local_data, dest=i_remote_part, tag=TAG_SEND_BOUNDARY)) return send_reqs @@ -159,14 +161,14 @@ class 
MPIBoundaryCommunicator(object): for i_remote_part in self.connected_parts: status = MPI.Status() self.mpi_comm.probe( - source=i_remote_part, tag=TAG_SEND_MESH, status=status) + source=i_remote_part, tag=TAG_SEND_BOUNDARY, status=status) remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) recv_reqs = {} for i_remote_part, buf in remote_buf.items(): recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, source=i_remote_part, - tag=TAG_SEND_MESH) + tag=TAG_SEND_BOUNDARY) remote_data = {} total_bytes_recvd = 0 @@ -199,7 +201,6 @@ class MPIBoundaryCommunicator(object): # Connect local_mesh to remote_mesh from meshmode.discretization.connection import make_partition_connection - # FIXME: rename to local_to_remote_bdry_conns?? self.remote_to_local_bdry_conns[i_remote_part] = \ make_partition_connection( self.local_bdry_conns[i_remote_part], @@ -226,6 +227,67 @@ class MPIBoundaryCommunicator(object): for i, conn in six.iteritems(self.remote_to_local_bdry_conns): check_connection(conn) + def test_data_transfer(self, queue): + import pyopencl as cl + + def f(x): + return 0.1*cl.clmath.sin(30.*x) + + send_reqs = [] + for i_remote_part in self.connected_parts: + conn = self.remote_to_local_bdry_conns[i_remote_part] + bdry_discr = self.local_bdry_conns[i_remote_part].to_discr + bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + + true_local_f = f(bdry_x) + remote_f = conn(queue, true_local_f) + + send_reqs.append(self.mpi_comm.isend(remote_f.get(queue=queue), + dest=i_remote_part, + tag=TAG_SEND_REMOTE_NODES)) + + remote_to_local_f_data = {} + for i_remote_part in self.connected_parts: + remote_to_local_f_data[i_remote_part] =\ + self.mpi_comm.recv(source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES) + + for req in send_reqs: + req.wait() + + send_reqs = [] + for i_remote_part in self.connected_parts: + conn = self.remote_to_local_bdry_conns[i_remote_part] + local_f_np = remote_to_local_f_data[i_remote_part] + local_f_cl = cl.array.Array(queue, + 
shape=local_f_np.shape, + dtype=local_f_np.dtype) + local_f_cl.set(local_f_np) + remote_f = conn(queue, local_f_cl).get(queue=queue) + + send_reqs.append(self.mpi_comm.isend(remote_f, + dest=i_remote_part, + tag=TAG_SEND_LOCAL_NODES)) + + local_f_data = {} + for i_remote_part in self.connected_parts: + local_f_data[i_remote_part] = self.mpi_comm.recv(source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES) + + for req in send_reqs: + req.wait() + + for i_remote_part in self.connected_parts: + bdry_discr = self.local_bdry_conns[i_remote_part].to_discr + bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + + true_local_f = f(bdry_x).get(queue=queue) + local_f = local_f_data[i_remote_part] + + from numpy.linalg import norm + err = norm(true_local_f - local_f, np.inf) + assert err < 1e-13, "Error (%f) too large" % err + # }}} diff --git a/test/test_partition.py b/test/test_partition.py index 6e1782b3..4ae9c741 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -228,17 +228,6 @@ def test_partition_mesh(num_parts, num_meshes, dim, scramble_partitions=False): new_meshes = [ partition_mesh(mesh, part_per_element, i) for i in range(num_parts)] - import pickle - for m, _ in new_meshes: - for adj in m.facial_adjacency_groups: - data = {'adj': adj[None]} - pickle.dump(data, open('tmp.p', 'wb')) - data2 = pickle.load(open('tmp.p', 'rb')) - assert data == data2 - from meshmode.mesh import InterPartitionAdjacencyGroup - if isinstance(data['adj'], InterPartitionAdjacencyGroup): - assert np.equal(data['adj'].neighbor_partitions, data2['adj'].neighbor_partitions).all() - assert mesh.nelements == np.sum( [new_meshes[i][0].nelements for i in range(num_parts)]), \ "part_mesh has the wrong number of elements" @@ -373,61 +362,7 @@ def mpi_test_rank_entrypoint(): bdry_comm = MPIBoundaryCommunicator(comm, queue, vol_discr, group_factory) bdry_comm.check() - - def f(x): - return 0.1*cl.clmath.sin(30.*x) - - TAG_A = 123 - TAG_B = 234 - send_reqs = [] - for i_remote_part in 
bdry_comm.connected_parts: - conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] - bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr - bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) - - true_local_f = f(bdry_x) - remote_f = conn(queue, true_local_f) - - data = {'remote_f': remote_f.get(queue=queue), - 'shape': remote_f.shape, - 'dtype': remote_f.dtype} - send_reqs.append(comm.isend(data, dest=i_remote_part, tag=TAG_A)) - - remote_to_local_f_data = {} - for i_remote_part in bdry_comm.connected_parts: - remote_to_local_f_data[i_remote_part] = comm.recv(source=i_remote_part, tag=TAG_A) - - for req in send_reqs: - req.wait() - - send_reqs = [] - for i_remote_part in bdry_comm.connected_parts: - conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] - shape = remote_to_local_f_data[i_remote_part]['shape'] - dtype = remote_to_local_f_data[i_remote_part]['dtype'] - local_f_np = remote_to_local_f_data[i_remote_part]['remote_f'] - local_f_cl = cl.array.Array(queue, shape=shape, dtype=dtype) - local_f_cl[:] = local_f_np - remote_f = conn(queue, local_f_cl).get(queue=queue) - - send_reqs.append(comm.isend(remote_f, dest=i_remote_part, tag=TAG_B)) - - local_f_data = {} - for i_remote_part in bdry_comm.connected_parts: - local_f_data[i_remote_part] = comm.recv(source=i_remote_part, tag=TAG_B) - - for req in send_reqs: - req.wait() - - for i_remote_part in bdry_comm.connected_parts: - bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr - bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) - - true_local_f = f(bdry_x).get(queue=queue) - local_f = local_f_data[i_remote_part] - - err = la.norm(true_local_f - local_f, np.inf) - assert err < 1e-13, "Error (%f) too large" % err + bdry_comm.test_data_transfer(queue) logger.debug("Rank %d exiting", rank) -- GitLab From 8d75b365d9ce14c74be7546bca61e22ef57ee4b7 Mon Sep 17 00:00:00 2001 From: Ellis Date: Tue, 17 Oct 2017 19:55:55 -0500 Subject: [PATCH 247/266] Make nonblocking receives --- 
meshmode/distributed.py | 73 +++++++++++++++++++++++++++++++---------- 1 file changed, 56 insertions(+), 17 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 25b3a538..42ac8931 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -28,6 +28,7 @@ THE SOFTWARE. import six import numpy as np +from mpi4py import MPI import logging logger = logging.getLogger(__name__) @@ -76,8 +77,6 @@ class MPIMeshDistributor(object): return local_part def receive_mesh_part(self): - from mpi4py import MPI - mpi_comm = self.mpi_comm rank = mpi_comm.Get_rank() @@ -107,19 +106,18 @@ class MPIBoundaryCommunicator(object): self.connected_parts = set() for adj in part_discr.mesh.facial_adjacency_groups: if isinstance(adj[None], InterPartitionAdjacencyGroup): - indices = adj[None].neighbor_partitions >= 0 + indices = (adj[None].neighbor_partitions >= 0) self.connected_parts = self.connected_parts.union( adj[None].neighbor_partitions[indices]) - self.connected_parts = list(self.connected_parts) assert self.i_local_part not in self.connected_parts from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION self.local_bdry_conns = {} for i_remote_part in self.connected_parts: - bdry_conn = make_face_restriction(part_discr, bdry_group_factory, - BTAG_PARTITION(i_remote_part)) + bdry_conn = make_face_restriction(part_discr, + bdry_group_factory, + BTAG_PARTITION(i_remote_part)) # Assert that everything in self.connected_parts is truly connected assert bdry_conn.to_discr.nnodes > 0 @@ -155,8 +153,6 @@ class MPIBoundaryCommunicator(object): rank = self.mpi_comm.Get_rank() i_local_part = rank - from mpi4py import MPI - remote_buf = {} for i_remote_part in self.connected_parts: status = MPI.Status() @@ -233,6 +229,24 @@ class MPIBoundaryCommunicator(object): def f(x): return 0.1*cl.clmath.sin(30.*x) + ''' + Here is a simplified example of what happens from + the point of view of the local rank. 
+ + Local rank: + 1. Transfer local points from local boundary to remote boundary + to get remote points. + 2. Send remote points to remote rank. + Remote rank: + 3. Receive remote points from local rank. + 4. Transfer remote points from remote boundary to local boundary + to get local points. + 5. Send local points to local rank. + Local rank: + 6. Recieve local points from remote rank. + 7. Check if local points are the same as the original local points. + ''' + send_reqs = [] for i_remote_part in self.connected_parts: conn = self.remote_to_local_bdry_conns[i_remote_part] @@ -246,11 +260,23 @@ class MPIBoundaryCommunicator(object): dest=i_remote_part, tag=TAG_SEND_REMOTE_NODES)) - remote_to_local_f_data = {} + buffers = {} for i_remote_part in self.connected_parts: - remote_to_local_f_data[i_remote_part] =\ - self.mpi_comm.recv(source=i_remote_part, - tag=TAG_SEND_REMOTE_NODES) + status = MPI.Status() + self.mpi_comm.probe(source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES, + status=status) + buffers[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in buffers.items(): + recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES) + remote_to_local_f_data = {} + for i_remote_part, req in recv_reqs.items(): + remote_to_local_f_data[i_remote_part] = req.wait() + buffers[i_remote_part] = None # free buffer for req in send_reqs: req.wait() @@ -269,10 +295,23 @@ class MPIBoundaryCommunicator(object): dest=i_remote_part, tag=TAG_SEND_LOCAL_NODES)) - local_f_data = {} + buffers = {} for i_remote_part in self.connected_parts: - local_f_data[i_remote_part] = self.mpi_comm.recv(source=i_remote_part, - tag=TAG_SEND_LOCAL_NODES) + status = MPI.Status() + self.mpi_comm.probe(source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES, + status=status) + buffers[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in buffers.items(): + 
recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES) + local_f_data = {} + for i_remote_part, req in recv_reqs.items(): + local_f_data[i_remote_part] = req.wait() + buffers[i_remote_part] = None # free buffer for req in send_reqs: req.wait() @@ -286,7 +325,7 @@ class MPIBoundaryCommunicator(object): from numpy.linalg import norm err = norm(true_local_f - local_f, np.inf) - assert err < 1e-13, "Error (%f) too large" % err + assert err < 1e-13, "Error = %f is too large" % err # }}} -- GitLab From 8d62c316c3f3151bd71aecda96b4c3a7e8085b5c Mon Sep 17 00:00:00 2001 From: Ellis Date: Tue, 24 Oct 2017 19:51:16 -0500 Subject: [PATCH 248/266] Add missing attribute in InterPartitionAdjacency --- meshmode/mesh/__init__.py | 4 ++++ meshmode/mesh/processing.py | 1 + 2 files changed, 5 insertions(+) diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py index 8e6e18b9..1147afb1 100644 --- a/meshmode/mesh/__init__.py +++ b/meshmode/mesh/__init__.py @@ -495,6 +495,10 @@ class InterPartitionAdjacencyGroup(FacialAdjacencyGroup): The group number of this group. + .. attribute:: ineighbor_group + + *None* for boundary faces. + .. attribute:: elements Group-local element numbers. 
diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index d2d2b14c..9ecace44 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -247,6 +247,7 @@ def partition_mesh(mesh, part_per_element, part_nr): element_faces=faces, neighbors=neighbors, igroup=bdry.igroup, + ineighbor_group=None, neighbor_partitions=n_parts, global_neighbors=global_n_elems, neighbor_faces=n_faces) -- GitLab From 78d6dcca1e63b31bfc73859f3effa58d7b575a57 Mon Sep 17 00:00:00 2001 From: Ellis Date: Tue, 24 Oct 2017 20:27:14 -0500 Subject: [PATCH 249/266] Remove useless import --- test/test_partition.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 4ae9c741..e0e25dfb 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -350,8 +350,6 @@ def mpi_test_rank_entrypoint(): else: local_mesh = mesh_dist.receive_mesh_part() - from meshmode.discretization.poly_element\ - import PolynomialWarpAndBlendGroupFactory group_factory = PolynomialWarpAndBlendGroupFactory(4) import pyopencl as cl cl_ctx = cl.create_some_context() -- GitLab From c306215c9990e60d802c013a4d9b101e7ae81b00 Mon Sep 17 00:00:00 2001 From: Ellis Date: Tue, 24 Oct 2017 20:31:18 -0500 Subject: [PATCH 250/266] remove useless import --- test/test_partition.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/test_partition.py b/test/test_partition.py index e0e25dfb..15b0dbb7 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -351,7 +351,6 @@ def mpi_test_rank_entrypoint(): local_mesh = mesh_dist.receive_mesh_part() group_factory = PolynomialWarpAndBlendGroupFactory(4) - import pyopencl as cl cl_ctx = cl.create_some_context() queue = cl.CommandQueue(cl_ctx) -- GitLab From 6a42f22d306e047ce973b8a455d42deec5e6aaeb Mon Sep 17 00:00:00 2001 From: Ellis Date: Wed, 25 Oct 2017 15:25:36 -0500 Subject: [PATCH 251/266] Change variable name for flake8 --- meshmode/mesh/processing.py | 2 +- 1 file changed, 1 insertion(+), 
1 deletion(-) diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py index 9ecace44..d5c71c0e 100644 --- a/meshmode/mesh/processing.py +++ b/meshmode/mesh/processing.py @@ -61,7 +61,7 @@ def find_group_indices(groups, meshwide_elems): # {{{ partition_mesh -def partition_mesh(mesh, part_per_element, part_nr): +def partition_mesh(mesh, part_per_element, part_num): """ :arg mesh: A :class:`meshmode.mesh.Mesh` to be partitioned. :arg part_per_element: A :class:`numpy.ndarray` containing one -- GitLab From 8c1a47a1c72a886afa8f9c2d02c03d8ef61f3bb9 Mon Sep 17 00:00:00 2001 From: Ellis Date: Thu, 2 Nov 2017 12:15:43 -0500 Subject: [PATCH 252/266] Fix documentation --- meshmode/discretization/connection/opposite_face.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 106c6350..cfd2b1ac 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -427,7 +427,7 @@ def make_partition_connection(local_bdry_conn, i_local_part, group `igrp`. :returns: A :class:`DirectDiscretizationConnection` that performs data - exchange across faces from partition `i_local_part` to the remote partition. + exchange across faces from the remote partition to partition `i_local_part`. .. 
versionadded:: 2017.1 -- GitLab From 8239647cd316e912d4c45820fc721d726484d20a Mon Sep 17 00:00:00 2001 From: Ellis Date: Thu, 2 Nov 2017 12:16:05 -0500 Subject: [PATCH 253/266] Add documentation --- meshmode/distributed.py | 46 ++++++++++++++++++++++++++++++++++++++++- test/test_partition.py | 2 +- 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 42ac8931..2adc1c14 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -39,11 +39,24 @@ TAG_SEND_BOUNDARY = TAG_BASE + 2 TAG_SEND_REMOTE_NODES = TAG_BASE + 3 TAG_SEND_LOCAL_NODES = TAG_BASE + 4 +__doc__ = """ +.. autoclass:: MPIMeshDistributor +.. autoclass:: MPIBoundaryCommunicator +""" + # {{{ mesh distributor class MPIMeshDistributor(object): + """ + .. automethod:: is_mananger_rank + .. automethod:: send_mesh_parts + .. automethod:: recv_mesh_part + """ def __init__(self, mpi_comm, manager_rank=0): + """ + :arg mpi_comm: A :class:`MPI.Intracomm` + """ self.mpi_comm = mpi_comm self.manager_rank = manager_rank @@ -51,6 +64,16 @@ class MPIMeshDistributor(object): return self.mpi_comm.Get_rank() == self.manager_rank def send_mesh_parts(self, mesh, part_per_element, num_parts): + """ + :arg mesh: A :class:`Mesh` to distribute to other ranks. + :arg part_per_element: A :class:`numpy.ndarray` containing one + integer per element of *mesh* indicating which part of the + partitioned mesh the element is to become a part of. + :arg num_parts: The number of partitions to divide the mesh into. + + Sends each partition to a different rank. + Returns one partition that was not sent to any other rank. + """ mpi_comm = self.mpi_comm rank = mpi_comm.Get_rank() assert num_parts <= mpi_comm.Get_size() @@ -77,9 +100,14 @@ class MPIMeshDistributor(object): return local_part def receive_mesh_part(self): + """ + Returns the mesh sent by the manager rank. 
+ """ mpi_comm = self.mpi_comm rank = mpi_comm.Get_rank() + assert not self.is_mananger_rank(), "Manager rank cannot recieve mesh" + status = MPI.Status() result = self.mpi_comm.recv( source=self.manager_rank, tag=TAG_DISTRIBUTE_MESHES, @@ -94,7 +122,23 @@ class MPIMeshDistributor(object): # {{{ boundary communicator class MPIBoundaryCommunicator(object): + """ + .. attribute:: remote_to_local_bdry_conns + + Maps rank numbers to :class:`DirectDiscretizationConnection`. + + ``remote_to_local_bdry_conns[i_remote_part]`` gives the connection + that performs data exchange across faces from partition `i_remote_part` + to the local mesh. + """ def __init__(self, mpi_comm, queue, part_discr, bdry_group_factory): + """ + :arg mpi_comm: A :class:`MPI.Intracomm` + :arg queue: + :arg part_discr: A :class:`meshmode.Discretization` of the local mesh + to perform boundary communication on. + :arg bdry_group_factory: + """ self.mpi_comm = mpi_comm self.part_discr = part_discr @@ -223,7 +267,7 @@ class MPIBoundaryCommunicator(object): for i, conn in six.iteritems(self.remote_to_local_bdry_conns): check_connection(conn) - def test_data_transfer(self, queue): + def _test_data_transfer(self, queue): import pyopencl as cl def f(x): diff --git a/test/test_partition.py b/test/test_partition.py index 15b0dbb7..3fe56060 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -359,7 +359,7 @@ def mpi_test_rank_entrypoint(): bdry_comm = MPIBoundaryCommunicator(comm, queue, vol_discr, group_factory) bdry_comm.check() - bdry_comm.test_data_transfer(queue) + bdry_comm._test_data_transfer(queue) logger.debug("Rank %d exiting", rank) -- GitLab From baf075a8a0240d5b6aee80eab2ed79b19bf7b0ac Mon Sep 17 00:00:00 2001 From: Ellis Date: Sun, 5 Nov 2017 18:32:49 -0600 Subject: [PATCH 254/266] Move data transfer test function to test folder --- meshmode/distributed.py | 107 +------------------------------------ test/test_partition.py | 113 +++++++++++++++++++++++++++++++++++++++- 2 files 
changed, 112 insertions(+), 108 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 2adc1c14..9b160a6a 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -36,8 +36,6 @@ logger = logging.getLogger(__name__) TAG_BASE = 83411 TAG_DISTRIBUTE_MESHES = TAG_BASE + 1 TAG_SEND_BOUNDARY = TAG_BASE + 2 -TAG_SEND_REMOTE_NODES = TAG_BASE + 3 -TAG_SEND_LOCAL_NODES = TAG_BASE + 4 __doc__ = """ .. autoclass:: MPIMeshDistributor @@ -132,6 +130,7 @@ class MPIBoundaryCommunicator(object): to the local mesh. """ def __init__(self, mpi_comm, queue, part_discr, bdry_group_factory): + # FIXME: Refactor so that we can specify which rank we want to recieve from """ :arg mpi_comm: A :class:`MPI.Intracomm` :arg queue: @@ -267,110 +266,6 @@ class MPIBoundaryCommunicator(object): for i, conn in six.iteritems(self.remote_to_local_bdry_conns): check_connection(conn) - def _test_data_transfer(self, queue): - import pyopencl as cl - - def f(x): - return 0.1*cl.clmath.sin(30.*x) - - ''' - Here is a simplified example of what happens from - the point of view of the local rank. - - Local rank: - 1. Transfer local points from local boundary to remote boundary - to get remote points. - 2. Send remote points to remote rank. - Remote rank: - 3. Receive remote points from local rank. - 4. Transfer remote points from remote boundary to local boundary - to get local points. - 5. Send local points to local rank. - Local rank: - 6. Recieve local points from remote rank. - 7. Check if local points are the same as the original local points. 
- ''' - - send_reqs = [] - for i_remote_part in self.connected_parts: - conn = self.remote_to_local_bdry_conns[i_remote_part] - bdry_discr = self.local_bdry_conns[i_remote_part].to_discr - bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) - - true_local_f = f(bdry_x) - remote_f = conn(queue, true_local_f) - - send_reqs.append(self.mpi_comm.isend(remote_f.get(queue=queue), - dest=i_remote_part, - tag=TAG_SEND_REMOTE_NODES)) - - buffers = {} - for i_remote_part in self.connected_parts: - status = MPI.Status() - self.mpi_comm.probe(source=i_remote_part, - tag=TAG_SEND_REMOTE_NODES, - status=status) - buffers[i_remote_part] = np.empty(status.count, dtype=bytes) - - recv_reqs = {} - for i_remote_part, buf in buffers.items(): - recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, - source=i_remote_part, - tag=TAG_SEND_REMOTE_NODES) - remote_to_local_f_data = {} - for i_remote_part, req in recv_reqs.items(): - remote_to_local_f_data[i_remote_part] = req.wait() - buffers[i_remote_part] = None # free buffer - - for req in send_reqs: - req.wait() - - send_reqs = [] - for i_remote_part in self.connected_parts: - conn = self.remote_to_local_bdry_conns[i_remote_part] - local_f_np = remote_to_local_f_data[i_remote_part] - local_f_cl = cl.array.Array(queue, - shape=local_f_np.shape, - dtype=local_f_np.dtype) - local_f_cl.set(local_f_np) - remote_f = conn(queue, local_f_cl).get(queue=queue) - - send_reqs.append(self.mpi_comm.isend(remote_f, - dest=i_remote_part, - tag=TAG_SEND_LOCAL_NODES)) - - buffers = {} - for i_remote_part in self.connected_parts: - status = MPI.Status() - self.mpi_comm.probe(source=i_remote_part, - tag=TAG_SEND_LOCAL_NODES, - status=status) - buffers[i_remote_part] = np.empty(status.count, dtype=bytes) - - recv_reqs = {} - for i_remote_part, buf in buffers.items(): - recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, - source=i_remote_part, - tag=TAG_SEND_LOCAL_NODES) - local_f_data = {} - for i_remote_part, req in recv_reqs.items(): - 
local_f_data[i_remote_part] = req.wait() - buffers[i_remote_part] = None # free buffer - - for req in send_reqs: - req.wait() - - for i_remote_part in self.connected_parts: - bdry_discr = self.local_bdry_conns[i_remote_part].to_discr - bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) - - true_local_f = f(bdry_x).get(queue=queue) - local_f = local_f_data[i_remote_part] - - from numpy.linalg import norm - err = norm(true_local_f - local_f, np.inf) - assert err < 1e-13, "Error = %f is too large" % err - # }}} diff --git a/test/test_partition.py b/test/test_partition.py index 3fe56060..7453ecbf 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -359,17 +359,126 @@ def mpi_test_rank_entrypoint(): bdry_comm = MPIBoundaryCommunicator(comm, queue, vol_discr, group_factory) bdry_comm.check() - bdry_comm._test_data_transfer(queue) + _test_data_transfer(bdry_comm, queue) logger.debug("Rank %d exiting", rank) + +def _test_data_transfer(bdry_comm, queue): + from mpi4py import MPI + # Is there a smart way of choosing this number? + TAG_BASE = 83411 + TAG_SEND_REMOTE_NODES = TAG_BASE + 3 + TAG_SEND_LOCAL_NODES = TAG_BASE + 4 + + def f(x): + return 0.1*cl.clmath.sin(30.*x) + + ''' + Here is a simplified example of what happens from + the point of view of the local rank. + + Local rank: + 1. Transfer local points from local boundary to remote boundary + to get remote points. + 2. Send remote points to remote rank. + Remote rank: + 3. Receive remote points from local rank. + 4. Transfer remote points from remote boundary to local boundary + to get local points. + 5. Send local points to local rank. + Local rank: + 6. Recieve local points from remote rank. + 7. Check if local points are the same as the original local points. 
+ ''' + + send_reqs = [] + for i_remote_part in bdry_comm.connected_parts: + conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] + bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr + bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + + true_local_f = f(bdry_x) + remote_f = conn(queue, true_local_f) + + send_reqs.append(bdry_comm.mpi_comm.isend(remote_f.get(queue=queue), + dest=i_remote_part, + tag=TAG_SEND_REMOTE_NODES)) + + buffers = {} + for i_remote_part in bdry_comm.connected_parts: + status = MPI.Status() + bdry_comm.mpi_comm.probe(source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES, + status=status) + buffers[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in buffers.items(): + recv_reqs[i_remote_part] = bdry_comm.mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES) + remote_to_local_f_data = {} + for i_remote_part, req in recv_reqs.items(): + remote_to_local_f_data[i_remote_part] = req.wait() + buffers[i_remote_part] = None # free buffer + + for req in send_reqs: + req.wait() + + send_reqs = [] + for i_remote_part in bdry_comm.connected_parts: + conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] + local_f_np = remote_to_local_f_data[i_remote_part] + local_f_cl = cl.array.Array(queue, + shape=local_f_np.shape, + dtype=local_f_np.dtype) + local_f_cl.set(local_f_np) + remote_f = conn(queue, local_f_cl).get(queue=queue) + + send_reqs.append(bdry_comm.mpi_comm.isend(remote_f, + dest=i_remote_part, + tag=TAG_SEND_LOCAL_NODES)) + + buffers = {} + for i_remote_part in bdry_comm.connected_parts: + status = MPI.Status() + bdry_comm.mpi_comm.probe(source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES, + status=status) + buffers[i_remote_part] = np.empty(status.count, dtype=bytes) + + recv_reqs = {} + for i_remote_part, buf in buffers.items(): + recv_reqs[i_remote_part] = bdry_comm.mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES) + local_f_data 
= {} + for i_remote_part, req in recv_reqs.items(): + local_f_data[i_remote_part] = req.wait() + buffers[i_remote_part] = None # free buffer + + for req in send_reqs: + req.wait() + + for i_remote_part in bdry_comm.connected_parts: + bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr + bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + + true_local_f = f(bdry_x).get(queue=queue) + local_f = local_f_data[i_remote_part] + + from numpy.linalg import norm + err = norm(true_local_f - local_f, np.inf) + assert err < 1e-13, "Error = %f is too large" % err + # }}} # {{{ MPI test pytest entrypoint @pytest.mark.mpi -@pytest.mark.parametrize("num_partitions", [3, 4]) +@pytest.mark.parametrize("num_partitions", [3, 6]) def test_mpi_communication(num_partitions): pytest.importorskip("mpi4py") -- GitLab From 0127a9f36c5ea6f8b1fe90337e9c0b7b3f769c8e Mon Sep 17 00:00:00 2001 From: Ellis Date: Sun, 5 Nov 2017 19:25:00 -0600 Subject: [PATCH 255/266] quiet flake8 --- test/test_partition.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 7453ecbf..2a1a4d19 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -46,6 +46,12 @@ import os import logging logger = logging.getLogger(__name__) +# Is there a smart way of choosing this number? +# Currenly it is the same as the base from MPIBoundaryCommunicator +TAG_BASE = 83411 +TAG_SEND_REMOTE_NODES = TAG_BASE + 3 +TAG_SEND_LOCAL_NODES = TAG_BASE + 4 + # {{{ partition_interpolation @@ -366,10 +372,6 @@ def mpi_test_rank_entrypoint(): def _test_data_transfer(bdry_comm, queue): from mpi4py import MPI - # Is there a smart way of choosing this number? 
- TAG_BASE = 83411 - TAG_SEND_REMOTE_NODES = TAG_BASE + 3 - TAG_SEND_LOCAL_NODES = TAG_BASE + 4 def f(x): return 0.1*cl.clmath.sin(30.*x) -- GitLab From afd5ccf8639a1dbe10ce1affb914a0084629d38b Mon Sep 17 00:00:00 2001 From: Ellis Date: Fri, 17 Nov 2017 20:45:42 -0600 Subject: [PATCH 256/266] MPIBoundaryCommunicator now works per rank and returns futures --- meshmode/distributed.py | 236 ++++++++++++++++++---------------------- test/test_partition.py | 106 +++++++++++------- 2 files changed, 171 insertions(+), 171 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 9b160a6a..2ba18c31 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -25,8 +25,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ -import six - import numpy as np from mpi4py import MPI @@ -104,13 +102,13 @@ class MPIMeshDistributor(object): mpi_comm = self.mpi_comm rank = mpi_comm.Get_rank() - assert not self.is_mananger_rank(), "Manager rank cannot recieve mesh" + assert not self.is_mananger_rank(), "Manager rank cannot receive mesh" status = MPI.Status() result = self.mpi_comm.recv( source=self.manager_rank, tag=TAG_DISTRIBUTE_MESHES, status=status) - logger.info('rank %d: recieved local mesh (size = %d)', rank, status.count) + logger.info('rank %d: received local mesh (size = %d)', rank, status.count) return result @@ -121,152 +119,128 @@ class MPIMeshDistributor(object): class MPIBoundaryCommunicator(object): """ - .. attribute:: remote_to_local_bdry_conns - - Maps rank numbers to :class:`DirectDiscretizationConnection`. - - ``remote_to_local_bdry_conns[i_remote_part]`` gives the connection - that performs data exchange across faces from partition `i_remote_part` - to the local mesh. + .. automethod:: __call__ + .. 
automethod:: is_ready """ - def __init__(self, mpi_comm, queue, part_discr, bdry_group_factory): - # FIXME: Refactor so that we can specify which rank we want to recieve from + def __init__(self, mpi_comm, queue, part_discr, bdry_grp_factory, i_remote_part): """ :arg mpi_comm: A :class:`MPI.Intracomm` :arg queue: :arg part_discr: A :class:`meshmode.Discretization` of the local mesh to perform boundary communication on. - :arg bdry_group_factory: + :arg bdry_grp_factory: + :arg i_remote_part: The part number of the remote partition """ self.mpi_comm = mpi_comm + self.queue = queue self.part_discr = part_discr - self.i_local_part = mpi_comm.Get_rank() - - self.bdry_group_factory = bdry_group_factory - - from meshmode.mesh import InterPartitionAdjacencyGroup - self.connected_parts = set() - for adj in part_discr.mesh.facial_adjacency_groups: - if isinstance(adj[None], InterPartitionAdjacencyGroup): - indices = (adj[None].neighbor_partitions >= 0) - self.connected_parts = self.connected_parts.union( - adj[None].neighbor_partitions[indices]) - assert self.i_local_part not in self.connected_parts + self.i_remote_part = i_remote_part + self.bdry_grp_factory = bdry_grp_factory from meshmode.discretization.connection import make_face_restriction from meshmode.mesh import BTAG_PARTITION - self.local_bdry_conns = {} - for i_remote_part in self.connected_parts: - bdry_conn = make_face_restriction(part_discr, - bdry_group_factory, - BTAG_PARTITION(i_remote_part)) - - # Assert that everything in self.connected_parts is truly connected - assert bdry_conn.to_discr.nnodes > 0 - self.local_bdry_conns[i_remote_part] = bdry_conn - - self._setup(queue) - - def _post_boundary_data_sends(self, queue): - send_reqs = [] - for i_remote_part in self.connected_parts: - local_bdry = self.local_bdry_conns[i_remote_part].to_discr - local_mesh = self.local_bdry_conns[i_remote_part].from_discr.mesh - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in 
range(len(local_mesh.groups))] - local_batches = [self.local_bdry_conns[i_remote_part].groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - local_data = {'bdry_mesh': local_bdry.mesh, - 'adj': local_adj_groups, - 'to_elem_faces': local_to_elem_faces, - 'to_elem_indices': local_to_elem_indices} - send_reqs.append(self.mpi_comm.isend( - local_data, dest=i_remote_part, tag=TAG_SEND_BOUNDARY)) - - return send_reqs - - def _receive_boundary_data(self, queue): - rank = self.mpi_comm.Get_rank() - i_local_part = rank - - remote_buf = {} - for i_remote_part in self.connected_parts: - status = MPI.Status() - self.mpi_comm.probe( - source=i_remote_part, tag=TAG_SEND_BOUNDARY, status=status) - remote_buf[i_remote_part] = np.empty(status.count, dtype=bytes) - - recv_reqs = {} - for i_remote_part, buf in remote_buf.items(): - recv_reqs[i_remote_part] = self.mpi_comm.irecv(buf=buf, - source=i_remote_part, - tag=TAG_SEND_BOUNDARY) - - remote_data = {} - total_bytes_recvd = 0 - for i_remote_part, req in recv_reqs.items(): - status = MPI.Status() - remote_data[i_remote_part] = req.wait(status=status) + self.local_bdry_conn = make_face_restriction(part_discr, + bdry_grp_factory, + BTAG_PARTITION(i_remote_part)) + self._setup() + self.remote_data = None + + def _setup(self): + logger.info("bdry comm rank %d send begin", self.i_local_part) + self.send_req = self._post_send_boundary_data() + self.recv_req = self._post_recv_boundary_data() + + def _post_send_boundary_data(self): + local_bdry = self.local_bdry_conn.to_discr + local_mesh = self.local_bdry_conn.from_discr.mesh + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [self.local_bdry_conn.groups[i].batches 
+ for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=self.queue) + for batch in grp_batches] + for grp_batches in local_batches] + + local_data = {'bdry_mesh': local_bdry.mesh, + 'adj': local_adj_groups, + 'to_elem_faces': local_to_elem_faces, + 'to_elem_indices': local_to_elem_indices} + return self.mpi_comm.isend(local_data, + dest=self.i_remote_part, + tag=TAG_SEND_BOUNDARY) + + def _post_recv_boundary_data(self): + status = MPI.Status() + self.mpi_comm.probe(source=self.i_remote_part, + tag=TAG_SEND_BOUNDARY, status=status) + return self.mpi_comm.irecv(buf=np.empty(status.count, dtype=bytes), + source=self.i_remote_part, + tag=TAG_SEND_BOUNDARY) - # Free the buffer - remote_buf[i_remote_part] = None + def __call__(self): + """ + Returns the tuple (`remote_to_local_bdry_conn`, []) + where `remote_to_local_bdry_conn` is a + :class:`DirectDiscretizationConnection` that gives the connection that + performs data exchange across faces from partition `i_remote_part` to the + local mesh. 
+ """ + if self.remote_data is None: + status = MPI.Status() + self.remote_data = self.recv_req.wait(status=status) logger.debug('rank %d: Received rank %d data (%d bytes)', - rank, i_remote_part, status.count) - - total_bytes_recvd += status.count - - logger.debug('rank %d: recieved %d bytes in total', rank, total_bytes_recvd) - - self.remote_to_local_bdry_conns = {} + self.i_local_part, self.i_remote_part, status.count) from meshmode.discretization import Discretization - - for i_remote_part, data in remote_data.items(): - remote_bdry_mesh = data['bdry_mesh'] - remote_bdry = Discretization( - queue.context, - remote_bdry_mesh, - self.bdry_group_factory) - remote_adj_groups = data['adj'] - remote_to_elem_faces = data['to_elem_faces'] - remote_to_elem_indices = data['to_elem_indices'] - - # Connect local_mesh to remote_mesh - from meshmode.discretization.connection import make_partition_connection - self.remote_to_local_bdry_conns[i_remote_part] = \ - make_partition_connection( - self.local_bdry_conns[i_remote_part], - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - - def _setup(self, queue): - logger.info("bdry comm rank %d send begin", self.mpi_comm.Get_rank()) - - send_reqs = self._post_boundary_data_sends(queue) - self._receive_boundary_data(queue) - - for req in send_reqs: - req.wait() - - logger.info("bdry comm rank %d send completed", self.mpi_comm.Get_rank()) - - def check(self): - from meshmode.discretization.connection import check_connection - - for i, conn in six.iteritems(self.remote_to_local_bdry_conns): - check_connection(conn) + remote_bdry_mesh = self.remote_data['bdry_mesh'] + remote_bdry = Discretization(self.queue.context, remote_bdry_mesh, + self.bdry_grp_factory) + remote_adj_groups = self.remote_data['adj'] + remote_to_elem_faces = self.remote_data['to_elem_faces'] + remote_to_elem_indices = self.remote_data['to_elem_indices'] + + # Connect local_mesh to remote_mesh + from 
meshmode.discretization.connection import make_partition_connection + remote_to_local_bdry_conn = make_partition_connection(self.local_bdry_conn, + self.i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + self.send_req.wait() + return remote_to_local_bdry_conn, [] + + def is_ready(self): + """ + Returns True if the rank boundary data is ready to be received. + """ + if self.remote_data is None: + status = MPI.Status() + did_receive, self.remote_data = self.recv_req.test(status=status) + if not did_receive: + return False + logger.debug('rank %d: Received rank %d data (%d bytes)', + self.i_local_part, self.i_remote_part, status.count) + return True # }}} +def get_connected_partitions(mesh): + """ + :arg mesh: A :class:`Mesh` + Returns the set of partition numbers that are connected to `mesh` + """ + connected_parts = set() + from meshmode.mesh import InterPartitionAdjacencyGroup + for adj in mesh.facial_adjacency_groups: + if isinstance(adj[None], InterPartitionAdjacencyGroup): + indices = (adj[None].neighbor_partitions >= 0) + connected_parts = connected_parts.union( + adj[None].neighbor_partitions[indices]) + return connected_parts + # vim: foldmethod=marker diff --git a/test/test_partition.py b/test/test_partition.py index 2a1a4d19..aca511fa 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -336,11 +336,11 @@ def mpi_test_rank_entrypoint(): from meshmode.distributed import MPIMeshDistributor, MPIBoundaryCommunicator from mpi4py import MPI - comm = MPI.COMM_WORLD - rank = comm.Get_rank() - num_parts = comm.Get_size() + mpi_comm = MPI.COMM_WORLD + i_local_part = mpi_comm.Get_rank() + num_parts = mpi_comm.Get_size() - mesh_dist = MPIMeshDistributor(comm) + mesh_dist = MPIMeshDistributor(mpi_comm) if mesh_dist.is_mananger_rank(): np.random.seed(42) @@ -363,14 +363,40 @@ def mpi_test_rank_entrypoint(): from meshmode.discretization import Discretization vol_discr = Discretization(cl_ctx, local_mesh, 
group_factory) - bdry_comm = MPIBoundaryCommunicator(comm, queue, vol_discr, group_factory) - bdry_comm.check() - _test_data_transfer(bdry_comm, queue) - - logger.debug("Rank %d exiting", rank) - - -def _test_data_transfer(bdry_comm, queue): + from meshmode.distributed import get_connected_partitions + connected_parts = get_connected_partitions(local_mesh) + assert i_local_part not in connected_parts + bdry_conn_futures = {} + local_bdry_conns = {} + for i_remote_part in connected_parts: + bdry_conn_futures[i_remote_part] = MPIBoundaryCommunicator(mpi_comm, + queue, + vol_discr, + group_factory, + i_remote_part) + local_bdry_conns[i_remote_part] =\ + bdry_conn_futures[i_remote_part].local_bdry_conn + + remote_to_local_bdry_conns = {} + from meshmode.discretization.connection import check_connection + while len(bdry_conn_futures) > 0: + for i_remote_part, future in bdry_conn_futures.items(): + if future.is_ready(): + conn, _ = bdry_conn_futures.pop(i_remote_part)() + check_connection(conn) + remote_to_local_bdry_conns[i_remote_part] = conn + break + _test_data_transfer(mpi_comm, + queue, + local_bdry_conns, + remote_to_local_bdry_conns, + connected_parts) + + logger.debug("Rank %d exiting", i_local_part) + + +def _test_data_transfer(mpi_comm, queue, local_bdry_conns, + remote_to_local_bdry_conns, connected_parts): from mpi4py import MPI def f(x): @@ -390,36 +416,36 @@ def _test_data_transfer(bdry_comm, queue): to get local points. 5. Send local points to local rank. Local rank: - 6. Recieve local points from remote rank. + 6. Receive local points from remote rank. 7. Check if local points are the same as the original local points. 
''' send_reqs = [] - for i_remote_part in bdry_comm.connected_parts: - conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] - bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr + for i_remote_part in connected_parts: + conn = remote_to_local_bdry_conns[i_remote_part] + bdry_discr = local_bdry_conns[i_remote_part].to_discr bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) true_local_f = f(bdry_x) remote_f = conn(queue, true_local_f) - send_reqs.append(bdry_comm.mpi_comm.isend(remote_f.get(queue=queue), - dest=i_remote_part, - tag=TAG_SEND_REMOTE_NODES)) + send_reqs.append(mpi_comm.isend(remote_f.get(queue=queue), + dest=i_remote_part, + tag=TAG_SEND_REMOTE_NODES)) buffers = {} - for i_remote_part in bdry_comm.connected_parts: + for i_remote_part in connected_parts: status = MPI.Status() - bdry_comm.mpi_comm.probe(source=i_remote_part, - tag=TAG_SEND_REMOTE_NODES, - status=status) + mpi_comm.probe(source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES, + status=status) buffers[i_remote_part] = np.empty(status.count, dtype=bytes) recv_reqs = {} for i_remote_part, buf in buffers.items(): - recv_reqs[i_remote_part] = bdry_comm.mpi_comm.irecv(buf=buf, - source=i_remote_part, - tag=TAG_SEND_REMOTE_NODES) + recv_reqs[i_remote_part] = mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_REMOTE_NODES) remote_to_local_f_data = {} for i_remote_part, req in recv_reqs.items(): remote_to_local_f_data[i_remote_part] = req.wait() @@ -429,8 +455,8 @@ def _test_data_transfer(bdry_comm, queue): req.wait() send_reqs = [] - for i_remote_part in bdry_comm.connected_parts: - conn = bdry_comm.remote_to_local_bdry_conns[i_remote_part] + for i_remote_part in connected_parts: + conn = remote_to_local_bdry_conns[i_remote_part] local_f_np = remote_to_local_f_data[i_remote_part] local_f_cl = cl.array.Array(queue, shape=local_f_np.shape, @@ -438,23 +464,23 @@ def _test_data_transfer(bdry_comm, queue): local_f_cl.set(local_f_np) remote_f = conn(queue, 
local_f_cl).get(queue=queue) - send_reqs.append(bdry_comm.mpi_comm.isend(remote_f, - dest=i_remote_part, - tag=TAG_SEND_LOCAL_NODES)) + send_reqs.append(mpi_comm.isend(remote_f, + dest=i_remote_part, + tag=TAG_SEND_LOCAL_NODES)) buffers = {} - for i_remote_part in bdry_comm.connected_parts: + for i_remote_part in connected_parts: status = MPI.Status() - bdry_comm.mpi_comm.probe(source=i_remote_part, - tag=TAG_SEND_LOCAL_NODES, - status=status) + mpi_comm.probe(source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES, + status=status) buffers[i_remote_part] = np.empty(status.count, dtype=bytes) recv_reqs = {} for i_remote_part, buf in buffers.items(): - recv_reqs[i_remote_part] = bdry_comm.mpi_comm.irecv(buf=buf, - source=i_remote_part, - tag=TAG_SEND_LOCAL_NODES) + recv_reqs[i_remote_part] = mpi_comm.irecv(buf=buf, + source=i_remote_part, + tag=TAG_SEND_LOCAL_NODES) local_f_data = {} for i_remote_part, req in recv_reqs.items(): local_f_data[i_remote_part] = req.wait() @@ -463,8 +489,8 @@ def _test_data_transfer(bdry_comm, queue): for req in send_reqs: req.wait() - for i_remote_part in bdry_comm.connected_parts: - bdry_discr = bdry_comm.local_bdry_conns[i_remote_part].to_discr + for i_remote_part in connected_parts: + bdry_discr = local_bdry_conns[i_remote_part].to_discr bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) true_local_f = f(bdry_x).get(queue=queue) -- GitLab From 0b0d2f2c63a300420583a28819fc98354b3f7bb4 Mon Sep 17 00:00:00 2001 From: Ellis Date: Sat, 13 Jan 2018 17:11:19 -0600 Subject: [PATCH 257/266] Discover errors --- test/test_partition.py | 160 ++++++++++++++++++++--------------------- 1 file changed, 78 insertions(+), 82 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index aca511fa..3ed25fd2 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -55,16 +55,19 @@ TAG_SEND_LOCAL_NODES = TAG_BASE + 4 # {{{ partition_interpolation + +# FIXME: Getting some warning on some of these tests. 
Need to look into this later. @pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) @pytest.mark.parametrize("num_parts", [2, 3]) @pytest.mark.parametrize("num_groups", [1, 2]) +@pytest.mark.parametrize("scramble_partitions", [False]) @pytest.mark.parametrize(("dim", "mesh_pars"), [ (2, [3, 4, 7]), (3, [3, 4]) ]) def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, - num_parts, num_groups, scramble_partitions=True): + num_parts, num_groups, scramble_partitions): np.random.seed(42) cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) @@ -79,7 +82,7 @@ def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, eoc_rec[i, j] = EOCRecorder() def f(x): - return 0.5*cl.clmath.sin(30.*x) + return 10*cl.clmath.sin(60.*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -114,92 +117,84 @@ def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, make_partition_connection, check_connection) - for i_local_part in range(num_parts): - for i_remote_part in range(num_parts): - if (i_local_part == i_remote_part - or eoc_rec[i_local_part, i_remote_part] is None): - eoc_rec[i_local_part, i_remote_part] = None - continue + for i_local_part, i_remote_part in eoc_rec.keys(): + if eoc_rec[i_local_part, i_remote_part] is None: + continue - # Mark faces within local_mesh that are connected to remote_mesh - local_bdry_conn = make_face_restriction(vol_discrs[i_local_part], - group_factory(order), - BTAG_PARTITION(i_remote_part)) + # Mark faces within local_mesh that are connected to remote_mesh + local_bdry_conn = make_face_restriction(vol_discrs[i_local_part], + group_factory(order), + BTAG_PARTITION(i_remote_part)) - # If these parts are not connected, don't bother checking the error - bdry_nodes = local_bdry_conn.to_discr.nodes() - if bdry_nodes.size == 0: - eoc_rec[i_local_part, i_remote_part] = None - continue + # If these parts are not connected, don't bother 
checking the error + bdry_nodes = local_bdry_conn.to_discr.nodes() + if bdry_nodes.size == 0: + eoc_rec[i_local_part, i_remote_part] = None + continue - # Mark faces within remote_mesh that are connected to local_mesh - remote_bdry_conn = make_face_restriction(vol_discrs[i_remote_part], - group_factory(order), - BTAG_PARTITION(i_local_part)) - - # Gather just enough information for the connection - local_bdry = local_bdry_conn.to_discr - local_mesh = part_meshes[i_local_part] - local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] - for i in range(len(local_mesh.groups))] - local_batches = [local_bdry_conn.groups[i].batches - for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face - for batch in grp_batches] - for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in local_batches] - - remote_bdry = remote_bdry_conn.to_discr - remote_mesh = part_meshes[i_remote_part] - remote_adj_groups = [remote_mesh.facial_adjacency_groups[i][None] - for i in range(len(remote_mesh.groups))] - remote_batches = [remote_bdry_conn.groups[i].batches - for i in range(len(remote_mesh.groups))] - remote_to_elem_faces = [[batch.to_element_face - for batch in grp_batches] - for grp_batches in remote_batches] - remote_to_elem_indices = [[batch.to_element_indices.get(queue=queue) - for batch in grp_batches] - for grp_batches in remote_batches] - - # Connect local_mesh to remote_mesh - local_part_conn = make_partition_connection(local_bdry_conn, - i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - - # Connect remote mesh to local mesh - remote_part_conn = make_partition_connection(remote_bdry_conn, - i_remote_part, - local_bdry, - local_adj_groups, - local_to_elem_faces, - local_to_elem_indices) - - check_connection(local_part_conn) - check_connection(remote_part_conn) - - true_local_points = 
f(local_bdry.nodes()[0].with_queue(queue)) - s = true_local_points.shape - d = true_local_points.dtype - a = cl.array.Array(queue, shape=s, dtype=d) - a[:] = true_local_points.get() - true_local_points = a - remote_points = local_part_conn(queue, true_local_points) - local_points = remote_part_conn(queue, remote_points) - - err = la.norm((true_local_points - local_points).get(), np.inf) - eoc_rec[i_local_part, i_remote_part].add_data_point(1./n, err) + # Mark faces within remote_mesh that are connected to local_mesh + remote_bdry_conn = make_face_restriction(vol_discrs[i_remote_part], + group_factory(order), + BTAG_PARTITION(i_local_part)) + + # Gather just enough information for the connection + local_bdry = local_bdry_conn.to_discr + local_mesh = part_meshes[i_local_part] + local_adj_groups = [local_mesh.facial_adjacency_groups[i][None] + for i in range(len(local_mesh.groups))] + local_batches = [local_bdry_conn.groups[i].batches + for i in range(len(local_mesh.groups))] + local_to_elem_faces = [[batch.to_element_face + for batch in grp_batches] + for grp_batches in local_batches] + local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in local_batches] + + remote_bdry = remote_bdry_conn.to_discr + remote_mesh = part_meshes[i_remote_part] + remote_adj_groups = [remote_mesh.facial_adjacency_groups[i][None] + for i in range(len(remote_mesh.groups))] + remote_batches = [remote_bdry_conn.groups[i].batches + for i in range(len(remote_mesh.groups))] + remote_to_elem_faces = [[batch.to_element_face + for batch in grp_batches] + for grp_batches in remote_batches] + remote_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + for batch in grp_batches] + for grp_batches in remote_batches] + + # Connect local_mesh to remote_mesh + local_part_conn = make_partition_connection(local_bdry_conn, + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + + # Connect 
remote mesh to local mesh + remote_part_conn = make_partition_connection(remote_bdry_conn, + i_remote_part, + local_bdry, + local_adj_groups, + local_to_elem_faces, + local_to_elem_indices) + + check_connection(local_part_conn) + check_connection(remote_part_conn) + + true_local_points = f(local_bdry.nodes()[0].with_queue(queue)) + remote_points = local_part_conn(queue, true_local_points) + local_points = remote_part_conn(queue, remote_points) + + err = la.norm((true_local_points - local_points).get(), np.inf) + eoc_rec[i_local_part, i_remote_part].add_data_point(1./n, err) for (i, j), e in eoc_rec.items(): if e is not None: print("Error of connection from part %i to part %i." % (i, j)) print(e) - assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-12) + assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-14) # }}} @@ -209,7 +204,8 @@ def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, @pytest.mark.parametrize("dim", [2, 3]) @pytest.mark.parametrize("num_parts", [4, 5, 7]) @pytest.mark.parametrize("num_meshes", [1, 2, 7]) -def test_partition_mesh(num_parts, num_meshes, dim, scramble_partitions=False): +@pytest.mark.parametrize("scramble_partitions", [True, False]) +def test_partition_mesh(num_parts, num_meshes, dim, scramble_partitions): np.random.seed(42) n = (5,) * dim from meshmode.mesh.generation import generate_regular_rect_mesh @@ -400,7 +396,7 @@ def _test_data_transfer(mpi_comm, queue, local_bdry_conns, from mpi4py import MPI def f(x): - return 0.1*cl.clmath.sin(30.*x) + return 10*cl.clmath.sin(60.*x) ''' Here is a simplified example of what happens from -- GitLab From 4cf05467bc26acdc9d3f26ac7de8f93348592f42 Mon Sep 17 00:00:00 2001 From: Ellis Date: Sat, 13 Jan 2018 18:04:30 -0600 Subject: [PATCH 258/266] Fix docs --- meshmode/discretization/connection/opposite_face.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/connection/opposite_face.py 
b/meshmode/discretization/connection/opposite_face.py index cfd2b1ac..106c6350 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -427,7 +427,7 @@ def make_partition_connection(local_bdry_conn, i_local_part, group `igrp`. :returns: A :class:`DirectDiscretizationConnection` that performs data - exchange across faces from the remote partition to partition `i_local_part`. + exchange across faces from partition `i_local_part` to the remote partition. .. versionadded:: 2017.1 -- GitLab From e3353ee11d7db20f8a00908f8b0186ba36a050d1 Mon Sep 17 00:00:00 2001 From: Ellis Date: Sat, 13 Jan 2018 18:12:39 -0600 Subject: [PATCH 259/266] Add test cases --- test/test_partition.py | 43 +++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 3ed25fd2..83e99603 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -82,7 +82,7 @@ def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, eoc_rec[i, j] = EOCRecorder() def f(x): - return 10*cl.clmath.sin(60.*x) + return 10.*cl.clmath.sin(500.*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -137,6 +137,9 @@ def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, group_factory(order), BTAG_PARTITION(i_local_part)) + assert bdry_nodes.size == remote_bdry_conn.to_discr.nodes().size, \ + "partitions do not have the same number of connected nodes" + # Gather just enough information for the connection local_bdry = local_bdry_conn.to_discr local_mesh = part_meshes[i_local_part] @@ -164,28 +167,26 @@ def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, for batch in grp_batches] for grp_batches in remote_batches] - # Connect local_mesh to remote_mesh - local_part_conn = make_partition_connection(local_bdry_conn, - i_local_part, - remote_bdry, - 
remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - - # Connect remote mesh to local mesh - remote_part_conn = make_partition_connection(remote_bdry_conn, - i_remote_part, - local_bdry, - local_adj_groups, - local_to_elem_faces, - local_to_elem_indices) - - check_connection(local_part_conn) - check_connection(remote_part_conn) + # Connect from local_mesh to remote_mesh + local_to_remote_conn = make_partition_connection(local_bdry_conn, + i_local_part, + remote_bdry, + remote_adj_groups, + remote_to_elem_faces, + remote_to_elem_indices) + # Connect from remote mesh to local mesh + remote_to_local_conn = make_partition_connection(remote_bdry_conn, + i_remote_part, + local_bdry, + local_adj_groups, + local_to_elem_faces, + local_to_elem_indices) + check_connection(local_to_remote_conn) + check_connection(remote_to_local_conn) true_local_points = f(local_bdry.nodes()[0].with_queue(queue)) - remote_points = local_part_conn(queue, true_local_points) - local_points = remote_part_conn(queue, remote_points) + remote_points = local_to_remote_conn(queue, true_local_points) + local_points = remote_to_local_conn(queue, remote_points) err = la.norm((true_local_points - local_points).get(), np.inf) eoc_rec[i_local_part, i_remote_part].add_data_point(1./n, err) -- GitLab From ebc5980a399a0a2e9e844c2bfd9396d9ea320517 Mon Sep 17 00:00:00 2001 From: Ellis Date: Thu, 18 Jan 2018 11:27:22 -0600 Subject: [PATCH 260/266] Fix test case --- test/test_partition.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 83e99603..e4995be4 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -82,7 +82,7 @@ def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, eoc_rec[i, j] = EOCRecorder() def f(x): - return 10.*cl.clmath.sin(500.*x) + return 10.*cl.clmath.sin(50.*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -195,7 +195,7 @@ 
def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, if e is not None: print("Error of connection from part %i to part %i." % (i, j)) print(e) - assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-14) + assert(e.order_estimate() >= order - 0.5 or e.max_error() < 1e-11) # }}} -- GitLab From 5386180526d3413070b8f36a6cb97970a199b1ce Mon Sep 17 00:00:00 2001 From: Ellis Date: Mon, 22 Jan 2018 13:37:47 -0600 Subject: [PATCH 261/266] Refine test --- test/test_partition.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index e4995be4..67fc5710 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -55,9 +55,6 @@ TAG_SEND_LOCAL_NODES = TAG_BASE + 4 # {{{ partition_interpolation - -# FIXME: Getting some warning on some of these tests. Need to look into this later. -@pytest.mark.parametrize("group_factory", [PolynomialWarpAndBlendGroupFactory]) @pytest.mark.parametrize("num_parts", [2, 3]) @pytest.mark.parametrize("num_groups", [1, 2]) @pytest.mark.parametrize("scramble_partitions", [False]) @@ -66,9 +63,10 @@ TAG_SEND_LOCAL_NODES = TAG_BASE + 4 (2, [3, 4, 7]), (3, [3, 4]) ]) -def test_partition_interpolation(ctx_factory, group_factory, dim, mesh_pars, +def test_partition_interpolation(ctx_factory, dim, mesh_pars, num_parts, num_groups, scramble_partitions): np.random.seed(42) + group_factory = PolynomialWarpAndBlendGroupFactory cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) order = 4 -- GitLab From c223b7c5e786a0f9932f93ed349be13e350ea883 Mon Sep 17 00:00:00 2001 From: Ellis Date: Tue, 23 Jan 2018 11:29:11 -0600 Subject: [PATCH 262/266] Refine mpi test --- test/test_partition.py | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/test/test_partition.py b/test/test_partition.py index 67fc5710..69c6859e 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -325,9 +325,9 @@ def 
count_tags(mesh, tag): # }}} -# {{{ MPI test rank entrypoint +# {{{ MPI test boundary swap -def mpi_test_rank_entrypoint(): +def _test_mpi_boundary_swap(dim, order, num_groups): from meshmode.distributed import MPIMeshDistributor, MPIBoundaryCommunicator from mpi4py import MPI @@ -340,10 +340,14 @@ def mpi_test_rank_entrypoint(): if mesh_dist.is_mananger_rank(): np.random.seed(42) from meshmode.mesh.generation import generate_warped_rect_mesh - meshes = [generate_warped_rect_mesh(3, order=4, n=4) for _ in range(2)] + meshes = [generate_warped_rect_mesh(dim, order=order, n=4) + for _ in range(num_groups)] - from meshmode.mesh.processing import merge_disjoint_meshes - mesh = merge_disjoint_meshes(meshes) + if num_groups > 1: + from meshmode.mesh.processing import merge_disjoint_meshes + mesh = merge_disjoint_meshes(meshes) + else: + mesh = meshes[0] part_per_element = np.random.randint(num_parts, size=mesh.nelements) @@ -351,7 +355,7 @@ def mpi_test_rank_entrypoint(): else: local_mesh = mesh_dist.receive_mesh_part() - group_factory = PolynomialWarpAndBlendGroupFactory(4) + group_factory = PolynomialWarpAndBlendGroupFactory(order) cl_ctx = cl.create_some_context() queue = cl.CommandQueue(cl_ctx) @@ -390,12 +394,13 @@ def mpi_test_rank_entrypoint(): logger.debug("Rank %d exiting", i_local_part) +# TODO def _test_data_transfer(mpi_comm, queue, local_bdry_conns, remote_to_local_bdry_conns, connected_parts): from mpi4py import MPI def f(x): - return 10*cl.clmath.sin(60.*x) + return 10*cl.clmath.sin(20.*x) ''' Here is a simplified example of what happens from @@ -493,16 +498,17 @@ def _test_data_transfer(mpi_comm, queue, local_bdry_conns, from numpy.linalg import norm err = norm(true_local_f - local_f, np.inf) - assert err < 1e-13, "Error = %f is too large" % err + assert err < 1e-11, "Error = %f is too large" % err # }}} -# {{{ MPI test pytest entrypoint +# {{{ MPI pytest entrypoint @pytest.mark.mpi -@pytest.mark.parametrize("num_partitions", [3, 6]) -def 
test_mpi_communication(num_partitions): +@pytest.mark.parametrize("num_partitions", [3, 4]) +@pytest.mark.parametrize("order", [2, 3]) +def test_mpi_communication(num_partitions, order): pytest.importorskip("mpi4py") num_ranks = num_partitions @@ -510,6 +516,7 @@ def test_mpi_communication(num_partitions): import sys newenv = os.environ.copy() newenv["RUN_WITHIN_MPI"] = "1" + newenv["order"] = str(order) check_call([ "mpiexec", "-np", str(num_ranks), "-x", "RUN_WITHIN_MPI", sys.executable, __file__], @@ -520,7 +527,10 @@ def test_mpi_communication(num_partitions): if __name__ == "__main__": if "RUN_WITHIN_MPI" in os.environ: - mpi_test_rank_entrypoint() + dim = 2 + order = int(os.environ["order"]) + num_groups = 2 + _test_mpi_boundary_swap(dim, order, num_groups) else: import sys if len(sys.argv) > 1: -- GitLab From 5b7358cc34a66112425e6c8c6c1277b0d6fd3d23 Mon Sep 17 00:00:00 2001 From: Ellis Date: Tue, 23 Jan 2018 23:58:15 -0600 Subject: [PATCH 263/266] Refine comments --- meshmode/distributed.py | 4 ++-- test/test_partition.py | 7 +++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 2ba18c31..792a78e4 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -185,8 +185,8 @@ class MPIBoundaryCommunicator(object): Returns the tuple (`remote_to_local_bdry_conn`, []) where `remote_to_local_bdry_conn` is a :class:`DirectDiscretizationConnection` that gives the connection that - performs data exchange across faces from partition `i_remote_part` to the - local mesh. + performs data exchange across faces from the local mesh to partition + `i_remote_part`. """ if self.remote_data is None: status = MPI.Status() diff --git a/test/test_partition.py b/test/test_partition.py index 69c6859e..3a3e0855 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -420,6 +420,7 @@ def _test_data_transfer(mpi_comm, queue, local_bdry_conns, 7. 
Check if local points are the same as the original local points. ''' + # 1. send_reqs = [] for i_remote_part in connected_parts: conn = remote_to_local_bdry_conns[i_remote_part] @@ -429,10 +430,12 @@ def _test_data_transfer(mpi_comm, queue, local_bdry_conns, true_local_f = f(bdry_x) remote_f = conn(queue, true_local_f) + # 2. send_reqs.append(mpi_comm.isend(remote_f.get(queue=queue), dest=i_remote_part, tag=TAG_SEND_REMOTE_NODES)) + # 3. buffers = {} for i_remote_part in connected_parts: status = MPI.Status() @@ -454,6 +457,7 @@ def _test_data_transfer(mpi_comm, queue, local_bdry_conns, for req in send_reqs: req.wait() + # 4. send_reqs = [] for i_remote_part in connected_parts: conn = remote_to_local_bdry_conns[i_remote_part] @@ -464,10 +468,12 @@ def _test_data_transfer(mpi_comm, queue, local_bdry_conns, local_f_cl.set(local_f_np) remote_f = conn(queue, local_f_cl).get(queue=queue) + # 5. send_reqs.append(mpi_comm.isend(remote_f, dest=i_remote_part, tag=TAG_SEND_LOCAL_NODES)) + # 6. buffers = {} for i_remote_part in connected_parts: status = MPI.Status() @@ -489,6 +495,7 @@ def _test_data_transfer(mpi_comm, queue, local_bdry_conns, for req in send_reqs: req.wait() + # 7. 
for i_remote_part in connected_parts: bdry_discr = local_bdry_conns[i_remote_part].to_discr bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) -- GitLab From a9836d59300ab31a2d4963560860755b21585b6e Mon Sep 17 00:00:00 2001 From: Ellis Date: Thu, 25 Jan 2018 11:44:42 -0600 Subject: [PATCH 264/266] Fix documentation and opposite partition connection --- .../connection/opposite_face.py | 36 ++++++++++--------- meshmode/distributed.py | 4 +-- test/test_partition.py | 26 +++++++------- 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 106c6350..0df3521f 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -409,7 +409,7 @@ def make_opposite_face_connection(volume_to_bdry_conn): def make_partition_connection(local_bdry_conn, i_local_part, remote_bdry, remote_adj_groups, - remote_to_elem_faces, remote_to_elem_indices): + remote_from_elem_faces, remote_from_elem_indices): """ Connects ``local_bdry_conn`` to a neighboring partition. @@ -420,14 +420,14 @@ def make_partition_connection(local_bdry_conn, i_local_part, remote partition. :arg remote_bdry: A :class:`Discretization` of the boundary of the remote partition. - :arg remote_to_elem_faces: `remote_to_elem_faces[igrp][idx]` gives the face - that batch `idx` interpolates from group `igrp`. - :arg remote_to_elem_indices: `remote_to_elem_indices[igrp][idx]` gives a + :arg remote_from_elem_faces: `remote_from_elem_faces[igrp][idx]` gives the face + that batch `idx` interpolates from in group `igrp`. + :arg remote_from_elem_indices: `remote_from_elem_indices[igrp][idx]` gives a :class:`np.array` of element indices that batch `idx` interpolates from - group `igrp`. + in group `igrp`. 
:returns: A :class:`DirectDiscretizationConnection` that performs data - exchange across faces from partition `i_local_part` to the remote partition. + exchange across faces from the remote partition to partition `i_local_part`. .. versionadded:: 2017.1 @@ -441,7 +441,7 @@ def make_partition_connection(local_bdry_conn, i_local_part, local_bdry = local_bdry_conn.to_discr local_groups = local_bdry_conn.from_discr.mesh.groups - part_batches = [[] for _ in remote_adj_groups] + part_batches = [[] for _ in local_groups] with cl.CommandQueue(local_bdry_conn.cl_context) as queue: @@ -470,27 +470,29 @@ def make_partition_connection(local_bdry_conn, i_local_part, if not np.any(index_flags): continue - for idxs, to_face in zip(remote_to_elem_indices[i_remote_grp], - remote_to_elem_faces[i_remote_grp]): - if to_face == i_remote_face: + remote_bdry_indices = None + for idxs, face in zip(remote_from_elem_indices[i_remote_grp], + remote_from_elem_faces[i_remote_grp]): + if face == i_remote_face: remote_bdry_indices = idxs break + assert remote_bdry_indices is not None elems = i_local_meshwide_elems[index_flags] - elem_base faces = i_local_faces[index_flags] local_bdry_indices = local_el_lookup[elems, faces] batches = _make_cross_face_batches(queue, - remote_bdry, local_bdry, - i_remote_grp, i_local_grp, - remote_bdry_indices, - local_bdry_indices) + local_bdry, remote_bdry, + i_local_grp, i_remote_grp, + local_bdry_indices, + remote_bdry_indices) - part_batches[i_remote_grp].extend(batches) + part_batches[i_local_grp].extend(batches) return DirectDiscretizationConnection( - from_discr=local_bdry, - to_discr=remote_bdry, + from_discr=remote_bdry, + to_discr=local_bdry, groups=[DiscretizationConnectionElementGroup(batches=grp_batches) for grp_batches in part_batches], is_surjective=True) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 792a78e4..2ba18c31 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -185,8 +185,8 @@ class 
MPIBoundaryCommunicator(object): Returns the tuple (`remote_to_local_bdry_conn`, []) where `remote_to_local_bdry_conn` is a :class:`DirectDiscretizationConnection` that gives the connection that - performs data exchange across faces from the local mesh to partition - `i_remote_part`. + performs data exchange across faces from partition `i_remote_part` to the + local mesh. """ if self.remote_data is None: status = MPI.Status() diff --git a/test/test_partition.py b/test/test_partition.py index 3a3e0855..f838d684 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -145,10 +145,10 @@ def test_partition_interpolation(ctx_factory, dim, mesh_pars, for i in range(len(local_mesh.groups))] local_batches = [local_bdry_conn.groups[i].batches for i in range(len(local_mesh.groups))] - local_to_elem_faces = [[batch.to_element_face + local_from_elem_faces = [[batch.to_element_face for batch in grp_batches] for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + local_from_elem_indices = [[batch.to_element_indices.get(queue=queue) for batch in grp_batches] for grp_batches in local_batches] @@ -158,29 +158,29 @@ def test_partition_interpolation(ctx_factory, dim, mesh_pars, for i in range(len(remote_mesh.groups))] remote_batches = [remote_bdry_conn.groups[i].batches for i in range(len(remote_mesh.groups))] - remote_to_elem_faces = [[batch.to_element_face + remote_from_elem_faces = [[batch.to_element_face for batch in grp_batches] for grp_batches in remote_batches] - remote_to_elem_indices = [[batch.to_element_indices.get(queue=queue) + remote_from_elem_indices = [[batch.to_element_indices.get(queue=queue) for batch in grp_batches] for grp_batches in remote_batches] - # Connect from local_mesh to remote_mesh - local_to_remote_conn = make_partition_connection(local_bdry_conn, + # Connect from remote_mesh to local_mesh + remote_to_local_conn = make_partition_connection(local_bdry_conn, i_local_part, remote_bdry, 
remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) - # Connect from remote mesh to local mesh - remote_to_local_conn = make_partition_connection(remote_bdry_conn, + remote_from_elem_faces, + remote_from_elem_indices) + # Connect from local mesh to remote mesh + local_to_remote_conn = make_partition_connection(remote_bdry_conn, i_remote_part, local_bdry, local_adj_groups, - local_to_elem_faces, - local_to_elem_indices) - check_connection(local_to_remote_conn) + local_from_elem_faces, + local_from_elem_indices) check_connection(remote_to_local_conn) + check_connection(local_to_remote_conn) true_local_points = f(local_bdry.nodes()[0].with_queue(queue)) remote_points = local_to_remote_conn(queue, true_local_points) -- GitLab From 90a57323bb3192924a85844e9156c8b05ffa1c35 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 1 Feb 2018 18:01:32 -0600 Subject: [PATCH 265/266] Restructure MPI comm helper infrastructure --- meshmode/distributed.py | 83 ++++++++++++++--------------------------- test/test_partition.py | 37 +++++++++++------- 2 files changed, 52 insertions(+), 68 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 2ba18c31..4e3f4d29 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -25,7 +25,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ -import numpy as np +import numpy as np # noqa from mpi4py import MPI import logging @@ -37,7 +37,7 @@ TAG_SEND_BOUNDARY = TAG_BASE + 2 __doc__ = """ .. autoclass:: MPIMeshDistributor -.. autoclass:: MPIBoundaryCommunicator +.. autoclass:: MPIBoundaryTransceiver """ @@ -115,42 +115,27 @@ class MPIMeshDistributor(object): # }}} -# {{{ boundary communicator +# {{{ boundary communication setup helper -class MPIBoundaryCommunicator(object): +class MPIBoundaryCommSetupHelper(object): """ .. automethod:: __call__ .. 
automethod:: is_ready """ - def __init__(self, mpi_comm, queue, part_discr, bdry_grp_factory, i_remote_part): + def __init__(self, mpi_comm, queue, local_bdry_conn, i_remote_part, + bdry_grp_factory): """ :arg mpi_comm: A :class:`MPI.Intracomm` :arg queue: - :arg part_discr: A :class:`meshmode.Discretization` of the local mesh - to perform boundary communication on. - :arg bdry_grp_factory: :arg i_remote_part: The part number of the remote partition """ self.mpi_comm = mpi_comm self.queue = queue - self.part_discr = part_discr self.i_local_part = mpi_comm.Get_rank() self.i_remote_part = i_remote_part + self.local_bdry_conn = local_bdry_conn self.bdry_grp_factory = bdry_grp_factory - from meshmode.discretization.connection import make_face_restriction - from meshmode.mesh import BTAG_PARTITION - self.local_bdry_conn = make_face_restriction(part_discr, - bdry_grp_factory, - BTAG_PARTITION(i_remote_part)) - self._setup() - self.remote_data = None - - def _setup(self): - logger.info("bdry comm rank %d send begin", self.i_local_part) - self.send_req = self._post_send_boundary_data() - self.recv_req = self._post_recv_boundary_data() - def _post_send_boundary_data(self): local_bdry = self.local_bdry_conn.to_discr local_mesh = self.local_bdry_conn.from_discr.mesh @@ -172,35 +157,38 @@ class MPIBoundaryCommunicator(object): dest=self.i_remote_part, tag=TAG_SEND_BOUNDARY) - def _post_recv_boundary_data(self): - status = MPI.Status() - self.mpi_comm.probe(source=self.i_remote_part, - tag=TAG_SEND_BOUNDARY, status=status) - return self.mpi_comm.irecv(buf=np.empty(status.count, dtype=bytes), - source=self.i_remote_part, - tag=TAG_SEND_BOUNDARY) + def post_sends(self): + logger.info("bdry comm rank %d send begin", self.i_local_part) + self.send_req = self._post_send_boundary_data() - def __call__(self): + def is_setup_ready(self): """ - Returns the tuple (`remote_to_local_bdry_conn`, []) + Returns True if the rank boundary data is ready to be received. 
+ """ + return self.mpi_comm.Iprobe(source=self.i_remote_part, tag=TAG_SEND_BOUNDARY) + + def complete_setup(self): + """ + Returns the tuple ``remote_to_local_bdry_conn`` where `remote_to_local_bdry_conn` is a :class:`DirectDiscretizationConnection` that gives the connection that performs data exchange across faces from partition `i_remote_part` to the local mesh. """ - if self.remote_data is None: - status = MPI.Status() - self.remote_data = self.recv_req.wait(status=status) - logger.debug('rank %d: Received rank %d data (%d bytes)', - self.i_local_part, self.i_remote_part, status.count) + remote_data = self.mpi_comm.recv( + source=self.i_remote_part, + tag=TAG_SEND_BOUNDARY) + + logger.debug('rank %d: Received rank %d data', + self.i_local_part, self.i_remote_part) from meshmode.discretization import Discretization - remote_bdry_mesh = self.remote_data['bdry_mesh'] + remote_bdry_mesh = remote_data['bdry_mesh'] remote_bdry = Discretization(self.queue.context, remote_bdry_mesh, self.bdry_grp_factory) - remote_adj_groups = self.remote_data['adj'] - remote_to_elem_faces = self.remote_data['to_elem_faces'] - remote_to_elem_indices = self.remote_data['to_elem_indices'] + remote_adj_groups = remote_data['adj'] + remote_to_elem_faces = remote_data['to_elem_faces'] + remote_to_elem_indices = remote_data['to_elem_indices'] # Connect local_mesh to remote_mesh from meshmode.discretization.connection import make_partition_connection @@ -211,20 +199,7 @@ class MPIBoundaryCommunicator(object): remote_to_elem_faces, remote_to_elem_indices) self.send_req.wait() - return remote_to_local_bdry_conn, [] - - def is_ready(self): - """ - Returns True if the rank boundary data is ready to be received. 
- """ - if self.remote_data is None: - status = MPI.Status() - did_receive, self.remote_data = self.recv_req.test(status=status) - if not did_receive: - return False - logger.debug('rank %d: Received rank %d data (%d bytes)', - self.i_local_part, self.i_remote_part, status.count) - return True + return remote_to_local_bdry_conn # }}} diff --git a/test/test_partition.py b/test/test_partition.py index f838d684..09f83fd3 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -47,7 +47,7 @@ import logging logger = logging.getLogger(__name__) # Is there a smart way of choosing this number? -# Currenly it is the same as the base from MPIBoundaryCommunicator +# Currenly it is the same as the base from MPIBoundaryTransceiver TAG_BASE = 83411 TAG_SEND_REMOTE_NODES = TAG_BASE + 3 TAG_SEND_LOCAL_NODES = TAG_BASE + 4 @@ -328,7 +328,7 @@ def count_tags(mesh, tag): # {{{ MPI test boundary swap def _test_mpi_boundary_swap(dim, order, num_groups): - from meshmode.distributed import MPIMeshDistributor, MPIBoundaryCommunicator + from meshmode.distributed import MPIMeshDistributor, MPIBoundaryCommSetupHelper from mpi4py import MPI mpi_comm = MPI.COMM_WORLD @@ -365,26 +365,35 @@ def _test_mpi_boundary_swap(dim, order, num_groups): from meshmode.distributed import get_connected_partitions connected_parts = get_connected_partitions(local_mesh) assert i_local_part not in connected_parts - bdry_conn_futures = {} + bdry_setup_helpers = {} local_bdry_conns = {} + + from meshmode.discretization.connection import make_face_restriction + from meshmode.mesh import BTAG_PARTITION for i_remote_part in connected_parts: - bdry_conn_futures[i_remote_part] = MPIBoundaryCommunicator(mpi_comm, - queue, - vol_discr, - group_factory, - i_remote_part) - local_bdry_conns[i_remote_part] =\ - bdry_conn_futures[i_remote_part].local_bdry_conn + local_bdry_conns[i_remote_part] = make_face_restriction( + vol_discr, group_factory, BTAG_PARTITION(i_remote_part)) + + setup_helper = 
bdry_setup_helpers[i_remote_part] = \ + MPIBoundaryCommSetupHelper( + mpi_comm, queue, local_bdry_conns[i_remote_part], + i_remote_part, bdry_grp_factory=group_factory) + + setup_helper.post_sends() remote_to_local_bdry_conns = {} from meshmode.discretization.connection import check_connection - while len(bdry_conn_futures) > 0: - for i_remote_part, future in bdry_conn_futures.items(): - if future.is_ready(): - conn, _ = bdry_conn_futures.pop(i_remote_part)() + while bdry_setup_helpers: + for i_remote_part, setup_helper in bdry_setup_helpers.items(): + if setup_helper.is_setup_ready(): + assert bdry_setup_helpers.pop(i_remote_part) is setup_helper + conn = setup_helper.complete_setup() check_connection(conn) remote_to_local_bdry_conns[i_remote_part] = conn break + + # FIXME: Not ideal, busy-waits + _test_data_transfer(mpi_comm, queue, local_bdry_conns, -- GitLab From d9c8eb7d7232eed0051d6035e68f7062538108c3 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Thu, 1 Feb 2018 20:26:24 -0600 Subject: [PATCH 266/266] Remove unconditional mpi4py import in meshmode.distributed --- meshmode/distributed.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 4e3f4d29..529190b5 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -26,7 +26,9 @@ THE SOFTWARE. """ import numpy as np # noqa -from mpi4py import MPI + +# This file needs to be importable without mpi4py. So don't be tempted to add +# that import here--push it into individual functions instead. import logging logger = logging.getLogger(__name__) @@ -104,6 +106,7 @@ class MPIMeshDistributor(object): assert not self.is_mananger_rank(), "Manager rank cannot receive mesh" + from mpi4py import MPI status = MPI.Status() result = self.mpi_comm.recv( source=self.manager_rank, tag=TAG_DISTRIBUTE_MESHES, -- GitLab