From d14e9f0a53d418cc0d70499386b6b5d5b4079e1d Mon Sep 17 00:00:00 2001 From: "[6~" Date: Mon, 25 May 2020 19:46:57 -0500 Subject: [PATCH 001/106] Drop Py2 compatibility --- .github/workflows/ci.yml | 13 ------------- .gitlab-ci.yml | 18 ------------------ setup.py | 6 ++---- 3 files changed, 2 insertions(+), 35 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 01e2cbfc..1e93235e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,19 +24,6 @@ jobs: curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/prepare-and-run-flake8.sh . ./prepare-and-run-flake8.sh "$(basename $GITHUB_REPOSITORY)" ./test - pytest2: - name: Pytest Conda Py2 - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: "Main Script" - run: | - sed 's/python=3/python=2.7/' .test-conda-env-py3.yml > .test-conda-env-py2.yml - cat .test-conda-env-py2.yml - CONDA_ENVIRONMENT=.test-conda-env-py2.yml - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project-within-miniconda.sh - . ./build-and-test-py-project-within-miniconda.sh - pytest3: name: Pytest Conda Py3 runs-on: ubuntu-latest diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 34ebe33a..2d862e1b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,21 +1,3 @@ -Python 2.7 POCL: - script: - - export PY_EXE=python2.7 - - export PYOPENCL_TEST=portable - - export EXTRA_INSTALL="pybind11 cython numpy mako mpi4py" - # cython is here because pytential (for now, for TS) depends on it - - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh - - ". ./build-and-test-py-project.sh" - tags: - - python2.7 - - pocl - - mpi - except: - - tags - artifacts: - reports: - junit: test/pytest.xml - Python 3 Nvidia K40: script: - export PY_EXE=python3 diff --git a/setup.py b/setup.py index 688e9f92..bfbdfbce 100644 --- a/setup.py +++ b/setup.py @@ -27,10 +27,8 @@ def main(): 'Natural Language :: English', 'Programming Language :: Python', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 2.6', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.2', - 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Information Analysis', 'Topic :: Scientific/Engineering :: Mathematics', -- GitLab From 8fba8490577be43f64a673f64b3a0105b474569b Mon Sep 17 00:00:00 2001 From: "[6~" Date: Mon, 25 May 2020 19:52:30 -0500 Subject: [PATCH 002/106] Initial version of array contexts --- doc/array_context.rst | 4 + doc/index.rst | 1 + meshmode/array_context.py | 176 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 181 insertions(+) create mode 100644 doc/array_context.rst create mode 100644 meshmode/array_context.py diff --git a/doc/array_context.rst b/doc/array_context.rst new file mode 100644 index 00000000..fe16b327 --- /dev/null +++ b/doc/array_context.rst @@ -0,0 +1,4 @@ +Array Contexts +============== + +.. 
automodule:: meshmode.array diff --git a/doc/index.rst b/doc/index.rst index 3a7a2ba7..af5305e6 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -7,6 +7,7 @@ Contents: :maxdepth: 2 mesh + array_context discretization connection misc diff --git a/meshmode/array_context.py b/meshmode/array_context.py new file mode 100644 index 00000000..9b8e55e1 --- /dev/null +++ b/meshmode/array_context.py @@ -0,0 +1,176 @@ +from __future__ import division + +__copyright__ = "Copyright (C) 2020 Andreas Kloeckner" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + + +import numpy as np +import loopy as lp +from loopy.version import MOST_RECENT_LANGUAGE_VERSION +from pytools import memoize_method + +__doc__ = """ +.. autoclass:: ArrayContext +.. autoclass:: PyOpenCLArrayContext +""" + + +# {{{ ArrayContext + +class ArrayContext: + """An interface that allows a :class:`Discretization` to create and interact with + arrays of degrees of freedom without fully specifying their types. + + .. automethod:: empty + .. automethod:: zeros + .. automethod:: from_numpy_constant + .. automethod:: from_numpy_data + .. automethod:: to_numpy + .. automethod:: call_loopy + .. automethod:: finalize + """ + + def empty(self, shape, dtype): + raise NotImplementedError + + def zeros(self, shape, dtype): + raise NotImplementedError + + def from_numpy(self, array: np.ndarray): + """ + :returns: the :class:`numpy.ndarray` *array* converted to the + array context's array type. + """ + raise NotImplementedError + + def to_numpy(self, array): + """ + :returns: *array*, an array recognized by the context, converted + to a :class:`numpy.ndarray`. + """ + raise NotImplementedError + + def call_loopy(self, program, **args): + """Execute the :mod:`loopy` program *program* on the arguments + *args*. + + *program* is a :class:`loopy.LoopKernel` or :class:`loopy.Program`. + It is expected to not yet be transformed for execution speed. + It must have :class:`loopy.Options.return_dict` set. + + :return: a :class:`dict` of outputs from the program, each an + array understood by the context. + """ + raise NotImplementedError + + def finalize(self, array): + """Return a version of the context-defined array *array* that + is 'finalized', i.e. suitable for long-term storage and reuse. + For example, in the context of OpenCL, this might entail + stripping the array of an associated queue, whereas in a + lazily-evaluated context, it might mean that the array is + evaluated and stored. 
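# [Editorial sketch, not part of the patch.] A minimal usage example of the
# ArrayContext interface defined above, exercised through the
# PyOpenCLArrayContext implementation that appears further down in this same
# file. Only methods introduced in this patch (from_numpy, zeros, to_numpy)
# plus standard pyopencl calls are used; treat this as an illustration, not
# as project-provided example code.

import numpy as np
import pyopencl as cl
from meshmode.array_context import PyOpenCLArrayContext

cl_ctx = cl.create_some_context()
actx = PyOpenCLArrayContext(cl.CommandQueue(cl_ctx))

x_host = np.linspace(0, 1, 10)
x_dev = actx.from_numpy(x_host)                  # a pyopencl.array.Array
y_dev = actx.zeros(x_dev.shape, x_dev.dtype) + x_dev
assert np.allclose(actx.to_numpy(y_dev), x_host)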
+ """ + raise NotImplementedError + +# }}} + + +# {{{ PyOpenCLArrayContext + +class PyOpenCLArrayContext(ArrayContext): + """ + A :class:`ArrayContext` that uses :class:`pyopencl.array.Array` instances + for DOF arrays. + + .. attribute:: context + + A :class:`pyopencl.Context`. + + .. attribute:: queue + + A :class:`pyopencl.CommandQueue` or *None*. + + .. attribute:: allocator + """ + + def __init__(self, queue, allocator=None): + self.context = queue.context + self.queue = queue + self.allocator = allocator + + # {{{ ArrayContext interface + + def empty(self, shape, dtype): + import pyopencl.array as cla + return cla.empty(self.queue, shape=shape, dtype=dtype, + allocator=self.allocator) + + def zeros(self, shape, dtype): + import pyopencl.array as cla + return cla.zeros(self.queue, shape=shape, dtype=dtype, + allocator=self.allocator) + + def from_numpy(self, np_array: np.ndarray): + import pyopencl.array as cla + return cla.to_device(self.queue, np_array, allocator=self.allocator) + + def to_numpy(self, array): + return array.get(queue=self.queue) + + def call_loopy(self, program, **args): + program = self.transform_loopy_program(program) + assert program.options.return_dict + assert program.options.no_numpy + + evt, result = program(self.queue, **args) + return result + + def finalize(self, array): + array.finish() + return array.with_queue(None) + + # }}} + + @memoize_method + def transform_loopy_program(self, program): + # FIXME: This assumes that inames 'idof' and 'iel' exist. + # FIXME: This could be much smarter. + import loopy as lp + program = lp.split_iname(program, "idof", 16, inner_tag="l.0") + return lp.tag_inames(program, dict(iel="g.0")) + +# }}} + + +def make_loopy_program(domains, statements, name=None): + return lp.make_kernel( + domains, + statements, + options=lp.Options( + no_numpy=True, + return_dict=True), + name=name, + lang_version=MOST_RECENT_LANGUAGE_VERSION) + + +# vim: foldmethod=marker -- GitLab From e224001f3d52992a0cc3e5433d5e079d2b983c0d Mon Sep 17 00:00:00 2001 From: "[6~" Date: Mon, 25 May 2020 19:56:22 -0500 Subject: [PATCH 003/106] Rework meshmode.discretization to make use of ArrayContext --- meshmode/discretization/__init__.py | 309 ++++++++++++------------ meshmode/discretization/poly_element.py | 17 +- 2 files changed, 158 insertions(+), 168 deletions(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index a3847f2c..289cf7cf 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -1,6 +1,4 @@ -from __future__ import division - -__copyright__ = "Copyright (C) 2013 Andreas Kloeckner" +__copyright__ = "Copyright (C) 2013-2020 Andreas Kloeckner" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy @@ -23,17 +21,16 @@ THE SOFTWARE. """ import numpy as np -import loopy as lp -import pyopencl as cl -import pyopencl.array # noqa -from loopy.version import MOST_RECENT_LANGUAGE_VERSION -from pytools import memoize_method, memoize_in +from pytools import memoize_in, memoize_method +from pytools.obj_array import make_obj_array +from meshmode.array_context import ArrayContext, make_loopy_program __doc__ = """ .. autoclass:: ElementGroupBase .. autoclass:: InterpolatoryElementGroupBase .. autoclass:: ElementGroupFactory +.. autoclass:: DOFArray .. autoclass:: Discretization """ @@ -50,22 +47,22 @@ class ElementGroupBase(object): .. attribute :: mesh_el_group .. attribute :: order - .. attribute :: node_nr_base + .. attribute :: index .. 
autoattribute:: nelements - .. autoattribute:: nunit_nodes + .. autoattribute:: ndofs .. autoattribute:: nnodes .. autoattribute:: dim .. automethod:: view .. method:: unit_nodes() - Returns a :class:`numpy.ndarray` of shape ``(dim, nunit_nodes)`` + Returns a :class:`numpy.ndarray` of shape ``(dim, ndofs)`` of reference coordinates of interpolation nodes. .. method:: weights() - Returns an array of length :attr:`nunit_nodes` containing + Returns an array of length :attr:`ndofs` containing quadrature weights. .. attribute:: is_affine @@ -75,14 +72,14 @@ class ElementGroupBase(object): :attr:`meshmode.mesh.MeshElementGroup.is_affine`. """ - def __init__(self, mesh_el_group, order, node_nr_base): + def __init__(self, mesh_el_group, order, index): """ :arg mesh_el_group: an instance of :class:`meshmode.mesh.MeshElementGroup` """ self.mesh_el_group = mesh_el_group self.order = order - self.node_nr_base = node_nr_base + self.index = index @property def is_affine(self): @@ -93,13 +90,9 @@ class ElementGroupBase(object): return self.mesh_el_group.nelements @property - def nunit_nodes(self): + def ndofs(self): return self.unit_nodes.shape[-1] - @property - def nnodes(self): - return self.nunit_nodes * self.nelements - @property def dim(self): return self.mesh_el_group.dim @@ -113,27 +106,6 @@ class ElementGroupBase(object): grad_basis = basis diff_matrices = basis - def _nodes(self): - # Not cached, because the global nodes array is what counts. - # This is just used to build that. - - return np.tensordot( - self.mesh_el_group.nodes, - self._from_mesh_interp_matrix(), - (-1, -1)) - - def view(self, global_array): - """Return a view of *global_array* of shape ``(..., nelements, - nunit_nodes)`` where *global_array* is of shape ``(..., nnodes)``, - where *nnodes* is the global (per-discretization) node count. - """ - - return global_array[ - ..., self.node_nr_base:self.node_nr_base + self.nnodes] \ - .reshape( - global_array.shape[:-1] - + (self.nelements, self.nunit_nodes)) - # }}} @@ -192,16 +164,37 @@ class OrderBasedGroupFactory(ElementGroupFactory): # }}} +class DOFArray(object): + """ + .. attribute:: array_context + + An instance of :class:`ArrayContext`, or *None* if the arrays + in :attr:`group_arrays` are :meth:`ArrayContext.finalize`d. + + .. attribute:: group_arrays + + A list of arrays recognized by :attr:`array_context`, + one for each :class:`ElementGroupBase` of the :class:`Discretization` + that generated this :class:`DOFArray`. + """ + + # FIXME: How much arithmetic? + + def __init__(self, array_context, group_arrays): + self.array_context = array_context + self.group_arrays = group_arrays + + def copy(self, group_arrays=None): + return type(self)( + array_context=self.array_context, + group_arrays=( + group_arrays if group_arrays is not None + else self.group_arrays)) + + class Discretization(object): """An unstructured composite discretization. - :param cl_ctx: A :class:`pyopencl.Context` - :param mesh: A :class:`meshmode.mesh.Mesh` over which the discretization is - built - :param group_factory: An :class:`ElementGroupFactory` - :param real_dtype: The :mod:`numpy` data type used for representing real - data, either :class:`numpy.float32` or :class:`numpy.float64` - .. attribute:: real_dtype .. attribute:: complex_dtype @@ -212,8 +205,6 @@ class Discretization(object): .. attribute:: ambient_dim - .. attribute :: nnodes - .. attribute :: groups .. automethod:: empty @@ -222,25 +213,49 @@ class Discretization(object): .. 
method:: nodes() - shape: ``(ambient_dim, nnodes)`` + An object array of shape ``(ambient_dim,)`` containing + :class:`DOFArray`s of node coordinates. .. method:: num_reference_derivative(queue, ref_axes, vec) .. method:: quad_weights(queue) - shape: ``(nnodes)`` + A :class:`DOFArray` with quadrature weights. """ - def __init__(self, cl_ctx, mesh, group_factory, real_dtype=np.float64): - self.cl_context = cl_ctx + def __init__(self, actx: ArrayContext, mesh, group_factory, + real_dtype=np.float64): + """ + :param actx: A :class:`ArrayContext` used to perform computation needed + during initial set-up of the mesh. + :param mesh: A :class:`meshmode.mesh.Mesh` over which the discretization is + built + :param group_factory: An :class:`ElementGroupFactory` + :param real_dtype: The :mod:`numpy` data type used for representing real + data, either :class:`numpy.float32` or :class:`numpy.float64` + """ + + if not isinstance(actx, ArrayContext): + from meshmode.array_context import PyOpenCLArrayContext + + import pyopencl as cl + if isinstance(actx, cl.Context): + from warnings import warn + warn("Passing a pyopencl Context to Discretization is deprecated. " + "Pass an ArrayContext instead.", + DeprecationWarning, + stacklevel=2) + actx = PyOpenCLArrayContext(cl.CommandQueue(actx)) + else: + raise TypeError("'actx' must be an ArrayContext") self.mesh = mesh - self.nnodes = 0 - self.groups = [] + groups = [] for mg in mesh.groups: - ng = group_factory(mg, self.nnodes) - self.groups.append(ng) - self.nnodes += ng.nnodes + ng = group_factory(mg, len(groups)) + groups.append(ng) + + self.groups = groups self.real_dtype = np.dtype(real_dtype) self.complex_dtype = np.dtype({ @@ -248,6 +263,8 @@ class Discretization(object): np.float64: np.complex128 }[self.real_dtype.type]) + self._setup_actx = actx + @property def dim(self): return self.mesh.dim @@ -256,13 +273,7 @@ class Discretization(object): def ambient_dim(self): return self.mesh.ambient_dim - def empty(self, queue=None, dtype=None, extra_dims=None, allocator=None): - """Return an empty DOF vector. - - :arg dtype: type special value 'c' will result in a - vector of dtype :attr:`self.complex_dtype`. If - *None* (the default), a real vector will be returned. 
- """ + def _new_array(self, actx, creation_func, dtype=None): if dtype is None: dtype = self.real_dtype elif dtype == "c": @@ -270,45 +281,42 @@ class Discretization(object): else: dtype = np.dtype(dtype) - if queue is None: - first_arg = self.cl_context - else: - first_arg = queue - - shape = (self.nnodes,) - if extra_dims is not None: - shape = extra_dims + shape - - return cl.array.empty(first_arg, shape, dtype=dtype, allocator=allocator) - - def zeros(self, queue, dtype=None, extra_dims=None, allocator=None): - return self.empty(queue, dtype=dtype, extra_dims=extra_dims, - allocator=allocator).fill(0) + return DOFArray(actx, [ + creation_func(shape=(grp.nelements, grp.nunit_nodes), dtype=dtype) + for grp in self.groups]) - def num_reference_derivative(self, queue, ref_axes, vec): - @memoize_in(self, "reference_derivative_knl") - def knl(): - knl = lp.make_kernel( - """{[k,i,j]: - 0<=k Date: Mon, 25 May 2020 20:00:03 -0500 Subject: [PATCH 004/106] Hack simple DG towards ArrayContext compatibility --- examples/simple-dg.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/examples/simple-dg.py b/examples/simple-dg.py index 8fe6f706..0dd78231 100644 --- a/examples/simple-dg.py +++ b/examples/simple-dg.py @@ -35,6 +35,7 @@ from pytools.obj_array import ( is_obj_array) import loopy as lp from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa +from meshmode.discretization import DOFArray # noqa: F401 # Features lost vs. https://github.com/inducer/grudge: @@ -45,8 +46,12 @@ from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa def with_queue(queue, ary): - return with_object_array_or_scalar( - lambda x: x.with_queue(queue), ary) + def dof_array_with_queue(dof_array): + return dof_array.copy(group_arrays=[ + grp_ary.with_queue(queue) + for grp_ary in dof_array.group_arrays]) + + return with_object_array_or_scalar(dof_array_with_queue, ary) def without_queue(ary): @@ -457,7 +462,7 @@ def bump(discr, queue, t=0): source_width = 0.05 source_omega = 3 - nodes = discr.volume_discr.nodes().with_queue(queue) + nodes = with_queue(queue, discr.volume_discr.nodes()) center_dist = join_fields([ nodes[0] - source_center[0], nodes[1] - source_center[1], -- GitLab From 1c89c5b606128f63d2fa162ea93b9ef554fb4be9 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 11:04:14 -0500 Subject: [PATCH 005/106] Bump version requirement for pytools --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index a3ccd409..7676f8ce 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ def main(): "modepy", "gmsh_interop", "six", - "pytools>=2018.4", + "pytools>=2020.3", "pytest>=2.3", "loo.py>=2014.1", ], -- GitLab From 4b1168fd7c2a27aac3f9fe32e705c8f9f8882578 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 11:05:03 -0500 Subject: [PATCH 006/106] Move make_loopy_program to top of array_context, include in docs --- meshmode/array_context.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 9b8e55e1..5124133a 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -29,11 +29,27 @@ from loopy.version import MOST_RECENT_LANGUAGE_VERSION from pytools import memoize_method __doc__ = """ +.. autofunction:: make_loopy_program .. autoclass:: ArrayContext .. 
autoclass:: PyOpenCLArrayContext """ +def make_loopy_program(domains, statements, kernel_data=["..."], name=None): + """Return a :class:`loopy.Program` suitable for use with + :meth:`ArrayContext.call_loopy`. + """ + return lp.make_kernel( + domains, + statements, + kernel_data=kernel_data, + options=lp.Options( + no_numpy=True, + return_dict=True), + name=name, + lang_version=MOST_RECENT_LANGUAGE_VERSION) + + # {{{ ArrayContext class ArrayContext: @@ -162,15 +178,4 @@ class PyOpenCLArrayContext(ArrayContext): # }}} -def make_loopy_program(domains, statements, name=None): - return lp.make_kernel( - domains, - statements, - options=lp.Options( - no_numpy=True, - return_dict=True), - name=name, - lang_version=MOST_RECENT_LANGUAGE_VERSION) - - # vim: foldmethod=marker -- GitLab From ec844d04304339b986d1954e759fb49ae2a1c77c Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 11:05:36 -0500 Subject: [PATCH 007/106] Add ArrayContext.{empty,zeros}_like --- meshmode/array_context.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 5124133a..029a4cb5 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -58,6 +58,8 @@ class ArrayContext: .. automethod:: empty .. automethod:: zeros + .. automethod:: empty_like + .. automethod:: zeros_like .. automethod:: from_numpy_constant .. automethod:: from_numpy_data .. automethod:: to_numpy @@ -71,6 +73,12 @@ class ArrayContext: def zeros(self, shape, dtype): raise NotImplementedError + def empty_like(self, ary): + return self.empty(shape=ary.shape, dtype=ary.dtype) + + def zeros_like(self, ary): + return self.zeros(shape=ary.shape, dtype=ary.dtype) + def from_numpy(self, array: np.ndarray): """ :returns: the :class:`numpy.ndarray` *array* converted to the -- GitLab From 241980f6ad53c7e03fb9a218aabe42cba6aba91b Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 11:07:39 -0500 Subject: [PATCH 008/106] Add ArrayContext.special_func --- meshmode/array_context.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 029a4cb5..76896209 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -65,6 +65,7 @@ class ArrayContext: .. automethod:: to_numpy .. automethod:: call_loopy .. automethod:: finalize + .. automethod:: special_func """ def empty(self, shape, dtype): @@ -112,6 +113,24 @@ class ArrayContext: For example, in the context of OpenCL, this might entail stripping the array of an associated queue, whereas in a lazily-evaluated context, it might mean that the array is + @memoize_method + def special_func(self, name): + """Returns a callable for the special function *name*, where *name* is a + (potentially dotted) function name resolvable by :mod:`loopy`. + """ + prg = make_loopy_program( + "{[iel, idof]: 0<=iel Date: Fri, 5 Jun 2020 11:08:35 -0500 Subject: [PATCH 009/106] Replace ArrayContext.finalize by ArrayContext.{freeze,thaw} --- meshmode/array_context.py | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 76896209..8cbc3002 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -64,8 +64,9 @@ class ArrayContext: .. automethod:: from_numpy_data .. automethod:: to_numpy .. automethod:: call_loopy - .. automethod:: finalize .. automethod:: special_func + .. automethod:: freeze + .. 
automethod:: thaw """ def empty(self, shape, dtype): @@ -107,12 +108,6 @@ class ArrayContext: """ raise NotImplementedError - def finalize(self, array): - """Return a version of the context-defined array *array* that - is 'finalized', i.e. suitable for long-term storage and reuse. - For example, in the context of OpenCL, this might entail - stripping the array of an associated queue, whereas in a - lazily-evaluated context, it might mean that the array is @memoize_method def special_func(self, name): """Returns a callable for the special function *name*, where *name* is a @@ -131,7 +126,22 @@ class ArrayContext: return f + def freeze(self, array): + """Return a version of the context-defined array *array* that is + 'frozen', i.e. suitable for long-term storage and reuse. Frozen arrays + do not support arithmetic. For example, in the context of + OpenCL, this might entail stripping the array of an associated queue, + whereas in a lazily-evaluated context, it might mean that the array is evaluated and stored. + + Freezing makes the array independent of this :class:`ArrayContext`; + it is permitted to :meth:`thaw` it in a different one, as long as that + context understands the array format. + """ + raise NotImplementedError + + def thaw(self, array): + """Take a 'frozen' array """ raise NotImplementedError @@ -188,10 +198,13 @@ class PyOpenCLArrayContext(ArrayContext): evt, result = program(self.queue, **args) return result - def finalize(self, array): + def freeze(self, array): array.finish() return array.with_queue(None) + def thaw(self, array): + return array.with_queue(self.queue) + # }}} @memoize_method -- GitLab From 589ae0fc6ebc786f6d7e49a9d900042773e8b215 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 11:18:09 -0500 Subject: [PATCH 010/106] Discr ElementGroup: Rename nunit_nodes -> nunit_dofs, remove nnodes, implement ndofs --- examples/simple-dg.py | 4 ++-- meshmode/discretization/__init__.py | 20 ++++++++++++-------- meshmode/discretization/visualization.py | 10 ++++++---- 3 files changed, 20 insertions(+), 14 deletions(-) diff --git a/examples/simple-dg.py b/examples/simple-dg.py index 0dd78231..361f6d39 100644 --- a/examples/simple-dg.py +++ b/examples/simple-dg.py @@ -287,9 +287,9 @@ class DGDiscretization: assert afgrp.nelements == nfaces * volgrp.nelements matrix = np.empty( - (volgrp.nunit_nodes, + (volgrp.nunit_dofs, nfaces, - afgrp.nunit_nodes), + afgrp.nunit_dofs), dtype=dtype) from modepy.tools import UNIT_VERTICES diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 289cf7cf..03ff0d22 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -50,19 +50,19 @@ class ElementGroupBase(object): .. attribute :: index .. autoattribute:: nelements + .. autoattribute:: nunit_dofs .. autoattribute:: ndofs - .. autoattribute:: nnodes .. autoattribute:: dim .. automethod:: view .. method:: unit_nodes() - Returns a :class:`numpy.ndarray` of shape ``(dim, ndofs)`` + Returns a :class:`numpy.ndarray` of shape ``(dim, nunit_dofs)`` of reference coordinates of interpolation nodes. .. method:: weights() - Returns an array of length :attr:`ndofs` containing + Returns an array of length :attr:`nunit_dofs` containing quadrature weights. .. 
attribute:: is_affine @@ -90,9 +90,13 @@ class ElementGroupBase(object): return self.mesh_el_group.nelements @property - def ndofs(self): + def nunit_dofs(self): return self.unit_nodes.shape[-1] + @property + def ndofs(self): + return self.nunit_dofs * self.nelements + @property def dim(self): return self.mesh_el_group.dim @@ -281,8 +285,8 @@ class Discretization(object): else: dtype = np.dtype(dtype) - return DOFArray(actx, [ - creation_func(shape=(grp.nelements, grp.nunit_nodes), dtype=dtype) + return DOFArray.from_list(actx, [ + creation_func(shape=(grp.nelements, grp.nunit_dofs), dtype=dtype) for grp in self.groups]) def empty(self, actx: ArrayContext, dtype=None): @@ -309,7 +313,7 @@ class Discretization(object): return make_loopy_program( """{[iel,idof,j]: 0<=iel Date: Fri, 5 Jun 2020 11:21:06 -0500 Subject: [PATCH 011/106] Add Discretization.{empty,zeros}_like --- meshmode/discretization/__init__.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 03ff0d22..3af33abf 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -214,6 +214,8 @@ class Discretization(object): .. automethod:: empty .. automethod:: zeros + .. automethod:: empty_like + .. automethod:: zeros_like .. method:: nodes() @@ -308,6 +310,12 @@ class Discretization(object): return self._new_array(actx, actx.zeros, dtype=dtype) def num_reference_derivative(self, ref_axes, vec, actx: ArrayContext = None): + def empty_like(self, array: DOFArray): + return self.empty(array.array_context, dtype=array.entry_dtype) + + def zeros_like(self, array: DOFArray): + return self.zeros(array.array_context, dtype=array.entry_dtype) + @memoize_in(self, "reference_derivative_prg") def prg(): return make_loopy_program( -- GitLab From f975b2392c955b57953444f5d8b7a5311bc9eaf0 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 11:30:14 -0500 Subject: [PATCH 012/106] DOFArray: bring back array_context add freeze/thaw, entry_dtype --- meshmode/discretization/__init__.py | 109 +++++++++++++++++++++------- 1 file changed, 83 insertions(+), 26 deletions(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 3af33abf..5af1cb7c 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -22,8 +22,11 @@ THE SOFTWARE. import numpy as np -from pytools import memoize_in, memoize_method -from pytools.obj_array import make_obj_array +from functools import partial +from typing import Optional + +from pytools import memoize_in, memoize_method, single_valued +from pytools.obj_array import make_obj_array, oarray_vectorize from meshmode.array_context import ArrayContext, make_loopy_program __doc__ = """ @@ -31,6 +34,8 @@ __doc__ = """ .. autoclass:: InterpolatoryElementGroupBase .. autoclass:: ElementGroupFactory .. autoclass:: DOFArray +.. autofunction:: thaw +.. autofunction:: freeze .. autoclass:: Discretization """ @@ -168,32 +173,85 @@ class OrderBasedGroupFactory(ElementGroupFactory): # }}} -class DOFArray(object): - """ - .. attribute:: array_context +# {{{ DOFArray - An instance of :class:`ArrayContext`, or *None* if the arrays - in :attr:`group_arrays` are :meth:`ArrayContext.finalize`d. +class DOFArray(np.ndarray): + """This array type is a subclass of :class:`numpy.ndarray` that only + offers :meth:`from_list` as additional functionality. 
Its intended use + is for degree-of-freedom arrays associated with a discretization, + with one entry per element group. - .. attribute:: group_arrays + The main purpose of this class is to better describe the data structure, + i.e. when a :class:`DOFArray` occurs inside of a numpy object array, + the level representing the array of element groups will be more + easily recognized. - A list of arrays recognized by :attr:`array_context`, - one for each :class:`ElementGroupBase` of the :class:`Discretization` - that generated this :class:`DOFArray`. + .. attribute:: entry_dtype + .. automethod:: from_list """ - # FIXME: How much arithmetic? + def __new__(cls, actx: Optional[ArrayContext], input_array): + if not (actx is None or isinstance(actx, ArrayContext)): + raise TypeError("actx must be of type ArrayContext") + + result = np.asarray(input_array).view(cls) + if len(result.shape) != 1: + raise ValueError("DOFArray instances must have one-dimensional " + "shape, with one entry per element group") + + result.array_context = actx + return result + + def __array_finalize__(self, obj): + if obj is None: + return + self.array_context = getattr(obj, 'array_context', None) + + @property + def entry_dtype(self): + return single_valued(subary.dtype for subary in self.flat) + + @classmethod + def from_list(cls, actx: Optional[ArrayContext], res_list): + if not (actx is None or isinstance(actx, ArrayContext)): + raise TypeError("actx must be of type ArrayContext") + + result = np.empty((len(res_list),), dtype=object).view(cls) + result[:] = res_list + result.array_context = actx + return result + - def __init__(self, array_context, group_arrays): - self.array_context = array_context - self.group_arrays = group_arrays +def thaw(actx: ArrayContext, ary): + if (isinstance(ary, np.ndarray) + and ary.dtype.char == "O" + and not isinstance(ary, DOFArray)): + return oarray_vectorize(partial(thaw, actx), ary) - def copy(self, group_arrays=None): - return type(self)( - array_context=self.array_context, - group_arrays=( - group_arrays if group_arrays is not None - else self.group_arrays)) + if ary.array_context is not None: + raise ValueError("DOFArray passed to thaw is not frozen") + + return DOFArray.from_list(actx, [ + actx.thaw(subary) + for subary in ary + ]) + + +def freeze(ary): + if (isinstance(ary, np.ndarray) + and ary.dtype.char == "O" + and not isinstance(ary, DOFArray)): + return oarray_vectorize(freeze, ary) + + if ary.array_context is None: + raise ValueError("DOFArray passed to freeze is already frozen") + + return DOFArray.from_list(None, [ + ary.array_context.freeze(subary) + for subary in ary + ]) + +# }}} class Discretization(object): @@ -212,7 +270,6 @@ class Discretization(object): .. attribute :: groups .. automethod:: empty - .. automethod:: zeros .. automethod:: empty_like .. 
automethod:: zeros_like @@ -339,7 +396,7 @@ class Discretization(object): return mat - return DOFArray(actx, [ + return DOFArray.from_list(actx, [ actx.call_loopy( prg(), diff_mat=actx.from_numpy(get_mat(grp)), vec=vec[grp.index] )["result"] @@ -357,7 +414,7 @@ class Discretization(object): actx = self._setup_actx return DOFArray(None, [ - actx.finalize( + actx.freeze( actx.call_loopy( prg(), weights=actx.from_numpy(grp.weights) )["result"]) @@ -381,8 +438,8 @@ class Discretization(object): actx = self._setup_actx return make_obj_array([ - DOFArray(None, [ - actx.finalize( + DOFArray.from_list(None, [ + actx.freeze( actx.call_loopy( prg(), resampling_mat=actx.from_numpy( -- GitLab From 2513bf241ceab9dc29543e4adc709442367dfacf Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 11:31:10 -0500 Subject: [PATCH 013/106] Discretization: kill implicit ArrayContext creation --- meshmode/discretization/__init__.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 5af1cb7c..e9e024f8 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -299,18 +299,7 @@ class Discretization(object): """ if not isinstance(actx, ArrayContext): - from meshmode.array_context import PyOpenCLArrayContext - - import pyopencl as cl - if isinstance(actx, cl.Context): - from warnings import warn - warn("Passing a pyopencl Context to Discretization is deprecated. " - "Pass an ArrayContext instead.", - DeprecationWarning, - stacklevel=2) - actx = PyOpenCLArrayContext(cl.CommandQueue(actx)) - else: - raise TypeError("'actx' must be an ArrayContext") + raise TypeError("'actx' must be an ArrayContext") self.mesh = mesh groups = [] -- GitLab From 395a194772b05a78284f196884c53d43d4ae47e3 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 11:31:40 -0500 Subject: [PATCH 014/106] Discretization: ArrayContext type error reporting --- meshmode/discretization/__init__.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index e9e024f8..6825b538 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -344,6 +344,10 @@ class Discretization(object): vector of dtype :attr:`self.complex_dtype`. If *None* (the default), a real vector will be returned. """ + if not isinstance(actx, ArrayContext): + raise TypeError("'actx' must be an ArrayContext, not '%s'" + % type(actx).__name__) + return self._new_array(actx, actx.empty, dtype=dtype) def zeros(self, actx: ArrayContext, dtype=None): @@ -353,6 +357,10 @@ class Discretization(object): vector of dtype :attr:`self.complex_dtype`. If *None* (the default), a real vector will be returned. 
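# [Editorial sketch, not part of the patch.] End-to-end allocation with the
# reworked, ArrayContext-based Discretization interface shown above. The
# mesh-generation helper and the PolynomialWarpAndBlendGroupFactory import
# are assumed to keep their pre-existing meshmode locations and signatures.

import pyopencl as cl
from meshmode.array_context import PyOpenCLArrayContext
from meshmode.mesh.generation import generate_regular_rect_mesh
from meshmode.discretization import Discretization
from meshmode.discretization.poly_element import \
        PolynomialWarpAndBlendGroupFactory

actx = PyOpenCLArrayContext(cl.CommandQueue(cl.create_some_context()))
mesh = generate_regular_rect_mesh(a=(0, 0), b=(1, 1), n=(8, 8))
discr = Discretization(actx, mesh, PolynomialWarpAndBlendGroupFactory(3))

vec = discr.zeros(actx)          # a DOFArray: one array per element group
for grp, grp_ary in zip(discr.groups, vec):
    assert grp_ary.shape == (grp.nelements, grp.nunit_dofs)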
""" + if not isinstance(actx, ArrayContext): + raise TypeError("'actx' must be an ArrayContext, not '%s'" + % type(actx).__name__) + return self._new_array(actx, actx.zeros, dtype=dtype) def num_reference_derivative(self, ref_axes, vec, actx: ArrayContext = None): -- GitLab From ad68d8803fbc8db494368bd4b9f5d40a618da553 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 11:32:04 -0500 Subject: [PATCH 015/106] Discretization: don't allow passing different array contexts from the ones in the array --- meshmode/discretization/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 6825b538..964ac4b9 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -363,13 +363,13 @@ class Discretization(object): return self._new_array(actx, actx.zeros, dtype=dtype) - def num_reference_derivative(self, ref_axes, vec, actx: ArrayContext = None): def empty_like(self, array: DOFArray): return self.empty(array.array_context, dtype=array.entry_dtype) def zeros_like(self, array: DOFArray): return self.zeros(array.array_context, dtype=array.entry_dtype) + def num_reference_derivative(self, ref_axes, vec): @memoize_in(self, "reference_derivative_prg") def prg(): return make_loopy_program( @@ -379,8 +379,7 @@ class Discretization(object): "result[iel,idof] = sum(j, diff_mat[idof, j] * vec[iel, j])", name="diff") - if actx is None: - actx = vec.array_context + actx = vec.array_context def get_mat(grp): mat = None -- GitLab From 6ea8eb0a4c186d97d6da496f36b99b17760c3328 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 11:36:11 -0500 Subject: [PATCH 016/106] Remove (most) pyopencl imports in connection --- meshmode/discretization/connection/__init__.py | 3 --- meshmode/discretization/connection/direct.py | 2 -- meshmode/discretization/connection/face.py | 2 -- meshmode/discretization/connection/opposite_face.py | 2 -- meshmode/discretization/connection/same_mesh.py | 2 -- meshmode/discretization/visualization.py | 1 - 6 files changed, 12 deletions(-) diff --git a/meshmode/discretization/connection/__init__.py b/meshmode/discretization/connection/__init__.py index 5942c7e2..0e2aafbe 100644 --- a/meshmode/discretization/connection/__init__.py +++ b/meshmode/discretization/connection/__init__.py @@ -25,9 +25,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ -import pyopencl as cl -import pyopencl.array # noqa - from meshmode.discretization.connection.direct import ( InterpolationBatch, DiscretizationConnectionElementGroup, diff --git a/meshmode/discretization/connection/direct.py b/meshmode/discretization/connection/direct.py index 20f5cdb9..199465a0 100644 --- a/meshmode/discretization/connection/direct.py +++ b/meshmode/discretization/connection/direct.py @@ -25,8 +25,6 @@ THE SOFTWARE. 
from six.moves import range, zip import numpy as np -import pyopencl as cl -import pyopencl.array # noqa from loopy.version import MOST_RECENT_LANGUAGE_VERSION from pytools import memoize_method, memoize_in diff --git a/meshmode/discretization/connection/face.py b/meshmode/discretization/connection/face.py index f45a95c1..129d03f2 100644 --- a/meshmode/discretization/connection/face.py +++ b/meshmode/discretization/connection/face.py @@ -28,8 +28,6 @@ from six.moves import range, zip from pytools import Record import numpy as np -import pyopencl as cl -import pyopencl.array # noqa import modepy as mp import logging diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index e084525b..a7f4383e 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -26,8 +26,6 @@ from six.moves import range import numpy as np import numpy.linalg as la -import pyopencl as cl -import pyopencl.array # noqa import logging logger = logging.getLogger(__name__) diff --git a/meshmode/discretization/connection/same_mesh.py b/meshmode/discretization/connection/same_mesh.py index e7b781cd..afa07dad 100644 --- a/meshmode/discretization/connection/same_mesh.py +++ b/meshmode/discretization/connection/same_mesh.py @@ -23,8 +23,6 @@ THE SOFTWARE. """ import numpy as np -import pyopencl as cl -import pyopencl.array # noqa # {{{ same-mesh constructor diff --git a/meshmode/discretization/visualization.py b/meshmode/discretization/visualization.py index ae34e51e..45f824a3 100644 --- a/meshmode/discretization/visualization.py +++ b/meshmode/discretization/visualization.py @@ -25,7 +25,6 @@ THE SOFTWARE. from six.moves import range import numpy as np from pytools import memoize_method, Record -import pyopencl as cl __doc__ = """ -- GitLab From 5f4618e6a79849e0a511a73e27d0f5826135753a Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 11:39:48 -0500 Subject: [PATCH 017/106] Refactor DiscretizationConnection, DirectDiscretizationConnection for ArrayContext --- meshmode/discretization/connection/direct.py | 117 +++++++++---------- 1 file changed, 54 insertions(+), 63 deletions(-) diff --git a/meshmode/discretization/connection/direct.py b/meshmode/discretization/connection/direct.py index 199465a0..018e651f 100644 --- a/meshmode/discretization/connection/direct.py +++ b/meshmode/discretization/connection/direct.py @@ -26,8 +26,9 @@ from six.moves import range, zip import numpy as np -from loopy.version import MOST_RECENT_LANGUAGE_VERSION -from pytools import memoize_method, memoize_in +import loopy as lp +from pytools import memoize_in, keyed_memoize_method +from meshmode.array_context import ArrayContext, make_loopy_program # {{{ interpolation batch @@ -132,12 +133,6 @@ class DiscretizationConnection(object): .. 
automethod:: __call__ """ def __init__(self, from_discr, to_discr, is_surjective): - if from_discr.cl_context != to_discr.cl_context: - raise ValueError("from_discr and to_discr must live in the " - "same OpenCL context") - - self.cl_context = from_discr.cl_context - if from_discr.mesh.vertex_id_dtype != to_discr.mesh.vertex_id_dtype: raise ValueError("from_discr and to_discr must agree on the " "vertex_id_dtype") @@ -186,8 +181,9 @@ class DirectDiscretizationConnection(DiscretizationConnection): self.groups = groups - @memoize_method - def _resample_matrix(self, to_group_index, ibatch_index): + @keyed_memoize_method(key=lambda actx, to_group_index, ibatch_index: + (to_group_index, ibatch_index)) + def _resample_matrix(self, actx: ArrayContext, to_group_index, ibatch_index): import modepy as mp ibatch = self.groups[to_group_index].batches[ibatch_index] from_grp = self.from_discr.groups[ibatch.from_group_index] @@ -212,11 +208,12 @@ class DirectDiscretizationConnection(DiscretizationConnection): from_grp.basis(), ibatch.result_unit_nodes, from_grp.unit_nodes) - with cl.CommandQueue(self.cl_context) as queue: - return cl.array.to_device(queue, result).with_queue(None) + return actx.freeze(actx.from_numpy(result)) - @memoize_method - def _resample_point_pick_indices(self, to_group_index, ibatch_index, + @keyed_memoize_method(lambda actx, to_group_index, ibatch_index, + tol_multiplier=None: (to_group_index, ibatch_index, tol_multiplier)) + def _resample_point_pick_indices(self, actx: ArrayContext, + to_group_index, ibatch_index, tol_multiplier=None): """If :meth:`_resample_matrix` *R* is a row subset of a permutation matrix *P*, return the index subset I so that, loosely, ``x[I] == R @ x``. @@ -225,9 +222,8 @@ class DirectDiscretizationConnection(DiscretizationConnection): :class:`pyopencl.array.Array` containing the index subset. """ - with cl.CommandQueue(self.cl_context) as queue: - mat = self._resample_matrix(to_group_index, ibatch_index).get( - queue=queue) + mat = actx.to_numpy(actx.thaw( + self._resample_matrix(actx, to_group_index, ibatch_index))) nrows, ncols = mat.shape result = np.zeros(nrows, dtype=self.to_discr.mesh.element_id_dtype) @@ -249,8 +245,7 @@ class DirectDiscretizationConnection(DiscretizationConnection): one_index, = one_indices result[irow] = one_index - with cl.CommandQueue(self.cl_context) as queue: - return cl.array.to_device(queue, result).with_queue(None) + return actx.freeze(actx.from_numpy(result)) def full_resample_matrix(self, queue): """Build a dense matrix representing this discretization connection. 
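# [Editorial sketch, not part of the patch.] In plain numpy terms (purely
# illustrative names and shapes), each InterpolationBatch that this dense
# matrix assembles performs a gather -> matrix-apply -> scatter:

import numpy as np

nelem_from, nelem_to, ndofs_from, ndofs_to = 5, 3, 4, 6
vec = np.random.rand(nelem_from, ndofs_from)        # (nelements, nunit_dofs)
resample_mat = np.random.rand(ndofs_to, ndofs_from)
from_element_indices = np.array([0, 2, 4])
to_element_indices = np.array([0, 1, 2])

result = np.zeros((nelem_to, ndofs_to))
result[to_element_indices] = np.einsum(
        "ij,ej->ei", resample_mat, vec[from_element_indices])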
@@ -264,9 +259,8 @@ class DirectDiscretizationConnection(DiscretizationConnection): @memoize_in(self, "oversample_mat_knl") def knl(): - import loopy as lp - knl = lp.make_kernel( - """{[k,i,j]: + knl = make_loopy_program( + """{[k, i, j]: 0<=k Date: Fri, 5 Jun 2020 11:55:26 -0500 Subject: [PATCH 018/106] Refactor connection.face for ArrayContext --- meshmode/discretization/connection/face.py | 119 ++++++++++----------- 1 file changed, 57 insertions(+), 62 deletions(-) diff --git a/meshmode/discretization/connection/face.py b/meshmode/discretization/connection/face.py index 129d03f2..6bba284a 100644 --- a/meshmode/discretization/connection/face.py +++ b/meshmode/discretization/connection/face.py @@ -61,7 +61,7 @@ class _ConnectionBatchData(Record): pass -def _build_boundary_connection(queue, vol_discr, bdry_discr, connection_data, +def _build_boundary_connection(actx, vol_discr, bdry_discr, connection_data, per_face_groups): from meshmode.discretization.connection.direct import ( InterpolationBatch, @@ -85,13 +85,11 @@ def _build_boundary_connection(queue, vol_discr, bdry_discr, connection_data, batches.append( InterpolationBatch( from_group_index=igrp, - from_element_indices=cl.array.to_device( - queue, - data.group_source_element_indices) + from_element_indices=actx.freeze(actx.from_numpy( + data.group_source_element_indices)) .with_queue(None), - to_element_indices=cl.array.to_device( - queue, - data.group_target_element_indices) + to_element_indices=actx.freeze(actx.from_numpy( + data.group_target_element_indices)) .with_queue(None), result_unit_nodes=result_unit_nodes, to_element_face=face_id @@ -158,7 +156,7 @@ def _get_face_vertices(mesh, boundary_tag): # }}} -def make_face_restriction(discr, group_factory, boundary_tag, +def make_face_restriction(actx, discr, group_factory, boundary_tag, per_face_groups=False): """Create a mesh, a discretization and a connection to restrict a function on *discr* to its values on the edges of element faces @@ -371,12 +369,11 @@ def make_face_restriction(discr, group_factory, boundary_tag, from meshmode.discretization import Discretization bdry_discr = Discretization( - discr.cl_context, bdry_mesh, group_factory) + actx, bdry_mesh, group_factory) - with cl.CommandQueue(discr.cl_context) as queue: - connection = _build_boundary_connection( - queue, discr, bdry_discr, connection_data, - per_face_groups) + connection = _build_boundary_connection( + actx, discr, bdry_discr, connection_data, + per_face_groups) logger.info("building face restriction: done") @@ -387,7 +384,7 @@ def make_face_restriction(discr, group_factory, boundary_tag, # {{{ face -> all_faces connection -def make_face_to_all_faces_embedding(faces_connection, all_faces_discr, +def make_face_to_all_faces_embedding(actx, faces_connection, all_faces_discr, from_discr=None): """Return a :class:`meshmode.discretization.connection.DiscretizationConnection` @@ -435,61 +432,59 @@ def make_face_to_all_faces_embedding(faces_connection, all_faces_discr, i_faces_grp = 0 - with cl.CommandQueue(vol_discr.cl_context) as queue: - groups = [] - for ivol_grp, vol_grp in enumerate(vol_discr.groups): - batches = [] + groups = [] + for ivol_grp, vol_grp in enumerate(vol_discr.groups): + batches = [] - nfaces = vol_grp.mesh_el_group.nfaces - for iface in range(nfaces): - all_faces_grp = all_faces_discr.groups[i_faces_grp] + nfaces = vol_grp.mesh_el_group.nfaces + for iface in range(nfaces): + all_faces_grp = all_faces_discr.groups[i_faces_grp] - if per_face_groups: - assert 
len(faces_connection.groups[i_faces_grp].batches) == 1 - else: - assert (len(faces_connection.groups[i_faces_grp].batches) - == nfaces) + if per_face_groups: + assert len(faces_connection.groups[i_faces_grp].batches) == 1 + else: + assert (len(faces_connection.groups[i_faces_grp].batches) + == nfaces) - assert np.array_equal( - from_discr.groups[i_faces_grp].unit_nodes, - all_faces_grp.unit_nodes) + assert np.array_equal( + from_discr.groups[i_faces_grp].unit_nodes, + all_faces_grp.unit_nodes) - # {{{ find src_batch + # {{{ find src_batch - src_batches = faces_connection.groups[i_faces_grp].batches - if per_face_groups: - src_batch, = src_batches - else: - src_batch = src_batches[iface] - del src_batches + src_batches = faces_connection.groups[i_faces_grp].batches + if per_face_groups: + src_batch, = src_batches + else: + src_batch = src_batches[iface] + del src_batches - # }}} + # }}} - if per_face_groups: - to_element_indices = src_batch.from_element_indices - else: - assert all_faces_grp.nelements == nfaces * vol_grp.nelements - - to_element_indices = ( - vol_grp.nelements*iface - + src_batch.from_element_indices.with_queue(queue) - ).with_queue(None) - - batches.append( - InterpolationBatch( - from_group_index=i_faces_grp, - from_element_indices=src_batch.to_element_indices, - to_element_indices=to_element_indices, - result_unit_nodes=all_faces_grp.unit_nodes, - to_element_face=None)) - - is_last_face = iface + 1 == nfaces - if per_face_groups or is_last_face: - groups.append( - DiscretizationConnectionElementGroup(batches=batches)) - batches = [] - - i_faces_grp += 1 + if per_face_groups: + to_element_indices = src_batch.from_element_indices + else: + assert all_faces_grp.nelements == nfaces * vol_grp.nelements + + to_element_indices = actx.freeze( + vol_grp.nelements*iface + + actx.thaw(src_batch.from_element_indices)) + + batches.append( + InterpolationBatch( + from_group_index=i_faces_grp, + from_element_indices=src_batch.to_element_indices, + to_element_indices=to_element_indices, + result_unit_nodes=all_faces_grp.unit_nodes, + to_element_face=None)) + + is_last_face = iface + 1 == nfaces + if per_face_groups or is_last_face: + groups.append( + DiscretizationConnectionElementGroup(batches=batches)) + batches = [] + + i_faces_grp += 1 return DirectDiscretizationConnection( from_discr, -- GitLab From bf8015128f1ebd5287bd654afa9bdab63b0b194e Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 11:56:34 -0500 Subject: [PATCH 019/106] Visualization: Remove some py2 gunk --- meshmode/discretization/visualization.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/meshmode/discretization/visualization.py b/meshmode/discretization/visualization.py index 45f824a3..1415de03 100644 --- a/meshmode/discretization/visualization.py +++ b/meshmode/discretization/visualization.py @@ -1,5 +1,3 @@ -from __future__ import division, absolute_import - __copyright__ = "Copyright (C) 2014 Andreas Kloeckner" __license__ = """ @@ -22,7 +20,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" -from six.moves import range import numpy as np from pytools import memoize_method, Record -- GitLab From aa055df1a1e35173cf11a89187f9e6a2b44dc8bf Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 12:00:31 -0500 Subject: [PATCH 020/106] Refactor connection.same_mesh for ArrayContext --- .../discretization/connection/same_mesh.py | 35 +++++++++---------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/meshmode/discretization/connection/same_mesh.py b/meshmode/discretization/connection/same_mesh.py index afa07dad..355d5349 100644 --- a/meshmode/discretization/connection/same_mesh.py +++ b/meshmode/discretization/connection/same_mesh.py @@ -27,7 +27,7 @@ import numpy as np # {{{ same-mesh constructor -def make_same_mesh_connection(to_discr, from_discr): +def make_same_mesh_connection(actx, to_discr, from_discr): from meshmode.discretization.connection.direct import ( InterpolationBatch, DiscretizationConnectionElementGroup, @@ -37,23 +37,22 @@ def make_same_mesh_connection(to_discr, from_discr): raise ValueError("from_discr and to_discr must be based on " "the same mesh") - assert to_discr.cl_context == from_discr.cl_context - - with cl.CommandQueue(to_discr.cl_context) as queue: - groups = [] - for igrp, (fgrp, tgrp) in enumerate(zip(from_discr.groups, to_discr.groups)): - all_elements = cl.array.arange(queue, - fgrp.nelements, - dtype=np.intp).with_queue(None) - ibatch = InterpolationBatch( - from_group_index=igrp, - from_element_indices=all_elements, - to_element_indices=all_elements, - result_unit_nodes=tgrp.unit_nodes, - to_element_face=None) - - groups.append( - DiscretizationConnectionElementGroup([ibatch])) + groups = [] + for igrp, (fgrp, tgrp) in enumerate(zip(from_discr.groups, to_discr.groups)): + all_elements = actx.freeze( + actx.from_numpy( + np.arange( + fgrp.nelements, + dtype=np.intp))) + ibatch = InterpolationBatch( + from_group_index=igrp, + from_element_indices=all_elements, + to_element_indices=all_elements, + result_unit_nodes=tgrp.unit_nodes, + to_element_face=None) + + groups.append( + DiscretizationConnectionElementGroup([ibatch])) return DirectDiscretizationConnection( from_discr, to_discr, groups, -- GitLab From a77244b6e7fd2fcc23674fbeb89b1ddaf65d37c2 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 12:25:48 -0500 Subject: [PATCH 021/106] Refactor connection.opposite_face --- .../connection/opposite_face.py | 197 +++++++++--------- 1 file changed, 102 insertions(+), 95 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index a7f4383e..a0af22fa 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -31,36 +31,40 @@ import logging logger = logging.getLogger(__name__) +def freeze_from_numpy(self, array): + return self.freeze(self.from_numpy(array)) + + +def thaw_to_numpy(self, array): + return self.to_numpy(self.thaw(array)) + + # {{{ _make_cross_face_batches -def _make_cross_face_batches(queue, +def _make_cross_face_batches(actx, tgt_bdry_discr, src_bdry_discr, i_tgt_grp, i_src_grp, tgt_bdry_element_indices, src_bdry_element_indices): - def to_dev(ary): - return cl.array.to_device(queue, ary, array_queue=None) from meshmode.discretization.connection.direct import InterpolationBatch if tgt_bdry_discr.dim == 0: yield InterpolationBatch( from_group_index=i_src_grp, - from_element_indices=to_dev(src_bdry_element_indices), - to_element_indices=to_dev(tgt_bdry_element_indices), + 
from_element_indices=freeze_from_numpy(src_bdry_element_indices), + to_element_indices=freeze_from_numpy(tgt_bdry_element_indices), result_unit_nodes=src_bdry_discr.groups[i_src_grp].unit_nodes, to_element_face=None) return - # FIXME: This should view-then-transfer - # (but PyOpenCL doesn't do non-contiguous transfers for now). - tgt_bdry_nodes = (tgt_bdry_discr.groups[i_tgt_grp] - .view(tgt_bdry_discr.nodes().get(queue=queue)) - [:, tgt_bdry_element_indices]) + tgt_bdry_nodes = np.array([ + thaw_to_numpy(actx, ary)[tgt_bdry_element_indices] + for ary in tgt_bdry_discr.nodes()[i_tgt_grp] + ]) - # FIXME: This should view-then-transfer - # (but PyOpenCL doesn't do non-contiguous transfers for now). - src_bdry_nodes = (src_bdry_discr.groups[i_src_grp] - .view(src_bdry_discr.nodes().get(queue=queue)) - [:, src_bdry_element_indices]) + src_bdry_nodes = np.array([ + thaw_to_numpy(actx, ary)[src_bdry_element_indices] + for ary in src_bdry_discr.nodes()[i_src_grp] + ]) tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps @@ -253,8 +257,10 @@ def _make_cross_face_batches(queue, from meshmode.discretization.connection.direct import InterpolationBatch yield InterpolationBatch( from_group_index=i_src_grp, - from_element_indices=to_dev(src_bdry_element_indices[close_els]), - to_element_indices=to_dev(tgt_bdry_element_indices[close_els]), + from_element_indices=freeze_from_numpy( + actx, src_bdry_element_indices[close_els]), + to_element_indices=freeze_from_numpy( + actx, tgt_bdry_element_indices[close_els]), result_unit_nodes=template_unit_nodes, to_element_face=None) @@ -274,7 +280,7 @@ def _find_ibatch_for_face(vbc_tgt_grp_batches, iface): return vbc_tgt_grp_face_batch -def _make_bdry_el_lookup_table(queue, connection, igrp): +def _make_bdry_el_lookup_table(actx, connection, igrp): """Given a volume-to-boundary connection as *connection*, return a table of shape ``(from_nelements, nfaces)`` to look up the element number of the boundary element for that face. @@ -287,9 +293,9 @@ def _make_bdry_el_lookup_table(queue, connection, igrp): iel_lookup.fill(-1) for ibatch, batch in enumerate(connection.groups[igrp].batches): - from_element_indices = batch.from_element_indices.get(queue=queue) + from_element_indices = thaw_to_numpy(actx, batch.from_element_indices) iel_lookup[from_element_indices, batch.to_element_face] = \ - batch.to_element_indices.get(queue=queue) + thaw_to_numpy(actx, batch.to_element_indices) return iel_lookup @@ -298,7 +304,7 @@ def _make_bdry_el_lookup_table(queue, connection, igrp): # {{{ make_opposite_face_connection -def make_opposite_face_connection(volume_to_bdry_conn): +def make_opposite_face_connection(actx, volume_to_bdry_conn): """Given a boundary restriction connection *volume_to_bdry_conn*, return a :class:`DirectDiscretizationConnection` that performs data exchange across opposite faces. 
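# [Editorial sketch, not part of the patch; `actx`, `vol_discr` and
# `group_factory` are assumed to exist, and FACE_RESTR_INTERIOR is assumed to
# remain importable from meshmode.discretization.connection as in prior
# versions.] Typical call sequence with the new ArrayContext-passing
# signatures introduced in these patches:
#
#     from meshmode.discretization.connection import (
#             make_face_restriction, make_opposite_face_connection,
#             FACE_RESTR_INTERIOR)
#
#     int_faces_conn = make_face_restriction(
#             actx, vol_discr, group_factory, FACE_RESTR_INTERIOR)
#     opp_faces_conn = make_opposite_face_connection(actx, int_faces_conn)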
@@ -321,103 +327,104 @@ def make_opposite_face_connection(volume_to_bdry_conn): # One interpolation batch in this connection corresponds # to a key (i_tgt_grp,) (i_src_grp, i_face_tgt,) - with cl.CommandQueue(vol_discr.cl_context) as queue: - # a list of batches for each group - groups = [[] for i_tgt_grp in range(ngrps)] + # a list of batches for each group + groups = [[] for i_tgt_grp in range(ngrps)] - for i_src_grp in range(ngrps): - src_grp_el_lookup = _make_bdry_el_lookup_table( - queue, volume_to_bdry_conn, i_src_grp) + for i_src_grp in range(ngrps): + src_grp_el_lookup = _make_bdry_el_lookup_table( + actx, volume_to_bdry_conn, i_src_grp) - for i_tgt_grp in range(ngrps): - vbc_tgt_grp_batches = volume_to_bdry_conn.groups[i_tgt_grp].batches + for i_tgt_grp in range(ngrps): + vbc_tgt_grp_batches = volume_to_bdry_conn.groups[i_tgt_grp].batches - adj = vol_mesh.facial_adjacency_groups[i_tgt_grp][i_src_grp] + adj = vol_mesh.facial_adjacency_groups[i_tgt_grp][i_src_grp] - for i_face_tgt in range(vol_mesh.groups[i_tgt_grp].nfaces): - vbc_tgt_grp_face_batch = _find_ibatch_for_face( - vbc_tgt_grp_batches, i_face_tgt) + for i_face_tgt in range(vol_mesh.groups[i_tgt_grp].nfaces): + vbc_tgt_grp_face_batch = _find_ibatch_for_face( + vbc_tgt_grp_batches, i_face_tgt) - # {{{ index wrangling + # {{{ index wrangling - # The elements in the adjacency group will be a subset of - # the elements in the restriction interpolation batch: - # Imagine an inter-group boundary. The volume-to-boundary - # connection will include all faces as targets, whereas - # there will be separate adjacency groups for intra- and - # inter-group connections. + # The elements in the adjacency group will be a subset of + # the elements in the restriction interpolation batch: + # Imagine an inter-group boundary. The volume-to-boundary + # connection will include all faces as targets, whereas + # there will be separate adjacency groups for intra- and + # inter-group connections. - adj_tgt_flags = adj.element_faces == i_face_tgt - adj_els = adj.elements[adj_tgt_flags] - if adj_els.size == 0: - # NOTE: this case can happen for inter-group boundaries - # when all elements are adjacent on the same face - # index, so all other ones will be empty - continue + adj_tgt_flags = adj.element_faces == i_face_tgt + adj_els = adj.elements[adj_tgt_flags] + if adj_els.size == 0: + # NOTE: this case can happen for inter-group boundaries + # when all elements are adjacent on the same face + # index, so all other ones will be empty + continue - vbc_els = vbc_tgt_grp_face_batch.from_element_indices.get(queue) + vbc_els = thaw_to_numpy(actx, + vbc_tgt_grp_face_batch.from_element_indices) - if len(adj_els) == len(vbc_els): - # Same length: assert (below) that the two use the same - # ordering. - vbc_used_els = slice(None) + if len(adj_els) == len(vbc_els): + # Same length: assert (below) that the two use the same + # ordering. + vbc_used_els = slice(None) - else: - # Genuine subset: figure out an index mapping. - vbc_els_sort_idx = np.argsort(vbc_els) - vbc_used_els = vbc_els_sort_idx[np.searchsorted( - vbc_els, adj_els, sorter=vbc_els_sort_idx - )] + else: + # Genuine subset: figure out an index mapping. 
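# [Editorial comment, not part of the patch.] The two statements below find,
# for each entry of adj_els, its position in the (unsorted) vbc_els array via
# the argsort/searchsorted idiom. Worked example with illustrative values:
#
#     vbc_els = np.array([7, 3, 9, 5]); adj_els = np.array([9, 3])
#     idx = np.argsort(vbc_els)                                  # -> [1, 3, 0, 2]
#     pos = idx[np.searchsorted(vbc_els, adj_els, sorter=idx)]   # -> [2, 1]
#     assert (vbc_els[pos] == adj_els).all()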
+ vbc_els_sort_idx = np.argsort(vbc_els) + vbc_used_els = vbc_els_sort_idx[np.searchsorted( + vbc_els, adj_els, sorter=vbc_els_sort_idx + )] - assert np.array_equal(vbc_els[vbc_used_els], adj_els) + assert np.array_equal(vbc_els[vbc_used_els], adj_els) - # find to_element_indices + # find to_element_indices - tgt_bdry_element_indices = ( - vbc_tgt_grp_face_batch.to_element_indices - .get(queue=queue)[vbc_used_els]) + tgt_bdry_element_indices = thaw_to_numpy( + actx, + vbc_tgt_grp_face_batch.to_element_indices + )[vbc_used_els] - # find from_element_indices + # find from_element_indices - src_vol_element_indices = adj.neighbors[adj_tgt_flags] - src_element_faces = adj.neighbor_faces[adj_tgt_flags] + src_vol_element_indices = adj.neighbors[adj_tgt_flags] + src_element_faces = adj.neighbor_faces[adj_tgt_flags] - src_bdry_element_indices = src_grp_el_lookup[ - src_vol_element_indices, src_element_faces] + src_bdry_element_indices = src_grp_el_lookup[ + src_vol_element_indices, src_element_faces] - # }}} + # }}} - # {{{ visualization (for debugging) + # {{{ visualization (for debugging) - if 0: - print("TVE", adj.elements[adj_tgt_flags]) - print("TBE", tgt_bdry_element_indices) - print("FVE", src_vol_element_indices) - from meshmode.mesh.visualization import draw_2d_mesh - import matplotlib.pyplot as pt - draw_2d_mesh(vol_discr.mesh, draw_element_numbers=True, - set_bounding_box=True, - draw_vertex_numbers=False, - draw_face_numbers=True, - fill=None) - pt.figure() + if 0: + print("TVE", adj.elements[adj_tgt_flags]) + print("TBE", tgt_bdry_element_indices) + print("FVE", src_vol_element_indices) + from meshmode.mesh.visualization import draw_2d_mesh + import matplotlib.pyplot as pt + draw_2d_mesh(vol_discr.mesh, draw_element_numbers=True, + set_bounding_box=True, + draw_vertex_numbers=False, + draw_face_numbers=True, + fill=None) + pt.figure() - draw_2d_mesh(bdry_discr.mesh, draw_element_numbers=True, - set_bounding_box=True, - draw_vertex_numbers=False, - draw_face_numbers=True, - fill=None) + draw_2d_mesh(bdry_discr.mesh, draw_element_numbers=True, + set_bounding_box=True, + draw_vertex_numbers=False, + draw_face_numbers=True, + fill=None) - pt.show() + pt.show() - # }}} + # }}} - batches = _make_cross_face_batches(queue, - bdry_discr, bdry_discr, - i_tgt_grp, i_src_grp, - tgt_bdry_element_indices, - src_bdry_element_indices) - groups[i_tgt_grp].extend(batches) + batches = _make_cross_face_batches(actx, + bdry_discr, bdry_discr, + i_tgt_grp, i_src_grp, + tgt_bdry_element_indices, + src_bdry_element_indices) + groups[i_tgt_grp].extend(batches) from meshmode.discretization.connection import ( DirectDiscretizationConnection, DiscretizationConnectionElementGroup) -- GitLab From 63c4a61d5e050f123380f58a25df4a265d0670d5 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 12:39:09 -0500 Subject: [PATCH 022/106] Refactor simple-dg example for ArrayContext --- examples/simple-dg.py | 232 ++++++++++++++++++++---------------------- 1 file changed, 111 insertions(+), 121 deletions(-) diff --git a/examples/simple-dg.py b/examples/simple-dg.py index 361f6d39..b2393068 100644 --- a/examples/simple-dg.py +++ b/examples/simple-dg.py @@ -27,15 +27,13 @@ import numpy as np import numpy.linalg as la # noqa import pyopencl as cl import pyopencl.array as cla # noqa -import pyopencl.clmath as clmath from pytools import memoize_method, memoize_in from pytools.obj_array import ( - join_fields, make_obj_array, - with_object_array_or_scalar, - is_obj_array) -import loopy as lp + flat_obj_array, make_obj_array, + 
oarray_vectorize, oarray_vectorized) from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa -from meshmode.discretization import DOFArray # noqa: F401 +from meshmode.discretization import DOFArray, freeze, thaw +from meshmode.array_context import PyOpenCLArrayContext, make_loopy_program # Features lost vs. https://github.com/inducer/grudge: @@ -45,46 +43,39 @@ from meshmode.discretization import DOFArray # noqa: F401 # - distributed-memory -def with_queue(queue, ary): - def dof_array_with_queue(dof_array): - return dof_array.copy(group_arrays=[ - grp_ary.with_queue(queue) - for grp_ary in dof_array.group_arrays]) - - return with_object_array_or_scalar(dof_array_with_queue, ary) - - -def without_queue(ary): - return with_queue(None, ary) - - # {{{ discretization -def parametrization_derivative(queue, discr): +def parametrization_derivative(actx, discr): + thawed_nodes = thaw(actx, discr.nodes()) + result = np.zeros((discr.ambient_dim, discr.dim), dtype=object) for iambient in range(discr.ambient_dim): for idim in range(discr.dim): result[iambient, idim] = discr.num_reference_derivative( - queue, (idim,), discr.nodes()[iambient]) + (idim,), thawed_nodes[iambient]) return result class DGDiscretization: - def __init__(self, cl_ctx, mesh, order): + def __init__(self, actx, mesh, order): self.order = order from meshmode.discretization import Discretization from meshmode.discretization.poly_element import \ PolynomialWarpAndBlendGroupFactory self.group_factory = PolynomialWarpAndBlendGroupFactory(order=order) - self.volume_discr = Discretization(cl_ctx, mesh, self.group_factory) + self.volume_discr = Discretization(actx, mesh, self.group_factory) assert self.volume_discr.dim == 2 @property - def cl_context(self): - return self.volume_discr.cl_context + def _setup_actx(self): + return self.volume_discr._setup_actx + + @property + def array_context(self): + return self.volume_discr.array_context @property def dim(self): @@ -96,6 +87,7 @@ class DGDiscretization: def boundary_connection(self, boundary_tag): from meshmode.discretization.connection import make_face_restriction return make_face_restriction( + self.volume_discr._setup_actx, self.volume_discr, self.group_factory, boundary_tag=boundary_tag) @@ -105,6 +97,7 @@ class DGDiscretization: from meshmode.discretization.connection import ( make_face_restriction, FACE_RESTR_INTERIOR) return make_face_restriction( + self.volume_discr._setup_actx, self.volume_discr, self.group_factory, FACE_RESTR_INTERIOR, @@ -115,13 +108,15 @@ class DGDiscretization: from meshmode.discretization.connection import \ make_opposite_face_connection - return make_opposite_face_connection(self.interior_faces_connection()) + return make_opposite_face_connection( + self._setup_actx, self.interior_faces_connection()) @memoize_method def all_faces_connection(self): from meshmode.discretization.connection import ( make_face_restriction, FACE_RESTR_ALL) return make_face_restriction( + self.volume_discr._setup_actx, self.volume_discr, self.group_factory, FACE_RESTR_ALL, @@ -134,7 +129,7 @@ class DGDiscretization: faces_conn = self.get_connection("vol", where) return make_face_to_all_faces_embedding( - faces_conn, self.get_discr("all_faces")) + self._setup_actx, faces_conn, self.get_discr("all_faces")) def get_connection(self, src, tgt): src_tgt = (src, tgt) @@ -153,11 +148,13 @@ class DGDiscretization: raise ValueError(f"locations '{src}'->'{tgt}' not understood") def interp(self, src, tgt, vec): - if is_obj_array(vec): - return with_object_array_or_scalar( + if (isinstance(vec, 
np.ndarray) + and vec.dtype.char == "O" + and not isinstance(vec, DOFArray)): + return oarray_vectorize( lambda el: self.interp(src, tgt, el), vec) - return self.get_connection(src, tgt)(vec.queue, vec) + return self.get_connection(src, tgt)(vec) def get_discr(self, where): if where == "vol": @@ -175,40 +172,35 @@ class DGDiscretization: @memoize_method def parametrization_derivative(self): - with cl.CommandQueue(self.cl_context) as queue: - return without_queue( - parametrization_derivative(queue, self.volume_discr)) + return freeze( + parametrization_derivative(self._setup_actx, self.volume_discr)) @memoize_method def vol_jacobian(self): - with cl.CommandQueue(self.cl_context) as queue: - [a, b], [c, d] = with_queue(queue, self.parametrization_derivative()) - return (a*d-b*c).with_queue(None) + [a, b], [c, d] = thaw(self._setup_actx, self.parametrization_derivative()) + return freeze(a*d-b*c) @memoize_method def inverse_parametrization_derivative(self): - with cl.CommandQueue(self.cl_context) as queue: - [a, b], [c, d] = with_queue(queue, self.parametrization_derivative()) + [a, b], [c, d] = thaw(self._setup_actx, self.parametrization_derivative()) - result = np.zeros((2, 2), dtype=object) - det = a*d-b*c - result[0, 0] = d/det - result[0, 1] = -b/det - result[1, 0] = -c/det - result[1, 1] = a/det + result = np.zeros((2, 2), dtype=object) + det = a*d-b*c + result[0, 0] = d/det + result[0, 1] = -b/det + result[1, 0] = -c/det + result[1, 1] = a/det - return without_queue(result) + return freeze(result) - def zeros(self, queue): - return self.volume_discr.zeros(queue) + def zeros(self, actx): + return self.volume_discr.zeros(actx) def grad(self, vec): ipder = self.inverse_parametrization_derivative() - queue = vec.queue dref = [ - self.volume_discr.num_reference_derivative( - queue, (idim,), vec).with_queue(queue) + self.volume_discr.num_reference_derivative((idim,), vec) for idim in range(self.volume_discr.dim)] return make_obj_array([ @@ -223,22 +215,18 @@ class DGDiscretization: def normal(self, where): bdry_discr = self.get_discr(where) - with cl.CommandQueue(self.cl_context) as queue: - ((a,), (b,)) = with_queue( - queue, parametrization_derivative(queue, bdry_discr)) + ((a,), (b,)) = parametrization_derivative(self._setup_actx, bdry_discr) - nrm = 1/(a**2+b**2)**0.5 - return without_queue(join_fields(b*nrm, -a*nrm)) + nrm = 1/(a**2+b**2)**0.5 + return freeze(flat_obj_array(b*nrm, -a*nrm)) @memoize_method def face_jacobian(self, where): bdry_discr = self.get_discr(where) - with cl.CommandQueue(self.cl_context) as queue: - ((a,), (b,)) = with_queue(queue, - parametrization_derivative(queue, bdry_discr)) + ((a,), (b,)) = parametrization_derivative(self._setup_actx, bdry_discr) - return ((a**2 + b**2)**0.5).with_queue(None) + return freeze((a**2 + b**2)**0.5) @memoize_method def get_inverse_mass_matrix(self, grp, dtype): @@ -247,37 +235,36 @@ class DGDiscretization: grp.basis(), grp.unit_nodes) - with cl.CommandQueue(self.cl_context) as queue: - return (cla.to_device(queue, matrix) - .with_queue(None)) + actx = self._setup_actx + return actx.freeze(actx.from_numpy(matrix)) def inverse_mass(self, vec): - if is_obj_array(vec): - return with_object_array_or_scalar( + if (isinstance(vec, np.ndarray) + and vec.dtype.char == "O" + and not isinstance(vec, DOFArray)): + return oarray_vectorize( lambda el: self.inverse_mass(el), vec) @memoize_in(self, "elwise_linear_knl") def knl(): - knl = lp.make_kernel( - """{[k,i,j]: - 0<=k Date: Fri, 5 Jun 2020 23:45:13 -0500 Subject: [PATCH 023/106] Track 
oarray -> obj_array rename in pytools obj_array revamp --- examples/simple-dg.py | 12 ++++++------ meshmode/discretization/__init__.py | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/simple-dg.py b/examples/simple-dg.py index b2393068..172d1b25 100644 --- a/examples/simple-dg.py +++ b/examples/simple-dg.py @@ -30,7 +30,7 @@ import pyopencl.array as cla # noqa from pytools import memoize_method, memoize_in from pytools.obj_array import ( flat_obj_array, make_obj_array, - oarray_vectorize, oarray_vectorized) + obj_array_vectorize, obj_array_vectorized) from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa from meshmode.discretization import DOFArray, freeze, thaw from meshmode.array_context import PyOpenCLArrayContext, make_loopy_program @@ -151,7 +151,7 @@ class DGDiscretization: if (isinstance(vec, np.ndarray) and vec.dtype.char == "O" and not isinstance(vec, DOFArray)): - return oarray_vectorize( + return obj_array_vectorize( lambda el: self.interp(src, tgt, el), vec) return self.get_connection(src, tgt)(vec) @@ -242,7 +242,7 @@ class DGDiscretization: if (isinstance(vec, np.ndarray) and vec.dtype.char == "O" and not isinstance(vec, DOFArray)): - return oarray_vectorize( + return obj_array_vectorize( lambda el: self.inverse_mass(el), vec) @memoize_in(self, "elwise_linear_knl") @@ -296,7 +296,7 @@ class DGDiscretization: if (isinstance(vec, np.ndarray) and vec.dtype.char == "O" and not isinstance(vec, DOFArray)): - return oarray_vectorize( + return obj_array_vectorize( lambda el: self.face_mass(el), vec) @memoize_in(self, "face_mass_knl") @@ -370,7 +370,7 @@ class TracePair: def interior_trace_pair(discr, vec): i = discr.interp("vol", "int_faces", vec) - e = oarray_vectorize( + e = obj_array_vectorize( lambda el: discr.opposite_face_connection()(el), i) return TracePair("int_faces", i, e) @@ -455,7 +455,7 @@ def bump(actx, discr, t=0): nodes[1] - source_center[1], ]) - exp = oarray_vectorized(actx.special_func("exp")) + exp = obj_array_vectorized(actx.special_func("exp")) return ( np.cos(source_omega*t) * exp( diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 964ac4b9..b49065c2 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -26,7 +26,7 @@ from functools import partial from typing import Optional from pytools import memoize_in, memoize_method, single_valued -from pytools.obj_array import make_obj_array, oarray_vectorize +from pytools.obj_array import make_obj_array, obj_array_vectorize from meshmode.array_context import ArrayContext, make_loopy_program __doc__ = """ @@ -226,7 +226,7 @@ def thaw(actx: ArrayContext, ary): if (isinstance(ary, np.ndarray) and ary.dtype.char == "O" and not isinstance(ary, DOFArray)): - return oarray_vectorize(partial(thaw, actx), ary) + return obj_array_vectorize(partial(thaw, actx), ary) if ary.array_context is not None: raise ValueError("DOFArray passed to thaw is not frozen") @@ -241,7 +241,7 @@ def freeze(ary): if (isinstance(ary, np.ndarray) and ary.dtype.char == "O" and not isinstance(ary, DOFArray)): - return oarray_vectorize(freeze, ary) + return obj_array_vectorize(freeze, ary) if ary.array_context is None: raise ValueError("DOFArray passed to freeze is already frozen") -- GitLab From 1afd45413a2ef3b55888db0cdac65263ba3f409a Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 23:48:10 -0500 Subject: [PATCH 024/106] Fix incorrect nodes indexing in opposite_face connection --- meshmode/discretization/connection/opposite_face.py 
| 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index a0af22fa..625bfd0f 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -57,13 +57,13 @@ def _make_cross_face_batches(actx, return tgt_bdry_nodes = np.array([ - thaw_to_numpy(actx, ary)[tgt_bdry_element_indices] - for ary in tgt_bdry_discr.nodes()[i_tgt_grp] + thaw_to_numpy(actx, ary[i_tgt_grp])[tgt_bdry_element_indices] + for ary in tgt_bdry_discr.nodes() ]) src_bdry_nodes = np.array([ - thaw_to_numpy(actx, ary)[src_bdry_element_indices] - for ary in src_bdry_discr.nodes()[i_src_grp] + thaw_to_numpy(actx, ary[i_src_grp])[src_bdry_element_indices] + for ary in src_bdry_discr.nodes() ]) tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps -- GitLab From 3fe67a6311c1691885cf6b16729e7ab1eae43a82 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 23:49:58 -0500 Subject: [PATCH 025/106] Fix norm printing in dg-simple --- examples/simple-dg.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/simple-dg.py b/examples/simple-dg.py index 172d1b25..65035e08 100644 --- a/examples/simple-dg.py +++ b/examples/simple-dg.py @@ -503,7 +503,10 @@ def main(): fields = rk4_step(fields, t, dt, rhs) if istep % 10 == 0: - print(istep, t, la.norm(fields[0].get())) + # FIXME: Maybe an integral function to go with the + # DOFArray would be nice? + assert len(fields[0]) == 1 + print(istep, t, la.norm(actx.to_numpy(fields[0][0]))) vis.write_vtk_file("fld-wave-min-%04d.vtu" % istep, [ ("u", fields[0]), -- GitLab From cb9993cac1ef06fd4549f702fb7a3ed3be7ddf68 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Fri, 5 Jun 2020 23:50:28 -0500 Subject: [PATCH 026/106] Refactor discretization vis for ArrayContext --- meshmode/discretization/visualization.py | 72 ++++++++++++++++-------- 1 file changed, 47 insertions(+), 25 deletions(-) diff --git a/meshmode/discretization/visualization.py b/meshmode/discretization/visualization.py index 1415de03..2a7abdb5 100644 --- a/meshmode/discretization/visualization.py +++ b/meshmode/discretization/visualization.py @@ -22,6 +22,8 @@ THE SOFTWARE. 
import numpy as np from pytools import memoize_method, Record +from meshmode.discretization import DOFArray + __doc__ = """ @@ -43,18 +45,18 @@ def separate_by_real_and_imag(data, real_only): if is_obj_array(field): assert len(ls) == 1 from pytools.obj_array import ( - oarray_real_copy, oarray_imag_copy, - with_object_array_or_scalar) + obj_array_real_copy, obj_array_imag_copy, + obj_array_vectorize) if field[0].dtype.kind == "c": if real_only: yield (name, - with_object_array_or_scalar(oarray_real_copy, field)) + obj_array_vectorize(obj_array_real_copy, field)) else: yield (name+"_r", - with_object_array_or_scalar(oarray_real_copy, field)) + obj_array_vectorize(obj_array_real_copy, field)) yield (name+"_i", - with_object_array_or_scalar(oarray_imag_copy, field)) + obj_array_vectorize(obj_array_imag_copy, field)) else: yield (name, field) else: @@ -111,17 +113,23 @@ class Visualizer(object): self.element_shrink_factor = element_shrink_factor - def _resample_and_get(self, queue, vec): - from pytools.obj_array import with_object_array_or_scalar - - def resample_and_get_one(fld): - from numbers import Number - if isinstance(fld, Number): - return np.ones(self.connection.to_discr.nnodes) * fld - else: - return self.connection(queue, fld).get(queue=queue) - - return with_object_array_or_scalar(resample_and_get_one, vec) + def _resample_to_numpy(self, vec): + if (isinstance(vec, np.ndarray) + and vec.dtype.char == "O" + and not isinstance(vec, DOFArray)): + from pytools.obj_array import obj_array_vectorize + return obj_array_vectorize(self._resample_to_numpy, vec) + + from numbers import Number + if isinstance(vec, Number): + raise NotImplementedError("visualizing constants") + #return np.ones(self.connection.to_discr.nnodes) * fld + else: + resampled = self.connection(vec) + if len(resampled) != 1: + raise NotImplementedError("visualization with multiple " + "element groups") + return resampled.array_context.to_numpy(resampled[0]).reshape(-1) # {{{ vis sub-element connectivity @@ -274,6 +282,18 @@ class Visualizer(object): # {{{ vtk + @memoize_method + def _vis_nodes(self): + if len(self.vis_discr.groups) != 1: + raise NotImplementedError("visualization with multiple " + "element groups") + + actx = self.vis_discr._setup_actx + return np.array([ + actx.to_numpy(actx.thaw(ary[0])) + for ary in self.vis_discr.nodes() + ]) + def write_vtk_file(self, file_name, names_and_fields, compressor=None, real_only=False, @@ -284,12 +304,10 @@ class Visualizer(object): AppendedDataXMLGenerator, VF_LIST_OF_COMPONENTS) - with cl.CommandQueue(self.vis_discr.cl_context) as queue: - nodes = self.vis_discr.nodes().with_queue(queue).get() - - names_and_fields = [ - (name, self._resample_and_get(queue, fld)) - for name, fld in names_and_fields] + nodes = self._vis_nodes() + names_and_fields = [ + (name, self._resample_to_numpy(fld)) + for name, fld in names_and_fields] vc_groups = self._vis_connectivity() @@ -316,6 +334,10 @@ class Visualizer(object): + (1-self.element_shrink_factor) * el_centers[:, :, np.newaxis]) + if len(self.vis_discr.groups) != 1: + raise NotImplementedError("visualization with multiple " + "element groups") + grid = UnstructuredGrid( (self.vis_discr.groups[0].ndofs, DataArray("points", @@ -418,14 +440,14 @@ class Visualizer(object): # }}} -def make_visualizer(queue, discr, vis_order, element_shrink_factor=None): +def make_visualizer(actx, discr, vis_order, element_shrink_factor=None): from meshmode.discretization import Discretization from meshmode.discretization.poly_element import ( 
PolynomialWarpAndBlendElementGroup, LegendreGaussLobattoTensorProductElementGroup, OrderAndTypeBasedGroupFactory) vis_discr = Discretization( - discr.cl_context, discr.mesh, + actx, discr.mesh, OrderAndTypeBasedGroupFactory( vis_order, simplex_group_class=PolynomialWarpAndBlendElementGroup, @@ -436,7 +458,7 @@ def make_visualizer(queue, discr, vis_order, element_shrink_factor=None): make_same_mesh_connection return Visualizer( - make_same_mesh_connection(vis_discr, discr), + make_same_mesh_connection(actx, vis_discr, discr), element_shrink_factor=element_shrink_factor) # }}} -- GitLab From 5af4ada3845c287a6f0c41a11279ebe3b2a826ae Mon Sep 17 00:00:00 2001 From: "[6~" Date: Sat, 6 Jun 2020 12:17:04 -0500 Subject: [PATCH 027/106] Fix some deprecated obj array usage in visualizer --- meshmode/discretization/visualization.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/meshmode/discretization/visualization.py b/meshmode/discretization/visualization.py index 2a7abdb5..604e8563 100644 --- a/meshmode/discretization/visualization.py +++ b/meshmode/discretization/visualization.py @@ -38,12 +38,12 @@ __doc__ = """ # {{{ visualizer def separate_by_real_and_imag(data, real_only): - for name, field in data: - from pytools.obj_array import log_shape, is_obj_array - ls = log_shape(field) + # This function is called on numpy data that has already been + # merged into a single vector. - if is_obj_array(field): - assert len(ls) == 1 + for name, field in data: + if isinstance(field, np.ndarray) and field.dtype.char == "O": + assert len(field.shape) == 1 from pytools.obj_array import ( obj_array_real_copy, obj_array_imag_copy, obj_array_vectorize) @@ -60,7 +60,6 @@ def separate_by_real_and_imag(data, real_only): else: yield (name, field) else: - # ls == () if field.dtype.kind == "c": yield (name+"_r", field.real.copy()) yield (name+"_i", field.imag.copy()) @@ -158,9 +157,9 @@ class Visualizer(object): if isinstance(group.mesh_el_group, SimplexElementGroup): node_tuples = list(gnitstam(group.order, group.dim)) - from modepy.tools import submesh + from modepy.tools import simplex_submesh el_connectivity = np.array( - submesh(node_tuples), + simplex_submesh(node_tuples), dtype=np.intp) vtk_cell_type = { 1: VTK_LINE, -- GitLab From e4f5657d210f1a443cf1edb7e1ce80cf36c9a12d Mon Sep 17 00:00:00 2001 From: "[6~" Date: Sat, 6 Jun 2020 12:17:24 -0500 Subject: [PATCH 028/106] Reference numpy subclassing docs from DOFArray --- meshmode/discretization/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index b49065c2..74a2b9d8 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -190,6 +190,8 @@ class DOFArray(np.ndarray): .. 
automethod:: from_list """ + # Follows https://numpy.org/devdocs/user/basics.subclassing.html + def __new__(cls, actx: Optional[ArrayContext], input_array): if not (actx is None or isinstance(actx, ArrayContext)): raise TypeError("actx must be of type ArrayContext") -- GitLab From e6301442257cdac348157a3526bdcb54b75ca9f2 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Sat, 6 Jun 2020 13:17:16 -0500 Subject: [PATCH 029/106] ArrayContext.special_func: obj_array_vectorized by default --- examples/simple-dg.py | 4 ++-- meshmode/array_context.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/examples/simple-dg.py b/examples/simple-dg.py index 65035e08..deef8b09 100644 --- a/examples/simple-dg.py +++ b/examples/simple-dg.py @@ -30,7 +30,7 @@ import pyopencl.array as cla # noqa from pytools import memoize_method, memoize_in from pytools.obj_array import ( flat_obj_array, make_obj_array, - obj_array_vectorize, obj_array_vectorized) + obj_array_vectorize) from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa from meshmode.discretization import DOFArray, freeze, thaw from meshmode.array_context import PyOpenCLArrayContext, make_loopy_program @@ -455,7 +455,7 @@ def bump(actx, discr, t=0): nodes[1] - source_center[1], ]) - exp = obj_array_vectorized(actx.special_func("exp")) + exp = (actx.special_func("exp")) return ( np.cos(source_omega*t) * exp( diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 8cbc3002..b03425a9 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -124,7 +124,8 @@ class ArrayContext: self.call_loopy(prg, inp=ary, out=result) return result - return f + from pytools.obj_array import obj_array_vectorized + return obj_array_vectorized(f) def freeze(self, array): """Return a version of the context-defined array *array* that is -- GitLab From 23e7af29ae8d26fe41126ce3546aabd42978325a Mon Sep 17 00:00:00 2001 From: "[6~" Date: Sat, 6 Jun 2020 13:17:46 -0500 Subject: [PATCH 030/106] Fix doc pointer for array context --- doc/array_context.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/array_context.rst b/doc/array_context.rst index fe16b327..df9b375f 100644 --- a/doc/array_context.rst +++ b/doc/array_context.rst @@ -1,4 +1,4 @@ Array Contexts ============== -.. automodule:: meshmode.array +.. automodule:: meshmode.array_context -- GitLab From b6d2eb0417f61641ccc82cb50b05168e0b3cab10 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Sat, 6 Jun 2020 13:39:14 -0500 Subject: [PATCH 031/106] Move DOFArray into its own file --- doc/array.rst | 9 ++ doc/array_context.rst | 4 - doc/index.rst | 2 +- examples/simple-dg.py | 2 +- meshmode/discretization/__init__.py | 124 +++-------------- meshmode/discretization/connection/direct.py | 2 +- meshmode/discretization/visualization.py | 2 +- meshmode/dof_array.py | 134 +++++++++++++++++++ 8 files changed, 167 insertions(+), 112 deletions(-) create mode 100644 doc/array.rst delete mode 100644 doc/array_context.rst create mode 100644 meshmode/dof_array.py diff --git a/doc/array.rst b/doc/array.rst new file mode 100644 index 00000000..a722c479 --- /dev/null +++ b/doc/array.rst @@ -0,0 +1,9 @@ +DOF (Degree-of-Freedom) Arrays +============================== + +.. automodule:: meshmode.dof_array + +Array Contexts +============== + +.. automodule:: meshmode.array_context diff --git a/doc/array_context.rst b/doc/array_context.rst deleted file mode 100644 index df9b375f..00000000 --- a/doc/array_context.rst +++ /dev/null @@ -1,4 +0,0 @@ -Array Contexts -============== - -.. 
automodule:: meshmode.array_context diff --git a/doc/index.rst b/doc/index.rst index af5305e6..a5e39233 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -7,7 +7,7 @@ Contents: :maxdepth: 2 mesh - array_context + array discretization connection misc diff --git a/examples/simple-dg.py b/examples/simple-dg.py index deef8b09..2858bb4c 100644 --- a/examples/simple-dg.py +++ b/examples/simple-dg.py @@ -32,7 +32,7 @@ from pytools.obj_array import ( flat_obj_array, make_obj_array, obj_array_vectorize) from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa -from meshmode.discretization import DOFArray, freeze, thaw +from meshmode.dof_array import DOFArray, freeze, thaw from meshmode.array_context import PyOpenCLArrayContext, make_loopy_program diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 74a2b9d8..c96e9aae 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -22,20 +22,17 @@ THE SOFTWARE. import numpy as np -from functools import partial -from typing import Optional - -from pytools import memoize_in, memoize_method, single_valued -from pytools.obj_array import make_obj_array, obj_array_vectorize +from pytools import memoize_in, memoize_method +from pytools.obj_array import make_obj_array from meshmode.array_context import ArrayContext, make_loopy_program +# underscored because it shouldn't be imported from here. +from meshmode.dof_array import DOFArray as _DOFArray + __doc__ = """ .. autoclass:: ElementGroupBase .. autoclass:: InterpolatoryElementGroupBase .. autoclass:: ElementGroupFactory -.. autoclass:: DOFArray -.. autofunction:: thaw -.. autofunction:: freeze .. autoclass:: Discretization """ @@ -173,89 +170,6 @@ class OrderBasedGroupFactory(ElementGroupFactory): # }}} -# {{{ DOFArray - -class DOFArray(np.ndarray): - """This array type is a subclass of :class:`numpy.ndarray` that only - offers :meth:`from_list` as additional functionality. Its intended use - is for degree-of-freedom arrays associated with a discretization, - with one entry per element group. - - The main purpose of this class is to better describe the data structure, - i.e. when a :class:`DOFArray` occurs inside of a numpy object array, - the level representing the array of element groups will be more - easily recognized. - - .. attribute:: entry_dtype - .. 
automethod:: from_list - """ - - # Follows https://numpy.org/devdocs/user/basics.subclassing.html - - def __new__(cls, actx: Optional[ArrayContext], input_array): - if not (actx is None or isinstance(actx, ArrayContext)): - raise TypeError("actx must be of type ArrayContext") - - result = np.asarray(input_array).view(cls) - if len(result.shape) != 1: - raise ValueError("DOFArray instances must have one-dimensional " - "shape, with one entry per element group") - - result.array_context = actx - return result - - def __array_finalize__(self, obj): - if obj is None: - return - self.array_context = getattr(obj, 'array_context', None) - - @property - def entry_dtype(self): - return single_valued(subary.dtype for subary in self.flat) - - @classmethod - def from_list(cls, actx: Optional[ArrayContext], res_list): - if not (actx is None or isinstance(actx, ArrayContext)): - raise TypeError("actx must be of type ArrayContext") - - result = np.empty((len(res_list),), dtype=object).view(cls) - result[:] = res_list - result.array_context = actx - return result - - -def thaw(actx: ArrayContext, ary): - if (isinstance(ary, np.ndarray) - and ary.dtype.char == "O" - and not isinstance(ary, DOFArray)): - return obj_array_vectorize(partial(thaw, actx), ary) - - if ary.array_context is not None: - raise ValueError("DOFArray passed to thaw is not frozen") - - return DOFArray.from_list(actx, [ - actx.thaw(subary) - for subary in ary - ]) - - -def freeze(ary): - if (isinstance(ary, np.ndarray) - and ary.dtype.char == "O" - and not isinstance(ary, DOFArray)): - return obj_array_vectorize(freeze, ary) - - if ary.array_context is None: - raise ValueError("DOFArray passed to freeze is already frozen") - - return DOFArray.from_list(None, [ - ary.array_context.freeze(subary) - for subary in ary - ]) - -# }}} - - class Discretization(object): """An unstructured composite discretization. @@ -276,16 +190,13 @@ class Discretization(object): .. automethod:: empty_like .. automethod:: zeros_like - .. method:: nodes() - - An object array of shape ``(ambient_dim,)`` containing - :class:`DOFArray`s of node coordinates. + .. automethod:: nodes() .. method:: num_reference_derivative(queue, ref_axes, vec) .. method:: quad_weights(queue) - A :class:`DOFArray` with quadrature weights. + A :class:`~meshmode.dof_array.DOFArray` with quadrature weights. """ def __init__(self, actx: ArrayContext, mesh, group_factory, @@ -335,12 +246,12 @@ class Discretization(object): else: dtype = np.dtype(dtype) - return DOFArray.from_list(actx, [ + return _DOFArray.from_list(actx, [ creation_func(shape=(grp.nelements, grp.nunit_dofs), dtype=dtype) for grp in self.groups]) def empty(self, actx: ArrayContext, dtype=None): - """Return an empty DOF vector. + """Return an empty :class:`~meshmode.dof_array.DOFArray`. :arg dtype: type special value 'c' will result in a vector of dtype :attr:`self.complex_dtype`. If @@ -353,7 +264,7 @@ class Discretization(object): return self._new_array(actx, actx.empty, dtype=dtype) def zeros(self, actx: ArrayContext, dtype=None): - """Return a zero-initilaized DOF vector. + """Return a zero-initialized :class:`~meshmode.dof_array.DOFArray`. :arg dtype: type special value 'c' will result in a vector of dtype :attr:`self.complex_dtype`. 
If @@ -365,10 +276,10 @@ class Discretization(object): return self._new_array(actx, actx.zeros, dtype=dtype) - def empty_like(self, array: DOFArray): + def empty_like(self, array: _DOFArray): return self.empty(array.array_context, dtype=array.entry_dtype) - def zeros_like(self, array: DOFArray): + def zeros_like(self, array: _DOFArray): return self.zeros(array.array_context, dtype=array.entry_dtype) def num_reference_derivative(self, ref_axes, vec): @@ -394,7 +305,7 @@ class Discretization(object): return mat - return DOFArray.from_list(actx, [ + return _DOFArray.from_list(actx, [ actx.call_loopy( prg(), diff_mat=actx.from_numpy(get_mat(grp)), vec=vec[grp.index] )["result"] @@ -411,7 +322,7 @@ class Discretization(object): actx = self._setup_actx - return DOFArray(None, [ + return _DOFArray(None, [ actx.freeze( actx.call_loopy( prg(), weights=actx.from_numpy(grp.weights) @@ -420,6 +331,11 @@ class Discretization(object): @memoize_method def nodes(self): + """ + :returns: object array of shape ``(ambient_dim,)`` containing + :class:`~meshmode.dof_array.DOFArray`s of node coordinates. + """ + @memoize_in(self, "nodes_prg") def prg(): return make_loopy_program( @@ -436,7 +352,7 @@ class Discretization(object): actx = self._setup_actx return make_obj_array([ - DOFArray.from_list(None, [ + _DOFArray.from_list(None, [ actx.freeze( actx.call_loopy( prg(), diff --git a/meshmode/discretization/connection/direct.py b/meshmode/discretization/connection/direct.py index 018e651f..d7616bf0 100644 --- a/meshmode/discretization/connection/direct.py +++ b/meshmode/discretization/connection/direct.py @@ -353,7 +353,7 @@ class DirectDiscretizationConnection(DiscretizationConnection): return knl - from meshmode.discretization import DOFArray + from meshmode.dof_array import DOFArray if not isinstance(ary, DOFArray): raise TypeError("non-array passed to discretization connection") diff --git a/meshmode/discretization/visualization.py b/meshmode/discretization/visualization.py index 604e8563..da1dddb0 100644 --- a/meshmode/discretization/visualization.py +++ b/meshmode/discretization/visualization.py @@ -22,7 +22,7 @@ THE SOFTWARE. import numpy as np from pytools import memoize_method, Record -from meshmode.discretization import DOFArray +from meshmode.dof_array import DOFArray __doc__ = """ diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py new file mode 100644 index 00000000..4e3927b5 --- /dev/null +++ b/meshmode/dof_array.py @@ -0,0 +1,134 @@ +__copyright__ = "Copyright (C) 2020 Andreas Kloeckner" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import numpy as np +from typing import Optional +from functools import partial + +from pytools import single_valued +from pytools.obj_array import obj_array_vectorize + +from meshmode.array_context import ArrayContext + +__doc__ = """ +.. autoclass:: DOFArray +.. autofunction:: thaw +.. autofunction:: freeze +.. autofunction:: flatten +.. autofunction:: unflatten +""" + + +# {{{ DOFArray + +class DOFArray(np.ndarray): + """This array type is a subclass of :class:`numpy.ndarray` intended to hold + degree-of-freedom arrays for use with + :class:`~meshmode.discretization.Discretization`, + with one entry in the :class:`DOFArray` per element group. + It is derived from :class:`numpy.ndarray` with dtype object ("an object array") + + The main purpose of this class is to better describe the data structure, + i.e. when a :class:`DOFArray` occurs inside of further numpy object array, + the level representing the array of element groups can be recognized (by + people and programs). + + .. attribute:: array_context + .. attribute:: entry_dtype + .. automethod:: from_list + """ + + # Follows https://numpy.org/devdocs/user/basics.subclassing.html + + def __new__(cls, actx: Optional[ArrayContext], input_array): + if not (actx is None or isinstance(actx, ArrayContext)): + raise TypeError("actx must be of type ArrayContext") + + result = np.asarray(input_array).view(cls) + if len(result.shape) != 1: + raise ValueError("DOFArray instances must have one-dimensional " + "shape, with one entry per element group") + + result.array_context = actx + return result + + def __array_finalize__(self, obj): + if obj is None: + return + self.array_context = getattr(obj, 'array_context', None) + + @property + def entry_dtype(self): + return single_valued(subary.dtype for subary in self.flat) + + @classmethod + def from_list(cls, actx: Optional[ArrayContext], res_list): + if not (actx is None or isinstance(actx, ArrayContext)): + raise TypeError("actx must be of type ArrayContext") + + result = np.empty((len(res_list),), dtype=object).view(cls) + result[:] = res_list + result.array_context = actx + return result + +# }}} + + +def thaw(actx: ArrayContext, ary): + if (isinstance(ary, np.ndarray) + and ary.dtype.char == "O" + and not isinstance(ary, DOFArray)): + return obj_array_vectorize(partial(thaw, actx), ary) + + if ary.array_context is not None: + raise ValueError("DOFArray passed to thaw is not frozen") + + return DOFArray.from_list(actx, [ + actx.thaw(subary) + for subary in ary + ]) + + +def freeze(ary): + if (isinstance(ary, np.ndarray) + and ary.dtype.char == "O" + and not isinstance(ary, DOFArray)): + return obj_array_vectorize(freeze, ary) + + if ary.array_context is None: + raise ValueError("DOFArray passed to freeze is already frozen") + + return DOFArray.from_list(None, [ + ary.array_context.freeze(subary) + for subary in ary + ]) + + +def flatten_dof_array(ary: DOFArray): + pass + + +def unflatten_dof_array(actx: ArrayContext, ary): + pass + + +# vim: foldmethod=marker -- GitLab From c17429a9b5295e01ecaf5b388c8417b6f94ad7f9 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Wed, 10 Jun 2020 10:51:12 -0500 Subject: [PATCH 032/106] Document vectorization behavior of ArrayContext.special_func --- meshmode/array_context.py | 3 +++ 1 file changed, 3 
insertions(+) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index b03425a9..cdc58483 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -112,6 +112,9 @@ class ArrayContext: def special_func(self, name): """Returns a callable for the special function *name*, where *name* is a (potentially dotted) function name resolvable by :mod:`loopy`. + + The returned callable will vectorize over object arrays, including + :class:`meshmode.dof_array.DOFArray`. """ prg = make_loopy_program( "{[iel, idof]: 0<=iel Date: Wed, 10 Jun 2020 10:52:09 -0500 Subject: [PATCH 033/106] Document dof_array.{thaw,freeze} --- meshmode/dof_array.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 4e3927b5..5230dedd 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -93,7 +93,12 @@ class DOFArray(np.ndarray): # }}} -def thaw(actx: ArrayContext, ary): +def thaw(actx: ArrayContext, ary: np.ndarray) -> np.ndarray: + r"""Call :meth:`~meshmode.array_context.ArrayContext.thaw` on the element + group arrays making up the :class:`DOFArray`, using *actx*. + + Vectorizes over object arrays of :class:`DOFArray`\ s. + """ if (isinstance(ary, np.ndarray) and ary.dtype.char == "O" and not isinstance(ary, DOFArray)): @@ -108,7 +113,13 @@ def thaw(actx: ArrayContext, ary): ]) -def freeze(ary): +def freeze(ary: np.ndarray) -> np.ndarray: + r"""Call :meth:`~meshmode.array_context.arrayContext.freeze` on the element + group arrays making up the :class:`DOFArray`, using the + :class:`~meshmode.array_context.ArrayContext` in *ary*. + + Vectorizes over object arrays of :class:`DOFArray`\ s. + """ if (isinstance(ary, np.ndarray) and ary.dtype.char == "O" and not isinstance(ary, DOFArray)): -- GitLab From 9a93a66b13662bca9d85c0ddeca22325fc29b8ad Mon Sep 17 00:00:00 2001 From: "[6~" Date: Wed, 10 Jun 2020 10:52:43 -0500 Subject: [PATCH 034/106] Add dof_array.{flatten,unflatten} --- meshmode/dof_array.py | 79 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 72 insertions(+), 7 deletions(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 5230dedd..be023388 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -21,13 +21,17 @@ THE SOFTWARE. """ import numpy as np -from typing import Optional +from typing import Optional, TYPE_CHECKING from functools import partial -from pytools import single_valued +from pytools import single_valued, memoize_in from pytools.obj_array import obj_array_vectorize -from meshmode.array_context import ArrayContext +from meshmode.array_context import ArrayContext, make_loopy_program + +if TYPE_CHECKING: + from meshmode.discretization import Discretization as _Discretization + __doc__ = """ .. autoclass:: DOFArray @@ -134,12 +138,73 @@ def freeze(ary: np.ndarray) -> np.ndarray: ]) -def flatten_dof_array(ary: DOFArray): - pass +def flatten(ary: np.ndarray) -> np.ndarray: + r"""Convert a :class:`DOFArray` into a "flat" array of degrees of freedom, + where the resulting type of the array is given by the + :attr:`DOFArray.array_context`. + + Array elements are laid out contiguously, with the element group + index varying slowest, element index next, and intra-element DOF + index fastest. + + Vectorizes over object arrays of :class:`DOFArray`\ s. 
+ """ + if (isinstance(ary, np.ndarray) + and ary.dtype.char == "O" + and not isinstance(ary, DOFArray)): + return obj_array_vectorize(flatten, ary) + + group_sizes = [grp_ary.shape[0] * grp_ary.shape[1] for grp_ary in ary] + group_starts = np.cumsum([0] + group_sizes) + + actx = ary.array_context + + @memoize_in(actx, "flatten_prg") + def prg(): + return make_loopy_program( + "{[iel,idof]: 0<=iel np.ndarray: + r"""Convert a 'flat' array returned by :func:`flatten` back to a :class:`DOFArray`. + + Vectorizes over object arrays of :class:`DOFArray`\ s. + """ + if (isinstance(ary, np.ndarray) + and ary.dtype.char == "O" + and not isinstance(ary, DOFArray)): + return obj_array_vectorize( + lambda subary: unflatten(actx, discr, subary), + ary) + + actx = ary.array_context + + @memoize_in(actx, "unflatten_prg") + def prg(): + return make_loopy_program( + "{[iel,idof]: 0<=iel Date: Wed, 10 Jun 2020 11:11:37 -0500 Subject: [PATCH 035/106] Adapt DirectDiscretization.full_resample_matrix to ArrayContext --- meshmode/discretization/connection/direct.py | 57 ++++++++++++-------- 1 file changed, 35 insertions(+), 22 deletions(-) diff --git a/meshmode/discretization/connection/direct.py b/meshmode/discretization/connection/direct.py index d7616bf0..a3cc98ac 100644 --- a/meshmode/discretization/connection/direct.py +++ b/meshmode/discretization/connection/direct.py @@ -247,26 +247,32 @@ class DirectDiscretizationConnection(DiscretizationConnection): return actx.freeze(actx.from_numpy(result)) - def full_resample_matrix(self, queue): + def full_resample_matrix(self, actx): """Build a dense matrix representing this discretization connection. .. warning:: On average, this will be exceedingly expensive (:math:`O(N^2)` in the number *N* of discretization points) in terms of memory usage - and thus not what you'd typically want. + and thus not what you'd typically want, other than maybe for + testing. + + .. note:: + + This function assumes a flattened DOF array, as produced by + :class:`~meshmode.dof_array.flatten`. 
""" @memoize_in(self, "oversample_mat_knl") def knl(): - knl = make_loopy_program( - """{[k, i, j]: - 0<=k Date: Wed, 10 Jun 2020 11:12:17 -0500 Subject: [PATCH 036/106] Adapt check_connection to ArrayContext --- .../discretization/connection/__init__.py | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/meshmode/discretization/connection/__init__.py b/meshmode/discretization/connection/__init__.py index 0e2aafbe..cf812738 100644 --- a/meshmode/discretization/connection/__init__.py +++ b/meshmode/discretization/connection/__init__.py @@ -35,6 +35,7 @@ from meshmode.discretization.connection.chained import \ from meshmode.discretization.connection.projection import \ L2ProjectionInverseDiscretizationConnection +from meshmode.array_context import ArrayContext from meshmode.discretization.connection.same_mesh import \ make_same_mesh_connection from meshmode.discretization.connection.face import ( @@ -117,27 +118,26 @@ Implementation details # {{{ check connection -def check_connection(connection): +def check_connection(actx: ArrayContext, connection: DirectDiscretizationConnection): from_discr = connection.from_discr to_discr = connection.to_discr assert len(connection.groups) == len(to_discr.groups) - with cl.CommandQueue(to_discr.cl_context) as queue: - for cgrp, tgrp in zip(connection.groups, to_discr.groups): - for batch in cgrp.batches: - fgrp = from_discr.groups[batch.from_group_index] + for cgrp, tgrp in zip(connection.groups, to_discr.groups): + for batch in cgrp.batches: + fgrp = from_discr.groups[batch.from_group_index] - from_element_indices = batch.from_element_indices.get(queue) - to_element_indices = batch.to_element_indices.get(queue) - - assert (0 <= from_element_indices).all() - assert (0 <= to_element_indices).all() - assert (from_element_indices < fgrp.nelements).all() - assert (to_element_indices < tgrp.nelements).all() - if batch.to_element_face is not None: - assert 0 <= batch.to_element_face < fgrp.mesh_el_group.nfaces + from_element_indices = actx.to_numpy( + actx.thaw(batch.from_element_indices)) + to_element_indices = actx.to_numpy(actx.thaw(batch.to_element_indices)) + assert (0 <= from_element_indices).all() + assert (0 <= to_element_indices).all() + assert (from_element_indices < fgrp.nelements).all() + assert (to_element_indices < tgrp.nelements).all() + if batch.to_element_face is not None: + assert 0 <= batch.to_element_face < fgrp.mesh_el_group.nfaces # }}} -- GitLab From 3b9a05a15c00a2e351af1e7179c6bad1a23ebe9e Mon Sep 17 00:00:00 2001 From: "[6~" Date: Wed, 10 Jun 2020 11:12:46 -0500 Subject: [PATCH 037/106] Start porting test_boundary_interpolation to ArrayContext --- test/test_meshmode.py | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 047362d2..3aa93db1 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -26,8 +26,6 @@ from six.moves import range import numpy as np import numpy.linalg as la import pyopencl as cl -import pyopencl.array # noqa -import pyopencl.clmath # noqa from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl @@ -40,6 +38,8 @@ from meshmode.discretization.poly_element import ( PolynomialEquidistantSimplexGroupFactory, ) from meshmode.mesh import Mesh, BTAG_ALL +from meshmode.array_context import PyOpenCLArrayContext +from meshmode.dof_array import thaw, freeze, flatten, DOFArray from meshmode.discretization.connection import \ FACE_RESTR_ALL, FACE_RESTR_INTERIOR 
import meshmode.mesh.generation as mgen @@ -50,6 +50,12 @@ import logging logger = logging.getLogger(__name__) +def flat_norm(ary: DOFArray, ord=2): + # FIXME This could be done without flattening and copying + actx = ary.array_context + return la.norm(actx.to_numpy(flatten(ary)), ord) + + # {{{ circle mesh def test_circle_mesh(do_plot=False): @@ -223,6 +229,7 @@ def test_boundary_interpolation(ctx_factory, group_factory, boundary_tag, mesh_name, dim, mesh_pars, per_face_groups): cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) + actx = PyOpenCLArrayContext(queue) from meshmode.discretization import Discretization from meshmode.discretization.connection import ( @@ -233,8 +240,10 @@ def test_boundary_interpolation(ctx_factory, group_factory, boundary_tag, order = 4 + sin = actx.special_func("sin") + def f(x): - return 0.1*cl.clmath.sin(30*x) + return 0.1*sin(30*x) for mesh_par in mesh_pars: # {{{ get mesh @@ -267,32 +276,31 @@ def test_boundary_interpolation(ctx_factory, group_factory, boundary_tag, # }}} - vol_discr = Discretization(cl_ctx, mesh, - group_factory(order)) + vol_discr = Discretization(actx, mesh, group_factory(order)) print("h=%s -> %d elements" % ( h, sum(mgrp.nelements for mgrp in mesh.groups))) - x = vol_discr.nodes()[0].with_queue(queue) + x = thaw(actx, vol_discr.nodes()[0]) vol_f = f(x) bdry_connection = make_face_restriction( - vol_discr, group_factory(order), + actx, vol_discr, group_factory(order), boundary_tag, per_face_groups=per_face_groups) - check_connection(bdry_connection) + check_connection(actx, bdry_connection) bdry_discr = bdry_connection.to_discr - bdry_x = bdry_discr.nodes()[0].with_queue(queue) + bdry_x = thaw(actx, bdry_discr.nodes()[0]) bdry_f = f(bdry_x) - bdry_f_2 = bdry_connection(queue, vol_f) + bdry_f_2 = bdry_connection(vol_f) if mesh_name == "blob" and dim == 2 and mesh.nelements < 500: - mat = bdry_connection.full_resample_matrix(queue).get(queue) - bdry_f_2_by_mat = mat.dot(vol_f.get()) + mat = actx.to_numpy(bdry_connection.full_resample_matrix(actx)) + bdry_f_2_by_mat = mat.dot(actx.to_numpy(flatten(vol_f))) - mat_error = la.norm(bdry_f_2.get(queue=queue) - bdry_f_2_by_mat) + mat_error = la.norm(actx.to_numpy(flatten(bdry_f_2)) - bdry_f_2_by_mat) assert mat_error < 1e-14, mat_error - err = la.norm((bdry_f-bdry_f_2).get(), np.inf) + err = flat_norm(bdry_f-bdry_f_2, np.inf) eoc_rec.add_data_point(h, err) order_slack = 0.75 if mesh_name == "blob" else 0.5 -- GitLab From df4fb4690586d32d34eb22556613c86a25ff130b Mon Sep 17 00:00:00 2001 From: "[6~" Date: Wed, 10 Jun 2020 13:41:03 -0500 Subject: [PATCH 038/106] Work around numpy/numpy#16564 in DOFArray.from_list --- meshmode/dof_array.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index be023388..11ec69b2 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -89,9 +89,16 @@ class DOFArray(np.ndarray): if not (actx is None or isinstance(actx, ArrayContext)): raise TypeError("actx must be of type ArrayContext") - result = np.empty((len(res_list),), dtype=object).view(cls) - result[:] = res_list + ngroups = len(res_list) + + result = np.empty(ngroups, dtype=object).view(cls) result.array_context = actx + + # 'result[:] = res_list' may look tempting, however: + # https://github.com/numpy/numpy/issues/16564 + for igrp in range(ngroups): + result[igrp] = res_list[igrp] + return result # }}} -- GitLab From c13a98a31561413e31672032648d6e3f36ed7f68 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Wed, 10 Jun 2020 
18:23:47 -0500 Subject: [PATCH 039/106] Fix argument names in opposite_face.{freeze_from,thaw_to}_numpy --- meshmode/discretization/connection/opposite_face.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index 625bfd0f..cb0d572f 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -31,12 +31,12 @@ import logging logger = logging.getLogger(__name__) -def freeze_from_numpy(self, array): - return self.freeze(self.from_numpy(array)) +def freeze_from_numpy(actx, array): + return actx.freeze(actx.from_numpy(array)) -def thaw_to_numpy(self, array): - return self.to_numpy(self.thaw(array)) +def thaw_to_numpy(actx, array): + return actx.to_numpy(actx.thaw(array)) # {{{ _make_cross_face_batches -- GitLab From 3e1ff78f1a7ecee166d84e05026f829c1aaf92e7 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Wed, 10 Jun 2020 18:24:23 -0500 Subject: [PATCH 040/106] Fix handling of 0D opposite-face boundaries with ArrayContext --- meshmode/discretization/connection/opposite_face.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index cb0d572f..d66da0f9 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -50,8 +50,8 @@ def _make_cross_face_batches(actx, if tgt_bdry_discr.dim == 0: yield InterpolationBatch( from_group_index=i_src_grp, - from_element_indices=freeze_from_numpy(src_bdry_element_indices), - to_element_indices=freeze_from_numpy(tgt_bdry_element_indices), + from_element_indices=freeze_from_numpy(actx, src_bdry_element_indices), + to_element_indices=freeze_from_numpy(actx, tgt_bdry_element_indices), result_unit_nodes=src_bdry_discr.groups[i_src_grp].unit_nodes, to_element_face=None) return -- GitLab From ff7ce49118adf92ec381be5e6cdf8141caf5d29f Mon Sep 17 00:00:00 2001 From: "[6~" Date: Wed, 10 Jun 2020 18:24:59 -0500 Subject: [PATCH 041/106] Stop using indices_in_shape in generate_box_mesh --- meshmode/mesh/generation.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/meshmode/mesh/generation.py b/meshmode/mesh/generation.py index ca5ddfb4..d20c2a9f 100644 --- a/meshmode/mesh/generation.py +++ b/meshmode/mesh/generation.py @@ -811,10 +811,9 @@ def generate_box_mesh(axis_coords, order=1, coord_dtype=np.float64, axes = ["x", "y", "z", "w"] if boundary_tags: - from pytools import indices_in_shape vert_index_to_tuple = { vertex_indices[itup]: itup - for itup in indices_in_shape(shape)} + for itup in np.ndindex(shape)} for tag_idx, tag in enumerate(boundary_tags): # Need to map the correct face vertices to the boundary tags -- GitLab From a0f2f99856f404c135d91a1a37de10fea726b8ce Mon Sep 17 00:00:00 2001 From: "[6~" Date: Wed, 10 Jun 2020 18:26:12 -0500 Subject: [PATCH 042/106] Add missing target_unit argument to generate_gmsh calls in test_meshmode --- test/test_meshmode.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 3aa93db1..75fa13d4 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -65,7 +65,8 @@ def test_circle_mesh(do_plot=False): FileSource("circle.step"), 2, order=2, force_ambient_dim=2, other_options=[ - "-string", "Mesh.CharacteristicLengthMax = 0.05;"] + "-string", 
"Mesh.CharacteristicLengthMax = 0.05;"], + target_unit="MM", ) print("END GEN") print(mesh.nelements) @@ -352,7 +353,8 @@ def test_all_faces_interpolation(ctx_factory, mesh_name, dim, mesh_pars, FileSource("blob-2d.step"), 2, order=order, force_ambient_dim=2, other_options=[ - "-string", "Mesh.CharacteristicLengthMax = %s;" % h] + "-string", "Mesh.CharacteristicLengthMax = %s;" % h], + target_unit="MM", ) print("END GEN") elif mesh_name == "warp": @@ -475,7 +477,8 @@ def test_opposite_face_interpolation(ctx_factory, group_factory, FileSource("blob-2d.step"), 2, order=order, force_ambient_dim=2, other_options=[ - "-string", "Mesh.CharacteristicLengthMax = %s;" % h] + "-string", "Mesh.CharacteristicLengthMax = %s;" % h], + target_unit="MM", ) print("END GEN") elif mesh_name == "warp": @@ -526,7 +529,8 @@ def test_element_orientation(): mesh = generate_gmsh( FileSource("blob-2d.step"), 2, order=mesh_order, force_ambient_dim=2, - other_options=["-string", "Mesh.CharacteristicLengthMax = 0.02;"] + other_options=["-string", "Mesh.CharacteristicLengthMax = 0.02;"], + target_unit="MM", ) from meshmode.mesh.processing import (perform_flips, @@ -624,7 +628,8 @@ def test_merge_and_map(ctx_factory, visualize=False): mesh = generate_gmsh( FileSource("blob-2d.step"), 2, order=mesh_order, force_ambient_dim=2, - other_options=["-string", "Mesh.CharacteristicLengthMax = 0.02;"] + other_options=["-string", "Mesh.CharacteristicLengthMax = 0.02;"], + target_unit="MM", ) discr_grp_factory = PolynomialWarpAndBlendGroupFactory(3) @@ -1052,6 +1057,7 @@ def test_quad_mesh_2d(): """, ["blob-2d.step"]), force_ambient_dim=2, + target_unit="MM", ) print("END GEN") print(mesh.nelements) @@ -1205,7 +1211,8 @@ def test_mesh_to_tikz(): FileSource("../test/blob-2d.step"), 2, order=order, force_ambient_dim=2, other_options=[ - "-string", "Mesh.CharacteristicLengthMax = %s;" % h] + "-string", "Mesh.CharacteristicLengthMax = %s;" % h], + target_unit="MM", ) from meshmode.mesh.visualization import mesh_to_tikz -- GitLab From be18a1708750ac5fbb46a3b49ce62e2ce52d75b5 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Wed, 10 Jun 2020 18:30:57 -0500 Subject: [PATCH 043/106] Port most of test_meshmode to ArrayContext --- test/test_meshmode.py | 117 +++++++++++++++++++++++------------------- 1 file changed, 64 insertions(+), 53 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 75fa13d4..b4de0457 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -39,7 +39,7 @@ from meshmode.discretization.poly_element import ( ) from meshmode.mesh import Mesh, BTAG_ALL from meshmode.array_context import PyOpenCLArrayContext -from meshmode.dof_array import thaw, freeze, flatten, DOFArray +from meshmode.dof_array import thaw, flatten, DOFArray from meshmode.discretization.connection import \ FACE_RESTR_ALL, FACE_RESTR_INTERIOR import meshmode.mesh.generation as mgen @@ -325,6 +325,7 @@ def test_all_faces_interpolation(ctx_factory, mesh_name, dim, mesh_pars, per_face_groups): cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) + actx = PyOpenCLArrayContext(queue) from meshmode.discretization import Discretization from meshmode.discretization.connection import ( @@ -336,8 +337,10 @@ def test_all_faces_interpolation(ctx_factory, mesh_name, dim, mesh_pars, order = 4 + sin = actx.special_func("sin") + def f(x): - return 0.1*cl.clmath.sin(30*x) + return 0.1*sin(30*x) for mesh_par in mesh_pars: # {{{ get mesh @@ -367,20 +370,20 @@ def test_all_faces_interpolation(ctx_factory, mesh_name, dim, mesh_pars, # }}} - vol_discr 
= Discretization(cl_ctx, mesh, + vol_discr = Discretization(actx, mesh, PolynomialWarpAndBlendGroupFactory(order)) print("h=%s -> %d elements" % ( h, sum(mgrp.nelements for mgrp in mesh.groups))) all_face_bdry_connection = make_face_restriction( - vol_discr, PolynomialWarpAndBlendGroupFactory(order), + actx, vol_discr, PolynomialWarpAndBlendGroupFactory(order), FACE_RESTR_ALL, per_face_groups=per_face_groups) all_face_bdry_discr = all_face_bdry_connection.to_discr for ito_grp, ceg in enumerate(all_face_bdry_connection.groups): for ibatch, batch in enumerate(ceg.batches): assert np.array_equal( - batch.from_element_indices.get(queue), + actx.to_numpy(actx.thaw(batch.from_element_indices)), np.arange(vol_discr.mesh.nelements)) if per_face_groups: @@ -388,31 +391,31 @@ def test_all_faces_interpolation(ctx_factory, mesh_name, dim, mesh_pars, else: assert ibatch == batch.to_element_face - all_face_x = all_face_bdry_discr.nodes()[0].with_queue(queue) + all_face_x = thaw(actx, all_face_bdry_discr.nodes()[0]) all_face_f = f(all_face_x) - all_face_f_2 = all_face_bdry_discr.zeros(queue) + all_face_f_2 = all_face_bdry_discr.zeros(actx) for boundary_tag in [ BTAG_ALL, FACE_RESTR_INTERIOR, ]: bdry_connection = make_face_restriction( - vol_discr, PolynomialWarpAndBlendGroupFactory(order), + actx, vol_discr, PolynomialWarpAndBlendGroupFactory(order), boundary_tag, per_face_groups=per_face_groups) bdry_discr = bdry_connection.to_discr - bdry_x = bdry_discr.nodes()[0].with_queue(queue) + bdry_x = thaw(actx, bdry_discr.nodes()[0]) bdry_f = f(bdry_x) all_face_embedding = make_face_to_all_faces_embedding( - bdry_connection, all_face_bdry_discr) + actx, bdry_connection, all_face_bdry_discr) - check_connection(all_face_embedding) + check_connection(actx, all_face_embedding) - all_face_f_2 += all_face_embedding(queue, bdry_f) + all_face_f_2 = all_face_f_2 + all_face_embedding(bdry_f) - err = la.norm((all_face_f-all_face_f_2).get(), np.inf) + err = flat_norm(all_face_f-all_face_f_2, np.inf) eoc_rec.add_data_point(h, err) print(eoc_rec) @@ -441,6 +444,7 @@ def test_opposite_face_interpolation(ctx_factory, group_factory, cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) + actx = PyOpenCLArrayContext(queue) from meshmode.discretization import Discretization from meshmode.discretization.connection import ( @@ -452,8 +456,10 @@ def test_opposite_face_interpolation(ctx_factory, group_factory, order = 5 + sin = actx.special_func("sin") + def f(x): - return 0.1*cl.clmath.sin(30*x) + return 0.1*sin(30*x) for mesh_par in mesh_pars: # {{{ get mesh @@ -491,24 +497,24 @@ def test_opposite_face_interpolation(ctx_factory, group_factory, # }}} - vol_discr = Discretization(cl_ctx, mesh, + vol_discr = Discretization(actx, mesh, group_factory(order)) print("h=%s -> %d elements" % ( h, sum(mgrp.nelements for mgrp in mesh.groups))) bdry_connection = make_face_restriction( - vol_discr, group_factory(order), + actx, vol_discr, group_factory(order), FACE_RESTR_INTERIOR) bdry_discr = bdry_connection.to_discr - opp_face = make_opposite_face_connection(bdry_connection) - check_connection(opp_face) + opp_face = make_opposite_face_connection(actx, bdry_connection) + check_connection(actx, opp_face) - bdry_x = bdry_discr.nodes()[0].with_queue(queue) + bdry_x = thaw(actx, bdry_discr.nodes()[0]) bdry_f = f(bdry_x) - bdry_f_2 = opp_face(queue, bdry_f) + bdry_f_2 = opp_face(bdry_f) - err = la.norm((bdry_f-bdry_f_2).get(), np.inf) + err = flat_norm(bdry_f-bdry_f_2, np.inf) eoc_rec.add_data_point(h, err) print(eoc_rec) @@ -765,6 +771,7 @@ def 
test_sanity_qhull_nd(ctx_factory, dim, order): ctx = ctx_factory() queue = cl.CommandQueue(ctx) + actx = PyOpenCLArrayContext(queue) from scipy.spatial import Delaunay verts = np.random.rand(1000, dim) @@ -775,28 +782,30 @@ def test_sanity_qhull_nd(ctx_factory, dim, order): fix_orientation=True) from meshmode.discretization import Discretization - low_discr = Discretization(ctx, mesh, + low_discr = Discretization(actx, mesh, PolynomialEquidistantSimplexGroupFactory(order)) - high_discr = Discretization(ctx, mesh, + high_discr = Discretization(actx, mesh, PolynomialEquidistantSimplexGroupFactory(order+1)) from meshmode.discretization.connection import make_same_mesh_connection - cnx = make_same_mesh_connection(high_discr, low_discr) + cnx = make_same_mesh_connection(actx, high_discr, low_discr) + + sin = actx.special_func("sin") def f(x): - return 0.1*cl.clmath.sin(x) + return 0.1*sin(x) - x_low = low_discr.nodes()[0].with_queue(queue) + x_low = thaw(actx, low_discr.nodes()[0]) f_low = f(x_low) - x_high = high_discr.nodes()[0].with_queue(queue) + x_high = thaw(actx, high_discr.nodes()[0]) f_high_ref = f(x_high) - f_high_num = cnx(queue, f_low) - - err = (f_high_ref-f_high_num).get() + f_high_num = cnx(f_low) - err = la.norm(err, np.inf)/la.norm(f_high_ref.get(), np.inf) + err = ( + flat_norm(f_high_ref-f_high_num, np.inf) + / flat_norm(f_high_ref, np.inf)) print(err) assert err < 1e-2 @@ -1149,7 +1158,7 @@ def test_quad_multi_element(): # {{{ test_vtk_overwrite -def test_vtk_overwrite(ctx_getter): +def test_vtk_overwrite(ctx_factory): pytest.importorskip("pyvisfile") def _try_write_vtk(writer, obj): @@ -1168,8 +1177,9 @@ def test_vtk_overwrite(ctx_getter): if os.path.exists(filename): os.remove(filename) - ctx = ctx_getter() + ctx = ctx_factory() queue = cl.CommandQueue(ctx) + actx = PyOpenCLArrayContext(queue) target_order = 7 @@ -1180,7 +1190,7 @@ def test_vtk_overwrite(ctx_getter): from meshmode.discretization.poly_element import \ InterpolatoryQuadratureSimplexGroupFactory discr = Discretization( - queue.context, mesh, + actx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order)) from meshmode.discretization.visualization import make_visualizer @@ -1188,7 +1198,7 @@ def test_vtk_overwrite(ctx_getter): write_nodal_adjacency_vtk_file from meshmode.mesh.visualization import write_vertex_vtk_file - vis = make_visualizer(queue, discr, 1) + vis = make_visualizer(actx, discr, 1) _try_write_vtk(vis.write_vtk_file, discr) _try_write_vtk(lambda x, y, **kwargs: @@ -1242,6 +1252,7 @@ def test_affine_map(): def test_mesh_without_vertices(ctx_factory): ctx = ctx_factory() queue = cl.CommandQueue(ctx) + actx = PyOpenCLArrayContext(queue) # create a mesh from meshmode.mesh.generation import generate_icosphere @@ -1261,11 +1272,11 @@ def test_mesh_without_vertices(ctx_factory): from meshmode.discretization import Discretization from meshmode.discretization.poly_element import \ InterpolatoryQuadratureSimplexGroupFactory as GroupFactory - discr = Discretization(ctx, mesh, GroupFactory(4)) - discr.nodes().with_queue(queue) + discr = Discretization(actx, mesh, GroupFactory(4)) + thaw(actx, discr.nodes()) from meshmode.discretization.visualization import make_visualizer - make_visualizer(queue, discr, 4) + make_visualizer(actx, discr, 4) @pytest.mark.parametrize("curve_name", ["ellipse", "arc"]) @@ -1367,6 +1378,7 @@ def test_is_affine_group_check(mesh_name): def test_mesh_multiple_groups(ctx_factory, ambient_dim, visualize=False): ctx = ctx_factory() queue = cl.CommandQueue(ctx) + actx = 
PyOpenCLArrayContext(queue) order = 4 @@ -1400,16 +1412,16 @@ def test_mesh_multiple_groups(ctx_factory, ambient_dim, visualize=False): from meshmode.discretization import Discretization from meshmode.discretization.poly_element import \ PolynomialWarpAndBlendGroupFactory as GroupFactory - discr = Discretization(ctx, mesh, GroupFactory(order)) + discr = Discretization(actx, mesh, GroupFactory(order)) if visualize: - group_id = discr.empty(queue, dtype=np.int) + group_id = discr.empty(actx, dtype=np.int) for igrp, grp in enumerate(discr.groups): group_id_view = grp.view(group_id) group_id_view.fill(igrp) from meshmode.discretization.visualization import make_visualizer - vis = make_visualizer(queue, discr, vis_order=order) + vis = make_visualizer(actx, discr, vis_order=order) vis.write_vtk_file("test_mesh_multiple_groups.vtu", [ ("group_id", group_id) ], overwrite=True) @@ -1421,28 +1433,27 @@ def test_mesh_multiple_groups(ctx_factory, ambient_dim, visualize=False): make_opposite_face_connection, check_connection) for boundary_tag in [BTAG_ALL, FACE_RESTR_INTERIOR, FACE_RESTR_ALL]: - conn = make_face_restriction(discr, GroupFactory(order), + conn = make_face_restriction(actx, discr, GroupFactory(order), boundary_tag=boundary_tag, per_face_groups=False) - check_connection(conn) + check_connection(actx, conn) - bdry_f = conn.to_discr.empty(queue) - bdry_f.fill(1.0) + bdry_f = conn.to_discr.zeros(actx) + 1 if boundary_tag == FACE_RESTR_INTERIOR: - opposite = make_opposite_face_connection(conn) - check_connection(opposite) + opposite = make_opposite_face_connection(actx, conn) + check_connection(actx, opposite) - op_bdry_f = opposite(queue, bdry_f) - error = abs(cl.array.sum(bdry_f - op_bdry_f).get(queue)) + op_bdry_f = opposite(bdry_f) + error = flat_norm(bdry_f - op_bdry_f, np.inf) assert error < 1.0e-11, error if boundary_tag == FACE_RESTR_ALL: - embedding = make_face_to_all_faces_embedding(conn, conn.to_discr) - check_connection(embedding) + embedding = make_face_to_all_faces_embedding(actx, conn, conn.to_discr) + check_connection(actx, embedding) - em_bdry_f = embedding(queue, bdry_f) - error = abs(cl.array.sum(bdry_f - em_bdry_f).get(queue)) + em_bdry_f = embedding(bdry_f) + error = flat_norm(bdry_f - em_bdry_f) assert error < 1.0e-11, error -- GitLab From c90977da399d119bd9fc8c2f390984cfe3492690 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Wed, 10 Jun 2020 23:18:53 -0500 Subject: [PATCH 044/106] Move flat_norm to dof_array --- meshmode/dof_array.py | 8 ++++++++ test/test_meshmode.py | 8 +------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 11ec69b2..92562e02 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -39,6 +39,7 @@ __doc__ = """ .. autofunction:: freeze .. autofunction:: flatten .. autofunction:: unflatten +.. 
autofunction:: flat_norm """ @@ -214,4 +215,11 @@ def unflatten(actx: ArrayContext, discr: "_Discretization", ary) -> np.ndarray: for grp_start, grp in zip(group_starts, discr.groups)]) +def flat_norm(ary: DOFArray, ord=2): + # FIXME This could be done without flattening and copying + actx = ary.array_context + import numpy.linalg as la + return la.norm(actx.to_numpy(flatten(ary)), ord) + + # vim: foldmethod=marker diff --git a/test/test_meshmode.py b/test/test_meshmode.py index b4de0457..64e74012 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -39,7 +39,7 @@ from meshmode.discretization.poly_element import ( ) from meshmode.mesh import Mesh, BTAG_ALL from meshmode.array_context import PyOpenCLArrayContext -from meshmode.dof_array import thaw, flatten, DOFArray +from meshmode.dof_array import thaw, flat_norm, flatten from meshmode.discretization.connection import \ FACE_RESTR_ALL, FACE_RESTR_INTERIOR import meshmode.mesh.generation as mgen @@ -50,12 +50,6 @@ import logging logger = logging.getLogger(__name__) -def flat_norm(ary: DOFArray, ord=2): - # FIXME This could be done without flattening and copying - actx = ary.array_context - return la.norm(actx.to_numpy(flatten(ary)), ord) - - # {{{ circle mesh def test_circle_mesh(do_plot=False): -- GitLab From 6daccec7d44a4eccd7a69f563b30c96a145bec08 Mon Sep 17 00:00:00 2001 From: "[6~" Date: Wed, 10 Jun 2020 23:35:45 -0500 Subject: [PATCH 045/106] Refactor refinement connection and test_refinement for ArrayContext --- .../discretization/connection/refinement.py | 29 +++++++++---------- test/test_refinement.py | 25 +++++++++------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/meshmode/discretization/connection/refinement.py b/meshmode/discretization/connection/refinement.py index 9a2d60d8..55026e59 100644 --- a/meshmode/discretization/connection/refinement.py +++ b/meshmode/discretization/connection/refinement.py @@ -24,8 +24,6 @@ THE SOFTWARE. """ import numpy as np -import pyopencl as cl -import pyopencl.array # noqa import logging logger = logging.getLogger(__name__) @@ -36,7 +34,7 @@ from pytools import log_process # {{{ Build interpolation batches for group def _build_interpolation_batches_for_group( - queue, group_idx, coarse_discr_group, fine_discr_group, record): + actx, group_idx, coarse_discr_group, fine_discr_group, record): r""" To map between discretizations, we sort each of the fine mesh elements into an interpolation batch. Which batch they go @@ -104,8 +102,8 @@ def _build_interpolation_batches_for_group( continue yield InterpolationBatch( from_group_index=group_idx, - from_element_indices=cl.array.to_device(queue, np.asarray(from_bin)), - to_element_indices=cl.array.to_device(queue, np.asarray(to_bin)), + from_element_indices=actx.from_numpy(np.asarray(from_bin)), + to_element_indices=actx.from_numpy(np.asarray(to_bin)), result_unit_nodes=unit_nodes, to_element_face=None) @@ -113,7 +111,7 @@ def _build_interpolation_batches_for_group( @log_process(logger) -def make_refinement_connection(refiner, coarse_discr, group_factory): +def make_refinement_connection(actx, refiner, coarse_discr, group_factory): """Return a :class:`meshmode.discretization.connection.DiscretizationConnection` connecting `coarse_discr` to a discretization on the fine mesh. 
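# The flat_norm helper introduced just above is what the ported tests below
# use for all error measurement; a brief usage sketch (a and b are assumed to
# be DOFArrays on the same discretization):
import numpy as np
from meshmode.dof_array import flat_norm

rel_err = flat_norm(a - b, np.inf) / flat_norm(b, np.inf)  # independent of per-group layout
assert rel_err < 1e-11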
@@ -142,21 +140,20 @@ def make_refinement_connection(refiner, coarse_discr, group_factory): from meshmode.discretization import Discretization fine_discr = Discretization( - coarse_discr.cl_context, + actx, fine_mesh, group_factory, real_dtype=coarse_discr.real_dtype) groups = [] - with cl.CommandQueue(fine_discr.cl_context) as queue: - for group_idx, (coarse_discr_group, fine_discr_group, record) in \ - enumerate(zip(coarse_discr.groups, fine_discr.groups, - refiner.group_refinement_records)): - groups.append( - DiscretizationConnectionElementGroup( - list(_build_interpolation_batches_for_group( - queue, group_idx, coarse_discr_group, - fine_discr_group, record)))) + for group_idx, (coarse_discr_group, fine_discr_group, record) in \ + enumerate(zip(coarse_discr.groups, fine_discr.groups, + refiner.group_refinement_records)): + groups.append( + DiscretizationConnectionElementGroup( + list(_build_interpolation_batches_for_group( + actx, group_idx, coarse_discr_group, + fine_discr_group, record)))) return DirectDiscretizationConnection( from_discr=coarse_discr, diff --git a/test/test_refinement.py b/test/test_refinement.py index 5e10a946..31624610 100644 --- a/test/test_refinement.py +++ b/test/test_refinement.py @@ -27,12 +27,13 @@ from functools import partial import pytest import pyopencl as cl -import pyopencl.clmath # noqa import numpy as np from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) +from meshmode.array_context import PyOpenCLArrayContext +from meshmode.dof_array import thaw, flat_norm from meshmode.mesh.generation import ( # noqa generate_icosahedron, generate_box_mesh, make_curve_mesh, ellipse) from meshmode.mesh.refinement.utils import check_nodal_adj_against_geometry @@ -191,11 +192,14 @@ def test_refinement_connection( cl_ctx = ctx_getter() queue = cl.CommandQueue(cl_ctx) + actx = PyOpenCLArrayContext(queue) from meshmode.discretization import Discretization from meshmode.discretization.connection import ( make_refinement_connection, check_connection) + sin = actx.special_func("sin") + from pytools.convergence import EOCRecorder eoc_rec = EOCRecorder() @@ -235,27 +239,27 @@ def test_refinement_connection( factor = 9 for iaxis in range(len(x)): - result = result * cl.clmath.sin(factor * (x[iaxis]/mesh_ext[iaxis])) + result = result * sin(factor * (x[iaxis]/mesh_ext[iaxis])) return result - discr = Discretization(cl_ctx, mesh, group_factory(order)) + discr = Discretization(actx, mesh, group_factory(order)) refiner = refiner_cls(mesh) flags = refine_flags(mesh) refiner.refine(flags) connection = make_refinement_connection( - refiner, discr, group_factory(order)) - check_connection(connection) + actx, refiner, discr, group_factory(order)) + check_connection(actx, connection) fine_discr = connection.to_discr - x = discr.nodes().with_queue(queue) - x_fine = fine_discr.nodes().with_queue(queue) + x = thaw(actx, discr.nodes()) + x_fine = thaw(actx, fine_discr.nodes()) f_coarse = f(x) - f_interp = connection(queue, f_coarse).with_queue(queue) - f_true = f(x_fine).with_queue(queue) + f_interp = connection(f_coarse) + f_true = f(x_fine) if visualize == "dots": import matplotlib.pyplot as plt @@ -279,8 +283,7 @@ def test_refinement_connection( ("f_true", f_true), ]) - import numpy.linalg as la - err = la.norm((f_interp - f_true).get(queue), np.inf) + err = flat_norm(f_interp - f_true, np.inf) eoc_rec.add_data_point(h, err) order_slack = 0.5 -- GitLab From 315e058ae9fca4dbc46c0d6a8f9567e5d06f3a92 Mon Sep 17 00:00:00 2001 From: Andreas 
Kloeckner Date: Sat, 13 Jun 2020 17:30:23 -0500 Subject: [PATCH 046/106] PyOpenclArrayContext.transform_loopy_program: check for existence of 'idof' iname --- meshmode/array_context.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index cdc58483..0808444b 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -213,10 +213,11 @@ class PyOpenCLArrayContext(ArrayContext): @memoize_method def transform_loopy_program(self, program): - # FIXME: This assumes that inames 'idof' and 'iel' exist. + # FIXME: This assumes that the iname 'iel' exists. # FIXME: This could be much smarter. import loopy as lp - program = lp.split_iname(program, "idof", 16, inner_tag="l.0") + if "idof" in program.all_inames(): + program = lp.split_iname(program, "idof", 16, inner_tag="l.0") return lp.tag_inames(program, dict(iel="g.0")) # }}} -- GitLab From 16560cba756f693a9b4e49509b5e3163a271e2cc Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sat, 13 Jun 2020 17:48:39 -0500 Subject: [PATCH 047/106] Adapt ChainedConnection, L2ProjectionInverseDiscretizationConnection to ArrayContext --- meshmode/discretization/connection/chained.py | 47 +++--- .../discretization/connection/projection.py | 90 ++++++------ test/test_chained.py | 138 +++++++++--------- 3 files changed, 139 insertions(+), 136 deletions(-) diff --git a/meshmode/discretization/connection/chained.py b/meshmode/discretization/connection/chained.py index bd1da1f4..66028716 100644 --- a/meshmode/discretization/connection/chained.py +++ b/meshmode/discretization/connection/chained.py @@ -23,8 +23,6 @@ THE SOFTWARE. """ import numpy as np -import pyopencl as cl -import pyopencl.array # noqa from pytools import Record @@ -65,9 +63,9 @@ class ChainedDiscretizationConnection(DiscretizationConnection): self.connections = connections - def __call__(self, queue, vec): + def __call__(self, vec): for cnx in self.connections: - vec = cnx(queue, vec) + vec = cnx(vec) return vec @@ -86,13 +84,13 @@ def _iterbatches(groups): yield (igrp, ibatch), (grp, batch) -def _build_element_lookup_table(queue, conn): +def _build_element_lookup_table(actx, conn): el_table = [np.full(g.nelements, -1, dtype=np.int) for g in conn.to_discr.groups] for (igrp, _), (_, batch) in _iterbatches(conn.groups): - el_table[igrp][batch.to_element_indices.get(queue)] = \ - batch.from_element_indices.get(queue) + el_table[igrp][actx.to_numpy(batch.to_element_indices)] = \ + actx.to_numpy(batch.from_element_indices) return el_table @@ -146,12 +144,12 @@ def _build_new_group_table(from_conn, to_conn): return grp_to_grp, batch_info -def _build_batches(queue, from_bins, to_bins, batch): +def _build_batches(actx, from_bins, to_bins, batch): from meshmode.discretization.connection.direct import \ InterpolationBatch def to_device(x): - return cl.array.to_device(queue, np.asarray(x)) + return actx.freeze(actx.from_numpy(np.asarray(x))) for ibatch, (from_bin, to_bin) in enumerate(zip(from_bins, to_bins)): yield InterpolationBatch( @@ -162,7 +160,7 @@ def _build_batches(queue, from_bins, to_bins, batch): to_element_face=batch[ibatch].to_element_face) -def flatten_chained_connection(queue, connection): +def flatten_chained_connection(actx, connection): """Collapse a connection into a direct connection. 
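# For orientation, the signature changes in this commit mean chained
# connections are now applied directly to DOFArrays and flattened through the
# array context; a hedged sketch (conn_a, conn_b and vec are placeholder names):
from meshmode.discretization.connection import ChainedDiscretizationConnection
from meshmode.discretization.connection.chained import flatten_chained_connection

chained = ChainedDiscretizationConnection([conn_a, conn_b])
direct = flatten_chained_connection(actx, chained)   # collapse to one direct connection

# both should produce (numerically) the same resampled data
resampled = chained(vec)            # equivalent to conn_b(conn_a(vec)); no queue argument
resampled_direct = direct(vec)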
If the given connection is already a @@ -211,12 +209,12 @@ def flatten_chained_connection(queue, connection): connections = connection.connections direct_connections = [] for conn in connections: - direct_connections.append(flatten_chained_connection(queue, conn)) + direct_connections.append(flatten_chained_connection(actx, conn)) # merge all the direct connections from_conn = direct_connections[0] for to_conn in direct_connections[1:]: - el_table = _build_element_lookup_table(queue, from_conn) + el_table = _build_element_lookup_table(actx, from_conn) grp_to_grp, batch_info = _build_new_group_table(from_conn, to_conn) # distribute the indices to new groups and batches @@ -224,13 +222,13 @@ def flatten_chained_connection(queue, connection): to_bins = [[np.empty(0, dtype=np.int) for _ in g] for g in batch_info] for (igrp, ibatch), (_, from_batch) in _iterbatches(from_conn.groups): - from_to_element_indices = from_batch.to_element_indices.get(queue) + from_to_element_indices = actx.to_numpy(from_batch.to_element_indices) for (jgrp, jbatch), (_, to_batch) in _iterbatches(to_conn.groups): igrp_new, ibatch_new = grp_to_grp[igrp, ibatch, jgrp, jbatch] - jfrom = to_batch.from_element_indices.get(queue) - jto = to_batch.to_element_indices.get(queue) + jfrom = actx.to_numpy(to_batch.from_element_indices) + jto = actx.to_numpy(to_batch.to_element_indices) mask = np.isin(jfrom, from_to_element_indices) from_bins[igrp_new][ibatch_new] = \ @@ -244,7 +242,7 @@ def flatten_chained_connection(queue, connection): groups = [] for igrp, (from_bin, to_bin) in enumerate(zip(from_bins, to_bins)): groups.append(DiscretizationConnectionElementGroup( - list(_build_batches(queue, from_bin, to_bin, + list(_build_batches(actx, from_bin, to_bin, batch_info[igrp])))) from_conn = DirectDiscretizationConnection( @@ -260,7 +258,7 @@ def flatten_chained_connection(queue, connection): # {{{ build chained resample matrix -def make_full_resample_matrix(queue, connection): +def make_full_resample_matrix(actx, connection): """Build a dense matrix representing the discretization connection. This is based on @@ -273,7 +271,7 @@ def make_full_resample_matrix(queue, connection): This method will be very slow, both in terms of speed and memory usage, and should only be used for testing or if absolutely necessary. - :arg queue: a :class:`pyopencl.CommandQueue`. + :arg queue: a :class:`meshmode.array_context.ArrayContext`. :arg connection: a :class:`~meshmode.discretization.connection.DiscretizationConnection`. 
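# Hedged usage sketch for the actx-based entry point documented here, mirroring
# test_chained_full_resample_matrix further down (chained and fx are
# placeholder names for a chained connection and a DOFArray on its from_discr):
import numpy as np
from meshmode.dof_array import flatten
from meshmode.discretization.connection.chained import make_full_resample_matrix

resample_mat = actx.to_numpy(make_full_resample_matrix(actx, chained))
f_direct = resample_mat @ actx.to_numpy(flatten(fx))
f_applied = actx.to_numpy(flatten(chained(fx)))
assert np.allclose(f_direct, f_applied)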
:return: a :class:`pyopencl.array.Array` of shape @@ -281,21 +279,22 @@ def make_full_resample_matrix(queue, connection): """ if hasattr(connection, "full_resample_matrix"): - return connection.full_resample_matrix(queue) + return connection.full_resample_matrix(actx) if not hasattr(connection, 'connections'): raise TypeError('connection is not chained') if not connection.connections: result = np.eye(connection.to_discr.nnodes) - return cl.array.to_device(queue, result) + return actx.from_numpy(result) - acc = make_full_resample_matrix(queue, connection.connections[0]).get(queue) + acc = actx.to_numpy( + make_full_resample_matrix(actx, connection.connections[0])) for conn in connection.connections[1:]: - resampler = make_full_resample_matrix(queue, conn).get(queue) - acc = resampler.dot(acc) + resampler = actx.to_numpy(make_full_resample_matrix(actx, conn)) + acc = resampler @ acc - return cl.array.to_device(queue, acc) + return actx.from_numpy(acc) # }}} diff --git a/meshmode/discretization/connection/projection.py b/meshmode/discretization/connection/projection.py index fae3f9b7..0638872e 100644 --- a/meshmode/discretization/connection/projection.py +++ b/meshmode/discretization/connection/projection.py @@ -23,12 +23,13 @@ THE SOFTWARE. """ import numpy as np -import pyopencl as cl -import pyopencl.array # noqa -from loopy.version import MOST_RECENT_LANGUAGE_VERSION -from pytools import memoize_method, memoize_in +from pytools import keyed_memoize_method, memoize_in +import loopy as lp + +from meshmode.array_context import make_loopy_program +from meshmode.dof_array import DOFArray from meshmode.discretization.connection.direct import ( DiscretizationConnection, DirectDiscretizationConnection) @@ -78,8 +79,8 @@ class L2ProjectionInverseDiscretizationConnection(DiscretizationConnection): to_discr=self.conn.from_discr, is_surjective=is_surjective) - @memoize_method - def _batch_weights(self): + @keyed_memoize_method(key=lambda actx: ()) + def _batch_weights(self, actx): """Computes scaled quadrature weights for each interpolation batch in :attr:`conn`. The quadrature weights can be used to integrate over child elements in the domain of the parent element, by a change of @@ -111,26 +112,26 @@ class L2ProjectionInverseDiscretizationConnection(DiscretizationConnection): mat = grp.diff_matrices()[iaxis] jac[iaxis] = mat.dot(batch.result_unit_nodes.T) - weights[igrp, ibatch] = det(jac) * grp.weights + weights[igrp, ibatch] = actx.freeze(actx.from_numpy( + det(jac) * grp.weights)) return weights - def __call__(self, queue, vec): + def __call__(self, vec): @memoize_in(self, "conn_projection_knl") def kproj(): - import loopy as lp - knl = lp.make_kernel([ - "{[k]: 0 <= k < nelements}", - "{[j]: 0 <= j < n_from_nodes}" + return make_loopy_program([ + "{[iel]: 0 <= iel < nelements}", + "{[idof_quad]: 0 <= idof_quad < n_from_nodes}" ], """ - for k - <> element_dot = \ - sum(j, vec[from_element_indices[k], j] * \ - basis[j] * weights[j]) + for iel + <> element_dot = sum(idof_quad, + vec[from_element_indices[iel], idof_quad] + * basis[idof_quad] * weights[idof_quad]) - result[to_element_indices[k], ibasis] = \ - result[to_element_indices[k], ibasis] + element_dot + result[to_element_indices[iel], ibasis] = \ + result[to_element_indices[iel], ibasis] + element_dot end """, [ @@ -148,21 +149,17 @@ class L2ProjectionInverseDiscretizationConnection(DiscretizationConnection): lp.ValueArg("ibasis", np.int32), '...' 
], - name="conn_projection_knl", - lang_version=MOST_RECENT_LANGUAGE_VERSION) - - return knl + name="conn_projection_knl") @memoize_in(self, "conn_evaluation_knl") def keval(): - import loopy as lp - knl = lp.make_kernel([ - "{[k]: 0 <= k < nelements}", - "{[j]: 0 <= j < n_to_nodes}" + return make_loopy_program([ + "{[iel]: 0 <= iel < nelements}", + "{[idof]: 0 <= idof < n_to_nodes}" ], """ - result[k, j] = result[k, j] + \ - coefficients[k, ibasis] * basis[j] + result[iel, idof] = result[iel, idof] + \ + coefficients[iel, ibasis] * basis[idof] """, [ lp.GlobalArg("coefficients", None, @@ -170,55 +167,54 @@ class L2ProjectionInverseDiscretizationConnection(DiscretizationConnection): lp.ValueArg("ibasis", np.int32), '...' ], - name="conn_evaluate_knl", - default_offset=lp.auto, - lang_version=MOST_RECENT_LANGUAGE_VERSION) + name="conn_evaluate_knl") - return knl - - if not isinstance(vec, cl.array.Array): + if not isinstance(vec, DOFArray): raise TypeError("non-array passed to discretization connection") - if vec.shape != (self.from_discr.nnodes,): - raise ValueError("invalid shape of incoming resampling data") + actx = vec.array_context # compute weights on each refinement of the reference element - weights = self._batch_weights() + weights = self._batch_weights(actx) # perform dot product (on reference element) to get basis coefficients - c = self.to_discr.zeros(queue, dtype=vec.dtype) + c = self.to_discr.zeros(actx, dtype=vec.entry_dtype) + for igrp, (tgrp, cgrp) in enumerate( zip(self.to_discr.groups, self.conn.groups)): for ibatch, batch in enumerate(cgrp.batches): sgrp = self.from_discr.groups[batch.from_group_index] for ibasis, basis_fn in enumerate(sgrp.basis()): - basis = basis_fn(batch.result_unit_nodes).flatten() + basis = actx.from_numpy( + basis_fn(batch.result_unit_nodes).flatten()) # NOTE: batch.*_element_indices are reversed here because # they are from the original forward connection, but # we are going in reverse here. a bit confusing, but # saves on recreating the connection groups and batches. - kproj()(queue, + actx.call_loopy(kproj(), ibasis=ibasis, - vec=sgrp.view(vec), + vec=vec[sgrp.index], basis=basis, weights=weights[igrp, ibatch], - result=tgrp.view(c), + result=c[igrp], from_element_indices=batch.to_element_indices, to_element_indices=batch.from_element_indices) # evaluate at unit_nodes to get the vector on to_discr - result = self.to_discr.zeros(queue, dtype=vec.dtype) + result = self.to_discr.zeros(actx, dtype=vec.entry_dtype) for igrp, grp in enumerate(self.to_discr.groups): for ibasis, basis_fn in enumerate(grp.basis()): - basis = basis_fn(grp.unit_nodes).flatten() + basis = actx.from_numpy( + basis_fn(grp.unit_nodes).flatten()) - keval()(queue, + actx.call_loopy( + keval(), ibasis=ibasis, - result=grp.view(result), + result=result[grp.index], basis=basis, - coefficients=grp.view(c)) + coefficients=c[grp.index]) return result diff --git a/test/test_chained.py b/test/test_chained.py index 21ebbda4..91e61059 100644 --- a/test/test_chained.py +++ b/test/test_chained.py @@ -23,27 +23,25 @@ THE SOFTWARE. 
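# The projection connection ported above is exercised below in
# test_reversed_chained_connection; a short sketch of how it is driven
# (chained is a refinement-connection chain, fine_f a DOFArray on its to_discr):
from meshmode.discretization.connection.projection import \
        L2ProjectionInverseDiscretizationConnection

reverse = L2ProjectionInverseDiscretizationConnection(chained)
coarse_f = reverse(fine_f)    # L2 projection back onto chained.from_discr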
""" import numpy as np -import numpy.linalg as la import pyopencl as cl -import pyopencl.array # noqa -import pyopencl.clmath # noqa import pytest from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) +from meshmode.array_context import PyOpenCLArrayContext +from meshmode.dof_array import thaw, flat_norm, flatten + import logging logger = logging.getLogger(__name__) -def create_discretization(queue, ndim, +def create_discretization(actx, ndim, nelements=42, mesh_name=None, order=4): - ctx = queue.context - # construct mesh if ndim == 2: from functools import partial @@ -80,13 +78,13 @@ def create_discretization(queue, ndim, from meshmode.discretization import Discretization from meshmode.discretization.poly_element import \ InterpolatoryQuadratureSimplexGroupFactory - discr = Discretization(ctx, mesh, + discr = Discretization(actx, mesh, InterpolatoryQuadratureSimplexGroupFactory(order)) return discr -def create_refined_connection(queue, discr, threshold=0.3): +def create_refined_connection(actx, discr, threshold=0.3): from meshmode.mesh.refinement import RefinerWithoutAdjacency from meshmode.discretization.connection import make_refinement_connection from meshmode.discretization.poly_element import \ @@ -97,20 +95,20 @@ def create_refined_connection(queue, discr, threshold=0.3): refiner.refine(flags) discr_order = discr.groups[0].order - connection = make_refinement_connection(refiner, discr, + connection = make_refinement_connection(actx, refiner, discr, InterpolatoryQuadratureSimplexGroupFactory(discr_order)) return connection -def create_face_connection(queue, discr): +def create_face_connection(actx, discr): from meshmode.discretization.connection import FACE_RESTR_ALL from meshmode.discretization.connection import make_face_restriction from meshmode.discretization.poly_element import \ InterpolatoryQuadratureSimplexGroupFactory discr_order = discr.groups[0].order - connection = make_face_restriction(discr, + connection = make_face_restriction(actx, discr, InterpolatoryQuadratureSimplexGroupFactory(discr_order), FACE_RESTR_ALL, per_face_groups=True) @@ -126,19 +124,20 @@ def test_chained_batch_table(ctx_factory, ndim, visualize=False): ctx = ctx_factory() queue = cl.CommandQueue(ctx) + actx = PyOpenCLArrayContext(queue) - discr = create_discretization(queue, ndim) + discr = create_discretization(actx, ndim) connections = [] - conn = create_refined_connection(queue, discr) + conn = create_refined_connection(actx, discr) connections.append(conn) - conn = create_refined_connection(queue, conn.to_discr) + conn = create_refined_connection(actx, conn.to_discr) connections.append(conn) from meshmode.discretization.connection import ChainedDiscretizationConnection chained = ChainedDiscretizationConnection(connections) conn = chained.connections[0] - el_table = _build_element_lookup_table(queue, conn) + el_table = _build_element_lookup_table(actx, conn) for igrp, grp in enumerate(conn.groups): for ibatch, batch in enumerate(grp.batches): ifrom = batch.from_element_indices.get(queue) @@ -156,15 +155,15 @@ def test_chained_new_group_table(ctx_factory, ndim, visualize=False): ctx = ctx_factory() queue = cl.CommandQueue(ctx) + actx = PyOpenCLArrayContext(queue) - discr = create_discretization(queue, ndim, + discr = create_discretization(actx, ndim, nelements=8, - mesh_order=2, - discr_order=2) + order=2) connections = [] - conn = create_refined_connection(queue, discr) + conn = create_refined_connection(actx, discr) connections.append(conn) - conn = 
create_refined_connection(queue, conn.to_discr) + conn = create_refined_connection(actx, conn.to_discr) connections.append(conn) grp_to_grp, grp_info = _build_new_group_table(connections[0], @@ -202,29 +201,31 @@ def test_chained_new_group_table(ctx_factory, ndim, visualize=False): def test_chained_connection(ctx_factory, ndim, visualize=False): ctx = ctx_factory() queue = cl.CommandQueue(ctx) + actx = PyOpenCLArrayContext(queue) - discr = create_discretization(queue, ndim, - nelements=10) + discr = create_discretization(actx, ndim, nelements=10) connections = [] - conn = create_refined_connection(queue, discr, threshold=np.inf) + conn = create_refined_connection(actx, discr, threshold=np.inf) connections.append(conn) - conn = create_refined_connection(queue, conn.to_discr, threshold=np.inf) + conn = create_refined_connection(actx, conn.to_discr, threshold=np.inf) connections.append(conn) from meshmode.discretization.connection import \ ChainedDiscretizationConnection chained = ChainedDiscretizationConnection(connections) + sin = actx.special_func("sin") + def f(x): from six.moves import reduce - return 0.1 * reduce(lambda x, y: x * cl.clmath.sin(5 * y), x) + return 0.1 * reduce(lambda x, y: x * sin(5 * y), x) - x = connections[0].from_discr.nodes().with_queue(queue) + x = thaw(actx, connections[0].from_discr.nodes()) fx = f(x) - f1 = chained(queue, fx).get(queue) - f2 = connections[1](queue, connections[0](queue, fx)).get(queue) + f1 = chained(fx) + f2 = connections[1](connections[0](fx)) - assert np.allclose(f1, f2) + assert flat_norm(f1-f2, np.inf) / flat_norm(f2) < 1e-11 @pytest.mark.skip(reason='slow test') @@ -235,29 +236,32 @@ def test_chained_full_resample_matrix(ctx_factory, ndim, visualize=False): ctx = ctx_factory() queue = cl.CommandQueue(ctx) + actx = PyOpenCLArrayContext(queue) - discr = create_discretization(queue, ndim) + discr = create_discretization(actx, ndim) connections = [] - conn = create_refined_connection(queue, discr) + conn = create_refined_connection(actx, discr) connections.append(conn) - conn = create_refined_connection(queue, conn.to_discr) + conn = create_refined_connection(actx, conn.to_discr) connections.append(conn) from meshmode.discretization.connection import \ ChainedDiscretizationConnection chained = ChainedDiscretizationConnection(connections) + sin = actx.special_func("sin") + def f(x): from six.moves import reduce - return 0.1 * reduce(lambda x, y: x * cl.clmath.sin(5 * y), x) + return 0.1 * reduce(lambda x, y: x * sin(5 * y), x) - resample_mat = make_full_resample_matrix(queue, chained).get(queue) + resample_mat = actx.to_numpy(make_full_resample_matrix(actx, chained)) - x = connections[0].from_discr.nodes().with_queue(queue) + x = thaw(actx, connections[0].from_discr.nodes()) fx = f(x) - f1 = np.dot(resample_mat, fx.get(queue)) - f2 = chained(queue, fx).get(queue) - f3 = connections[1](queue, connections[0](queue, fx)).get(queue) + f1 = resample_mat @ actx.to_numpy(flatten(fx)) + f2 = actx.to_numpy(flatten(chained(fx))) + f3 = actx.to_numpy(flatten(connections[1](connections[0](fx)))) assert np.allclose(f1, f2) assert np.allclose(f2, f3) @@ -273,25 +277,26 @@ def test_chained_to_direct(ctx_factory, ndim, chain_type, ctx = ctx_factory() queue = cl.CommandQueue(ctx) + actx = PyOpenCLArrayContext(queue) - discr = create_discretization(queue, ndim, nelements=nelements) + discr = create_discretization(actx, ndim, nelements=nelements) connections = [] if chain_type == 1: - conn = create_refined_connection(queue, discr) + conn = 
create_refined_connection(actx, discr) connections.append(conn) - conn = create_refined_connection(queue, conn.to_discr) + conn = create_refined_connection(actx, conn.to_discr) connections.append(conn) elif chain_type == 2: - conn = create_refined_connection(queue, discr) + conn = create_refined_connection(actx, discr) connections.append(conn) - conn = create_refined_connection(queue, conn.to_discr) + conn = create_refined_connection(actx, conn.to_discr) connections.append(conn) - conn = create_refined_connection(queue, conn.to_discr) + conn = create_refined_connection(actx, conn.to_discr) connections.append(conn) elif chain_type == 3 and ndim == 3: - conn = create_refined_connection(queue, discr, threshold=np.inf) + conn = create_refined_connection(actx, discr, threshold=np.inf) connections.append(conn) - conn = create_face_connection(queue, conn.to_discr) + conn = create_face_connection(actx, conn.to_discr) connections.append(conn) else: raise ValueError('unknown test case') @@ -301,7 +306,7 @@ def test_chained_to_direct(ctx_factory, ndim, chain_type, chained = ChainedDiscretizationConnection(connections) t_start = time.time() - direct = flatten_chained_connection(queue, chained) + direct = flatten_chained_connection(actx, chained) t_end = time.time() if visualize: print('[TIME] Flatten: {:.5e}'.format(t_end - t_start)) @@ -315,21 +320,23 @@ def test_chained_to_direct(ctx_factory, ndim, chain_type, to_element_indices[i] += 1 assert np.min(to_element_indices) > 0 + sin = actx.special_func("sin") + def f(x): from six.moves import reduce - return 0.1 * reduce(lambda x, y: x * cl.clmath.sin(5 * y), x) + return 0.1 * reduce(lambda x, y: x * sin(5 * y), x) - x = connections[0].from_discr.nodes().with_queue(queue) + x = thaw(actx, connections[0].from_discr.nodes()) fx = f(x) t_start = time.time() - f1 = direct(queue, fx).get(queue) + f1 = actx.to_numpy(flatten(direct(fx))) t_end = time.time() if visualize: print('[TIME] Direct: {:.5e}'.format(t_end - t_start)) t_start = time.time() - f2 = chained(queue, fx).get(queue) + f2 = actx.to_numpy(flatten(chained(fx))) t_end = time.time() if visualize: print('[TIME] Chained: {:.5e}'.format(t_end - t_start)) @@ -351,27 +358,28 @@ def test_chained_to_direct(ctx_factory, ndim, chain_type, @pytest.mark.parametrize(("ndim", "mesh_name"), [ (2, "starfish"), (3, "torus")]) -def test_reversed_chained_connection(ctx_factory, ndim, mesh_name, visualize=False): +def test_reversed_chained_connection(ctx_factory, ndim, mesh_name): ctx = ctx_factory() queue = cl.CommandQueue(ctx) + actx = PyOpenCLArrayContext(queue) def run(nelements, order): - discr = create_discretization(queue, ndim, + discr = create_discretization(actx, ndim, nelements=nelements, order=order, mesh_name=mesh_name) threshold = 1.0 connections = [] - conn = create_refined_connection(queue, + conn = create_refined_connection(actx, discr, threshold=threshold) connections.append(conn) if ndim == 2: # NOTE: additional refinement makes the 3D meshes explode in size - conn = create_refined_connection(queue, + conn = create_refined_connection(actx, conn.to_discr, threshold=threshold) connections.append(conn) - conn = create_refined_connection(queue, + conn = create_refined_connection(actx, conn.to_discr, threshold=threshold) connections.append(conn) @@ -383,21 +391,21 @@ def test_reversed_chained_connection(ctx_factory, ndim, mesh_name, visualize=Fal reverse = L2ProjectionInverseDiscretizationConnection(chained) # create test vector - from_nodes = chained.from_discr.nodes().with_queue(queue) - to_nodes = 
chained.to_discr.nodes().with_queue(queue) + from_nodes = thaw(actx, chained.from_discr.nodes()) + to_nodes = thaw(actx, chained.to_discr.nodes()) + + cos = actx.special_func("cos") from_x = 0 to_x = 0 for d in range(ndim): - from_x += cl.clmath.cos(from_nodes[d]) ** (d + 1) - to_x += cl.clmath.cos(to_nodes[d]) ** (d + 1) - - from_interp = reverse(queue, to_x) + from_x += cos(from_nodes[d]) ** (d + 1) + to_x += cos(to_nodes[d]) ** (d + 1) - from_interp = from_interp.get(queue) - from_x = from_x.get(queue) + from_interp = reverse(to_x) - return 1.0 / nelements, la.norm(from_interp - from_x) / la.norm(from_x) + return (1.0 / nelements, + flat_norm(from_interp - from_x, np.inf) / flat_norm(from_x, np.inf)) from pytools.convergence import EOCRecorder eoc = EOCRecorder() -- GitLab From fbe843962c10729626785063b98874c4e4bb4866 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sat, 13 Jun 2020 18:28:34 -0500 Subject: [PATCH 048/106] Adapt partition connections to ArrayContext --- .../connection/opposite_face.py | 74 +++++++++---------- test/test_partition.py | 67 +++++++++-------- 2 files changed, 72 insertions(+), 69 deletions(-) diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py index d66da0f9..8f83cd4e 100644 --- a/meshmode/discretization/connection/opposite_face.py +++ b/meshmode/discretization/connection/opposite_face.py @@ -441,7 +441,7 @@ def make_opposite_face_connection(actx, volume_to_bdry_conn): # {{{ make_partition_connection -def make_partition_connection(local_bdry_conn, i_local_part, +def make_partition_connection(actx, local_bdry_conn, i_local_part, remote_bdry, remote_adj_groups, remote_from_elem_faces, remote_from_elem_indices): """ @@ -477,52 +477,50 @@ def make_partition_connection(local_bdry_conn, i_local_part, part_batches = [[] for _ in local_groups] - with cl.CommandQueue(local_bdry_conn.cl_context) as queue: + for i_remote_grp, adj in enumerate(remote_adj_groups): + indices = (i_local_part == adj.neighbor_partitions) + if not np.any(indices): + # Skip because i_remote_grp is not connected to i_local_part. + continue + i_remote_faces = adj.element_faces[indices] + i_local_meshwide_elems = adj.global_neighbors[indices] + i_local_faces = adj.neighbor_faces[indices] - for i_remote_grp, adj in enumerate(remote_adj_groups): - indices = (i_local_part == adj.neighbor_partitions) - if not np.any(indices): - # Skip because i_remote_grp is not connected to i_local_part. 
- continue - i_remote_faces = adj.element_faces[indices] - i_local_meshwide_elems = adj.global_neighbors[indices] - i_local_faces = adj.neighbor_faces[indices] + i_local_grps = find_group_indices(local_groups, i_local_meshwide_elems) - i_local_grps = find_group_indices(local_groups, i_local_meshwide_elems) + for i_local_grp in np.unique(i_local_grps): - for i_local_grp in np.unique(i_local_grps): + elem_base = local_groups[i_local_grp].element_nr_base + local_el_lookup = _make_bdry_el_lookup_table(actx, + local_bdry_conn, + i_local_grp) - elem_base = local_groups[i_local_grp].element_nr_base - local_el_lookup = _make_bdry_el_lookup_table(queue, - local_bdry_conn, - i_local_grp) + for i_remote_face in i_remote_faces: - for i_remote_face in i_remote_faces: - - index_flags = np.logical_and(i_local_grps == i_local_grp, - i_remote_faces == i_remote_face) - if not np.any(index_flags): - continue + index_flags = np.logical_and(i_local_grps == i_local_grp, + i_remote_faces == i_remote_face) + if not np.any(index_flags): + continue - remote_bdry_indices = None - for idxs, face in zip(remote_from_elem_indices[i_remote_grp], - remote_from_elem_faces[i_remote_grp]): - if face == i_remote_face: - remote_bdry_indices = idxs - break - assert remote_bdry_indices is not None + remote_bdry_indices = None + for idxs, face in zip(remote_from_elem_indices[i_remote_grp], + remote_from_elem_faces[i_remote_grp]): + if face == i_remote_face: + remote_bdry_indices = idxs + break + assert remote_bdry_indices is not None - elems = i_local_meshwide_elems[index_flags] - elem_base - faces = i_local_faces[index_flags] - local_bdry_indices = local_el_lookup[elems, faces] + elems = i_local_meshwide_elems[index_flags] - elem_base + faces = i_local_faces[index_flags] + local_bdry_indices = local_el_lookup[elems, faces] - batches = _make_cross_face_batches(queue, - local_bdry, remote_bdry, - i_local_grp, i_remote_grp, - local_bdry_indices, - remote_bdry_indices) + batches = _make_cross_face_batches(actx, + local_bdry, remote_bdry, + i_local_grp, i_remote_grp, + local_bdry_indices, + remote_bdry_indices) - part_batches[i_local_grp].extend(batches) + part_batches[i_local_grp].extend(batches) return DirectDiscretizationConnection( from_discr=remote_bdry, diff --git a/test/test_partition.py b/test/test_partition.py index 54b9d889..ed378e54 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -27,10 +27,10 @@ THE SOFTWARE. 
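# The partition test updated below uses the same actx-first entry points as the
# rest of this series; a condensed sketch of the boundary-restriction setup it
# relies on (cl_ctx, mesh, order, group_factory and i_remote_part as in the
# test body):
import pyopencl as cl
from meshmode.array_context import PyOpenCLArrayContext
from meshmode.mesh import BTAG_PARTITION
from meshmode.discretization import Discretization
from meshmode.discretization.connection import (
        make_face_restriction, check_connection)

actx = PyOpenCLArrayContext(cl.CommandQueue(cl_ctx))
vol_discr = Discretization(actx, mesh, group_factory(order))

bdry_conn = make_face_restriction(actx, vol_discr, group_factory(order),
        BTAG_PARTITION(i_remote_part))   # restrict to faces shared with that partition
check_connection(actx, bdry_conn)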
from six.moves import range import numpy as np -import numpy.linalg as la import pyopencl as cl -import pyopencl.array # noqa -import pyopencl.clmath # noqa + +from meshmode.array_context import PyOpenCLArrayContext +from meshmode.dof_array import thaw, flat_norm from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl @@ -69,6 +69,8 @@ def test_partition_interpolation(ctx_factory, dim, mesh_pars, group_factory = PolynomialWarpAndBlendGroupFactory cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) + actx = PyOpenCLArrayContext(queue) + order = 4 from pytools.convergence import EOCRecorder @@ -79,8 +81,10 @@ def test_partition_interpolation(ctx_factory, dim, mesh_pars, continue eoc_rec[i, j] = EOCRecorder() + sin = actx.special_func("sin") + def f(x): - return 10.*cl.clmath.sin(50.*x) + return 10.*sin(50.*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -107,7 +111,7 @@ def test_partition_interpolation(ctx_factory, dim, mesh_pars, partition_mesh(mesh, part_per_element, i)[0] for i in range(num_parts)] from meshmode.discretization import Discretization - vol_discrs = [Discretization(cl_ctx, part_meshes[i], group_factory(order)) + vol_discrs = [Discretization(actx, part_meshes[i], group_factory(order)) for i in range(num_parts)] from meshmode.mesh import BTAG_PARTITION @@ -120,23 +124,26 @@ def test_partition_interpolation(ctx_factory, dim, mesh_pars, continue # Mark faces within local_mesh that are connected to remote_mesh - local_bdry_conn = make_face_restriction(vol_discrs[i_local_part], + local_bdry_conn = make_face_restriction(actx, vol_discrs[i_local_part], group_factory(order), BTAG_PARTITION(i_remote_part)) # If these parts are not connected, don't bother checking the error - bdry_nodes = local_bdry_conn.to_discr.nodes() - if bdry_nodes.size == 0: + bdry_nelements = sum( + grp.nelements for grp in local_bdry_conn.to_discr.groups) + if bdry_nelements == 0: eoc_rec[i_local_part, i_remote_part] = None continue # Mark faces within remote_mesh that are connected to local_mesh - remote_bdry_conn = make_face_restriction(vol_discrs[i_remote_part], + remote_bdry_conn = make_face_restriction(actx, vol_discrs[i_remote_part], group_factory(order), BTAG_PARTITION(i_local_part)) - assert bdry_nodes.size == remote_bdry_conn.to_discr.nodes().size, \ - "partitions do not have the same number of connected nodes" + remote_bdry_nelements = sum( + grp.nelements for grp in remote_bdry_conn.to_discr.groups) + assert bdry_nelements == remote_bdry_nelements, \ + "partitions do not have the same number of connected elements" # Gather just enough information for the connection local_bdry = local_bdry_conn.to_discr @@ -166,27 +173,25 @@ def test_partition_interpolation(ctx_factory, dim, mesh_pars, for grp_batches in remote_batches] # Connect from remote_mesh to local_mesh - remote_to_local_conn = make_partition_connection(local_bdry_conn, - i_local_part, - remote_bdry, - remote_adj_groups, - remote_from_elem_faces, - remote_from_elem_indices) + remote_to_local_conn = make_partition_connection( + actx, local_bdry_conn, i_local_part, remote_bdry, + remote_adj_groups, remote_from_elem_faces, + remote_from_elem_indices) + # Connect from local mesh to remote mesh - local_to_remote_conn = make_partition_connection(remote_bdry_conn, - i_remote_part, - local_bdry, - local_adj_groups, - local_from_elem_faces, - local_from_elem_indices) - check_connection(remote_to_local_conn) - check_connection(local_to_remote_conn) - - true_local_points = 
f(local_bdry.nodes()[0].with_queue(queue)) - remote_points = local_to_remote_conn(queue, true_local_points) - local_points = remote_to_local_conn(queue, remote_points) - - err = la.norm((true_local_points - local_points).get(), np.inf) + local_to_remote_conn = make_partition_connection( + actx, remote_bdry_conn, i_remote_part, local_bdry, + local_adj_groups, local_from_elem_faces, + local_from_elem_indices) + + check_connection(actx, remote_to_local_conn) + check_connection(actx, local_to_remote_conn) + + true_local_points = f(thaw(actx, local_bdry.nodes()[0])) + remote_points = local_to_remote_conn(true_local_points) + local_points = remote_to_local_conn(remote_points) + + err = flat_norm(true_local_points - local_points, np.inf) eoc_rec[i_local_part, i_remote_part].add_data_point(1./n, err) for (i, j), e in eoc_rec.items(): -- GitLab From 439c9943e8b3e9f671ace9ab6e73f57e1421c49a Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sat, 13 Jun 2020 18:29:00 -0500 Subject: [PATCH 049/106] test_partition: Update runner snippet --- test/test_partition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_partition.py b/test/test_partition.py index ed378e54..fcbed917 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -559,7 +559,7 @@ if __name__ == "__main__": if len(sys.argv) > 1: exec(sys.argv[1]) else: - from py.test.cmdline import main + from pytest import main main([__file__]) # vim: fdm=marker -- GitLab From 1a160936f291ca4603ca3aaa67a403e57a667e1f Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sat, 13 Jun 2020 18:36:30 -0500 Subject: [PATCH 050/106] Remove remaining direct OpenCL usage in meshmode.discretization.visualization --- meshmode/discretization/visualization.py | 58 ++++++++++++------------ 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/meshmode/discretization/visualization.py b/meshmode/discretization/visualization.py index da1dddb0..2dd08692 100644 --- a/meshmode/discretization/visualization.py +++ b/meshmode/discretization/visualization.py @@ -130,6 +130,18 @@ class Visualizer(object): "element groups") return resampled.array_context.to_numpy(resampled[0]).reshape(-1) + @memoize_method + def _vis_nodes(self): + if len(self.vis_discr.groups) != 1: + raise NotImplementedError("visualization with multiple " + "element groups") + + actx = self.vis_discr._setup_actx + return np.array([ + actx.to_numpy(actx.thaw(ary[0])) + for ary in self.vis_discr.nodes() + ]) + # {{{ vis sub-element connectivity @memoize_method @@ -233,10 +245,8 @@ class Visualizer(object): do_show = kwargs.pop("do_show", True) - with cl.CommandQueue(self.vis_discr.cl_context) as queue: - nodes = self.vis_discr.nodes().with_queue(queue).get() - - field = self._resample_and_get(queue, field) + nodes = self._vis_nodes() + field = self._resample_to_numpy(field) assert nodes.shape[0] == self.vis_discr.ambient_dim #mlab.points3d(nodes[0], nodes[1], 0*nodes[0]) @@ -281,18 +291,6 @@ class Visualizer(object): # {{{ vtk - @memoize_method - def _vis_nodes(self): - if len(self.vis_discr.groups) != 1: - raise NotImplementedError("visualization with multiple " - "element groups") - - actx = self.vis_discr._setup_actx - return np.array([ - actx.to_numpy(actx.thaw(ary[0])) - for ary in self.vis_discr.nodes() - ]) - def write_vtk_file(self, file_name, names_and_fields, compressor=None, real_only=False, @@ -381,10 +379,8 @@ class Visualizer(object): vmax = kwargs.pop("vmax", None) norm = kwargs.pop("norm", None) - with cl.CommandQueue(self.vis_discr.cl_context) 
as queue: - nodes = self.vis_discr.nodes().with_queue(queue).get() - - field = self._resample_and_get(queue, field) + nodes = self._vis_nodes() + field = self._resample_to_numpy(field) assert nodes.shape[0] == self.vis_discr.ambient_dim @@ -472,16 +468,18 @@ def draw_curve(discr): plt.plot(mesh.vertices[0], mesh.vertices[1], "o") color = plt.cm.rainbow(np.linspace(0, 1, len(discr.groups))) - with cl.CommandQueue(discr.cl_context) as queue: - for i, group in enumerate(discr.groups): - group_nodes = group.view(discr.nodes()).get(queue=queue) - artist_handles = plt.plot( - group_nodes[0].T, - group_nodes[1].T, "-x", - color=color[i]) - - if artist_handles: - artist_handles[0].set_label("Group %d" % i) + for igrp, group in enumerate(discr.groups): + group_nodes = np.array([ + discr._setup_actx.to_numpy(discr.nodes()[iaxis][igrp]) + for iaxis in range(discr.ambient_dim) + ]) + artist_handles = plt.plot( + group_nodes[0].T, + group_nodes[1].T, "-x", + color=color[igrp]) + + if artist_handles: + artist_handles[0].set_label("Group %d" % igrp) # }}} -- GitLab From c3022b2998c9126f6ffeec3a2c2fd119fd4e8234 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 14 Jun 2020 14:39:13 -0500 Subject: [PATCH 051/106] Fix dof_array.unflatten --- meshmode/dof_array.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 92562e02..016cf92e 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -194,8 +194,6 @@ def unflatten(actx: ArrayContext, discr: "_Discretization", ary) -> np.ndarray: lambda subary: unflatten(actx, discr, subary), ary) - actx = ary.array_context - @memoize_in(actx, "unflatten_prg") def prg(): return make_loopy_program( @@ -204,13 +202,19 @@ def unflatten(actx: ArrayContext, discr: "_Discretization", ary) -> np.ndarray: name="unflatten") group_sizes = [grp.nelements*grp.nunit_dofs for grp in discr.groups] + if ary.size != sum(group_sizes): + raise ValueError("array has size %d, expected %d" + % (ary.size, sum(group_sizes))) + group_starts = np.cumsum([0] + group_sizes) - return DOFArray.from_list(None, [ + return DOFArray.from_list(actx, [ actx.freeze( actx.call_loopy( prg(), - grp_start=grp_start, ary=ary + grp_start=grp_start, ary=ary, + nelements=grp.nelements, + nunit_dofs=grp.nunit_dofs, )["result"]) for grp_start, grp in zip(group_starts, discr.groups)]) -- GitLab From 73f10285693988584ebe5ce34bc13062d1d8b289 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 14 Jun 2020 14:40:01 -0500 Subject: [PATCH 052/106] Adapt MPI setup helper and MPI test to ArrayContext --- meshmode/distributed.py | 26 +++++++++++++------------- test/test_partition.py | 38 +++++++++++++++++++------------------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/meshmode/distributed.py b/meshmode/distributed.py index 54135fb0..bbe24bb0 100644 --- a/meshmode/distributed.py +++ b/meshmode/distributed.py @@ -128,15 +128,14 @@ class MPIBoundaryCommSetupHelper(object): .. automethod:: __call__ .. 
automethod:: is_ready """ - def __init__(self, mpi_comm, queue, local_bdry_conn, i_remote_part, + def __init__(self, mpi_comm, actx, local_bdry_conn, i_remote_part, bdry_grp_factory): """ :arg mpi_comm: A :class:`MPI.Intracomm` - :arg queue: :arg i_remote_part: The part number of the remote partition """ self.mpi_comm = mpi_comm - self.queue = queue + self.array_context = actx self.i_local_part = mpi_comm.Get_rank() self.i_remote_part = i_remote_part self.local_bdry_conn = local_bdry_conn @@ -151,9 +150,11 @@ class MPIBoundaryCommSetupHelper(object): for i in range(len(local_mesh.groups))] local_to_elem_faces = [[batch.to_element_face for batch in grp_batches] for grp_batches in local_batches] - local_to_elem_indices = [[batch.to_element_indices.get(queue=self.queue) - for batch in grp_batches] - for grp_batches in local_batches] + local_to_elem_indices = [ + [ + self.array_context.to_numpy(batch.to_element_indices) + for batch in grp_batches] + for grp_batches in local_batches] local_data = {'bdry_mesh': local_bdry.mesh, 'adj': local_adj_groups, @@ -190,7 +191,7 @@ class MPIBoundaryCommSetupHelper(object): from meshmode.discretization import Discretization remote_bdry_mesh = remote_data['bdry_mesh'] - remote_bdry = Discretization(self.queue.context, remote_bdry_mesh, + remote_bdry = Discretization(self.array_context, remote_bdry_mesh, self.bdry_grp_factory) remote_adj_groups = remote_data['adj'] remote_to_elem_faces = remote_data['to_elem_faces'] @@ -198,12 +199,11 @@ class MPIBoundaryCommSetupHelper(object): # Connect local_mesh to remote_mesh from meshmode.discretization.connection import make_partition_connection - remote_to_local_bdry_conn = make_partition_connection(self.local_bdry_conn, - self.i_local_part, - remote_bdry, - remote_adj_groups, - remote_to_elem_faces, - remote_to_elem_indices) + remote_to_local_bdry_conn = make_partition_connection( + self.array_context, self.local_bdry_conn, self.i_local_part, + remote_bdry, remote_adj_groups, remote_to_elem_faces, + remote_to_elem_indices) + self.send_req.wait() return remote_to_local_bdry_conn diff --git a/test/test_partition.py b/test/test_partition.py index fcbed917..f34fe3e8 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -30,7 +30,7 @@ import numpy as np import pyopencl as cl from meshmode.array_context import PyOpenCLArrayContext -from meshmode.dof_array import thaw, flat_norm +from meshmode.dof_array import thaw, flatten, unflatten, flat_norm from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl @@ -364,9 +364,10 @@ def _test_mpi_boundary_swap(dim, order, num_groups): group_factory = PolynomialWarpAndBlendGroupFactory(order) cl_ctx = cl.create_some_context() queue = cl.CommandQueue(cl_ctx) + actx = PyOpenCLArrayContext(queue) from meshmode.discretization import Discretization - vol_discr = Discretization(cl_ctx, local_mesh, group_factory) + vol_discr = Discretization(actx, local_mesh, group_factory) from meshmode.distributed import get_connected_partitions connected_parts = get_connected_partitions(local_mesh) @@ -378,11 +379,11 @@ def _test_mpi_boundary_swap(dim, order, num_groups): from meshmode.mesh import BTAG_PARTITION for i_remote_part in connected_parts: local_bdry_conns[i_remote_part] = make_face_restriction( - vol_discr, group_factory, BTAG_PARTITION(i_remote_part)) + actx, vol_discr, group_factory, BTAG_PARTITION(i_remote_part)) setup_helper = bdry_setup_helpers[i_remote_part] = \ MPIBoundaryCommSetupHelper( - mpi_comm, queue, local_bdry_conns[i_remote_part], + mpi_comm, actx, 
local_bdry_conns[i_remote_part], i_remote_part, bdry_grp_factory=group_factory) setup_helper.post_sends() @@ -394,14 +395,14 @@ def _test_mpi_boundary_swap(dim, order, num_groups): if setup_helper.is_setup_ready(): assert bdry_setup_helpers.pop(i_remote_part) is setup_helper conn = setup_helper.complete_setup() - check_connection(conn) + check_connection(actx, conn) remote_to_local_bdry_conns[i_remote_part] = conn break # FIXME: Not ideal, busy-waits _test_data_transfer(mpi_comm, - queue, + actx, local_bdry_conns, remote_to_local_bdry_conns, connected_parts) @@ -410,12 +411,14 @@ def _test_mpi_boundary_swap(dim, order, num_groups): # TODO -def _test_data_transfer(mpi_comm, queue, local_bdry_conns, +def _test_data_transfer(mpi_comm, actx, local_bdry_conns, remote_to_local_bdry_conns, connected_parts): from mpi4py import MPI + sin = actx.special_func("sin") + def f(x): - return 10*cl.clmath.sin(20.*x) + return 10*sin(20.*x) ''' Here is a simplified example of what happens from @@ -440,13 +443,13 @@ def _test_data_transfer(mpi_comm, queue, local_bdry_conns, for i_remote_part in connected_parts: conn = remote_to_local_bdry_conns[i_remote_part] bdry_discr = local_bdry_conns[i_remote_part].to_discr - bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + bdry_x = thaw(actx, bdry_discr.nodes()[0]) true_local_f = f(bdry_x) - remote_f = conn(queue, true_local_f) + remote_f = conn(true_local_f) # 2. - send_reqs.append(mpi_comm.isend(remote_f.get(queue=queue), + send_reqs.append(mpi_comm.isend(actx.to_numpy(flatten(remote_f)), dest=i_remote_part, tag=TAG_SEND_REMOTE_NODES)) @@ -476,12 +479,9 @@ def _test_data_transfer(mpi_comm, queue, local_bdry_conns, send_reqs = [] for i_remote_part in connected_parts: conn = remote_to_local_bdry_conns[i_remote_part] - local_f_np = remote_to_local_f_data[i_remote_part] - local_f_cl = cl.array.Array(queue, - shape=local_f_np.shape, - dtype=local_f_np.dtype) - local_f_cl.set(local_f_np) - remote_f = conn(queue, local_f_cl).get(queue=queue) + local_f = unflatten(actx, conn.from_discr, + actx.from_numpy(remote_to_local_f_data[i_remote_part])) + remote_f = actx.to_numpy(flatten(conn(local_f))) # 5. send_reqs.append(mpi_comm.isend(remote_f, @@ -513,9 +513,9 @@ def _test_data_transfer(mpi_comm, queue, local_bdry_conns, # 7. 
for i_remote_part in connected_parts: bdry_discr = local_bdry_conns[i_remote_part].to_discr - bdry_x = bdry_discr.nodes()[0].with_queue(queue=queue) + bdry_x = thaw(actx, bdry_discr.nodes()[0]) - true_local_f = f(bdry_x).get(queue=queue) + true_local_f = actx.to_numpy(flatten(f(bdry_x))) local_f = local_f_data[i_remote_part] from numpy.linalg import norm -- GitLab From d3e1b36ae8dca0cb83902207f5a4624ef680be87 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 14 Jun 2020 18:48:06 -0500 Subject: [PATCH 053/106] ArrayContext: support multi-argument special functions --- meshmode/array_context.py | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 0808444b..d318be75 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -108,6 +108,21 @@ class ArrayContext: """ raise NotImplementedError + @memoize_method + def _get_special_func_loopy_program(self, name, nargs): + from pymbolic import var + iel = var("iel") + idof = var("idof") + return make_loopy_program( + "{[iel, idof]: 0<=iel Date: Sun, 14 Jun 2020 18:48:32 -0500 Subject: [PATCH 054/106] Fix Discretization.quad_weights --- meshmode/discretization/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index c96e9aae..980146fa 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -322,10 +322,12 @@ class Discretization(object): actx = self._setup_actx - return _DOFArray(None, [ + return _DOFArray.from_list(None, [ actx.freeze( actx.call_loopy( - prg(), weights=actx.from_numpy(grp.weights) + prg(), + weights=actx.from_numpy(grp.weights), + nelements=grp.nelements, )["result"]) for grp in self.groups]) -- GitLab From b49b58be41131e13437880ced71e8f07d623e58d Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 14 Jun 2020 18:49:14 -0500 Subject: [PATCH 055/106] Fix pytential-dependent tests in test_meshmode --- test/test_meshmode.py | 72 +++++++++++++++++++++++-------------------- 1 file changed, 38 insertions(+), 34 deletions(-) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index 64e74012..d4952e38 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -567,13 +567,14 @@ def test_3d_orientation(ctx_factory, what, mesh_gen_func, visualize=False): ctx = ctx_factory() queue = cl.CommandQueue(ctx) + actx = PyOpenCLArrayContext(queue) mesh = mesh_gen_func() logger.info("%d elements" % mesh.nelements) from meshmode.discretization import Discretization - discr = Discretization(ctx, mesh, + discr = Discretization(actx, mesh, PolynomialWarpAndBlendGroupFactory(1)) from pytential import bind, sym @@ -594,17 +595,18 @@ def test_3d_orientation(ctx_factory, what, mesh_gen_func, visualize=False): normal_outward_expr = ( sym.normal(mesh.ambient_dim) | sym.nodes(mesh.ambient_dim)) - normal_outward_check = bind(discr, normal_outward_expr)(queue).as_scalar() > 0 + normal_outward_check = actx.to_numpy( + flatten(bind(discr, normal_outward_expr)(actx).as_scalar())) > 0 - assert normal_outward_check.get().all(), normal_outward_check.get() + assert normal_outward_check.all(), normal_outward_check # }}} - normals = bind(discr, sym.normal(mesh.ambient_dim).xproject(1))(queue) + normals = bind(discr, sym.normal(mesh.ambient_dim).xproject(1))(actx) if visualize: from meshmode.discretization.visualization import make_visualizer - vis = make_visualizer(queue, discr, 
1) + vis = make_visualizer(actx, discr, 1) vis.write_vtk_file("normals.vtu", [ ("normals", normals), @@ -677,6 +679,7 @@ def test_sanity_single_element(ctx_factory, dim, order, visualize=False): cl_ctx = ctx_factory() queue = cl.CommandQueue(cl_ctx) + actx = PyOpenCLArrayContext(queue) from modepy.tools import unit_vertices vertices = unit_vertices(dim).T.copy() @@ -697,21 +700,20 @@ def test_sanity_single_element(ctx_factory, dim, order, visualize=False): from meshmode.discretization import Discretization from meshmode.discretization.poly_element import \ PolynomialWarpAndBlendGroupFactory - vol_discr = Discretization(cl_ctx, mesh, + vol_discr = Discretization(actx, mesh, PolynomialWarpAndBlendGroupFactory(order+3)) # {{{ volume calculation check - vol_x = vol_discr.nodes().with_queue(queue) + vol_x = thaw(actx, vol_discr.nodes()) - vol_one = vol_x[0].copy() - vol_one.fill(1) + vol_one = vol_x[0] * 0 + 1 from pytential import norm, integral # noqa from pytools import factorial true_vol = 1/factorial(dim) * 2**dim - comp_vol = integral(vol_discr, queue, vol_one) + comp_vol = integral(vol_discr, vol_one) rel_vol_err = abs(true_vol - comp_vol) / true_vol assert rel_vol_err < 1e-12 @@ -722,7 +724,7 @@ def test_sanity_single_element(ctx_factory, dim, order, visualize=False): from meshmode.discretization.connection import make_face_restriction bdry_connection = make_face_restriction( - vol_discr, PolynomialWarpAndBlendGroupFactory(order + 3), + actx, vol_discr, PolynomialWarpAndBlendGroupFactory(order + 3), BTAG_ALL) bdry_discr = bdry_connection.to_discr @@ -732,12 +734,12 @@ def test_sanity_single_element(ctx_factory, dim, order, visualize=False): from meshmode.discretization.visualization import make_visualizer #vol_vis = make_visualizer(queue, vol_discr, 4) - bdry_vis = make_visualizer(queue, bdry_discr, 4) + bdry_vis = make_visualizer(actx, bdry_discr, 4) # }}} from pytential import bind, sym - bdry_normals = bind(bdry_discr, sym.normal(dim))(queue).as_vector(dtype=object) + bdry_normals = bind(bdry_discr, sym.normal(dim))(actx).as_vector(dtype=object) if visualize: bdry_vis.write_vtk_file("boundary.vtu", [ @@ -747,9 +749,10 @@ def test_sanity_single_element(ctx_factory, dim, order, visualize=False): normal_outward_check = bind(bdry_discr, sym.normal(dim) | (sym.nodes(dim) + 0.5*sym.ones_vec(dim)), - )(queue).as_scalar() > 0 + )(actx).as_scalar() - assert normal_outward_check.get().all(), normal_outward_check.get() + normal_outward_check = actx.to_numpy(flatten(normal_outward_check) > 0) + assert normal_outward_check.all(), normal_outward_check # }}} @@ -815,14 +818,14 @@ def test_sanity_qhull_nd(ctx_factory, dim, order): ("ball-radius-1.step", 3), ]) @pytest.mark.parametrize("mesh_order", [1, 2]) -def test_sanity_balls(ctx_factory, src_file, dim, mesh_order, - visualize=False): +def test_sanity_balls(ctx_factory, src_file, dim, mesh_order, visualize=False): pytest.importorskip("pytential") logging.basicConfig(level=logging.INFO) ctx = ctx_factory() queue = cl.CommandQueue(ctx) + actx = PyOpenCLArrayContext(queue) from pytools.convergence import EOCRecorder vol_eoc_rec = EOCRecorder() @@ -838,18 +841,20 @@ def test_sanity_balls(ctx_factory, src_file, dim, mesh_order, mesh = generate_gmsh( FileSource(src_file), dim, order=mesh_order, other_options=["-string", "Mesh.CharacteristicLengthMax = %g;" % h], - force_ambient_dim=dim) + force_ambient_dim=dim, + target_unit="MM") logger.info("%d elements" % mesh.nelements) # {{{ discretizations and connections from meshmode.discretization import 
Discretization - vol_discr = Discretization(ctx, mesh, + vol_discr = Discretization(actx, mesh, InterpolatoryQuadratureSimplexGroupFactory(quad_order)) from meshmode.discretization.connection import make_face_restriction bdry_connection = make_face_restriction( + actx, vol_discr, InterpolatoryQuadratureSimplexGroupFactory(quad_order), BTAG_ALL) @@ -860,8 +865,8 @@ def test_sanity_balls(ctx_factory, src_file, dim, mesh_order, # {{{ visualizers from meshmode.discretization.visualization import make_visualizer - vol_vis = make_visualizer(queue, vol_discr, 20) - bdry_vis = make_visualizer(queue, bdry_discr, 20) + vol_vis = make_visualizer(actx, vol_discr, 20) + bdry_vis = make_visualizer(actx, bdry_discr, 20) # }}} @@ -869,27 +874,25 @@ def test_sanity_balls(ctx_factory, src_file, dim, mesh_order, true_surf = 2*np.pi**(dim/2)/gamma(dim/2) true_vol = true_surf/dim - vol_x = vol_discr.nodes().with_queue(queue) + vol_x = thaw(actx, vol_discr.nodes()) - vol_one = vol_x[0].copy() - vol_one.fill(1) + vol_one = vol_x[0]*0 + 1 from pytential import norm, integral # noqa - comp_vol = integral(vol_discr, queue, vol_one) + comp_vol = integral(vol_discr, vol_one) rel_vol_err = abs(true_vol - comp_vol) / true_vol vol_eoc_rec.add_data_point(h, rel_vol_err) print("VOL", true_vol, comp_vol) - bdry_x = bdry_discr.nodes().with_queue(queue) + bdry_x = thaw(actx, bdry_discr.nodes()) - bdry_one_exact = bdry_x[0].copy() - bdry_one_exact.fill(1) + bdry_one_exact = bdry_x[0] * 0 + 1 - bdry_one = bdry_connection(queue, vol_one).with_queue(queue) - intp_err = norm(bdry_discr, queue, bdry_one-bdry_one_exact) + bdry_one = bdry_connection(vol_one) + intp_err = norm(bdry_discr, bdry_one-bdry_one_exact) assert intp_err < 1e-14 - comp_surf = integral(bdry_discr, queue, bdry_one) + comp_surf = integral(bdry_discr, bdry_one) rel_surf_err = abs(true_surf - comp_surf) / true_surf surf_eoc_rec.add_data_point(h, rel_surf_err) print("SURF", true_surf, comp_surf) @@ -900,7 +903,7 @@ def test_sanity_balls(ctx_factory, src_file, dim, mesh_order, ("area_el", bind( vol_discr, sym.area_element(mesh.ambient_dim, mesh.ambient_dim)) - (queue)), + (actx)), ]) bdry_vis.write_vtk_file("boundary-h=%g.vtu" % h, [("f", bdry_one)]) @@ -908,9 +911,10 @@ def test_sanity_balls(ctx_factory, src_file, dim, mesh_order, normal_outward_check = bind(bdry_discr, sym.normal(mesh.ambient_dim) | sym.nodes(mesh.ambient_dim), - )(queue).as_scalar() > 0 + )(actx).as_scalar() - assert normal_outward_check.get().all(), normal_outward_check.get() + normal_outward_check = actx.to_numpy(flatten(normal_outward_check) > 0) + assert normal_outward_check.all(), normal_outward_check # }}} -- GitLab From 0e7d53160fcd959a600f02e45790f35ca6b00c44 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 15 Jun 2020 13:23:32 -0500 Subject: [PATCH 056/106] Re-dangle requirements.txt for branches of pyopencl, loopy, and pytential --- requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 62920342..9672411f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ numpy git+https://gitlab.tiker.net/inducer/gmsh_interop.git git+https://gitlab.tiker.net/inducer/modepy.git -git+https://gitlab.tiker.net/inducer/pyopencl.git +git+https://gitlab.tiker.net/inducer/pyopencl.git@better-empty-handling git+https://gitlab.tiker.net/inducer/islpy.git @@ -9,12 +9,12 @@ git+https://gitlab.tiker.net/inducer/islpy.git git+https://gitlab.tiker.net/inducer/pymbolic.git # also depends on pymbolic, so should come 
after it -git+https://gitlab.tiker.net/inducer/loopy.git +git+https://gitlab.tiker.net/inducer/loopy.git@empty-ndrange-enqueue # more pytential dependencies git+https://gitlab.tiker.net/inducer/boxtree.git git+https://gitlab.tiker.net/inducer/sumpy.git -git+https://gitlab.tiker.net/inducer/pytential.git +git+https://gitlab.tiker.net/inducer/pytential.git@array-context # requires pymetis for tests for partition_mesh git+https://gitlab.tiker.net/inducer/pymetis.git -- GitLab From 9b37a1a4d61bfa7556556d6337b2cbea717d98c9 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 15 Jun 2020 15:00:38 -0500 Subject: [PATCH 057/106] Get pytools from git in requirements.txt --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 9672411f..5c120b3a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ numpy +git+https://gitlab.tiker.net/inducer/pytools.git git+https://gitlab.tiker.net/inducer/gmsh_interop.git git+https://gitlab.tiker.net/inducer/modepy.git git+https://gitlab.tiker.net/inducer/pyopencl.git@better-empty-handling -- GitLab From 4d020bc7d3efa95ef055bd359d7df12d10cfe27f Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 15 Jun 2020 15:35:51 -0500 Subject: [PATCH 058/106] Fix examples/plot-connectivity.py for ArrayContext --- examples/plot-connectivity.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/examples/plot-connectivity.py b/examples/plot-connectivity.py index 068983ca..48e6be76 100644 --- a/examples/plot-connectivity.py +++ b/examples/plot-connectivity.py @@ -2,7 +2,8 @@ from __future__ import division import numpy as np # noqa import pyopencl as cl - +from meshmode.array_context import PyOpenCLArrayContext +from meshmode.dof_array import thaw order = 4 @@ -10,6 +11,7 @@ order = 4 def main(): cl_ctx = cl.create_some_context() queue = cl.CommandQueue(cl_ctx) + actx = PyOpenCLArrayContext(queue) from meshmode.mesh.generation import ( # noqa generate_icosphere, generate_icosahedron, @@ -23,13 +25,13 @@ def main(): PolynomialWarpAndBlendGroupFactory discr = Discretization( - cl_ctx, mesh, PolynomialWarpAndBlendGroupFactory(order)) + actx, mesh, PolynomialWarpAndBlendGroupFactory(order)) from meshmode.discretization.visualization import make_visualizer - vis = make_visualizer(queue, discr, order) + vis = make_visualizer(actx, discr, order) vis.write_vtk_file("geometry.vtu", [ - ("f", discr.nodes()[0]), + ("f", thaw(actx, discr.nodes()[0])), ]) from meshmode.discretization.visualization import \ -- GitLab From 3de08d98fe7dd0b181b364191e52da79bbd38d65 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 10:56:08 -0500 Subject: [PATCH 059/106] Use group.ndofs in unflatten --- meshmode/dof_array.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 016cf92e..07aa96d4 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -201,7 +201,7 @@ def unflatten(actx: ArrayContext, discr: "_Discretization", ary) -> np.ndarray: "result[iel, idof] = ary[grp_start + iel*nunit_dofs + idof]", name="unflatten") - group_sizes = [grp.nelements*grp.nunit_dofs for grp in discr.groups] + group_sizes = [grp.ndofs for grp in discr.groups] if ary.size != sum(group_sizes): raise ValueError("array has size %d, expected %d" % (ary.size, sum(group_sizes))) -- GitLab From 0278bbe267c663606552e3037c02114891eb0932 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 10:56:27 -0500 Subject: 
[PATCH 060/106] Bump version for ArrayContext change --- meshmode/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/version.py b/meshmode/version.py index 5cea8857..82ea6fb1 100644 --- a/meshmode/version.py +++ b/meshmode/version.py @@ -1,2 +1,2 @@ -VERSION = (2020, 1) +VERSION = (2020, 2) VERSION_TEXT = ".".join(str(i) for i in VERSION) -- GitLab From 77688a8871074baa8082a058bbdfd7cfb9f9ba6c Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 10:57:13 -0500 Subject: [PATCH 061/106] Implement vis for multi-group discretizations --- meshmode/discretization/visualization.py | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/meshmode/discretization/visualization.py b/meshmode/discretization/visualization.py index 2dd08692..24fe92f6 100644 --- a/meshmode/discretization/visualization.py +++ b/meshmode/discretization/visualization.py @@ -22,7 +22,7 @@ THE SOFTWARE. import numpy as np from pytools import memoize_method, Record -from meshmode.dof_array import DOFArray +from meshmode.dof_array import DOFArray, flatten, thaw __doc__ = """ @@ -121,24 +121,18 @@ class Visualizer(object): from numbers import Number if isinstance(vec, Number): - raise NotImplementedError("visualizing constants") - #return np.ones(self.connection.to_discr.nnodes) * fld + nnodes = sum(grp.ndofs for grp in self.connection.to_discr.groups) + return np.ones(nnodes) * vec else: resampled = self.connection(vec) - if len(resampled) != 1: - raise NotImplementedError("visualization with multiple " - "element groups") - return resampled.array_context.to_numpy(resampled[0]).reshape(-1) + actx = resampled.array_context + return actx.to_numpy(flatten(resampled)) @memoize_method def _vis_nodes(self): - if len(self.vis_discr.groups) != 1: - raise NotImplementedError("visualization with multiple " - "element groups") - actx = self.vis_discr._setup_actx return np.array([ - actx.to_numpy(actx.thaw(ary[0])) + actx.to_numpy(flatten(thaw(actx, ary))) for ary in self.vis_discr.nodes() ]) @@ -336,7 +330,7 @@ class Visualizer(object): "element groups") grid = UnstructuredGrid( - (self.vis_discr.groups[0].ndofs, + (sum(grp.ndofs for grp in self.vis_discr.groups), DataArray("points", nodes.reshape(self.vis_discr.ambient_dim, -1), vector_format=VF_LIST_OF_COMPONENTS)), -- GitLab From 34dd4ede1a4984da8054853b7390ffcb5024b809 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Wed, 17 Jun 2020 19:04:13 +0200 Subject: [PATCH 062/106] Point pyopencl back at master in requirements.txt --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5c120b3a..0e1d749b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ numpy git+https://gitlab.tiker.net/inducer/pytools.git git+https://gitlab.tiker.net/inducer/gmsh_interop.git git+https://gitlab.tiker.net/inducer/modepy.git -git+https://gitlab.tiker.net/inducer/pyopencl.git@better-empty-handling +git+https://gitlab.tiker.net/inducer/pyopencl.git git+https://gitlab.tiker.net/inducer/islpy.git -- GitLab From 39f2cf93f0a2d7b5d31bcefdba6944b5cffae214 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 12:58:13 -0500 Subject: [PATCH 063/106] Docs: PyOpenCLArrayContext always has a queue --- meshmode/array_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index d318be75..7063521a 100644 --- a/meshmode/array_context.py 
+++ b/meshmode/array_context.py @@ -177,7 +177,7 @@ class PyOpenCLArrayContext(ArrayContext): .. attribute:: queue - A :class:`pyopencl.CommandQueue` or *None*. + A :class:`pyopencl.CommandQueue`. .. attribute:: allocator """ -- GitLab From 7b534a3f1ddb48ebb231834b0409a1d168732749 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 13:03:47 -0500 Subject: [PATCH 064/106] Remove stale doc points for ArrayContext.from_numpy_{constant,data} --- meshmode/array_context.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 7063521a..d2e5da66 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -60,8 +60,7 @@ class ArrayContext: .. automethod:: zeros .. automethod:: empty_like .. automethod:: zeros_like - .. automethod:: from_numpy_constant - .. automethod:: from_numpy_data + .. automethod:: from_numpy .. automethod:: to_numpy .. automethod:: call_loopy .. automethod:: special_func -- GitLab From 3fa213e26dd403764febbd650504b5d51219a0a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Wed, 17 Jun 2020 20:06:00 +0200 Subject: [PATCH 065/106] Apply suggestion to meshmode/dof_array.py --- meshmode/dof_array.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 07aa96d4..0bba5971 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -126,7 +126,7 @@ def thaw(actx: ArrayContext, ary: np.ndarray) -> np.ndarray: def freeze(ary: np.ndarray) -> np.ndarray: - r"""Call :meth:`~meshmode.array_context.arrayContext.freeze` on the element + r"""Call :meth:`~meshmode.array_context.ArrayContext.freeze` on the element group arrays making up the :class:`DOFArray`, using the :class:`~meshmode.array_context.ArrayContext` in *ary*. -- GitLab From f3bd935a2ff9c87bf8383419e492e8ca1dcff411 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Wed, 17 Jun 2020 20:06:08 +0200 Subject: [PATCH 066/106] Apply suggestion to meshmode/discretization/connection/chained.py --- meshmode/discretization/connection/chained.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/connection/chained.py b/meshmode/discretization/connection/chained.py index 66028716..9990e865 100644 --- a/meshmode/discretization/connection/chained.py +++ b/meshmode/discretization/connection/chained.py @@ -271,7 +271,7 @@ def make_full_resample_matrix(actx, connection): This method will be very slow, both in terms of speed and memory usage, and should only be used for testing or if absolutely necessary. - :arg queue: a :class:`meshmode.array_context.ArrayContext`. + :arg actx: a :class:`meshmode.array_context.ArrayContext`. :arg connection: a :class:`~meshmode.discretization.connection.DiscretizationConnection`. 
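# Illustrative sketch, not part of the patch above: this mirrors how
# test_chained.py in this series exercises the dense resampling matrix.
# `actx` is assumed to be a PyOpenCLArrayContext and `chained` a
# ChainedDiscretizationConnection assembled from a list of connections,
# as set up in that test.
from meshmode.discretization.connection.chained import \
        make_full_resample_matrix

resample_mat = actx.to_numpy(make_full_resample_matrix(actx, chained))
# resample_mat acts on flattened from_discr data and reproduces what
# applying the chained connection does, which is what the test checks.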
:return: a :class:`pyopencl.array.Array` of shape -- GitLab From dc646c5cb9436f7ba123c196c8965c673f0e9d6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Wed, 17 Jun 2020 20:06:19 +0200 Subject: [PATCH 067/106] Apply suggestion to meshmode/dof_array.py --- meshmode/dof_array.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 0bba5971..1f1a1692 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -79,7 +79,7 @@ class DOFArray(np.ndarray): def __array_finalize__(self, obj): if obj is None: return - self.array_context = getattr(obj, 'array_context', None) + self.array_context = getattr(obj, "array_context", None) @property def entry_dtype(self): -- GitLab From d73814cd1fe6fc32fb760eac25ce8aba8198d301 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Wed, 17 Jun 2020 20:06:32 +0200 Subject: [PATCH 068/106] Apply suggestion to meshmode/array_context.py --- meshmode/array_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index d2e5da66..1dfa0050 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -103,7 +103,7 @@ class ArrayContext: It must have :class:`loopy.Options.return_dict` set. :return: a :class:`dict` of outputs from the program, each an - array understood by the context. + array understood by the context. """ raise NotImplementedError -- GitLab From 7daadd40b73214ab0e8529637ec7f28007f85038 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Wed, 17 Jun 2020 20:06:41 +0200 Subject: [PATCH 069/106] Apply suggestion to meshmode/discretization/__init__.py --- meshmode/discretization/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 980146fa..095d7322 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -55,7 +55,6 @@ class ElementGroupBase(object): .. autoattribute:: nunit_dofs .. autoattribute:: ndofs .. autoattribute:: dim - .. automethod:: view .. method:: unit_nodes() -- GitLab From f082bcb3c5bc6285c52897f6ad30a57c691dbe54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Wed, 17 Jun 2020 20:06:49 +0200 Subject: [PATCH 070/106] Apply suggestion to meshmode/discretization/__init__.py --- meshmode/discretization/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 095d7322..1f3352ae 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -334,7 +334,7 @@ class Discretization(object): def nodes(self): """ :returns: object array of shape ``(ambient_dim,)`` containing - :class:`~meshmode.dof_array.DOFArray`s of node coordinates. + :class:`~meshmode.dof_array.DOFArray`\ s of node coordinates. 
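# Illustrative sketch, not part of the patch above: building a discretization
# with an ArrayContext and thawing its (frozen) node arrays, following the
# pattern of examples/plot-connectivity.py earlier in this series. The mesh
# generator and its arguments (dim=2, order=4, n=5) are assumptions chosen
# only to make the sketch self-contained.
import pyopencl as cl
from meshmode.array_context import PyOpenCLArrayContext
from meshmode.dof_array import thaw
from meshmode.mesh.generation import generate_warped_rect_mesh
from meshmode.discretization import Discretization
from meshmode.discretization.poly_element import \
        PolynomialWarpAndBlendGroupFactory

cl_ctx = cl.create_some_context()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)

mesh = generate_warped_rect_mesh(dim=2, order=4, n=5)
discr = Discretization(actx, mesh, PolynomialWarpAndBlendGroupFactory(4))

nodes = thaw(actx, discr.nodes())   # object array of shape (ambient_dim,)
x, y = nodes                        # each entry is a DOFArray, one array per group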
""" @memoize_in(self, "nodes_prg") -- GitLab From d33b317055f65e7f420dc10b0eab0b2bbac066ae Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 13:16:47 -0500 Subject: [PATCH 071/106] ArrayContext.call_loopy: kwargs not args --- meshmode/array_context.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 1dfa0050..c5a439a5 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -94,9 +94,9 @@ class ArrayContext: """ raise NotImplementedError - def call_loopy(self, program, **args): + def call_loopy(self, program, **kwargs): """Execute the :mod:`loopy` program *program* on the arguments - *args*. + *kwargs*. *program* is a :class:`loopy.LoopKernel` or :class:`loopy.Program`. It is expected to not yet be transformed for execution speed. @@ -205,12 +205,12 @@ class PyOpenCLArrayContext(ArrayContext): def to_numpy(self, array): return array.get(queue=self.queue) - def call_loopy(self, program, **args): + def call_loopy(self, program, **kwargs): program = self.transform_loopy_program(program) assert program.options.return_dict assert program.options.no_numpy - evt, result = program(self.queue, **args) + evt, result = program(self.queue, **kwargs) return result def freeze(self, array): -- GitLab From d1107310e379ff1f6be2c4d9927f90c2f066ced4 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 13:19:06 -0500 Subject: [PATCH 072/106] Teach PyOpenCLArrayContext.call_loopy about allocator --- meshmode/array_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index c5a439a5..d9791b05 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -210,7 +210,7 @@ class PyOpenCLArrayContext(ArrayContext): assert program.options.return_dict assert program.options.no_numpy - evt, result = program(self.queue, **kwargs) + evt, result = program(self.queue, **kwargs, allocator=self.allocator) return result def freeze(self, array): -- GitLab From cc4ff1988c42321b2554e50a687daea195475ea6 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 13:35:51 -0500 Subject: [PATCH 073/106] Add versioning comment for ArrayContext --- meshmode/array_context.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index d9791b05..9b843053 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -66,6 +66,8 @@ class ArrayContext: .. automethod:: special_func .. automethod:: freeze .. automethod:: thaw + + .. versionadded:: 2020.2 """ def empty(self, shape, dtype): -- GitLab From 09ed1f2bc52aef94bfca1fc43c43da9eacd8a4df Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 13:36:11 -0500 Subject: [PATCH 074/106] Docstring work on ArrayContext.{thaw,freeze} --- meshmode/array_context.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 9b843053..619f44c8 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -147,9 +147,9 @@ class ArrayContext: """Return a version of the context-defined array *array* that is 'frozen', i.e. suitable for long-term storage and reuse. Frozen arrays do not support arithmetic. 
For example, in the context of - OpenCL, this might entail stripping the array of an associated queue, - whereas in a lazily-evaluated context, it might mean that the array is - evaluated and stored. + :class:`~pyopencl.array.Array`, this might mean stripping the array + of an associated command queue, whereas in a lazily-evaluated context, + it might mean that the array is evaluated and stored. Freezing makes the array independent of this :class:`ArrayContext`; it is permitted to :meth:`thaw` it in a different one, as long as that @@ -158,7 +158,15 @@ class ArrayContext: raise NotImplementedError def thaw(self, array): - """Take a 'frozen' array + """Take a 'frozen' array and return a new array representing the data in + *array* that is able to perform arithmetic and other operations, using + the execution resources of this context. In the context of + :class:`~pyopencl.array.Array`, this might mean that the array is + equipped with a command queue, whereas in a lazily-evaluated context, + it might mean that the returned array is a symbol bound to + the data in *array*. + + The returned array may not be used with other contexts while thawed. """ raise NotImplementedError -- GitLab From 2669deab832924903921d7faa4bce26f715bac18 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 15:59:18 -0500 Subject: [PATCH 075/106] Flake8: fix invalid escape seq --- meshmode/discretization/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 1f3352ae..3358d633 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -332,7 +332,7 @@ class Discretization(object): @memoize_method def nodes(self): - """ + r""" :returns: object array of shape ``(ambient_dim,)`` containing :class:`~meshmode.dof_array.DOFArray`\ s of node coordinates. """ -- GitLab From 47ba973c5a17af4006e2b49559faa647e163f0d5 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 15:59:50 -0500 Subject: [PATCH 076/106] ArrayContext: clarify that {to,from}_numpy deal with thawed arrays --- meshmode/array_context.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 619f44c8..5ed7d88c 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -83,16 +83,18 @@ class ArrayContext: return self.zeros(shape=ary.shape, dtype=ary.dtype) def from_numpy(self, array: np.ndarray): - """ + r""" :returns: the :class:`numpy.ndarray` *array* converted to the - array context's array type. + array context's array type. The returned array will be + :meth:`thaw`\ ed. """ raise NotImplementedError def to_numpy(self, array): - """ + r""" :returns: *array*, an array recognized by the context, converted - to a :class:`numpy.ndarray`. + to a :class:`numpy.ndarray`. *array* must be + :meth:`thaw`\ ed. 
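# Illustrative sketch, not part of the patch above: the numpy conversions deal
# in thawed arrays only, so frozen data gets thawed first. `actx` is assumed
# to be the PyOpenCLArrayContext constructed in the earlier sketch.
import numpy as np

host = np.linspace(0.0, 1.0, 10)
dev = actx.from_numpy(host)      # thawed, tied to actx's command queue
frozen = actx.freeze(dev)        # suitable for long-term storage, no arithmetic
assert np.array_equal(actx.to_numpy(actx.thaw(frozen)), host)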
""" raise NotImplementedError -- GitLab From 5248b3fc800517bbe5167f7028c2ccb162f98043 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 16:03:39 -0500 Subject: [PATCH 077/106] Document ElementGroupBase.{nunit_dofs,ndofs} --- meshmode/discretization/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 3358d633..a7c1b841 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -92,10 +92,16 @@ class ElementGroupBase(object): @property def nunit_dofs(self): + """The number of (for now: nodal) degrees of freedom associated with a + single element. + """ return self.unit_nodes.shape[-1] @property def ndofs(self): + """The total number of (for now: nodal) degrees of freedom associated with + the element group. + """ return self.nunit_dofs * self.nelements @property -- GitLab From a22b0d1c4ff8179cd0b9887cbcbd1e07641d74e6 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 16:04:45 -0500 Subject: [PATCH 078/106] ElementGroupBase: Explain DOF acronym --- meshmode/discretization/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index a7c1b841..1829ed28 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -92,14 +92,14 @@ class ElementGroupBase(object): @property def nunit_dofs(self): - """The number of (for now: nodal) degrees of freedom associated with a + """The number of (for now: nodal) degrees of freedom ("DOFs") associated with a single element. """ return self.unit_nodes.shape[-1] @property def ndofs(self): - """The total number of (for now: nodal) degrees of freedom associated with + """The total number of (for now: nodal) degrees of freedom ("DOFs") associated with the element group. """ return self.nunit_dofs * self.nelements -- GitLab From 6b5b4ac988901ccbf96aa6271f8d34076390638c Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 16:19:29 -0500 Subject: [PATCH 079/106] Document structure of DOFArray better --- meshmode/dof_array.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 1f1a1692..fa1917b2 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -49,8 +49,10 @@ class DOFArray(np.ndarray): """This array type is a subclass of :class:`numpy.ndarray` intended to hold degree-of-freedom arrays for use with :class:`~meshmode.discretization.Discretization`, - with one entry in the :class:`DOFArray` per element group. - It is derived from :class:`numpy.ndarray` with dtype object ("an object array") + with one entry in the :class:`DOFArray` for each + :class:`~meshmode.discretization.ElementGroupBase`. + It is derived from :class:`numpy.ndarray` with dtype object ("an object array"). + The entries in this array are further arrays managed by :attr:`array_context`. The main purpose of this class is to better describe the data structure, i.e. when a :class:`DOFArray` occurs inside of further numpy object array, @@ -58,7 +60,14 @@ class DOFArray(np.ndarray): people and programs). .. attribute:: array_context + + An :class:`meshmode.array_context.ArrayContext`. + .. attribute:: entry_dtype + + The (assumed uniform) :class:`numpy.dtype` of the group arrays + contained in this instance. + .. 
automethod:: from_list """ @@ -86,7 +95,13 @@ class DOFArray(np.ndarray): return single_valued(subary.dtype for subary in self.flat) @classmethod - def from_list(cls, actx: Optional[ArrayContext], res_list): + def from_list(cls, actx: Optional[ArrayContext], res_list) -> "DOFArray": + r"""Create a :class:`DOFArray` from a list of arrays + (one per :class:`~meshmode.discretization.ElementGroupBase`). + + :arg actx: If *None*, the arrays in *res_list* must be + :meth:`~meshmode.array_context.ArrayContext.thaw`\ ed. + """ if not (actx is None or isinstance(actx, ArrayContext)): raise TypeError("actx must be of type ArrayContext") -- GitLab From 330e328ecd91705757c2a2ce7bcfc1efc1722b9e Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 16:21:52 -0500 Subject: [PATCH 080/106] Fix missing doc punctuation in Discretization --- meshmode/discretization/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 1829ed28..03a10067 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -210,10 +210,10 @@ class Discretization(object): :param actx: A :class:`ArrayContext` used to perform computation needed during initial set-up of the mesh. :param mesh: A :class:`meshmode.mesh.Mesh` over which the discretization is - built - :param group_factory: An :class:`ElementGroupFactory` + built. + :param group_factory: An :class:`ElementGroupFactory`. :param real_dtype: The :mod:`numpy` data type used for representing real - data, either :class:`numpy.float32` or :class:`numpy.float64` + data, either :class:`numpy.float32` or :class:`numpy.float64`. """ if not isinstance(actx, ArrayContext): -- GitLab From ec6667807c72e1ae888beb9dc45c81472ff5a751 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 16:57:45 -0500 Subject: [PATCH 081/106] Use ArrayContext as container in @memoize_in --- meshmode/discretization/__init__.py | 18 +++++++++--------- meshmode/discretization/connection/direct.py | 19 ++++++++++--------- .../discretization/connection/projection.py | 16 +++++++++------- meshmode/dof_array.py | 4 ++-- requirements.txt | 2 +- 5 files changed, 31 insertions(+), 28 deletions(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 03a10067..0b246686 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -288,7 +288,9 @@ class Discretization(object): return self.zeros(array.array_context, dtype=array.entry_dtype) def num_reference_derivative(self, ref_axes, vec): - @memoize_in(self, "reference_derivative_prg") + actx = vec.array_context + + @memoize_in(actx, (Discretization, "reference_derivative_prg")) def prg(): return make_loopy_program( """{[iel,idof,j]: @@ -297,8 +299,6 @@ class Discretization(object): "result[iel,idof] = sum(j, diff_mat[idof, j] * vec[iel, j])", name="diff") - actx = vec.array_context - def get_mat(grp): mat = None for ref_axis in ref_axes: @@ -318,15 +318,15 @@ class Discretization(object): @memoize_method def quad_weights(self): - @memoize_in(self, "quad_weights_prg") + actx = self._setup_actx + + @memoize_in(actx, (Discretization, "quad_weights_prg")) def prg(): return make_loopy_program( "{[iel,idof]: 0<=iel np.ndarray: actx = ary.array_context - @memoize_in(actx, "flatten_prg") + @memoize_in(actx, (flatten, "flatten_prg")) def prg(): return make_loopy_program( "{[iel,idof]: 0<=iel np.ndarray: lambda subary: unflatten(actx, 
discr, subary), ary) - @memoize_in(actx, "unflatten_prg") + @memoize_in(actx, (unflatten, "unflatten_prg")) def prg(): return make_loopy_program( "{[iel,idof]: 0<=iel Date: Wed, 17 Jun 2020 18:00:50 -0500 Subject: [PATCH 082/106] Implement ArrayContext.np --- examples/simple-dg.py | 3 +- meshmode/array_context.py | 58 +++++++++++++++++++++++++-------------- test/test_chained.py | 20 ++++---------- test/test_meshmode.py | 16 +++-------- test/test_partition.py | 8 ++---- test/test_refinement.py | 4 +-- 6 files changed, 51 insertions(+), 58 deletions(-) diff --git a/examples/simple-dg.py b/examples/simple-dg.py index 2858bb4c..b9456877 100644 --- a/examples/simple-dg.py +++ b/examples/simple-dg.py @@ -455,10 +455,9 @@ def bump(actx, discr, t=0): nodes[1] - source_center[1], ]) - exp = (actx.special_func("exp")) return ( np.cos(source_omega*t) - * exp( + * actx.np.exp( -np.dot(center_dist, center_dist) / source_width**2)) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 5ed7d88c..26da731e 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -52,6 +52,24 @@ def make_loopy_program(domains, statements, kernel_data=["..."], name=None): # {{{ ArrayContext +class _FakeNumpyNamespace: + def __init__(self, array_context): + self._array_context = array_context + + def __getattr__(self, name): + def f(*args): + actx = self._array_context + # FIXME: Maybe involve loopy type inference? + result = actx.empty(args[0].shape, args[0].dtype) + prg = actx._get_scalar_func_loopy_program(name, len(args)) + actx.call_loopy(prg, out=result, + **{"inp%d" % i: arg for i, arg in enumerate(args)}) + return result + + from pytools.obj_array import obj_array_vectorized_n_args + return obj_array_vectorized_n_args(f) + + class ArrayContext: """An interface that allows a :class:`Discretization` to create and interact with arrays of degrees of freedom without fully specifying their types. @@ -63,13 +81,29 @@ class ArrayContext: .. automethod:: from_numpy .. automethod:: to_numpy .. automethod:: call_loopy - .. automethod:: special_func + .. attribute:: np + + Provides access to a namespace that serves as a work-alike to + :mod:`numpy`. The actual level of functionality provided is up to the + individual array context implementation, however the functions and + objects available under this namespace must not behave differently + from :mod:`numpy`. + + As a baseline, special functions available through :mod:`loopy` + (e.g. ``sin``, ``exp``) are accessible through this interface. + + Callables accessible through this namespace vectorize over object + arrays, including :class:`meshmode.dof_array.DOFArray`. + .. automethod:: freeze .. automethod:: thaw .. versionadded:: 2020.2 """ + def __init__(self): + self.np = _FakeNumpyNamespace(self) + def empty(self, shape, dtype): raise NotImplementedError @@ -112,7 +146,7 @@ class ArrayContext: raise NotImplementedError @memoize_method - def _get_special_func_loopy_program(self, name, nargs): + def _get_scalar_func_loopy_program(self, name, nargs): from pymbolic import var iel = var("iel") idof = var("idof") @@ -126,25 +160,6 @@ class ArrayContext: ], name="actx_special_%s" % name) - @memoize_method - def special_func(self, name): - """Returns a callable for the special function *name*, where *name* is a - (potentially dotted) function name resolvable by :mod:`loopy`. - - The returned callable will vectorize over object arrays, including - :class:`meshmode.dof_array.DOFArray`. 
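# Illustrative sketch, not part of the patch above: with special_func removed,
# the same pointwise functions are reached through the actx.np work-alike, and
# they still vectorize over DOFArrays, as the test updates in this commit show.
# `actx` and `discr` are assumed to exist as in the earlier sketches.
from meshmode.dof_array import thaw

x = thaw(actx, discr.nodes()[0])
f = 0.1 * actx.np.sin(30 * x)    # elementwise, one array per element group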
- """ - def f(*args): - # FIXME: Maybe involve loopy type inference? - result = self.empty(args[0].shape, args[0].dtype) - prg = self._get_special_func_loopy_program(name, len(args)) - self.call_loopy(prg, out=result, - **{"inp%d" % i: arg for i, arg in enumerate(args)}) - return result - - from pytools.obj_array import obj_array_vectorized_n_args - return obj_array_vectorized_n_args(f) - def freeze(self, array): """Return a version of the context-defined array *array* that is 'frozen', i.e. suitable for long-term storage and reuse. Frozen arrays @@ -194,6 +209,7 @@ class PyOpenCLArrayContext(ArrayContext): """ def __init__(self, queue, allocator=None): + super().__init__() self.context = queue.context self.queue = queue self.allocator = allocator diff --git a/test/test_chained.py b/test/test_chained.py index 91e61059..51c1e12c 100644 --- a/test/test_chained.py +++ b/test/test_chained.py @@ -214,11 +214,9 @@ def test_chained_connection(ctx_factory, ndim, visualize=False): ChainedDiscretizationConnection chained = ChainedDiscretizationConnection(connections) - sin = actx.special_func("sin") - def f(x): - from six.moves import reduce - return 0.1 * reduce(lambda x, y: x * sin(5 * y), x) + from functools import reduce + return 0.1 * reduce(lambda x, y: x * actx.np.sin(5 * y), x) x = thaw(actx, connections[0].from_discr.nodes()) fx = f(x) @@ -249,11 +247,9 @@ def test_chained_full_resample_matrix(ctx_factory, ndim, visualize=False): ChainedDiscretizationConnection chained = ChainedDiscretizationConnection(connections) - sin = actx.special_func("sin") - def f(x): from six.moves import reduce - return 0.1 * reduce(lambda x, y: x * sin(5 * y), x) + return 0.1 * reduce(lambda x, y: x * actx.np.sin(5 * y), x) resample_mat = actx.to_numpy(make_full_resample_matrix(actx, chained)) @@ -320,11 +316,9 @@ def test_chained_to_direct(ctx_factory, ndim, chain_type, to_element_indices[i] += 1 assert np.min(to_element_indices) > 0 - sin = actx.special_func("sin") - def f(x): from six.moves import reduce - return 0.1 * reduce(lambda x, y: x * sin(5 * y), x) + return 0.1 * reduce(lambda x, y: x * actx.np.sin(5 * y), x) x = thaw(actx, connections[0].from_discr.nodes()) fx = f(x) @@ -394,13 +388,11 @@ def test_reversed_chained_connection(ctx_factory, ndim, mesh_name): from_nodes = thaw(actx, chained.from_discr.nodes()) to_nodes = thaw(actx, chained.to_discr.nodes()) - cos = actx.special_func("cos") - from_x = 0 to_x = 0 for d in range(ndim): - from_x += cos(from_nodes[d]) ** (d + 1) - to_x += cos(to_nodes[d]) ** (d + 1) + from_x += actx.np.cos(from_nodes[d]) ** (d + 1) + to_x += actx.np.cos(to_nodes[d]) ** (d + 1) from_interp = reverse(to_x) diff --git a/test/test_meshmode.py b/test/test_meshmode.py index d4952e38..a888beff 100644 --- a/test/test_meshmode.py +++ b/test/test_meshmode.py @@ -235,10 +235,8 @@ def test_boundary_interpolation(ctx_factory, group_factory, boundary_tag, order = 4 - sin = actx.special_func("sin") - def f(x): - return 0.1*sin(30*x) + return 0.1*actx.np.sin(30*x) for mesh_par in mesh_pars: # {{{ get mesh @@ -331,10 +329,8 @@ def test_all_faces_interpolation(ctx_factory, mesh_name, dim, mesh_pars, order = 4 - sin = actx.special_func("sin") - def f(x): - return 0.1*sin(30*x) + return 0.1*actx.np.sin(30*x) for mesh_par in mesh_pars: # {{{ get mesh @@ -450,10 +446,8 @@ def test_opposite_face_interpolation(ctx_factory, group_factory, order = 5 - sin = actx.special_func("sin") - def f(x): - return 0.1*sin(30*x) + return 0.1*actx.np.sin(30*x) for mesh_par in mesh_pars: # {{{ get mesh @@ -787,10 
+781,8 @@ def test_sanity_qhull_nd(ctx_factory, dim, order): from meshmode.discretization.connection import make_same_mesh_connection cnx = make_same_mesh_connection(actx, high_discr, low_discr) - sin = actx.special_func("sin") - def f(x): - return 0.1*sin(x) + return 0.1*actx.np.sin(x) x_low = thaw(actx, low_discr.nodes()[0]) f_low = f(x_low) diff --git a/test/test_partition.py b/test/test_partition.py index f34fe3e8..1e8e6c7f 100644 --- a/test/test_partition.py +++ b/test/test_partition.py @@ -81,10 +81,8 @@ def test_partition_interpolation(ctx_factory, dim, mesh_pars, continue eoc_rec[i, j] = EOCRecorder() - sin = actx.special_func("sin") - def f(x): - return 10.*sin(50.*x) + return 10.*actx.np.sin(50.*x) for n in mesh_pars: from meshmode.mesh.generation import generate_warped_rect_mesh @@ -415,10 +413,8 @@ def _test_data_transfer(mpi_comm, actx, local_bdry_conns, remote_to_local_bdry_conns, connected_parts): from mpi4py import MPI - sin = actx.special_func("sin") - def f(x): - return 10*sin(20.*x) + return 10*actx.np.sin(20.*x) ''' Here is a simplified example of what happens from diff --git a/test/test_refinement.py b/test/test_refinement.py index 31624610..336c8a93 100644 --- a/test/test_refinement.py +++ b/test/test_refinement.py @@ -198,8 +198,6 @@ def test_refinement_connection( from meshmode.discretization.connection import ( make_refinement_connection, check_connection) - sin = actx.special_func("sin") - from pytools.convergence import EOCRecorder eoc_rec = EOCRecorder() @@ -239,7 +237,7 @@ def test_refinement_connection( factor = 9 for iaxis in range(len(x)): - result = result * sin(factor * (x[iaxis]/mesh_ext[iaxis])) + result = result * actx.np.sin(factor * (x[iaxis]/mesh_ext[iaxis])) return result -- GitLab From 5399843719e5059225c99810b43dcff2dbe2797b Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 17 Jun 2020 18:07:07 -0500 Subject: [PATCH 083/106] Point back at pytools master --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 36086658..0e1d749b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ numpy -git+https://gitlab.tiker.net/inducer/pytools.git@memoize-in-complex-identifiers +git+https://gitlab.tiker.net/inducer/pytools.git git+https://gitlab.tiker.net/inducer/gmsh_interop.git git+https://gitlab.tiker.net/inducer/modepy.git git+https://gitlab.tiker.net/inducer/pyopencl.git -- GitLab From 16d38e21cddcaf364ca1fb386df057c94cc2f3f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Sun, 21 Jun 2020 23:54:40 +0200 Subject: [PATCH 084/106] Point requirements.txt back to loopy master --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0e1d749b..818dd86c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ git+https://gitlab.tiker.net/inducer/islpy.git git+https://gitlab.tiker.net/inducer/pymbolic.git # also depends on pymbolic, so should come after it -git+https://gitlab.tiker.net/inducer/loopy.git@empty-ndrange-enqueue +git+https://gitlab.tiker.net/inducer/loopy.git # more pytential dependencies git+https://gitlab.tiker.net/inducer/boxtree.git -- GitLab From b200e32bdf57c178a79aef1d5b113084bd60dd92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Thu, 25 Jun 2020 15:37:51 +0200 Subject: [PATCH 085/106] Fix default kernel name in array_context.make_loopy_program --- meshmode/array_context.py | 3 ++- 1 file changed, 2 
insertions(+), 1 deletion(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 26da731e..4375eed4 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -35,7 +35,8 @@ __doc__ = """ """ -def make_loopy_program(domains, statements, kernel_data=["..."], name=None): +def make_loopy_program(domains, statements, kernel_data=["..."], + name="mm_actx_kernel"): """Return a :class:`loopy.Program` suitable for use with :meth:`ArrayContext.call_loopy`. """ -- GitLab From a1f9903884dfdb2d87afa71e1cc0f9de9ba70308 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Fri, 26 Jun 2020 07:17:33 +0200 Subject: [PATCH 086/106] Apply 1 suggestion(s) to 1 file(s) --- meshmode/discretization/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 0b246686..46f7d2b0 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -199,7 +199,7 @@ class Discretization(object): .. method:: num_reference_derivative(queue, ref_axes, vec) - .. method:: quad_weights(queue) + .. method:: quad_weights A :class:`~meshmode.dof_array.DOFArray` with quadrature weights. """ -- GitLab From 2793de1c6185c27f86d2a35f3080bdfc81ab7340 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Fri, 26 Jun 2020 18:30:19 +0200 Subject: [PATCH 087/106] Apply 1 suggestion(s) to 1 file(s) --- meshmode/discretization/connection/direct.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/connection/direct.py b/meshmode/discretization/connection/direct.py index 7c2b5712..0fb957b8 100644 --- a/meshmode/discretization/connection/direct.py +++ b/meshmode/discretization/connection/direct.py @@ -146,7 +146,7 @@ class DiscretizationConnection(object): self.is_surjective = is_surjective - def __call__(self, queue, vec): + def __call__(self, ary): raise NotImplementedError() -- GitLab From 88aa902895dc1d3640cf29b2cc1be7b9cbedfa42 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Fri, 26 Jun 2020 11:38:09 -0500 Subject: [PATCH 088/106] Address comments from Matt's review --- meshmode/discretization/__init__.py | 8 ++++---- meshmode/discretization/connection/chained.py | 2 +- meshmode/discretization/connection/face.py | 6 ++---- meshmode/dof_array.py | 7 ++++++- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index 46f7d2b0..f1dd8915 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -197,11 +197,9 @@ class Discretization(object): .. automethod:: nodes() - .. method:: num_reference_derivative(queue, ref_axes, vec) + .. automethod:: num_reference_derivative - .. method:: quad_weights - - A :class:`~meshmode.dof_array.DOFArray` with quadrature weights. + .. automethod:: quad_weights """ def __init__(self, actx: ArrayContext, mesh, group_factory, @@ -318,6 +316,8 @@ class Discretization(object): @memoize_method def quad_weights(self): + """:returns: A :class:`~meshmode.dof_array.DOFArray` with quadrature weights. 
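# Illustrative sketch, not part of the patch above: quad_weights() hands back a
# frozen DOFArray (one array per element group), so it is thawed before use and
# can be pulled to the host via flatten/to_numpy. `actx` and `discr` are assumed
# to exist as in the earlier sketches.
from meshmode.dof_array import thaw, flatten

w = thaw(actx, discr.quad_weights())
w_host = actx.to_numpy(flatten(w))   # all groups concatenated into one numpy array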
+ """ actx = self._setup_actx @memoize_in(actx, (Discretization, "quad_weights_prg")) diff --git a/meshmode/discretization/connection/chained.py b/meshmode/discretization/connection/chained.py index 9990e865..1be269bb 100644 --- a/meshmode/discretization/connection/chained.py +++ b/meshmode/discretization/connection/chained.py @@ -187,7 +187,7 @@ def flatten_chained_connection(actx, connection): If a large number of connections is chained, the number of groups and batches can become very large. - :arg queue: An instance of :class:`pyopencl.CommandQueue`. + :arg actx: An instance of :class:`meshmode.array_contex.ArrayContext`. :arg connection: An instance of :class:`~meshmode.discretization.connection.DiscretizationConnection`. :return: An instance of diff --git a/meshmode/discretization/connection/face.py b/meshmode/discretization/connection/face.py index 6bba284a..bac1ccc9 100644 --- a/meshmode/discretization/connection/face.py +++ b/meshmode/discretization/connection/face.py @@ -86,11 +86,9 @@ def _build_boundary_connection(actx, vol_discr, bdry_discr, connection_data, InterpolationBatch( from_group_index=igrp, from_element_indices=actx.freeze(actx.from_numpy( - data.group_source_element_indices)) - .with_queue(None), + data.group_source_element_indices)), to_element_indices=actx.freeze(actx.from_numpy( - data.group_target_element_indices)) - .with_queue(None), + data.group_target_element_indices)), result_unit_nodes=result_unit_nodes, to_element_face=face_id )) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index d04dedc4..3136484c 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -51,10 +51,15 @@ class DOFArray(np.ndarray): :class:`~meshmode.discretization.Discretization`, with one entry in the :class:`DOFArray` for each :class:`~meshmode.discretization.ElementGroupBase`. + The arrays contained within a :class:`DOFArray` + are expected to be logically two-dimensional, with shape + ``(nelements, nunit_dofs)``, using + :class:`~meshmode.discretization.ElementGroupBase.nelements` + and :class:`~meshmode.discretization.ElementGroupBase.nunit_dofs`. It is derived from :class:`numpy.ndarray` with dtype object ("an object array"). The entries in this array are further arrays managed by :attr:`array_context`. - The main purpose of this class is to better describe the data structure, + One main purpose of this class is to describe the data structure, i.e. when a :class:`DOFArray` occurs inside of further numpy object array, the level representing the array of element groups can be recognized (by people and programs). -- GitLab From cd138164db29d0ecdcc99b03ca006b00a81350e9 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 26 Jun 2020 15:58:27 -0500 Subject: [PATCH 089/106] DOFArray: Permit variable final dimension sizes --- meshmode/dof_array.py | 46 ++++++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 3136484c..7edfc2a9 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -21,7 +21,7 @@ THE SOFTWARE. """ import numpy as np -from typing import Optional, TYPE_CHECKING +from typing import Optional, Iterable, TYPE_CHECKING from functools import partial from pytools import single_valued, memoize_in @@ -53,11 +53,15 @@ class DOFArray(np.ndarray): :class:`~meshmode.discretization.ElementGroupBase`. 
The arrays contained within a :class:`DOFArray` are expected to be logically two-dimensional, with shape - ``(nelements, nunit_dofs)``, using - :class:`~meshmode.discretization.ElementGroupBase.nelements` - and :class:`~meshmode.discretization.ElementGroupBase.nunit_dofs`. - It is derived from :class:`numpy.ndarray` with dtype object ("an object array"). - The entries in this array are further arrays managed by :attr:`array_context`. + ``(nelements, ndofs_per_element)``, where ``nelements`` is the same as + :attr:`~meshmode.discretization.ElementGroupBase.nelements` + of the associated group. + ``ndofs_per_element`` is typically, but not necessarily, the same as + :attr:`~meshmode.discretization.ElementGroupBase.nunit_dofs` + of the associated group. + This array is derived from :class:`numpy.ndarray` with dtype object ("an + object array"). The entries in this array are further arrays managed by + :attr:`array_context`. One main purpose of this class is to describe the data structure, i.e. when a :class:`DOFArray` occurs inside of further numpy object array, @@ -74,6 +78,7 @@ class DOFArray(np.ndarray): contained in this instance. .. automethod:: from_list + """ # Follows https://numpy.org/devdocs/user/basics.subclassing.html @@ -190,7 +195,7 @@ def flatten(ary: np.ndarray) -> np.ndarray: @memoize_in(actx, (flatten, "flatten_prg")) def prg(): return make_loopy_program( - "{[iel,idof]: 0<=iel np.ndarray: return result -def unflatten(actx: ArrayContext, discr: "_Discretization", ary) -> np.ndarray: +def unflatten(actx: ArrayContext, discr: "_Discretization", ary, + ndofs_per_element_per_group: Optional[Iterable[int]] = None) -> np.ndarray: r"""Convert a 'flat' array returned by :func:`flatten` back to a :class:`DOFArray`. Vectorizes over object arrays of :class:`DOFArray`\ s. @@ -211,17 +217,26 @@ def unflatten(actx: ArrayContext, discr: "_Discretization", ary) -> np.ndarray: and ary.dtype.char == "O" and not isinstance(ary, DOFArray)): return obj_array_vectorize( - lambda subary: unflatten(actx, discr, subary), + lambda subary: unflatten( + actx, discr, subary, ndofs_per_element_per_group), ary) @memoize_in(actx, (unflatten, "unflatten_prg")) def prg(): return make_loopy_program( - "{[iel,idof]: 0<=iel np.ndarray: prg(), grp_start=grp_start, ary=ary, nelements=grp.nelements, - nunit_dofs=grp.nunit_dofs, + ndofs_per_element=ndofs_per_element, )["result"]) - for grp_start, grp in zip(group_starts, discr.groups)]) + for grp_start, grp, ndofs_per_element in zip( + group_starts, + discr.groups, + ndofs_per_element_per_group)]) def flat_norm(ary: DOFArray, ord=2): -- GitLab From 47619d048906cc7f8a9aa9936956f6765f7b6c37 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 26 Jun 2020 23:00:23 +0200 Subject: [PATCH 090/106] Whitespace fix --- meshmode/dof_array.py | 1 - 1 file changed, 1 deletion(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 7edfc2a9..63c108c1 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -78,7 +78,6 @@ class DOFArray(np.ndarray): contained in this instance. .. 
automethod:: from_list - """ # Follows https://numpy.org/devdocs/user/basics.subclassing.html -- GitLab From 83acfcfdc423d50912d2488f801fbff0351192f5 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 26 Jun 2020 16:05:39 -0500 Subject: [PATCH 091/106] Fix indentation --- meshmode/dof_array.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 63c108c1..b26732c3 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -109,7 +109,7 @@ class DOFArray(np.ndarray): (one per :class:`~meshmode.discretization.ElementGroupBase`). :arg actx: If *None*, the arrays in *res_list* must be - :meth:`~meshmode.array_context.ArrayContext.thaw`\ ed. + :meth:`~meshmode.array_context.ArrayContext.thaw`\ ed. """ if not (actx is None or isinstance(actx, ArrayContext)): raise TypeError("actx must be of type ArrayContext") -- GitLab From 542c99fa691736f493de05dbfe47baf59e4bdef7 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Fri, 26 Jun 2020 16:09:56 -0500 Subject: [PATCH 092/106] Fix a loopy kerel error --- meshmode/dof_array.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index b26732c3..c77fdd0f 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -195,7 +195,7 @@ def flatten(ary: np.ndarray) -> np.ndarray: def prg(): return make_loopy_program( "{[iel,idof]: 0<=iel Date: Sat, 27 Jun 2020 06:47:08 +0200 Subject: [PATCH 093/106] Use a line continuation --- meshmode/dof_array.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index c77fdd0f..12ffe33f 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -195,7 +195,8 @@ def flatten(ary: np.ndarray) -> np.ndarray: def prg(): return make_loopy_program( "{[iel,idof]: 0<=iel Date: Sat, 27 Jun 2020 20:19:00 +0200 Subject: [PATCH 094/106] ndofs_per_element_per_group => ndofs_per_element_by_group --- meshmode/dof_array.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 12ffe33f..824583c8 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -208,7 +208,7 @@ def flatten(ary: np.ndarray) -> np.ndarray: def unflatten(actx: ArrayContext, discr: "_Discretization", ary, - ndofs_per_element_per_group: Optional[Iterable[int]] = None) -> np.ndarray: + ndofs_per_element_by_group: Optional[Iterable[int]] = None) -> np.ndarray: r"""Convert a 'flat' array returned by :func:`flatten` back to a :class:`DOFArray`. Vectorizes over object arrays of :class:`DOFArray`\ s. 
@@ -218,7 +218,7 @@ def unflatten(actx: ArrayContext, discr: "_Discretization", ary, and not isinstance(ary, DOFArray)): return obj_array_vectorize( lambda subary: unflatten( - actx, discr, subary, ndofs_per_element_per_group), + actx, discr, subary, ndofs_per_element_by_group), ary) @memoize_in(actx, (unflatten, "unflatten_prg")) @@ -228,14 +228,14 @@ def unflatten(actx: ArrayContext, discr: "_Discretization", ary, "result[iel, idof] = ary[grp_start + iel*ndofs_per_element + idof]", name="unflatten") - if ndofs_per_element_per_group is None: - ndofs_per_element_per_group = [ + if ndofs_per_element_by_group is None: + ndofs_per_element_by_group = [ grp.nunit_dofs for grp in discr.groups] group_sizes = [ grp.nelements * ndofs_per_element for grp, ndofs_per_element - in zip(discr.groups, ndofs_per_element_per_group)] + in zip(discr.groups, ndofs_per_element_by_group)] if ary.size != sum(group_sizes): raise ValueError("array has size %d, expected %d" @@ -254,7 +254,7 @@ def unflatten(actx: ArrayContext, discr: "_Discretization", ary, for grp_start, grp, ndofs_per_element in zip( group_starts, discr.groups, - ndofs_per_element_per_group)]) + ndofs_per_element_by_group)]) def flat_norm(ary: DOFArray, ord=2): -- GitLab From 372927fb0bf691486c2720239ecdf799f8606291 Mon Sep 17 00:00:00 2001 From: Matt Wala Date: Sat, 27 Jun 2020 13:21:26 -0500 Subject: [PATCH 095/106] Revert "ndofs_per_element_per_group => ndofs_per_element_by_group" This reverts commit f1825d0b0bf04e5921f2d710314cccc47fb8833a. --- meshmode/dof_array.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 824583c8..12ffe33f 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -208,7 +208,7 @@ def flatten(ary: np.ndarray) -> np.ndarray: def unflatten(actx: ArrayContext, discr: "_Discretization", ary, - ndofs_per_element_by_group: Optional[Iterable[int]] = None) -> np.ndarray: + ndofs_per_element_per_group: Optional[Iterable[int]] = None) -> np.ndarray: r"""Convert a 'flat' array returned by :func:`flatten` back to a :class:`DOFArray`. Vectorizes over object arrays of :class:`DOFArray`\ s. 
@@ -218,7 +218,7 @@ def unflatten(actx: ArrayContext, discr: "_Discretization", ary, and not isinstance(ary, DOFArray)): return obj_array_vectorize( lambda subary: unflatten( - actx, discr, subary, ndofs_per_element_by_group), + actx, discr, subary, ndofs_per_element_per_group), ary) @memoize_in(actx, (unflatten, "unflatten_prg")) @@ -228,14 +228,14 @@ def unflatten(actx: ArrayContext, discr: "_Discretization", ary, "result[iel, idof] = ary[grp_start + iel*ndofs_per_element + idof]", name="unflatten") - if ndofs_per_element_by_group is None: - ndofs_per_element_by_group = [ + if ndofs_per_element_per_group is None: + ndofs_per_element_per_group = [ grp.nunit_dofs for grp in discr.groups] group_sizes = [ grp.nelements * ndofs_per_element for grp, ndofs_per_element - in zip(discr.groups, ndofs_per_element_by_group)] + in zip(discr.groups, ndofs_per_element_per_group)] if ary.size != sum(group_sizes): raise ValueError("array has size %d, expected %d" @@ -254,7 +254,7 @@ def unflatten(actx: ArrayContext, discr: "_Discretization", ary, for grp_start, grp, ndofs_per_element in zip( group_starts, discr.groups, - ndofs_per_element_by_group)]) + ndofs_per_element_per_group)]) def flat_norm(ary: DOFArray, ord=2): -- GitLab From b892210eb8814dbcb655a916eb209975515fd369 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 1 Jul 2020 13:19:43 -0500 Subject: [PATCH 096/106] Fix type annotation of dof_array.flatten --- meshmode/dof_array.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index 12ffe33f..f4099b33 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -21,7 +21,7 @@ THE SOFTWARE. """ import numpy as np -from typing import Optional, Iterable, TYPE_CHECKING +from typing import Optional, Iterable, TYPE_CHECKING, Any from functools import partial from pytools import single_valued, memoize_in @@ -170,7 +170,7 @@ def freeze(ary: np.ndarray) -> np.ndarray: ]) -def flatten(ary: np.ndarray) -> np.ndarray: +def flatten(ary: np.ndarray) -> Any: r"""Convert a :class:`DOFArray` into a "flat" array of degrees of freedom, where the resulting type of the array is given by the :attr:`DOFArray.array_context`. -- GitLab From 20b12937e028b3e0069417b7766ffb42ae700221 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 1 Jul 2020 13:23:44 -0500 Subject: [PATCH 097/106] Introduce Discretization.ndofs --- meshmode/discretization/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/meshmode/discretization/__init__.py b/meshmode/discretization/__init__.py index f1dd8915..d19e5b92 100644 --- a/meshmode/discretization/__init__.py +++ b/meshmode/discretization/__init__.py @@ -188,6 +188,8 @@ class Discretization(object): .. attribute:: ambient_dim + .. attribute:: ndofs + .. attribute :: groups .. 
automethod:: empty @@ -241,6 +243,10 @@ class Discretization(object): def ambient_dim(self): return self.mesh.ambient_dim + @property + def ndofs(self): + return sum(grp.ndofs for grp in self.groups) + def _new_array(self, actx, creation_func, dtype=None): if dtype is None: dtype = self.real_dtype -- GitLab From cb806c843fd4db8e41391eef762a8577772aab09 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 1 Jul 2020 15:35:40 -0500 Subject: [PATCH 098/106] _get_scalar_func_loopy_program/loopy transform logic: Allow any number of axes in array --- meshmode/array_context.py | 45 ++++++++++++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 4375eed4..a0087ea1 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -62,7 +62,8 @@ class _FakeNumpyNamespace: actx = self._array_context # FIXME: Maybe involve loopy type inference? result = actx.empty(args[0].shape, args[0].dtype) - prg = actx._get_scalar_func_loopy_program(name, len(args)) + prg = actx._get_scalar_func_loopy_program( + name, nargs=len(args), naxes=len(args[0].shape)) actx.call_loopy(prg, out=result, **{"inp%d" % i: arg for i, arg in enumerate(args)}) return result @@ -147,17 +148,27 @@ class ArrayContext: raise NotImplementedError @memoize_method - def _get_scalar_func_loopy_program(self, name, nargs): + def _get_scalar_func_loopy_program(self, name, nargs, naxes): from pymbolic import var - iel = var("iel") - idof = var("idof") + + var_names = ["i%d" % i for i in range(naxes)] + size_names = ["n%d" % i for i in range(naxes)] + subscript = tuple(var(vname) for vname in var_names) + from islpy import make_zero_and_vars + v = make_zero_and_vars(var_names, params=size_names) + domain = v[0].domain() + for vname, sname in zip(var_names, size_names): + domain = domain & v[0].le_set(v[vname]) & v[vname].le_set(v[sname]) + + domain_bset, = domain.get_basic_sets() + return make_loopy_program( - "{[iel, idof]: 0<=iel Date: Wed, 1 Jul 2020 15:40:12 -0500 Subject: [PATCH 099/106] Don't freeze in unflatten --- meshmode/dof_array.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/meshmode/dof_array.py b/meshmode/dof_array.py index f4099b33..f74f2090 100644 --- a/meshmode/dof_array.py +++ b/meshmode/dof_array.py @@ -244,17 +244,16 @@ def unflatten(actx: ArrayContext, discr: "_Discretization", ary, group_starts = np.cumsum([0] + group_sizes) return DOFArray.from_list(actx, [ - actx.freeze( - actx.call_loopy( - prg(), - grp_start=grp_start, ary=ary, - nelements=grp.nelements, - ndofs_per_element=ndofs_per_element, - )["result"]) - for grp_start, grp, ndofs_per_element in zip( - group_starts, - discr.groups, - ndofs_per_element_per_group)]) + actx.call_loopy( + prg(), + grp_start=grp_start, ary=ary, + nelements=grp.nelements, + ndofs_per_element=ndofs_per_element, + )["result"] + for grp_start, grp, ndofs_per_element in zip( + group_starts, + discr.groups, + ndofs_per_element_per_group)]) def flat_norm(ary: DOFArray, ord=2): -- GitLab From 2da5c294102053fdbd05c13b8d4464d5ae5828e9 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 1 Jul 2020 15:49:51 -0500 Subject: [PATCH 100/106] Implement minimum/maximum in PyOpenCL fake numpy --- meshmode/array_context.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index a0087ea1..4c049ead 100644 --- a/meshmode/array_context.py +++ 
b/meshmode/array_context.py @@ -53,7 +53,7 @@ def make_loopy_program(domains, statements, kernel_data=["..."], # {{{ ArrayContext -class _FakeNumpyNamespace: +class _BaseFakeNumpyNamespace: def __init__(self, array_context): self._array_context = array_context @@ -104,7 +104,10 @@ class ArrayContext: """ def __init__(self): - self.np = _FakeNumpyNamespace(self) + self.np = self._get_fake_numpy_namespace() + + def _get_fake_numpy_namespace(self): + return _BaseFakeNumpyNamespace(self) def empty(self, shape, dtype): raise NotImplementedError @@ -204,6 +207,15 @@ class ArrayContext: # {{{ PyOpenCLArrayContext +class _PyOpenCLFakeNumpyNamespace(_BaseFakeNumpyNamespace): + def __getattr__(self, name): + if name in ["minimum", "maximum"]: + import pyopencl.array as cl_array + return getattr(cl_array, name) + + return super().__getattr__(name) + + class PyOpenCLArrayContext(ArrayContext): """ A :class:`ArrayContext` that uses :class:`pyopencl.array.Array` instances @@ -226,6 +238,9 @@ class PyOpenCLArrayContext(ArrayContext): self.queue = queue self.allocator = allocator + def _get_fake_numpy_namespace(self): + return _PyOpenCLFakeNumpyNamespace(self) + # {{{ ArrayContext interface def empty(self, shape, dtype): -- GitLab From bd15c865e1cfa16b80cd2860e9fa768549c7c471 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 1 Jul 2020 15:50:09 -0500 Subject: [PATCH 101/106] Remove a comment in the loopy transform logic that's no longer correct --- meshmode/array_context.py | 1 - 1 file changed, 1 deletion(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 4c049ead..6724c2af 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -279,7 +279,6 @@ class PyOpenCLArrayContext(ArrayContext): @memoize_method def transform_loopy_program(self, program): - # FIXME: This assumes that the iname 'iel' exists. # FIXME: This could be much smarter. import loopy as lp all_inames = program.all_inames() -- GitLab From 319f69c65ccdd97b9ce87cda7dbb97c72d3aa5c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Wed, 1 Jul 2020 22:51:49 +0200 Subject: [PATCH 102/106] Apply 1 suggestion(s) to 1 file(s) --- meshmode/array_context.py | 1 + 1 file changed, 1 insertion(+) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 6724c2af..ae3b13f4 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -47,6 +47,7 @@ def make_loopy_program(domains, statements, kernel_data=["..."], options=lp.Options( no_numpy=True, return_dict=True), + default_offset=lp.auto, name=name, lang_version=MOST_RECENT_LANGUAGE_VERSION) -- GitLab From a584c89c64012ca57e8365d822411dcb106f8cac Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 1 Jul 2020 16:08:28 -0500 Subject: [PATCH 103/106] CL fake numpy: vectorize minimum/maximum over obj arrays --- meshmode/array_context.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index ae3b13f4..2e3ae4af 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -27,6 +27,7 @@ import numpy as np import loopy as lp from loopy.version import MOST_RECENT_LANGUAGE_VERSION from pytools import memoize_method +from pytools.obj_array import obj_array_vectorized_n_args __doc__ = """ .. 
autofunction:: make_loopy_program @@ -69,7 +70,6 @@ class _BaseFakeNumpyNamespace: **{"inp%d" % i: arg for i, arg in enumerate(args)}) return result - from pytools.obj_array import obj_array_vectorized_n_args return obj_array_vectorized_n_args(f) @@ -212,7 +212,7 @@ class _PyOpenCLFakeNumpyNamespace(_BaseFakeNumpyNamespace): def __getattr__(self, name): if name in ["minimum", "maximum"]: import pyopencl.array as cl_array - return getattr(cl_array, name) + return obj_array_vectorized_n_args(getattr(cl_array, name)) return super().__getattr__(name) -- GitLab From 358454355ef41f790283fb676d2214ae699af6e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Fri, 3 Jul 2020 17:47:07 +0200 Subject: [PATCH 104/106] Apply 1 suggestion(s) to 1 file(s) --- meshmode/discretization/connection/chained.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/discretization/connection/chained.py b/meshmode/discretization/connection/chained.py index 1be269bb..53e57e2e 100644 --- a/meshmode/discretization/connection/chained.py +++ b/meshmode/discretization/connection/chained.py @@ -202,7 +202,7 @@ def flatten_chained_connection(actx, connection): return connection if not connection.connections: - return make_same_mesh_connection(connection.to_discr, + return make_same_mesh_connection(actx, connection.to_discr, connection.from_discr) # recursively build direct connections -- GitLab From b50b5f640263c61364a46eb9df470402dd5f2a63 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sat, 4 Jul 2020 00:52:25 -0500 Subject: [PATCH 105/106] ArrayContext._get_scalar_func_loopy_program: use lt_set (not le_set) for upper bound (fixes out-of bounds write, thanks @mattwala) --- meshmode/array_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshmode/array_context.py b/meshmode/array_context.py index 2e3ae4af..6118e9d5 100644 --- a/meshmode/array_context.py +++ b/meshmode/array_context.py @@ -162,7 +162,7 @@ class ArrayContext: v = make_zero_and_vars(var_names, params=size_names) domain = v[0].domain() for vname, sname in zip(var_names, size_names): - domain = domain & v[0].le_set(v[vname]) & v[vname].le_set(v[sname]) + domain = domain & v[0].le_set(v[vname]) & v[vname].lt_set(v[sname]) domain_bset, = domain.get_basic_sets() -- GitLab From 76587445072cc4dbf5e48d5bb2b3fdafc1a8c9ed Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Wed, 8 Jul 2020 18:39:09 -0500 Subject: [PATCH 106/106] A little bit of work towards supporting order 0 --- meshmode/discretization/poly_element.py | 6 +++++- meshmode/mesh/generation.py | 3 +++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/meshmode/discretization/poly_element.py b/meshmode/discretization/poly_element.py index 91fccdf6..d0f3823c 100644 --- a/meshmode/discretization/poly_element.py +++ b/meshmode/discretization/poly_element.py @@ -212,7 +212,11 @@ class PolynomialWarpAndBlendElementGroup(_MassMatrixQuadratureElementGroup): @memoize_method def unit_nodes(self): dim = self.mesh_el_group.dim - result = mp.warp_and_blend_nodes(dim, self.order) + if self.order == 0: + result = mp.warp_and_blend_nodes(dim, 1) + result = np.mean(result, axis=1).reshape(-1, 1) + else: + result = mp.warp_and_blend_nodes(dim, self.order) dim2, nunit_nodes = result.shape assert dim2 == dim diff --git a/meshmode/mesh/generation.py b/meshmode/mesh/generation.py index c4bf2608..c524292c 100644 --- a/meshmode/mesh/generation.py +++ b/meshmode/mesh/generation.py @@ -321,6 +321,9 @@ def make_group_from_vertices(vertices, 
vertex_indices, order, group_factory = SimplexElementGroup if issubclass(group_factory, SimplexElementGroup): + if order < 1: + raise ValueError("can't represent simplices with mesh order < 1") + el_origins = el_vertices[:, :, 0][:, :, np.newaxis] # ambient_dim, nelements, nspan_vectors spanning_vectors = ( -- GitLab
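
The following is a usage sketch, not part of the patch series itself, illustrating the
flatten/unflatten round trip from PATCH 089 together with ``Discretization.ndofs`` from
PATCH 097. It assumes a working PyOpenCL setup; the mesh generator, group factory, mesh
size, and polynomial order are existing meshmode pieces and arbitrary values used only
for illustration::

    import numpy as np
    import pyopencl as cl

    from meshmode.array_context import PyOpenCLArrayContext
    from meshmode.dof_array import flatten, unflatten
    from meshmode.mesh.generation import generate_regular_rect_mesh
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            PolynomialWarpAndBlendGroupFactory

    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    mesh = generate_regular_rect_mesh(a=(0, 0), b=(1, 1), n=(8, 8), order=3)
    discr = Discretization(actx, mesh, PolynomialWarpAndBlendGroupFactory(3))

    # A DOFArray is an object array with one (nelements, ndofs_per_element)
    # entry per element group; discr.ndofs (PATCH 097) is the total DOF count.
    u = discr.zeros(actx) + 1

    flat = flatten(u)                   # one flat array of length discr.ndofs
    u2 = unflatten(actx, discr, flat)   # back to per-group 2D arrays

    # PATCH 089: the trailing dimension may differ per group, e.g. a single
    # value per element for element-wise quantities.
    nelements = sum(grp.nelements for grp in discr.groups)
    per_element = unflatten(
            actx, discr, actx.from_numpy(np.zeros(nelements)),
            ndofs_per_element_per_group=[1] * len(discr.groups))

Passing ``ndofs_per_element_per_group`` keeps the group-by-group layout logic unchanged
while letting the flat array carry a different trailing dimension for each group.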
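
For reference, the program that ``_get_scalar_func_loopy_program`` builds for a unary
function applied to a two-axis array is roughly the following hand-written :mod:`loopy`
kernel (a sketch: the kernel name and test data are illustrative, and the strict upper
bounds correspond to the ``lt_set`` fix in PATCH 105)::

    import numpy as np
    import loopy as lp
    import pyopencl as cl
    import pyopencl.array as cl_array

    knl = lp.make_kernel(
            # one loop variable and one size parameter per axis,
            # with strict upper bounds
            "{[i0, i1]: 0<=i0<n0 and 0<=i1<n1}",
            "out[i0, i1] = sin(inp0[i0, i1])",
            name="actx_sin_example",        # illustrative name
            lang_version=(2018, 2))

    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    inp0 = cl_array.to_device(queue, np.linspace(0.0, 1.0, 12).reshape(3, 4))
    evt, (out,) = knl(queue, inp0=inp0)
    assert np.allclose(out.get(), np.sin(inp0.get()))

In the array context this path is reached through ``actx.np.sin(...)`` and friends; the
PyOpenCL context short-circuits ``minimum``/``maximum`` to :mod:`pyopencl.array`, wrapped
with ``obj_array_vectorized_n_args`` (PATCH 100/103), instead of generating a kernel.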
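
The order-0 support in PATCH 106 places the single unit node at the mean of the order-1
warp-and-blend nodes, i.e. the barycenter of the reference simplex. Stand-alone, using
:mod:`modepy` directly as the patch does::

    import numpy as np
    import modepy as mp

    dim = 2
    nodes_p1 = mp.warp_and_blend_nodes(dim, 1)     # shape (dim, dim+1): the vertices
    nodes_p0 = np.mean(nodes_p1, axis=1).reshape(-1, 1)
    # nodes_p0 has shape (dim, 1): the barycenter of the reference simplex,
    # e.g. (-1/3, -1/3) on the biunit triangle.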