diff --git a/.gitignore b/.gitignore
index 3d5b80428d4d3d96051e085c47612d07a4aeecc0..c5d29d7c34f52c54cf724c91f0f86a9e7b4c96df 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,3 +24,5 @@
 examples/*.pdf
 .cache
 tags
+
+pytential/_git_rev.py
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 334a41d0fb0b56e6582d71e9242c09d9729fdb0e..750bf6f4cc5018fc14c9195ec688d45bea21d129 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -26,6 +26,20 @@ Python 3.6 POCL:
   except:
   - tags

+Python 3.6 POCL Examples:
+  script:
+  - export PY_EXE=python3.6
+  - export PYOPENCL_TEST=portable
+  - export EXTRA_INSTALL="numpy mako pyvisfile matplotlib"
+  - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-py-project-and-run-examples.sh
+  - ". ./build-py-project-and-run-examples.sh"
+  tags:
+  - python3.6
+  - pocl
+  - large-node
+  except:
+  - tags
+
 Python 3.5 Conda:
   script:
   - export SUMPY_FORCE_SYMBOLIC_BACKEND=symengine
@@ -53,6 +67,21 @@ Python 2.7 POCL:
   except:
   - tags

+Python 3.5 Conda Apple:
+  script:
+  - export LC_ALL=en_US.UTF-8
+  - export LANG=en_US.UTF-8
+  - export PYTEST_ADDOPTS=-k-slowtest
+  - CONDA_ENVIRONMENT=.test-conda-env-py3-macos.yml
+  - REQUIREMENTS_TXT=.test-conda-env-py3-requirements.txt
+  - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project-within-miniconda.sh
+  - ". ./build-and-test-py-project-within-miniconda.sh"
+  tags:
+  - apple
+  except:
+  - tags
+  retry: 2
+
 Documentation:
   script:
   - EXTRA_INSTALL="numpy mako"
@@ -66,7 +95,7 @@ Documentation:
 Flake8:
   script:
   - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/prepare-and-run-flake8.sh
-  - ". ./prepare-and-run-flake8.sh pytential test"
+  - ". ./prepare-and-run-flake8.sh pytential test examples"
   tags:
   - python3.5
   except:
diff --git a/.test-conda-env-py3-macos.yml b/.test-conda-env-py3-macos.yml
new file mode 100644
index 0000000000000000000000000000000000000000..807cd6963d5328c5cbe92494f1f255cece964c4b
--- /dev/null
+++ b/.test-conda-env-py3-macos.yml
@@ -0,0 +1,17 @@
+name: test-conda-env-py3-macos
+channels:
+- conda-forge
+- defaults
+dependencies:
+- git
+- conda-forge::numpy
+- conda-forge::sympy
+- pocl=1.0
+- islpy
+- pyopencl
+- python=3.5
+- symengine=0.3.0
+- python-symengine=0.3.0
+- pyfmmlib
+- osx-pocl-opencl
+# things not in here: loopy boxtree pymbolic meshmode sumpy
diff --git a/.test-conda-env-py3.yml b/.test-conda-env-py3.yml
index b4f204c7f1ee3b43208c3fe5728e1b72bf78c21a..37c60864a80724cdb1117ff6ea0398ac32e6a6cb 100644
--- a/.test-conda-env-py3.yml
+++ b/.test-conda-env-py3.yml
@@ -6,7 +6,7 @@ dependencies:
 - git
 - conda-forge::numpy
 - conda-forge::sympy
-- pocl=0.13
+- pocl=1.0
 - islpy
 - pyopencl
 - python=3.5
diff --git a/README.rst b/README.rst
index cbd540486d43975eb2691f59aa8117f4dd02d697..bababc0a26b9d0f9ff659fa000f9cdfd5329987b 100644
--- a/README.rst
+++ b/README.rst
@@ -1,5 +1,10 @@
-pytential
-=========
+pytential: 2D/3D Layer Potential Evaluation
+===========================================
+
+.. image:: https://gitlab.tiker.net/inducer/pytential/badges/master/pipeline.svg
+    :target: https://gitlab.tiker.net/inducer/pytential/commits/master
+.. image:: https://badge.fury.io/py/pytential.png
+    :target: http://pypi.python.org/pypi/pytential

 pytential helps you accurately evaluate layer potentials (and, sooner
 or later, volume potentials).
@@ -23,9 +28,6 @@ and, indirectly,
 PyOpenCL is likely the only package you'll have to install
 by hand, all the others will be installed automatically.

-.. image:: https://badge.fury.io/py/pytential.png
-    :target: http://pypi.python.org/pypi/pytential
-
 Resources:

 * `documentation `_
diff --git a/doc/misc.rst b/doc/misc.rst
index 14f3f76468c290284e4e4cf4624485ab4d332a28..7a3d9abaac86485e230212127871cede80711e3c 100644
--- a/doc/misc.rst
+++ b/doc/misc.rst
@@ -4,14 +4,15 @@ Installation and Usage
 Installing :mod:`pytential`
 ---------------------------

-This set of instructions is intended for 64-bit Linux computers.
-MacOS support is in the works.
+This set of instructions is intended for 64-bit Linux and macOS computers.

 #. Make sure your system has the basics to build software.

    On Debian derivatives (Ubuntu and many more),
    installing ``build-essential`` should do the trick.

+   On macOS, run ``xcode-select --install`` to install build tools.
+
    Everywhere else, just making sure you have the ``g++`` package should be
    enough.

@@ -30,7 +31,9 @@

 #. ``conda config --add channels conda-forge``

-#. ``conda install git pip pocl=0.13 islpy pyopencl sympy pyfmmlib pytest``
+#. (*macOS only*) ``conda install osx-pocl-opencl``
+
+#. ``conda install git pip pocl islpy pyopencl sympy pyfmmlib pytest``

 #. Type the following command::

@@ -45,14 +48,6 @@ You may also like to add this to a startup file (like :file:`$HOME/.bashrc`) or

 After this, you should be able to run the `tests `_ or `examples `_.

-.. note::
-
-    You may have noticed that we prescribed pocl version 0.13 above. That's
-    because newer versions have a `bug
-    `_ that we haven't
-    tracked down just yet. Until this bug is found, we discourage the use of
-    pocl 0.14 as results may be silently inaccurate.
-
 Troubleshooting the Installation
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/examples/fmm-error.py b/examples/fmm-error.py
index fea97c99fd54398e4e6b3dd6c2092c19e60a31f5..110ec66bc166b9efc35b83e72f11584f07c7d5ee 100644
--- a/examples/fmm-error.py
+++ b/examples/fmm-error.py
@@ -6,89 +6,93 @@ from meshmode.mesh.generation import (  # noqa
 from sumpy.visualization import FieldPlotter
 from sumpy.kernel import LaplaceKernel, HelmholtzKernel

-import faulthandler
-faulthandler.enable()
-import logging
-logging.basicConfig(level=logging.INFO)
+def main():
+    import logging
+    logging.basicConfig(level=logging.WARNING)  # INFO for more progress info

-cl_ctx = cl.create_some_context()
-queue = cl.CommandQueue(cl_ctx)
+    cl_ctx = cl.create_some_context()
+    queue = cl.CommandQueue(cl_ctx)

-target_order = 16
-qbx_order = 3
-nelements = 60
-mode_nr = 0
+    target_order = 16
+    qbx_order = 3
+    nelements = 60
+    mode_nr = 0

-k = 0
-if k:
-    kernel = HelmholtzKernel("k")
-else:
-    kernel = LaplaceKernel()
-#kernel = OneKernel()
+    k = 0
+    if k:
+        kernel = HelmholtzKernel(2)
+    else:
+        kernel = LaplaceKernel(2)
+    #kernel = OneKernel()

-mesh = make_curve_mesh(
-        #lambda t: ellipse(1, t),
-        starfish,
-        np.linspace(0, 1, nelements+1),
-        target_order)
+    mesh = make_curve_mesh(
+            #lambda t: ellipse(1, t),
+            starfish,
+            np.linspace(0, 1, nelements+1),
+            target_order)

-from pytential.qbx import QBXLayerPotentialSource
-from meshmode.discretization import Discretization
-from meshmode.discretization.poly_element import \
-        InterpolatoryQuadratureSimplexGroupFactory
+    from pytential.qbx import QBXLayerPotentialSource
+    from meshmode.discretization import Discretization
+    from meshmode.discretization.poly_element import \
+            InterpolatoryQuadratureSimplexGroupFactory

-density_discr = Discretization(
-        cl_ctx, mesh,
-        InterpolatoryQuadratureSimplexGroupFactory(target_order))
+    pre_density_discr = Discretization(
+            cl_ctx, mesh,
+            InterpolatoryQuadratureSimplexGroupFactory(target_order))

-qbx = QBXLayerPotentialSource(
-        density_discr, fine_order=2*target_order,
-        qbx_order=qbx_order, fmm_order=qbx_order)
-slow_qbx = QBXLayerPotentialSource(
-        density_discr, fine_order=2*target_order,
-        qbx_order=qbx_order, fmm_order=False)
+    slow_qbx, _ = QBXLayerPotentialSource(
+            pre_density_discr, fine_order=2*target_order,
+            qbx_order=qbx_order, fmm_order=False,
+            target_association_tolerance=.05
+            ).with_refinement()
+    qbx = slow_qbx.copy(fmm_order=10)
+    density_discr = slow_qbx.density_discr

-nodes = density_discr.nodes().with_queue(queue)
+    nodes = density_discr.nodes().with_queue(queue)

-angle = cl.clmath.atan2(nodes[1], nodes[0])
+    angle = cl.clmath.atan2(nodes[1], nodes[0])

-from pytential import bind, sym
-d = sym.Derivative()
-#op = d.nabla[0] * d(sym.S(kernel, sym.var("sigma")))
-#op = sym.D(kernel, sym.var("sigma"))
-op = sym.S(kernel, sym.var("sigma"))
+    from pytential import bind, sym
+    #op = sym.d_dx(sym.S(kernel, sym.var("sigma")), qbx_forced_limit=None)
+    #op = sym.D(kernel, sym.var("sigma"), qbx_forced_limit=None)
+    op = sym.S(kernel, sym.var("sigma"), qbx_forced_limit=None)

-sigma = cl.clmath.cos(mode_nr*angle)
+    sigma = cl.clmath.cos(mode_nr*angle)

-if isinstance(kernel, HelmholtzKernel):
-    sigma = sigma.astype(np.complex128)
+    if isinstance(kernel, HelmholtzKernel):
+        sigma = sigma.astype(np.complex128)

-bound_bdry_op = bind(qbx, op)
+    fplot = FieldPlotter(np.zeros(2), extent=5, npoints=600)
+    from pytential.target import PointsTarget

-fplot = FieldPlotter(np.zeros(2), extent=5, npoints=600)
-from pytential.target import PointsTarget
+    fld_in_vol = bind(
+            (slow_qbx, PointsTarget(fplot.points)),
+            op)(queue, sigma=sigma, k=k).get()

-fld_in_vol = bind(
-        (slow_qbx, PointsTarget(fplot.points)),
-        op)(queue, sigma=sigma, k=k).get()
+    fmm_fld_in_vol = bind(
+            (qbx, PointsTarget(fplot.points)),
+            op)(queue, sigma=sigma, k=k).get()

-fmm_fld_in_vol = bind(
-        (qbx, PointsTarget(fplot.points)),
-        op)(queue, sigma=sigma, k=k).get()
+    err = fmm_fld_in_vol-fld_in_vol

-err = fmm_fld_in_vol-fld_in_vol
-im = fplot.show_scalar_in_matplotlib(np.log10(np.abs(err)))
+    import matplotlib
+    matplotlib.use('Agg')
+    im = fplot.show_scalar_in_matplotlib(np.log10(np.abs(err)))

-from matplotlib.colors import Normalize
-im.set_norm(Normalize(vmin=-6, vmax=0))
+    from matplotlib.colors import Normalize
+    im.set_norm(Normalize(vmin=-12, vmax=0))

-import matplotlib.pyplot as pt
-from matplotlib.ticker import NullFormatter
-pt.gca().xaxis.set_major_formatter(NullFormatter())
-pt.gca().yaxis.set_major_formatter(NullFormatter())
+    import matplotlib.pyplot as pt
+    from matplotlib.ticker import NullFormatter
+    pt.gca().xaxis.set_major_formatter(NullFormatter())
+    pt.gca().yaxis.set_major_formatter(NullFormatter())

-cb = pt.colorbar(shrink=0.9)
-cb.set_label(r"$\log_{10}(\mathdefault{Error})$")
+    cb = pt.colorbar(shrink=0.9)
+    cb.set_label(r"$\log_{10}(\mathdefault{Error})$")

-pt.savefig("fmm-error-order-%d.pdf" % qbx_order)
+    pt.savefig("fmm-error-order-%d.pdf" % qbx_order)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/blob-2d.step b/examples/geometries/blob-2d.step
similarity index 100%
rename from examples/blob-2d.step
rename to examples/geometries/blob-2d.step
diff --git a/examples/circle.step b/examples/geometries/circle.step
similarity index 100%
rename from examples/circle.step
rename to examples/geometries/circle.step
diff --git a/examples/circles.step b/examples/geometries/circles.step
similarity index 100%
rename from examples/circles.step
rename to examples/geometries/circles.step
diff --git a/examples/ellipsoid.step b/examples/geometries/ellipsoid.step
similarity index 100%
rename from examples/ellipsoid.step
rename to examples/geometries/ellipsoid.step
diff --git a/examples/molecule.step b/examples/geometries/molecule.step
similarity index 100%
rename from examples/molecule.step
rename to examples/geometries/molecule.step
diff --git a/examples/two-balls.step b/examples/geometries/two-balls.step
similarity index 100%
rename from examples/two-balls.step
rename to examples/geometries/two-balls.step
diff --git a/examples/two-cylinders-smooth.step b/examples/geometries/two-cylinders-smooth.step
similarity index 100%
rename from examples/two-cylinders-smooth.step
rename to examples/geometries/two-cylinders-smooth.step
diff --git a/examples/helmholtz-dirichlet.py b/examples/helmholtz-dirichlet.py
index d6d9baa6c609399028305edb249565d0457d878d..22f8fa8a0083da62f1f17d7c79053056ce3ab33c 100644
--- a/examples/helmholtz-dirichlet.py
+++ b/examples/helmholtz-dirichlet.py
@@ -25,7 +25,7 @@ k = 3

 def main():
     import logging
-    logging.basicConfig(level=logging.INFO)
+    logging.basicConfig(level=logging.WARNING)  # INFO for more progress info

     cl_ctx = cl.create_some_context()
     queue = cl.CommandQueue(cl_ctx)
@@ -167,7 +167,7 @@ def main():

     #fplot.show_scalar_in_mayavi(fld_in_vol.real, max_val=5)
     fplot.write_vtk_file(
-            "potential.vts",
+            "potential-helm.vts",
             [
                 ("potential", fld_in_vol),
                 ("indicator", indicator),
diff --git a/examples/laplace-dirichlet-3d.py b/examples/laplace-dirichlet-3d.py
new file mode 100644
index 0000000000000000000000000000000000000000..4166dddfa8d8e1606885e167fa65a8881fe64484
--- /dev/null
+++ b/examples/laplace-dirichlet-3d.py
@@ -0,0 +1,170 @@
+import numpy as np
+import numpy.linalg as la
+import pyopencl as cl
+import pyopencl.clmath  # noqa
+
+from meshmode.discretization import Discretization
+from meshmode.discretization.poly_element import \
+        InterpolatoryQuadratureSimplexGroupFactory
+
+from pytential import bind, sym, norm  # noqa
+from pytential.target import PointsTarget
+
+# {{{ set some constants for use below
+
+nelements = 20
+bdry_quad_order = 4
+mesh_order = bdry_quad_order
+qbx_order = bdry_quad_order
+bdry_ovsmp_quad_order = 4*bdry_quad_order
+fmm_order = 3
+
+# }}}
+
+
+def main():
+    import logging
+    logging.basicConfig(level=logging.WARNING)  # INFO for more progress info
+
+    cl_ctx = cl.create_some_context()
+    queue = cl.CommandQueue(cl_ctx)
+
+    from meshmode.mesh.generation import generate_torus
+
+    rout = 10
+    rin = 1
+    if 1:
+        base_mesh = generate_torus(
+                rout, rin, 40, 4,
+                mesh_order)
+
+        from meshmode.mesh.processing import affine_map, merge_disjoint_meshes
+        # nx = 1
+        # ny = 1
+        nz = 1
+        dz = 0
+        meshes = [
+                affine_map(
+                    base_mesh,
+                    A=np.diag([1, 1, 1]),
+                    b=np.array([0, 0, iz*dz]))
+                for iz in range(nz)]
+
+        mesh = merge_disjoint_meshes(meshes, single_group=True)
+
+        if 0:
+            from meshmode.mesh.visualization import draw_curve
+            draw_curve(mesh)
+            import matplotlib.pyplot as plt
+            plt.show()
+
+    pre_density_discr = Discretization(
+            cl_ctx, mesh,
+            InterpolatoryQuadratureSimplexGroupFactory(bdry_quad_order))
+
+    from pytential.qbx import (
+            QBXLayerPotentialSource, QBXTargetAssociationFailedException)
+    qbx, _ = QBXLayerPotentialSource(
+            pre_density_discr, fine_order=bdry_ovsmp_quad_order, qbx_order=qbx_order,
+            fmm_order=fmm_order
+            ).with_refinement()
+    density_discr = qbx.density_discr
+
+    # {{{ describe bvp
+
+    from sumpy.kernel import LaplaceKernel
+    kernel = LaplaceKernel(3)
+
+    cse = sym.cse
+
+    sigma_sym = sym.var("sigma")
+    #sqrt_w = sym.sqrt_jac_q_weight(3)
+    sqrt_w = 1
+    inv_sqrt_w_sigma = cse(sigma_sym/sqrt_w)
+
+    # -1 for interior Dirichlet
+    # +1 for exterior Dirichlet
+    loc_sign = +1
+
+    bdry_op_sym = (loc_sign*0.5*sigma_sym
+            + sqrt_w*(
+                sym.S(kernel, inv_sqrt_w_sigma)
+                + sym.D(kernel, inv_sqrt_w_sigma)
+                ))
+
+    # }}}
+
+    bound_op = bind(qbx, bdry_op_sym)
+
+    # {{{ fix rhs and solve
+
+    nodes = density_discr.nodes().with_queue(queue)
+    source = np.array([rout, 0, 0])
+
+    def u_incoming_func(x):
+        # return 1/cl.clmath.sqrt( (x[0] - source[0])**2
+        #                         +(x[1] - source[1])**2
+        #                         +(x[2] - source[2])**2 )
+        return 1.0/la.norm(x.get()-source[:, None], axis=0)
+
+    bc = cl.array.to_device(queue, u_incoming_func(nodes))
+
+    bvp_rhs = bind(qbx, sqrt_w*sym.var("bc"))(queue, bc=bc)
+
+    from pytential.solve import gmres
+    gmres_result = gmres(
+            bound_op.scipy_op(queue, "sigma", dtype=np.float64),
+            bvp_rhs, tol=1e-14, progress=True,
+            stall_iterations=0,
+            hard_failure=True)
+
+    sigma = bind(qbx, sym.var("sigma")/sqrt_w)(queue, sigma=gmres_result.solution)
+
+    # }}}
+
+    from meshmode.discretization.visualization import make_visualizer
+    bdry_vis = make_visualizer(queue, density_discr, 20)
+    bdry_vis.write_vtk_file("laplace.vtu", [
+        ("sigma", sigma),
+        ])
+
+    # {{{ postprocess/visualize
+
+    repr_kwargs = dict(qbx_forced_limit=None)
+    representation_sym = (
+            sym.S(kernel, inv_sqrt_w_sigma, **repr_kwargs)
+            + sym.D(kernel, inv_sqrt_w_sigma, **repr_kwargs))
+
+    from sumpy.visualization import FieldPlotter
+    fplot = FieldPlotter(np.zeros(3), extent=20, npoints=50)
+
+    targets = cl.array.to_device(queue, fplot.points)
+
+    qbx_stick_out = qbx.copy(target_stick_out_factor=0.2)
+
+    try:
+        fld_in_vol = bind(
+                (qbx_stick_out, PointsTarget(targets)),
+                representation_sym)(queue, sigma=sigma).get()
+    except QBXTargetAssociationFailedException as e:
+        fplot.write_vtk_file(
+                "failed-targets.vts",
+                [
+                    ("failed", e.failed_target_flags.get(queue))
+                ]
+                )
+        raise
+
+    #fplot.show_scalar_in_mayavi(fld_in_vol.real, max_val=5)
+    fplot.write_vtk_file(
+            "potential-laplace-3d.vts",
+            [
+                ("potential", fld_in_vol),
+            ]
+            )
+
+    # }}}
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/layerpot-3d.py b/examples/layerpot-3d.py
index 0a35ebd7fa418b8cf517ada57bc7dee5e39f1471..28f0967e8aec28332902a128d0fb1efafb100d4e 100644
--- a/examples/layerpot-3d.py
+++ b/examples/layerpot-3d.py
@@ -9,10 +9,6 @@ import faulthandler
 from six.moves import range
 faulthandler.enable()

-import logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
 cl_ctx = cl.create_some_context()
 queue = cl.CommandQueue(cl_ctx)

@@ -21,10 +17,10 @@ qbx_order = 3
 mode_nr = 4

 if 1:
-    cad_file_name = "ellipsoid.step"
+    cad_file_name = "geometries/ellipsoid.step"
     h = 0.6
 else:
-    cad_file_name = "two-cylinders-smooth.step"
+    cad_file_name = "geometries/two-cylinders-smooth.step"
     h = 0.4

 k = 0
@@ -34,76 +30,86 @@ else:
     kernel = LaplaceKernel(3)
 #kernel = OneKernel()

-from meshmode.mesh.io import generate_gmsh, FileSource
-mesh = generate_gmsh(
-        FileSource(cad_file_name), 2, order=2,
-        other_options=["-string", "Mesh.CharacteristicLengthMax = %g;" % h])
-
-from meshmode.mesh.processing import perform_flips
-# Flip elements--gmsh generates inside-out geometry.
-mesh = perform_flips(mesh, np.ones(mesh.nelements))
+def main():
+    import logging
+    logger = logging.getLogger(__name__)
+    logging.basicConfig(level=logging.WARNING)  # INFO for more progress info
+
+    from meshmode.mesh.io import generate_gmsh, FileSource
+    mesh = generate_gmsh(
+            FileSource(cad_file_name), 2, order=2,
+            other_options=["-string", "Mesh.CharacteristicLengthMax = %g;" % h])
+
+    from meshmode.mesh.processing import perform_flips
+    # Flip elements--gmsh generates inside-out geometry.
+    mesh = perform_flips(mesh, np.ones(mesh.nelements))
+
+    from meshmode.mesh.processing import find_bounding_box
+    bbox_min, bbox_max = find_bounding_box(mesh)
+    bbox_center = 0.5*(bbox_min+bbox_max)
+    bbox_size = max(bbox_max-bbox_min) / 2
+
+    logger.info("%d elements" % mesh.nelements)

-from meshmode.mesh.processing import find_bounding_box
-bbox_min, bbox_max = find_bounding_box(mesh)
-bbox_center = 0.5*(bbox_min+bbox_max)
+    from pytential.qbx import QBXLayerPotentialSource
+    from meshmode.discretization import Discretization
+    from meshmode.discretization.poly_element import \
+            InterpolatoryQuadratureSimplexGroupFactory

-bbox_size = max(bbox_max-bbox_min) / 2
+    density_discr = Discretization(
+            cl_ctx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))

-logger.info("%d elements" % mesh.nelements)
+    qbx, _ = QBXLayerPotentialSource(density_discr, 4*target_order, qbx_order,
+            fmm_order=qbx_order + 3,
+            target_association_tolerance=0.15).with_refinement()

-from pytential.qbx import QBXLayerPotentialSource
-from meshmode.discretization import Discretization
-from meshmode.discretization.poly_element import \
-        InterpolatoryQuadratureSimplexGroupFactory
+    nodes = density_discr.nodes().with_queue(queue)

-density_discr = Discretization(
-        cl_ctx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))
+    angle = cl.clmath.atan2(nodes[1], nodes[0])

-qbx, _ = QBXLayerPotentialSource(density_discr, 4*target_order, qbx_order,
-        fmm_order=qbx_order + 3,
-        target_association_tolerance=0.15).with_refinement()
+    from pytential import bind, sym
+    #op = sym.d_dx(sym.S(kernel, sym.var("sigma"), qbx_forced_limit=None))
+    op = sym.D(kernel, sym.var("sigma"), qbx_forced_limit=None)
+    #op = sym.S(kernel, sym.var("sigma"), qbx_forced_limit=None)

-nodes = density_discr.nodes().with_queue(queue)
+    sigma = cl.clmath.cos(mode_nr*angle)
+    if 0:
+        sigma = 0*angle
+        from random import randrange
+        for i in range(5):
+            sigma[randrange(len(sigma))] = 1

-angle = cl.clmath.atan2(nodes[1], nodes[0])
+    if isinstance(kernel, HelmholtzKernel):
+        sigma = sigma.astype(np.complex128)

-from pytential import bind, sym
-#op = sym.d_dx(sym.S(kernel, sym.var("sigma"), qbx_forced_limit=None))
-op = sym.D(kernel, sym.var("sigma"), qbx_forced_limit=None)
-#op = sym.S(kernel, sym.var("sigma"), qbx_forced_limit=None)
+    fplot = FieldPlotter(bbox_center, extent=3.5*bbox_size, npoints=150)

-sigma = cl.clmath.cos(mode_nr*angle)
-if 0:
-    sigma = 0*angle
-    from random import randrange
-    for i in range(5):
-        sigma[randrange(len(sigma))] = 1
+    from pytential.target import PointsTarget
+    fld_in_vol = bind(
+            (qbx, PointsTarget(fplot.points)),
+            op)(queue, sigma=sigma, k=k).get()

-if isinstance(kernel, HelmholtzKernel):
-    sigma = sigma.astype(np.complex128)
+    #fplot.show_scalar_in_mayavi(fld_in_vol.real, max_val=5)
+    fplot.write_vtk_file(
+            "potential-3d.vts",
+            [
+                ("potential", fld_in_vol)
+            ]
+            )

-fplot = FieldPlotter(bbox_center, extent=3.5*bbox_size, npoints=150)
+    bdry_normals = bind(
+            density_discr,
+            sym.normal(density_discr.ambient_dim))(queue).as_vector(dtype=object)

-from pytential.target import PointsTarget
-fld_in_vol = bind(
-        (qbx, PointsTarget(fplot.points)),
-        op)(queue, sigma=sigma, k=k).get()
+    from meshmode.discretization.visualization import make_visualizer
+    bdry_vis = make_visualizer(queue, density_discr, target_order)

-#fplot.show_scalar_in_mayavi(fld_in_vol.real, max_val=5)
-fplot.write_vtk_file(
-        "potential.vts",
-        [
-            ("potential", fld_in_vol)
-        ]
-        )
+    bdry_vis.write_vtk_file("source-3d.vtu", [
+        ("sigma", sigma),
+        ("bdry_normals", bdry_normals),
+        ])

-bdry_normals = bind(
-        density_discr,
-        sym.normal(density_discr.ambient_dim))(queue).as_vector(dtype=object)

-from meshmode.discretization.visualization import make_visualizer
-bdry_vis = make_visualizer(queue, density_discr, target_order)
-
-bdry_vis.write_vtk_file("source.vtu", [
-    ("sigma", sigma),
-    ("bdry_normals", bdry_normals),
-    ])
+if __name__ == "__main__":
+    main()
diff --git a/examples/layerpot.py b/examples/layerpot.py
index 0371b1ff557f57a7b2f839be6571bded120728bb..7b4737da00d6d1d1cc76e31f39932fc7c12783e8 100644
--- a/examples/layerpot.py
+++ b/examples/layerpot.py
@@ -15,13 +15,6 @@ import faulthandler
 from six.moves import range
 faulthandler.enable()

-import logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-cl_ctx = cl.create_some_context()
-queue = cl.CommandQueue(cl_ctx)
-
 target_order = 16
 qbx_order = 3
 nelements = 60
@@ -36,96 +29,107 @@ else:
     kernel_kwargs = {}
 #kernel = OneKernel()

-from meshmode.mesh.generation import (  # noqa
-        make_curve_mesh, starfish, ellipse, drop)
-mesh = make_curve_mesh(
-        #lambda t: ellipse(1, t),
-        starfish,
-        np.linspace(0, 1, nelements+1),
-        target_order)
-
-from pytential.qbx import QBXLayerPotentialSource
-from meshmode.discretization import Discretization
-from meshmode.discretization.poly_element import \
-        InterpolatoryQuadratureSimplexGroupFactory
+def main():
+    import logging
+    logging.basicConfig(level=logging.WARNING)  # INFO for more progress info
+
+    cl_ctx = cl.create_some_context()
+    queue = cl.CommandQueue(cl_ctx)
+
+    from meshmode.mesh.generation import (  # noqa
+            make_curve_mesh, starfish, ellipse, drop)
+    mesh = make_curve_mesh(
+            #lambda t: ellipse(1, t),
+            starfish,
+            np.linspace(0, 1, nelements+1),
+            target_order)
+
+    from pytential.qbx import QBXLayerPotentialSource
+    from meshmode.discretization import Discretization
+    from meshmode.discretization.poly_element import \
+            InterpolatoryQuadratureSimplexGroupFactory
+
+    pre_density_discr = Discretization(
+            cl_ctx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))

-pre_density_discr = Discretization(
-        cl_ctx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))
+    qbx, _ = QBXLayerPotentialSource(pre_density_discr, 4*target_order, qbx_order,
+            fmm_order=qbx_order+3,
+            target_association_tolerance=0.005).with_refinement()

-qbx, _ = QBXLayerPotentialSource(pre_density_discr, 4*target_order, qbx_order,
-        fmm_order=qbx_order+3,
-        target_association_tolerance=0.005).with_refinement()
+    density_discr = qbx.density_discr

-density_discr = qbx.density_discr
+    nodes = density_discr.nodes().with_queue(queue)

-nodes = density_discr.nodes().with_queue(queue)
+    angle = cl.clmath.atan2(nodes[1], nodes[0])

-angle = cl.clmath.atan2(nodes[1], nodes[0])
+    def op(**kwargs):
+        kwargs.update(kernel_kwargs)
+        #op = sym.d_dx(sym.S(kernel, sym.var("sigma"), **kwargs))
+        return sym.D(kernel, sym.var("sigma"), **kwargs)
+        #op = sym.S(kernel, sym.var("sigma"), qbx_forced_limit=None, **kwargs)

-def op(**kwargs):
-    kwargs.update(kernel_kwargs)
+    sigma = cl.clmath.cos(mode_nr*angle)
+    if 0:
+        sigma = 0*angle
+        from random import randrange
+        for i in range(5):
+            sigma[randrange(len(sigma))] = 1

-    #op = sym.d_dx(sym.S(kernel, sym.var("sigma"), **kwargs))
-    return sym.D(kernel, sym.var("sigma"), **kwargs)
-    #op = sym.S(kernel, sym.var("sigma"), qbx_forced_limit=None, **kwargs)
+    if isinstance(kernel, HelmholtzKernel):
+        sigma = sigma.astype(np.complex128)

+    bound_bdry_op = bind(qbx, op())
+    #mlab.figure(bgcolor=(1, 1, 1))
+    if 1:
+        fplot = FieldPlotter(np.zeros(2), extent=5, npoints=1000)
+        from pytential.target import PointsTarget

-sigma = cl.clmath.cos(mode_nr*angle)
-if 0:
-    sigma = 0*angle
-    from random import randrange
-    for i in range(5):
-        sigma[randrange(len(sigma))] = 1
+        targets_dev = cl.array.to_device(queue, fplot.points)
+        fld_in_vol = bind(
+                (qbx, PointsTarget(targets_dev)),
+                op(qbx_forced_limit=None))(queue, sigma=sigma, k=k).get()

-if isinstance(kernel, HelmholtzKernel):
-    sigma = sigma.astype(np.complex128)
+        if enable_mayavi:
+            fplot.show_scalar_in_mayavi(fld_in_vol.real, max_val=5)
+        else:
+            fplot.write_vtk_file(
+                    "potential-2d.vts",
+                    [
+                        ("potential", fld_in_vol)
+                    ]
+                    )

-bound_bdry_op = bind(qbx, op())
-#mlab.figure(bgcolor=(1, 1, 1))
-if 1:
-    fplot = FieldPlotter(np.zeros(2), extent=5, npoints=1000)
-    from pytential.target import PointsTarget
+    if 0:
+        def apply_op(density):
+            return bound_bdry_op(
+                    queue, sigma=cl.array.to_device(queue, density), k=k).get()

-    targets_dev = cl.array.to_device(queue, fplot.points)
-    fld_in_vol = bind(
-            (qbx, PointsTarget(targets_dev)),
-            op(qbx_forced_limit=None))(queue, sigma=sigma, k=k).get()
+        from sumpy.tools import build_matrix
+        n = len(sigma)
+        mat = build_matrix(apply_op, dtype=np.float64, shape=(n, n))
+
+        import matplotlib.pyplot as pt
+        pt.imshow(mat)
+        pt.colorbar()
+        pt.show()

     if enable_mayavi:
-        fplot.show_scalar_in_mayavi(fld_in_vol.real, max_val=5)
-    else:
-        fplot.write_vtk_file(
-                "potential.vts",
-                [
-                    ("potential", fld_in_vol)
-                ]
-                )
-
-if 0:
-    def apply_op(density):
-        return bound_bdry_op(
-                queue, sigma=cl.array.to_device(queue, density), k=k).get()
-
-    from sumpy.tools import build_matrix
-    n = len(sigma)
-    mat = build_matrix(apply_op, dtype=np.float64, shape=(n, n))
-
-    import matplotlib.pyplot as pt
-    pt.imshow(mat)
-    pt.colorbar()
-    pt.show()
+        # {{{ plot boundary field

-if enable_mayavi:
-    # {{{ plot boundary field
+        fld_on_bdry = bound_bdry_op(queue, sigma=sigma, k=k).get()

-    fld_on_bdry = bound_bdry_op(queue, sigma=sigma, k=k).get()
+        nodes_host = density_discr.nodes().get(queue=queue)
+        mlab.points3d(nodes_host[0], nodes_host[1],
+                fld_on_bdry.real, scale_factor=0.03)

-    nodes_host = density_discr.nodes().get(queue=queue)
-    mlab.points3d(nodes_host[0], nodes_host[1], fld_on_bdry.real, scale_factor=0.03)
+        # }}}

-    # }}}
+    if enable_mayavi:
+        mlab.colorbar()
+        mlab.show()

-if enable_mayavi:
-    mlab.colorbar()
-    mlab.show()
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/perf-model.py b/examples/perf-model.py
deleted file mode 100644
index 3a87d63113ecedda8378485a4869e2771f0aaa4e..0000000000000000000000000000000000000000
--- a/examples/perf-model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# ------------------------------
-nlevels = 6
-nboxes = 1365
-nsources = 60
-ntargets = 12040
-form_mp = 60*p_fmm
-prop_upward = 1365*p_fmm**2
-part_direct = 196560
-m2l = 31920*p_fmm**2
-mp_eval = 0
-form_local = 65000*p_fmm
-prop_downward = 1365*p_fmm**2
-eval_part = 12040*p_fmm
-ncenters = 2040
-qbxl_direct = 2370940*p_qbx
-qbx_m2l = 35339*p_fmm*p_qbx
-qbx_l2l = 2040*p_fmm*p_qbx
-qbx_eval = 1902*p_qbx
diff --git a/examples/scaling-study.py b/examples/scaling-study.py
index 183fc915cb286ccb7731a76e9c1ed6cc3869efd5..3327e3c8c6ce71262018551008a203a04d68e70b 100644
--- a/examples/scaling-study.py
+++ b/examples/scaling-study.py
@@ -16,8 +16,8 @@ bdry_quad_order = 4
 mesh_order = bdry_quad_order
 qbx_order = bdry_quad_order
 bdry_ovsmp_quad_order = 4*bdry_quad_order
-fmm_order = 25
-k = 25
+fmm_order = 10
+k = 0

 # }}}

@@ -54,7 +54,7 @@ def make_mesh(nx, ny):

 def timing_run(nx, ny):
     import logging
-    logging.basicConfig(level=logging.INFO)
+    logging.basicConfig(level=logging.WARNING)  # INFO for more progress info

     cl_ctx = cl.create_some_context()
     queue = cl.CommandQueue(cl_ctx)
@@ -154,6 +154,7 @@ def timing_run(nx, ny):
                 sym_op)(
                         queue, sigma=ones_density).get()

+    qbx_stick_out = qbx.copy(target_stick_out_factor=0.1)
     try:
         fld_in_vol = bind(
                 (qbx_stick_out, PointsTarget(targets)),
@@ -169,7 +170,7 @@ def timing_run(nx, ny):

     #fplot.show_scalar_in_mayavi(fld_in_vol.real, max_val=5)
     fplot.write_vtk_file(
-            "potential.vts",
+            "potential-scaling.vts",
             [
                 ("potential", fld_in_vol),
                 ("indicator", indicator)
diff --git a/experiments/README.md b/experiments/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d0f56efd2f7e29d95e945154962d5f00fdd98cb1
--- /dev/null
+++ b/experiments/README.md
@@ -0,0 +1,7 @@
+# Experiments
+
+What you find in this directory are experiments that *may* have done something
+useful at some point (or not). Unlike `examples`, they are not being tested on
+an ongoing basis.
+
+So if what you find here breaks for you, you get to keep both pieces.
diff --git a/examples/cahn-hilliard.py b/experiments/cahn-hilliard.py
similarity index 100%
rename from examples/cahn-hilliard.py
rename to experiments/cahn-hilliard.py
diff --git a/examples/find-photonic-mode-sk.py b/experiments/find-photonic-mode-sk.py
similarity index 100%
rename from examples/find-photonic-mode-sk.py
rename to experiments/find-photonic-mode-sk.py
diff --git a/examples/find-photonic-mode.py b/experiments/find-photonic-mode.py
similarity index 100%
rename from examples/find-photonic-mode.py
rename to experiments/find-photonic-mode.py
diff --git a/examples/helmholtz-expression-tree.py b/experiments/helmholtz-expression-tree.py
similarity index 100%
rename from examples/helmholtz-expression-tree.py
rename to experiments/helmholtz-expression-tree.py
diff --git a/examples/maxwell.py b/experiments/maxwell.py
similarity index 100%
rename from examples/maxwell.py
rename to experiments/maxwell.py
diff --git a/examples/maxwell_sphere.py b/experiments/maxwell_sphere.py
similarity index 100%
rename from examples/maxwell_sphere.py
rename to experiments/maxwell_sphere.py
diff --git a/examples/poisson.py b/experiments/poisson.py
similarity index 100%
rename from examples/poisson.py
rename to experiments/poisson.py
diff --git a/examples/qbx-tangential-deriv-jump.py b/experiments/qbx-tangential-deriv-jump.py
similarity index 100%
rename from examples/qbx-tangential-deriv-jump.py
rename to experiments/qbx-tangential-deriv-jump.py
diff --git a/examples/stokes-2d-interior.py b/experiments/stokes-2d-interior.py
similarity index 100%
rename from examples/stokes-2d-interior.py
rename to experiments/stokes-2d-interior.py
diff --git a/examples/two-domain-helmholtz.py b/experiments/two-domain-helmholtz.py
similarity index 100%
rename from examples/two-domain-helmholtz.py
rename to experiments/two-domain-helmholtz.py
diff --git a/pytential/qbx/__init__.py b/pytential/qbx/__init__.py
index c3ca7d95fc58877182743fb6760b1de04b65d94a..afb532af7b291ba3e9fffe61de3e9b973e0a2c06 100644
--- a/pytential/qbx/__init__.py
+++ b/pytential/qbx/__init__.py
@@ -82,6 +82,7 @@ class QBXLayerPotentialSource(LayerPotentialSourceBase):
             _box_extent_norm=None,
             _from_sep_smaller_crit=None,
             _from_sep_smaller_min_nsources_cumul=None,
+            _tree_kind="adaptive",
             geometry_data_inspector=None,
             fmm_backend="sumpy",
             target_stick_out_factor=_not_provided):
@@ -177,6 +178,7 @@ class QBXLayerPotentialSource(LayerPotentialSourceBase):
         self._from_sep_smaller_crit = _from_sep_smaller_crit
         self._from_sep_smaller_min_nsources_cumul = \
                 _from_sep_smaller_min_nsources_cumul
+        self._tree_kind = _tree_kind
         self.geometry_data_inspector = geometry_data_inspector

         # /!\ *All* parameters set here must also be set by copy() below,
@@ -189,11 +191,13 @@ class QBXLayerPotentialSource(LayerPotentialSourceBase):
             density_discr=None,
             fine_order=None,
             qbx_order=None,
+            fmm_order=_not_provided,
             fmm_level_to_order=_not_provided,
             to_refined_connection=None,
             target_association_tolerance=_not_provided,
             _expansions_in_tree_have_extent=_not_provided,
             _expansion_stick_out_factor=_not_provided,
+            _tree_kind=None,
             geometry_data_inspector=None,
             debug=_not_provided,
@@ -224,6 +228,18 @@ class QBXLayerPotentialSource(LayerPotentialSourceBase):

         # }}}

+        kwargs = {}
+
+        if (fmm_order is not _not_provided
+                and fmm_level_to_order is not _not_provided):
+            raise TypeError("may not specify both fmm_order and fmm_level_to_order")
+        elif fmm_order is not _not_provided:
+            kwargs["fmm_order"] = fmm_order
+        elif fmm_level_to_order is not _not_provided:
+            kwargs["fmm_level_to_order"] = fmm_level_to_order
+        else:
+            kwargs["fmm_level_to_order"] = self.fmm_level_to_order
+
         # FIXME Could/should share wrangler and geometry kernels
         # if no relevant changes have been made.
         return QBXLayerPotentialSource(
@@ -231,11 +247,6 @@ class QBXLayerPotentialSource(LayerPotentialSourceBase):
                 fine_order=(
                     fine_order if fine_order is not None else self.fine_order),
                 qbx_order=qbx_order if qbx_order is not None else self.qbx_order,
-                fmm_level_to_order=(
-                    # False is a valid value here
-                    fmm_level_to_order
-                    if fmm_level_to_order is not _not_provided
-                    else self.fmm_level_to_order),
                 target_association_tolerance=target_association_tolerance,

                 to_refined_connection=(
@@ -265,10 +276,11 @@ class QBXLayerPotentialSource(LayerPotentialSourceBase):
                 _from_sep_smaller_crit=self._from_sep_smaller_crit,
                 _from_sep_smaller_min_nsources_cumul=(
                     self._from_sep_smaller_min_nsources_cumul),
+                _tree_kind=_tree_kind or self._tree_kind,
                 geometry_data_inspector=(
                     geometry_data_inspector or self.geometry_data_inspector),
                 fmm_backend=self.fmm_backend,
-                )
+                **kwargs)

     # }}}

@@ -409,12 +421,9 @@ class QBXLayerPotentialSource(LayerPotentialSourceBase):
     @memoize_method
     def _expansion_radii(self, last_dim_length):
         if last_dim_length == "npanels":
-            # FIXME: Make this an error
-
-            from warnings import warn
-            warn("Passing 'npanels' as last_dim_length to _expansion_radii is "
-                    "deprecated. Expansion radii should be allowed to vary "
-                    "within a panel.", stacklevel=3)
+            raise ValueError(
+                    "Passing 'npanels' as last_dim_length to _expansion_radii is "
+                    "not allowed. Allowed values are 'nsources' and 'ncenters'.")

         with cl.CommandQueue(self.cl_context) as queue:
             return (self._panel_sizes(last_dim_length).with_queue(queue) * 0.5
@@ -457,6 +466,7 @@ class QBXLayerPotentialSource(LayerPotentialSourceBase):
         return QBXFMMGeometryData(self.qbx_fmm_code_getter,
                 self, target_discrs_and_qbx_sides,
                 target_association_tolerance=self.target_association_tolerance,
+                tree_kind=self._tree_kind,
                 debug=self.debug)

     # }}}
@@ -486,8 +496,13 @@ class QBXLayerPotentialSource(LayerPotentialSourceBase):

     def exec_compute_potential_insn(self, queue, insn, bound_expr, evaluate):
         from pytools.obj_array import with_object_array_or_scalar
-        from functools import partial
-        oversample = partial(self.resampler, queue)
+
+        def oversample_nonscalars(vec):
+            from numbers import Number
+            if isinstance(vec, Number):
+                return vec
+            else:
+                return self.resampler(queue, vec)

         if not self._refined_for_global_qbx:
             from warnings import warn
@@ -497,7 +512,7 @@ class QBXLayerPotentialSource(LayerPotentialSourceBase):

         def evaluate_wrapper(expr):
             value = evaluate(expr)
-            return with_object_array_or_scalar(oversample, value)
+            return with_object_array_or_scalar(oversample_nonscalars, value)

         if self.fmm_level_to_order is False:
             func = self.exec_compute_potential_insn_direct
diff --git a/pytential/qbx/direct.py b/pytential/qbx/direct.py
index 6dc5cd9abbb7319d0cd7a4029a3a2b22b6a710e5..70fa0d1a9e4bbdc6b01a00ca66ab4bc1ce22ab5a 100644
--- a/pytential/qbx/direct.py
+++ b/pytential/qbx/direct.py
@@ -25,62 +25,91 @@ THE SOFTWARE.
 import loopy as lp
 import numpy as np

-from sumpy.qbx import LayerPotential as LayerPotentialBase
+from sumpy.qbx import LayerPotentialBase


 # {{{ qbx applier on a target/center subset

 class LayerPotentialOnTargetAndCenterSubset(LayerPotentialBase):
-    def get_compute_a_and_b_vecs(self):
-        return """
-            <> icenter = qbx_center_numbers[itgt]
-            <> itgt_overall = qbx_tgt_numbers[itgt]
-            for idim
-                <> a[idim] = center[idim,icenter] - src[idim,isrc] {id=compute_a}
-                <> b[idim] = tgt[idim,itgt_overall] - center[idim,icenter] \
-                        {id=compute_b}
-                <> rscale = expansion_radii[icenter]
-            end
-            """
-
-    def get_src_tgt_arguments(self):
-        return [
+    default_name = "qbx_tgt_ctr_subset"
+
+    def get_kernel(self):
+        loopy_insns, result_names = self.get_loopy_insns_and_result_names()
+        kernel_exprs = self.get_kernel_exprs(result_names)
+
+        from sumpy.tools import gather_loopy_source_arguments
+        arguments = (
+                gather_loopy_source_arguments(self.kernels)
+                + [
                 lp.GlobalArg("src", None,
                     shape=(self.dim, "nsources"), order="C"),
                 lp.GlobalArg("tgt", None,
                     shape=(self.dim, "ntargets_total"), order="C"),
                 lp.GlobalArg("center", None,
-                    shape=(self.dim, "ncenters_total"), order="C"),
-                lp.GlobalArg("expansion_radii", None, shape="ncenters_total"),
-                lp.GlobalArg("qbx_tgt_numbers", None, shape="ntargets"),
-                lp.GlobalArg("qbx_center_numbers", None, shape="ntargets"),
+                    shape=(self.dim, "ncenters_total"), dim_tags="sep,C"),
+                lp.GlobalArg("expansion_radii", None,
+                    shape="ncenters_total"),
+                lp.GlobalArg("qbx_tgt_numbers", None,
+                    shape="ntargets"),
+                lp.GlobalArg("qbx_center_numbers", None,
+                    shape="ntargets"),
                 lp.ValueArg("nsources", np.int32),
                 lp.ValueArg("ntargets", np.int32),
                 lp.ValueArg("ntargets_total", np.int32),
-                lp.ValueArg("ncenters_total", np.int32),
-                ]
-
-    def get_input_and_output_arguments(self):
-        return [
-                lp.GlobalArg("strength_%d" % i, None, shape="nsources", order="C")
-                for i in range(self.strength_count)
-                ]+[
-                lp.GlobalArg("result_%d" % i, None, shape="ntargets_total",
-                    order="C")
-                for i in range(len(self.kernels))
-                ]
-
-    def get_result_store_instructions(self):
-        return [
-                """
-                result_KNLIDX[itgt_overall] = \
-                        knl_KNLIDX_scaling*simul_reduce(\
-                            sum, isrc, pair_result_KNLIDX) {inames=itgt}
-                """.replace("KNLIDX", str(iknl))
-                for iknl in range(len(self.expansions))
-                ]
+                lp.ValueArg("ncenters_total", np.int32)]
+                + [lp.GlobalArg("strength_%d" % i, None,
+                    shape="nsources", order="C")
+                    for i in range(self.strength_count)]
+                + [lp.GlobalArg("result_%d" % i, self.value_dtypes[i],
+                    shape="ntargets_total", order="C")
+                    for i in range(len(self.kernels))])

-# }}}
+        loopy_knl = lp.make_kernel([
+            "{[itgt]: 0 <= itgt < ntargets}",
+            "{[isrc]: 0 <= isrc < nsources}",
+            "{[idim]: 0 <= idim < dim}"
+            ],
+            self.get_kernel_scaling_assignments()
+            + ["for itgt, isrc"]
+            + ["""
+                <> icenter = qbx_center_numbers[itgt]
+                <> itgt_overall = qbx_tgt_numbers[itgt]
+
+                <> a[idim] = center[idim, icenter] - src[idim, isrc] \
+                        {dup=idim}
+                <> b[idim] = tgt[idim, itgt_overall] - center[idim, icenter] \
+                        {dup=idim}
+                <> rscale = expansion_radii[icenter]
+            """]
+            + loopy_insns + kernel_exprs
+            + ["""
+                result_{i}[itgt_overall] = knl_{i}_scaling * \
+                        simul_reduce(sum, isrc, pair_result_{i}) \
+                        {{inames=itgt}}
+                """.format(i=iknl)
+                for iknl in range(len(self.expansions))]
+            + ["end"],
+            arguments,
+            name=self.name,
+            assumptions="ntargets>=1 and nsources>=1",
+            fixed_parameters=dict(dim=self.dim))
+
+        loopy_knl = lp.tag_inames(loopy_knl, "idim*:unr")
+
+        for expn in self.expansions:
+            loopy_knl = expn.prepare_loopy_kernel(loopy_knl)
+
+        return loopy_knl
+
+    def __call__(self, queue, targets, sources, centers, strengths, expansion_radii,
+            **kwargs):
+        knl = self.get_cached_optimized_kernel()
+
+        for i, dens in enumerate(strengths):
+            kwargs["strength_%d" % i] = dens
+
+        return knl(queue, src=sources, tgt=targets, center=centers,
+                expansion_radii=expansion_radii, **kwargs)
+
+# }}}

 # vim: foldmethod=marker
diff --git a/pytential/qbx/fmm.py b/pytential/qbx/fmm.py
index a5292fdede8bab1f61e0df97cf75e19b7cd63d8f..037f818893a77edbad9a032a3868ac0dec8af514 100644
--- a/pytential/qbx/fmm.py
+++ b/pytential/qbx/fmm.py
@@ -248,7 +248,9 @@ QBXFMMGeometryData.non_qbx_box_target_lists`),
                     self.multipole_expansions_view(multipole_exps, isrc_level)

             evt, (qbx_expansions_res,) = m2qbxl(self.queue,
-                    qbx_center_to_target_box=geo_data.qbx_center_to_target_box(),
+                    qbx_center_to_target_box_source_level=(
+                        geo_data.qbx_center_to_target_box_source_level(isrc_level)
+                    ),

                     centers=self.tree.box_centers,
                     qbx_centers=geo_data.centers(),
@@ -437,8 +439,7 @@ def drive_fmm(expansion_wrangler, src_weights):
     #   contribution *out* of the downward-propagating local expansions)

     non_qbx_potentials = non_qbx_potentials + wrangler.eval_multipoles(
-            traversal.level_start_target_box_nrs,
-            traversal.target_boxes,
+            traversal.target_boxes_sep_smaller_by_source_level,
             traversal.from_sep_smaller_by_level,
             mpole_exps)
@@ -717,13 +718,15 @@ def assemble_performance_data(geo_data, uses_pde_expansions,

     assert tree.nlevels == len(traversal.from_sep_smaller_by_level)

-    for itgt_box, tgt_ibox in enumerate(traversal.target_boxes):
-        ntargets = box_target_counts_nonchild[tgt_ibox]
-
-        for ilevel, sep_smaller_list in enumerate(
-                traversal.from_sep_smaller_by_level):
+    for ilevel, sep_smaller_list in enumerate(
+            traversal.from_sep_smaller_by_level):
+        for itgt_box, tgt_ibox in enumerate(
+                traversal.target_boxes_sep_smaller_by_source_level[ilevel]):
+            ntargets = box_target_counts_nonchild[tgt_ibox]
             start, end = sep_smaller_list.starts[itgt_box:itgt_box+2]
-            nmp_eval[ilevel, itgt_box] += ntargets * (end-start)
+            nmp_eval[ilevel, sep_smaller_list.nonempty_indices[itgt_box]] = (
+                    ntargets * (end-start)
+            )

     result["mp_eval"] = summarize_parallel(nmp_eval, ncoeffs_fmm)

@@ -772,6 +775,13 @@ def assemble_performance_data(geo_data, uses_pde_expansions,
     global_qbx_centers = geo_data.global_qbx_centers()
     qbx_center_to_target_box = geo_data.qbx_center_to_target_box()
     center_to_targets_starts = geo_data.center_to_tree_targets().starts
+    qbx_center_to_target_box_source_level = np.empty(
+            (tree.nlevels,), dtype=object
+    )
+    for src_level in range(tree.nlevels):
+        qbx_center_to_target_box_source_level[src_level] = (
+                geo_data.qbx_center_to_target_box_source_level(src_level)
+        )

     with cl.CommandQueue(geo_data.cl_context) as queue:
         global_qbx_centers = global_qbx_centers.get(
@@ -780,6 +790,10 @@ def assemble_performance_data(geo_data, uses_pde_expansions,
                 queue=queue)
         center_to_targets_starts = center_to_targets_starts.get(
                 queue=queue)
+        for src_level in range(tree.nlevels):
+            qbx_center_to_target_box_source_level[src_level] = (
+                qbx_center_to_target_box_source_level[src_level].get(queue=queue)
+            )

     def process_form_qbxl():
         ncenters = geo_data.ncenters
@@ -856,8 +870,13 @@ def assemble_performance_data(geo_data, uses_pde_expansions,
         assert tree.nlevels == len(traversal.from_sep_smaller_by_level)

         for isrc_level, ssn in enumerate(traversal.from_sep_smaller_by_level):
+
             for itgt_center, tgt_icenter in enumerate(global_qbx_centers):
-                icontaining_tgt_box = qbx_center_to_target_box[tgt_icenter]
+                icontaining_tgt_box = qbx_center_to_target_box_source_level[
+                        isrc_level][tgt_icenter]
+
+                if icontaining_tgt_box == -1:
+                    continue

                 start, stop = (
                         ssn.starts[icontaining_tgt_box],
diff --git a/pytential/qbx/fmmlib.py b/pytential/qbx/fmmlib.py
index 887b3049b568f0b0a98a2a998aeee81d6ebf4dfe..578dadce208da52b81f1e5014381e0aa04df4a17 100644
--- a/pytential/qbx/fmmlib.py
+++ b/pytential/qbx/fmmlib.py
@@ -99,6 +99,11 @@ class ToHostTransferredGeoDataWrapper(object):
     def qbx_center_to_target_box(self):
         return self.geo_data.qbx_center_to_target_box().get(queue=self.queue)

+    @memoize_method
+    def qbx_center_to_target_box_source_level(self, source_level):
+        return self.geo_data.qbx_center_to_target_box_source_level(
+                source_level).get(queue=self.queue)
+
     @memoize_method
     def non_qbx_box_target_lists(self):
         return self.geo_data.non_qbx_box_target_lists().get(queue=self.queue)
@@ -347,23 +352,27 @@ class QBXFMMLibExpansionWrangler(FMMLibExpansionWrangler):
         qbx_exps = self.qbx_local_expansion_zeros()

         geo_data = self.geo_data
-        qbx_center_to_target_box = geo_data.qbx_center_to_target_box()
         qbx_centers = geo_data.centers()
         centers = self.tree.box_centers
         ngqbx_centers = len(geo_data.global_qbx_centers())
+        traversal = geo_data.traversal()

         if ngqbx_centers == 0:
             return qbx_exps

         mploc = self.get_translation_routine("%ddmploc", vec_suffix="_imany")

-        for isrc_level, ssn in enumerate(
-                geo_data.traversal().from_sep_smaller_by_level):
+        for isrc_level, ssn in enumerate(traversal.from_sep_smaller_by_level):
             source_level_start_ibox, source_mpoles_view = \
                     self.multipole_expansions_view(multipole_exps, isrc_level)

             tgt_icenter_vec = geo_data.global_qbx_centers()
-            icontaining_tgt_box_vec = qbx_center_to_target_box[tgt_icenter_vec]
+            qbx_center_to_target_box_source_level = (
+                geo_data.qbx_center_to_target_box_source_level(isrc_level)
+            )
+            icontaining_tgt_box_vec = qbx_center_to_target_box_source_level[
+                tgt_icenter_vec
+            ]

             rscale2 = geo_data.expansion_radii()[geo_data.global_qbx_centers()]

@@ -372,9 +381,13 @@ class QBXFMMLibExpansionWrangler(FMMLibExpansionWrangler):
                 kwargs["radius"] = (0.5
                         * geo_data.expansion_radii()[geo_data.global_qbx_centers()])

-            nsrc_boxes_per_gqbx_center = (
-                    ssn.starts[icontaining_tgt_box_vec+1]
-                    - ssn.starts[icontaining_tgt_box_vec])
+            nsrc_boxes_per_gqbx_center = np.zeros(icontaining_tgt_box_vec.shape,
+                    dtype=traversal.tree.box_id_dtype)
+            mask = (icontaining_tgt_box_vec != -1)
+            nsrc_boxes_per_gqbx_center[mask] = (
+                ssn.starts[icontaining_tgt_box_vec[mask] + 1] -
+                ssn.starts[icontaining_tgt_box_vec[mask]]
+            )
             nsrc_boxes = np.sum(nsrc_boxes_per_gqbx_center)

             src_boxes_starts = np.empty(ngqbx_centers+1, dtype=np.int32)
@@ -387,7 +400,9 @@ class QBXFMMLibExpansionWrangler(FMMLibExpansionWrangler):
             src_ibox = np.empty(nsrc_boxes, dtype=np.int32)
             for itgt_center, tgt_icenter in enumerate(
                     geo_data.global_qbx_centers()):
-                icontaining_tgt_box = qbx_center_to_target_box[tgt_icenter]
+                icontaining_tgt_box = qbx_center_to_target_box_source_level[
+                    tgt_icenter
+                ]
                 src_ibox[
                         src_boxes_starts[itgt_center]:
                         src_boxes_starts[itgt_center+1]] = (
diff --git a/pytential/qbx/geometry.py b/pytential/qbx/geometry.py
index 71160948543fcdc225be382d2ed2ffcf5ef7aec3..47c578b764f39287e1dce2c39ec32d06e3ca38b1 100644
--- a/pytential/qbx/geometry.py
+++ b/pytential/qbx/geometry.py
@@ -30,7 +30,9 @@ import pyopencl.array  # noqa
 from pytools import memoize_method
 from boxtree.tools import DeviceDataRecord
 import loopy as lp
+from loopy.version import MOST_RECENT_LANGUAGE_VERSION
 from cgen import Enum
+from time import time

 from pytential.qbx.utils import TreeCodeContainerMixin
@@ -125,7 +127,8 @@ class QBXFMMGeometryCodeGetter(TreeCodeContainerMixin):
                 """
                     targets[dim, i] = points[dim, i]
                     """,
-                default_offset=lp.auto, name="copy_targets")
+                default_offset=lp.auto, name="copy_targets",
+                lang_version=MOST_RECENT_LANGUAGE_VERSION)

         knl = lp.fix_parameters(knl, ndims=self.ambient_dim)

@@ -182,7 +185,8 @@ class QBXFMMGeometryCodeGetter(TreeCodeContainerMixin):
                 "..."
                 ],
                 name="qbx_center_to_target_box_lookup",
-                silenced_warnings="write_race(tgt_write)")
+                silenced_warnings="write_race(tgt_write)",
+                lang_version=MOST_RECENT_LANGUAGE_VERSION)

         knl = lp.split_iname(knl, "ibox", 128,
                 inner_tag="l.0", outer_tag="g.0")
@@ -244,7 +248,8 @@ class QBXFMMGeometryCodeGetter(TreeCodeContainerMixin):
                     lp.ValueArg("ntargets", np.int32),
                     ],
                 name="pick_used_centers",
-                silenced_warnings="write_race(center_is_used_write)")
+                silenced_warnings="write_race(center_is_used_write)",
+                lang_version=MOST_RECENT_LANGUAGE_VERSION)

         knl = lp.split_iname(knl, "i", 128, inner_tag="l.0", outer_tag="g.0")
         return knl
@@ -349,13 +354,16 @@ class QBXFMMGeometryData(object):

     def __init__(self, code_getter, lpot_source,
             target_discrs_and_qbx_sides,
-            target_association_tolerance, debug):
+            target_association_tolerance,
+            tree_kind, debug):
         """
         .. rubric:: Constructor arguments

         See the attributes of the same name for the meaning of most
         of the constructor arguments.
+        :arg tree_kind: The tree kind to pass to the tree builder.
+
         :arg debug: a :class:`bool` flag for whether to enable
             potentially costly self-checks
         """
@@ -365,6 +373,7 @@ class QBXFMMGeometryData(object):
         self.target_discrs_and_qbx_sides = \
                 target_discrs_and_qbx_sides
         self.target_association_tolerance = target_association_tolerance
+        self.tree_kind = tree_kind
         self.debug = debug

     @property
@@ -494,11 +503,17 @@ class QBXFMMGeometryData(object):
                     self.coord_dtype)
             target_radii[:self.ncenters] = self.expansion_radii()

-            # FIXME: https://gitlab.tiker.net/inducer/pytential/issues/72
-            # refine_weights = cl.array.zeros(queue, nparticles, dtype=np.int32)
-            # refine_weights[:nsources] = 1
             refine_weights = cl.array.empty(queue, nparticles, dtype=np.int32)
-            refine_weights.fill(1)
+
+            # Assign a weight of 1 to all sources, QBX centers, and conventional
+            # (non-QBX) targets. Assign a weight of 0 to all targets that need
+            # QBX centers. The potential at the latter targets is mediated
+            # entirely by the QBX center, so as a matter of evaluation cost,
+            # their location in the tree is irrelevant.
+            refine_weights[:-target_info.ntargets] = 1
+            user_ttc = self.user_target_to_center().with_queue(queue)
+            refine_weights[-target_info.ntargets:] = (
+                    user_ttc == target_state.NO_QBX_NEEDED).astype(np.int32)

             refine_weights.finish()

@@ -511,7 +526,7 @@ class QBXFMMGeometryData(object):
                     debug=self.debug,
                     stick_out_factor=lpot_src._expansion_stick_out_factor,
                     extent_norm=lpot_src._box_extent_norm,
-                    kind="adaptive")
+                    kind=self.tree_kind)

             if self.debug:
                 tgt_count_2 = cl.array.sum(
@@ -604,6 +619,36 @@ class QBXFMMGeometryData(object):

         return qbx_center_to_target_box.with_queue(None)

+    @memoize_method
+    def qbx_center_to_target_box_source_level(self, source_level):
+        """Return an array for mapping qbx centers to indices into
+        interaction lists as found in
+        ``traversal.from_sep_smaller_by_level[source_level]``.
+        Returns -1 if no such interaction list exists on *source_level*.
+ """ + traversal = self.traversal() + sep_smaller = traversal.from_sep_smaller_by_level[source_level] + qbx_center_to_target_box = self.qbx_center_to_target_box() + + with cl.CommandQueue(self.cl_context) as queue: + target_box_to_target_box_source_level = cl.array.empty( + queue, len(traversal.target_boxes), + dtype=traversal.tree.box_id_dtype + ) + target_box_to_target_box_source_level.fill(-1) + target_box_to_target_box_source_level[sep_smaller.nonempty_indices] = ( + cl.array.arange(queue, sep_smaller.num_nonempty_lists, + dtype=traversal.tree.box_id_dtype) + ) + + qbx_center_to_target_box_source_level = ( + target_box_to_target_box_source_level[ + qbx_center_to_target_box + ] + ) + + return qbx_center_to_target_box_source_level.with_queue(None) + @memoize_method def global_qbx_flags(self): """Return an array of :class:`numpy.int8` of length @@ -637,7 +682,8 @@ class QBXFMMGeometryData(object): user_target_to_center = self.user_target_to_center() with cl.CommandQueue(self.cl_context) as queue: - logger.info("find global qbx centers: start") + logger.debug("find global qbx centers: start") + center_find_start_time = time() tgt_assoc_result = ( user_target_to_center.with_queue(queue)[self.ncenters:]) @@ -663,7 +709,14 @@ class QBXFMMGeometryData(object): ], queue=queue) - logger.info("find global qbx centers: done") + center_find_elapsed = time() - center_find_start_time + if center_find_elapsed > 0.1: + done_logger = logger.info + else: + done_logger = logger.debug + + done_logger("find global qbx centers: done after %g seconds", + center_find_elapsed) if self.debug: logger.debug( @@ -689,7 +742,7 @@ class QBXFMMGeometryData(object): with cl.CommandQueue(self.cl_context) as queue: target_side_prefs = (self - .target_side_preferences()[self.ncenters:].get(queue=queue)) + .target_side_preferences()[self.ncenters:].get(queue=queue)) target_discrs_and_qbx_sides = [( PointsTarget(tgt_info.targets[:, self.ncenters:]), @@ -706,9 +759,8 @@ class QBXFMMGeometryData(object): target_association_tolerance=( self.target_association_tolerance)) - tree = self.tree() - - result = cl.array.empty(queue, tgt_info.ntargets, tree.particle_id_dtype) + result = cl.array.empty(queue, tgt_info.ntargets, + tgt_assoc_result.target_to_center.dtype) result[:self.ncenters].fill(target_state.NO_QBX_NEEDED) result[self.ncenters:] = tgt_assoc_result.target_to_center @@ -725,7 +777,8 @@ class QBXFMMGeometryData(object): user_ttc = self.user_target_to_center() with cl.CommandQueue(self.cl_context) as queue: - logger.info("build center -> targets lookup table: start") + logger.debug("build center -> targets lookup table: start") + c2t_start_time = time() tree_ttc = cl.array.empty_like(user_ttc).with_queue(queue) tree_ttc[self.tree().sorted_target_ids] = user_ttc @@ -749,7 +802,13 @@ class QBXFMMGeometryData(object): filtered_tree_ttc, filtered_target_ids, self.ncenters, tree_ttc.dtype) - logger.info("build center -> targets lookup table: done") + c2t_elapsed = time() - c2t_start_time + if c2t_elapsed > 0.1: + done_logger = logger.info + else: + done_logger = logger.debug + done_logger("build center -> targets lookup table: " + "done after %g seconds", c2t_elapsed) result = CenterToTargetList( starts=center_target_starts, @@ -768,7 +827,8 @@ class QBXFMMGeometryData(object): """ with cl.CommandQueue(self.cl_context) as queue: - logger.info("find non-qbx box target lists: start") + logger.debug("find non-qbx box target lists: start") + nonqbx_start_time = time() flags = (self.user_target_to_center().with_queue(queue) == 
target_state.NO_QBX_NEEDED) @@ -781,10 +841,17 @@ class QBXFMMGeometryData(object): nqbx_centers = self.ncenters flags[:nqbx_centers] = 0 - from boxtree.tree import filter_target_lists_in_tree_order - result = filter_target_lists_in_tree_order(queue, self.tree(), flags) - - logger.info("find non-qbx box target lists: done") + tree = self.tree() + plfilt = self.code_getter.particle_list_filter() + result = plfilt.filter_target_lists_in_tree_order(queue, tree, flags) + + nonqbx_elapsed = time() - nonqbx_start_time + if nonqbx_elapsed > 0.1: + done_logger = logger.info + else: + done_logger = logger.debug + done_logger("find non-qbx box target lists: done after %g seconds", + nonqbx_elapsed) return result.with_queue(None) diff --git a/pytential/qbx/interactions.py b/pytential/qbx/interactions.py index 6105472db63a1aad41256798880ecef91e852c49..dad4db1f2988f566fcdd9c2d07ead3c8d455e525 100644 --- a/pytential/qbx/interactions.py +++ b/pytential/qbx/interactions.py @@ -24,6 +24,7 @@ THE SOFTWARE. import numpy as np import loopy as lp +from loopy.version import MOST_RECENT_LANGUAGE_VERSION from pytools import memoize_method from six.moves import range @@ -31,8 +32,7 @@ from sumpy.p2e import P2EBase from sumpy.e2e import E2EBase from sumpy.e2p import E2PBase - -PYTENTIAL_KERNEL_VERSION = 5 +from pytential.version import PYTENTIAL_KERNEL_VERSION # {{{ form qbx expansions from points @@ -106,14 +106,16 @@ class P2QBXLFromCSR(P2EBase): qbx_expansions[tgt_icenter, {i}] = \ simul_reduce(sum, (isrc_box, isrc), strength*coeff{i}) \ {{id_prefix=write_expn}} - """.format(i=i) for i in range(ncoeffs)] + [""" + """.format(i=i) + for i in range(ncoeffs)] + [""" end """], arguments, name=self.name, assumptions="ntgt_centers>=1", silenced_warnings="write_race(write_expn*)", - fixed_parameters=dict(dim=self.dim)) + fixed_parameters=dict(dim=self.dim), + lang_version=MOST_RECENT_LANGUAGE_VERSION) loopy_knl = self.expansion.prepare_loopy_kernel(loopy_knl) loopy_knl = lp.tag_inames(loopy_knl, "idim*:unr") @@ -158,37 +160,42 @@ class M2QBXL(E2EBase): ], [""" for icenter - <> icontaining_tgt_box = qbx_center_to_target_box[icenter] + <> icontaining_tgt_box = \ + qbx_center_to_target_box_source_level[icenter] - <> tgt_center[idim] = qbx_centers[idim, icenter] \ - {id=fetch_tgt_center} - <> tgt_rscale = qbx_expansion_radii[icenter] + if icontaining_tgt_box != -1 + <> tgt_center[idim] = qbx_centers[idim, icenter] \ + {id=fetch_tgt_center} + <> tgt_rscale = qbx_expansion_radii[icenter] - <> isrc_start = src_box_starts[icontaining_tgt_box] - <> isrc_stop = src_box_starts[icontaining_tgt_box+1] + <> isrc_start = src_box_starts[icontaining_tgt_box] + <> isrc_stop = src_box_starts[icontaining_tgt_box+1] - for isrc_box - <> src_ibox = src_box_lists[isrc_box] \ - {id=read_src_ibox} - <> src_center[idim] = centers[idim, src_ibox] {dup=idim} - <> d[idim] = tgt_center[idim] - src_center[idim] {dup=idim} - """] + [""" + for isrc_box + <> src_ibox = src_box_lists[isrc_box] \ + {id=read_src_ibox} + <> src_center[idim] = centers[idim, src_ibox] {dup=idim} + <> d[idim] = tgt_center[idim] - src_center[idim] \ + {dup=idim} + """] + [""" - <> src_coeff{i} = \ - src_expansions[src_ibox - src_base_ibox, {i}] \ - {{dep=read_src_ibox}} + <> src_coeff{i} = \ + src_expansions[src_ibox - src_base_ibox, {i}] \ + {{dep=read_src_ibox}} - """.format(i=i) for i in range(ncoeff_src)] + [ + """.format(i=i) for i in range(ncoeff_src)] + [ - ] + self.get_translation_loopy_insns() + [""" + ] + self.get_translation_loopy_insns() + [""" + end + """] + [""" + 
qbx_expansions[icenter, {i}] = \ + qbx_expansions[icenter, {i}] + \ + simul_reduce(sum, isrc_box, coeff{i}) \ + {{id_prefix=write_expn}} + """.format(i=i) + for i in range(ncoeff_tgt)] + [""" end - """] + [""" - qbx_expansions[icenter, {i}] = qbx_expansions[icenter, {i}] + \ - simul_reduce(sum, isrc_box, coeff{i}) \ - {{id_prefix=write_expn}} - """.format(i=i) for i in range(ncoeff_tgt)] + [""" - end """], [ @@ -209,7 +216,8 @@ class M2QBXL(E2EBase): ] + gather_loopy_arguments([self.src_expansion, self.tgt_expansion]), name=self.name, assumptions="ncenters>=1", silenced_warnings="write_race(write_expn*)", - fixed_parameters=dict(dim=self.dim)) + fixed_parameters=dict(dim=self.dim), + lang_version=MOST_RECENT_LANGUAGE_VERSION) for expn in [self.src_expansion, self.tgt_expansion]: loopy_knl = expn.prepare_loopy_kernel(loopy_knl) @@ -289,7 +297,8 @@ class L2QBXL(E2EBase): qbx_expansions[icenter, {i}] = \ qbx_expansions[icenter, {i}] + coeff{i} \ {{id_prefix=write_expn}} - """.format(i=i) for i in range(ncoeff_tgt)] + [""" + """.format(i=i) + for i in range(ncoeff_tgt)] + [""" end end """], @@ -309,7 +318,8 @@ class L2QBXL(E2EBase): name=self.name, assumptions="ncenters>=1", silenced_warnings="write_race(write_expn*)", - fixed_parameters=dict(dim=self.dim, nchildren=2**self.dim)) + fixed_parameters=dict(dim=self.dim, nchildren=2**self.dim), + lang_version=MOST_RECENT_LANGUAGE_VERSION) for expn in [self.src_expansion, self.tgt_expansion]: loopy_knl = expn.prepare_loopy_kernel(loopy_knl) @@ -384,7 +394,8 @@ class QBXL2P(E2PBase): result[{i},center_itgt] = kernel_scaling * result_{i}_p \ {{id_prefix=write_result}} - """.format(i=i) for i in range(len(result_names))] + [""" + """.format(i=i) + for i in range(len(result_names))] + [""" end end """], @@ -405,7 +416,8 @@ class QBXL2P(E2PBase): name=self.name, assumptions="nglobal_qbx_centers>=1", silenced_warnings="write_race(write_result*)", - fixed_parameters=dict(dim=self.dim, nresults=len(result_names))) + fixed_parameters=dict(dim=self.dim, nresults=len(result_names)), + lang_version=MOST_RECENT_LANGUAGE_VERSION) loopy_knl = lp.tag_inames(loopy_knl, "idim*:unr") loopy_knl = self.expansion.prepare_loopy_kernel(loopy_knl) diff --git a/pytential/qbx/refinement.py b/pytential/qbx/refinement.py index 5d90a320015ecd2a249c00634ffcf221cac261ef..08539c6899e696dd8af6778bdefdd41de33d87a8 100644 --- a/pytential/qbx/refinement.py +++ b/pytential/qbx/refinement.py @@ -28,6 +28,7 @@ THE SOFTWARE. 
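Aside on the recurring timing pattern in the geometry and target-association hunks of this patch: each "done" message now chooses its log level from the measured duration, so fast runs stay at DEBUG. A minimal sketch of the same idiom factored into a reusable helper; the name `log_completion` and the 0.1-second threshold are illustrative, not part of this patch:

    from contextlib import contextmanager
    from time import time
    import logging

    logger = logging.getLogger(__name__)


    @contextmanager
    def log_completion(activity, threshold=0.1):
        # Announce the start quietly; promote the completion message to
        # INFO only when the activity ran longer than the threshold.
        logger.debug("%s: start", activity)
        start = time()
        yield
        elapsed = time() - start
        done_logger = logger.info if elapsed > threshold else logger.debug
        done_logger("%s: done after %g seconds", activity, elapsed)

Usage would then be `with log_completion("find non-qbx box target lists"): ...` in place of the repeated if/else blocks.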
import loopy as lp +from loopy.version import MOST_RECENT_LANGUAGE_VERSION import numpy as np import pyopencl as cl @@ -255,7 +256,8 @@ class RefinerCodeContainer(TreeCodeContainerMixin): """, options="return_dict", silenced_warnings="write_race(write_refine_flags_updated)", - name="refine_kernel_length_scale_to_panel_size_ratio") + name="refine_kernel_length_scale_to_panel_size_ratio", + lang_version=MOST_RECENT_LANGUAGE_VERSION) knl = lp.split_iname(knl, "panel", 128, inner_tag="l.0", outer_tag="g.0") return knl diff --git a/pytential/qbx/target_assoc.py b/pytential/qbx/target_assoc.py index 7b9736ce4b6d34f70dcb411bfcba387ea2ec7889..8d85a5dcf6715182ada6918332303dc48d000b9c 100644 --- a/pytential/qbx/target_assoc.py +++ b/pytential/qbx/target_assoc.py @@ -39,6 +39,7 @@ from cgen import Enum from pytential.qbx.utils import ( QBX_TREE_C_PREAMBLE, QBX_TREE_MAKO_DEFS, TreeWranglerBase, TreeCodeContainerMixin) +from time import time unwrap_args = AreaQueryElementwiseTemplate.unwrap_args @@ -497,7 +498,8 @@ class TargetAssociationWrangler(TreeWranglerBase): wait_for=wait_for) wait_for = [evt] - logger.info("target association: marking targets close to panels") + logger.debug("target association: marking targets close to panels") + mark_start_time = time() tunnel_radius_by_source = lpot_source._close_target_tunnel_radius("nsources") @@ -527,7 +529,13 @@ class TargetAssociationWrangler(TreeWranglerBase): cl.wait_for_events([evt]) - logger.info("target association: done marking targets close to panels") + mark_elapsed = time() - mark_start_time + if mark_elapsed > 0.1: + done_logger = logger.info + else: + done_logger = logger.debug + done_logger("target association: done marking targets close to panels " + "after %g seconds", mark_elapsed) return (found_target_close_to_panel == 1).all().get() @@ -582,7 +590,8 @@ class TargetAssociationWrangler(TreeWranglerBase): wait_for.extend(min_dist_to_center.events) - logger.info("target association: finding centers for targets") + logger.debug("target association: finding centers for targets") + center_find_start_time = time() evt = knl( *unwrap_args( @@ -613,8 +622,15 @@ class TargetAssociationWrangler(TreeWranglerBase): .format(ntargets_associated)) cl.wait_for_events([evt]) - logger.info("target association: done finding centers for targets") - return + + center_find_elapsed = time() - center_find_start_time + if center_find_elapsed > 0.1: + done_logger = logger.info + else: + done_logger = logger.debug + + done_logger("target association: done finding centers for targets " + "after %g seconds", center_find_elapsed) def mark_panels_for_refinement(self, tree, peer_lists, lpot_source, target_status, refine_flags, debug, @@ -653,7 +669,8 @@ class TargetAssociationWrangler(TreeWranglerBase): wait_for=wait_for) wait_for = [evt] - logger.info("target association: marking panels for refinement") + logger.debug("target association: marking panels for refinement") + mark_start_time = time() evt = knl( *unwrap_args( @@ -684,7 +701,14 @@ class TargetAssociationWrangler(TreeWranglerBase): cl.wait_for_events([evt]) - logger.info("target association: done marking panels for refinement") + mark_elapsed = time() - mark_start_time + if mark_elapsed > 0.1: + done_logger = logger.info + else: + done_logger = logger.debug + + done_logger("target association: done marking panels for refinement " + "after %g seconds", mark_elapsed) return (found_panel_to_refine == 1).all().get() diff --git a/pytential/qbx/utils.py b/pytential/qbx/utils.py index 
d03e8365fa188d55a04a78c41b56279af883a318..6673b450bb7ed3cb2cdf043e95963c0930fab89a 100644
--- a/pytential/qbx/utils.py
+++ b/pytential/qbx/utils.py
@@ -32,6 +32,7 @@ from boxtree.tree import Tree
 import pyopencl as cl
 import pyopencl.array  # noqa
 from pytools import memoize, memoize_method
+from loopy.version import MOST_RECENT_LANGUAGE_VERSION
 
 import logging
 logger = logging.getLogger(__name__)
@@ -84,7 +85,8 @@ def get_interleaver_kernel(dtype):
             lp.GlobalArg("dst", shape=(var("dstlen"),), dtype=dtype),
             "..."
             ],
-        assumptions="2*srclen = dstlen")
+        assumptions="2*srclen = dstlen",
+        lang_version=MOST_RECENT_LANGUAGE_VERSION)
 
     knl = lp.split_iname(knl, "i", 128, inner_tag="l.0", outer_tag="g.0")
     return knl
@@ -154,6 +156,11 @@ class TreeCodeContainer(object):
         from boxtree.area_query import PeerListFinder
         return PeerListFinder(self.cl_context)
 
+    @memoize_method
+    def particle_list_filter(self):
+        from boxtree.tree import ParticleListFilter
+        return ParticleListFilter(self.cl_context)
+
 # }}}
@@ -170,6 +177,9 @@ class TreeCodeContainerMixin(object):
     def peer_list_finder(self):
         return self.tree_code_container.peer_list_finder()
 
+    def particle_list_filter(self):
+        return self.tree_code_container.particle_list_filter()
+
 # }}}
@@ -180,9 +190,10 @@ class TreeWranglerBase(object):
 
     def build_tree(self, lpot_source, targets_list=(),
             use_stage2_discr=False):
         tb = self.code_container.build_tree()
+        plfilt = self.code_container.particle_list_filter()
         from pytential.qbx.utils import build_tree_with_qbx_metadata
         return build_tree_with_qbx_metadata(
-                self.queue, tb, lpot_source, targets_list=targets_list,
+                self.queue, tb, plfilt, lpot_source, targets_list=targets_list,
                 use_stage2_discr=use_stage2_discr)
 
     def find_peer_lists(self, tree):
@@ -208,7 +219,8 @@ def panel_sizes(discr, last_dim_length):
     knl = lp.make_kernel( "{[i,j,k]: 0<=i`_ for derivation.
-    Uses :math:`E(x,t) = Re \lbrace E(x) \exp(-i \omega t) \rbrace` and
-    :math:`H(x,t) = Re \lbrace H(x) \exp(-i \omega t) \rbrace` and solves for
+    Uses :math:`E(x, t) = Re \lbrace E(x) \exp(-i \omega t) \rbrace` and
+    :math:`H(x, t) = Re \lbrace H(x) \exp(-i \omega t) \rbrace` and solves for
     the :math:`E(x)`, :math:`H(x)` fields using vector and scalar potentials via
-    the Lorenz Gauage. The DPIE formulates the problem purely in terms of the
-    vector and scalar potentials, :math:`\boldsymbol{A}` and :math:`\phi`,
-    and then backs out :math:`E(x)` and :math:`H(x)` via relationships to
+    the Lorenz gauge. The DPIE formulates the problem purely in terms of the
+    vector and scalar potentials, :math:`\boldsymbol{A}` and :math:`\phi`,
+    and then backs out :math:`E(x)` and :math:`H(x)` via relationships to
     the vector and scalar potentials.
""" @@ -61,15 +55,15 @@ class DPIEOperator: from sumpy.kernel import HelmholtzKernel # specify the frequency variable that will be tuned - self.k = k - self.stype = type(self.k) + self.k = k + self.stype = type(self.k) - # specify the 3-D Helmholtz kernel - self.kernel = HelmholtzKernel(3) + # specify the 3-D Helmholtz kernel + self.kernel = HelmholtzKernel(3) # specify a list of strings representing geometry objects - self.geometry_list = geometry_list - self.nobjs = len(geometry_list) + self.geometry_list = geometry_list + self.nobjs = len(geometry_list) def num_distinct_objects(self): return self.nobjs @@ -84,17 +78,16 @@ class DPIEOperator: return 2*len(self.geometry_list) def get_vector_domain_list(self): - """ - Method to return domain list that will be used within the scipy_op method to - solve the system of discretized integral equations. What is returned should just - be a list with values that are strings or None. + """Method to return domain list that will be used within the scipy_op + method to solve the system of discretized integral equations. What is + returned should just be a list with values that are strings or None. """ # initialize domain list domain_list = [None]*self.num_vector_potential_densities() # get strings for the actual densities - for n in range(0,self.nobjs): + for n in range(0, self.nobjs): # grab nth location identifier location = self.geometry_list[n] + "t" @@ -110,17 +103,16 @@ class DPIEOperator: return domain_list def get_scalar_domain_list(self): - """ - Method to return domain list that will be used within the scipy_op method to - solve the system of discretized integral equations. What is returned should just - be a list with values that are strings or None. + """Method to return domain list that will be used within the scipy_op + method to solve the system of discretized integral equations. What is + returned should just be a list with values that are strings or None. """ # initialize domain list domain_list = [None]*self.num_scalar_potential_densities() # get strings for the actual densities - for n in range(0,self.nobjs): + for n in range(0, self.nobjs): # grab nth location identifier location = self.geometry_list[n] + "t" @@ -132,17 +124,16 @@ class DPIEOperator: return domain_list def get_subproblem_domain_list(self): - """ - Method to return domain list that will be used within the scipy_op method to - solve the system of discretized integral equations. What is returned should just - be a list with values that are strings or None. + """Method to return domain list that will be used within the scipy_op + method to solve the system of discretized integral equations. What is + returned should just be a list with values that are strings or None. 
""" # initialize domain list domain_list = [None]*self.nobjs # get strings for the actual densities - for n in range(0,self.nobjs): + for n in range(0, self.nobjs): # grab nth location identifier location = self.geometry_list[n] + "t" @@ -153,10 +144,10 @@ class DPIEOperator: # return the domain list return domain_list - - def _layerpot_op(self,layerpot_op,density_vec, target=None, qfl="avg", k=None, kernel=None, use_laplace=False): - """ - Generic layer potential operator method that works across all objects within the DPIE model + def _layerpot_op(self, layerpot_op, density_vec, target=None, qfl="avg", k=None, + kernel=None, use_laplace=False): + """Generic layer potential operator method that works across all + objects within the DPIE model """ if kernel is None: kernel = self.kernel @@ -168,9 +159,11 @@ class DPIEOperator: if not use_laplace: kargs['k'] = k - # define a convenient integral operator that functions across the multiple objects + # define a convenient integral operator that functions across the + # multiple objects def int_op(idx): - return layerpot_op(kernel, density_vec[:,idx],qbx_forced_limit=qfl,source=self.geometry_list[idx],target=target, **kargs) + return layerpot_op(kernel, density_vec[:, idx], qbx_forced_limit=qfl, + source=self.geometry_list[idx], target=target, **kargs) # get the shape of density_vec (ndim, nobj) = density_vec.shape @@ -180,7 +173,7 @@ class DPIEOperator: # compute individual double layer potential evaluations at the given # density across all the disjoint objects - for i in range(0,nobj): + for i in range(0, nobj): output = output + int_op(i) # return the output summation @@ -189,42 +182,56 @@ class DPIEOperator: else: return output - def D(self, density_vec, target=None, qfl="avg", k=None, kernel=None, use_laplace=False): + def D(self, density_vec, target=None, qfl="avg", k=None, kernel=None, + use_laplace=False): """ Double layer potential operator across multiple disjoint objects """ - return self._layerpot_op(layerpot_op=sym.D, density_vec=density_vec, target=target, qfl=qfl, k=k, kernel=kernel, use_laplace=use_laplace) + return self._layerpot_op(layerpot_op=sym.D, density_vec=density_vec, + target=target, qfl=qfl, k=k, kernel=kernel, + use_laplace=use_laplace) - def S(self, density_vec, target=None, qfl="avg", k=None, kernel=None, use_laplace=False): + def S(self, density_vec, target=None, qfl="avg", k=None, kernel=None, + use_laplace=False): """ Single layer potential operator across multiple disjoint objects """ - return self._layerpot_op(layerpot_op=sym.S, density_vec=density_vec, target=target, qfl=qfl, k=k, kernel=kernel, use_laplace=use_laplace) - + return self._layerpot_op(layerpot_op=sym.S, density_vec=density_vec, + target=target, qfl=qfl, k=k, kernel=kernel, + use_laplace=use_laplace) - def Dp(self, density_vec, target=None, qfl="avg", k=None, kernel=None, use_laplace=False): + def Dp(self, density_vec, target=None, qfl="avg", k=None, kernel=None, + use_laplace=False): """ D' layer potential operator across multiple disjoint objects """ - return self._layerpot_op(layerpot_op=sym.Dp, density_vec=density_vec, target=target, qfl=qfl, k=k, kernel=kernel, use_laplace=use_laplace) + return self._layerpot_op(layerpot_op=sym.Dp, density_vec=density_vec, + target=target, qfl=qfl, k=k, kernel=kernel, + use_laplace=use_laplace) - def Sp(self, density_vec, target=None, qfl="avg", k=None, kernel=None, use_laplace=False): + def Sp(self, density_vec, target=None, qfl="avg", k=None, kernel=None, + use_laplace=False): """ S' layer potential 
operator across multiple disjoint objects
        """
-        return self._layerpot_op(layerpot_op=sym.Sp, density_vec=density_vec, target=target, qfl=qfl, k=k, kernel=kernel, use_laplace=use_laplace)
+        return self._layerpot_op(layerpot_op=sym.Sp, density_vec=density_vec,
+                target=target, qfl=qfl, k=k, kernel=kernel,
+                use_laplace=use_laplace)
 
     def n_cross(self, density_vec):
-        r"""
-        This method is such that an cross(n,a) can operate across vectors
-        a and n that are local to a set of disjoint source surfaces. Essentially,
-        imagine that :math:`\bar{a} = [a_1, a_2, \cdots, a_m]`, where :math:`a_k` represents a vector density
-        defined on the :math:`k^{th}` disjoint object. Also imagine that :math:`bar{n} = [n_1, n_2, \cdots, n_m]`,
-        where :math:`n_k` represents a normal that exists on the :math:`k^{th}` disjoint object. The goal, then,
-        is to have an operator that does element-wise cross products.. ie:
+        r"""This method is such that ``cross(n, a)`` can operate across vectors
+        a and n that are local to a set of disjoint source surfaces.
+        Essentially, imagine that :math:`\bar{a} = [a_1, a_2, \cdots, a_m]`,
+        where :math:`a_k` represents a vector density defined on the
+        :math:`k^{th}` disjoint object. Also imagine that :math:`\bar{n} = [n_1,
+        n_2, \cdots, n_m]`, where :math:`n_k` represents a normal that exists
+        on the :math:`k^{th}` disjoint object. The goal, then, is to have an
+        operator that does element-wise cross products, i.e.:
 
         .. math::
-            \bar{n} \times \bar{a}) = [ \left(n_1 \times a_1\right), ..., \left(n_m \times a_m \right)]
+
+            \bar{n} \times \bar{a} = [ \left(n_1 \times a_1\right), \dots,
+            \left(n_m \times a_m \right)]
         """
 
         # specify the sources to be evaluated at
@@ -241,23 +248,28 @@ class DPIEOperator:
 
         # loop through the density and sources to construct the appropriate
         # element-wise cross product operation
-        for k in range(0,nobj):
-            output[:,k] = sym.n_cross(density_vec[:,k],where=sources[k])
+        for k in range(0, nobj):
+            output[:, k] = sym.n_cross(density_vec[:, k], where=sources[k])
 
         # return result from element-wise cross product
         return output
 
     def n_times(self, density_vec):
-        r"""
-        This method is such that an :math:`\boldsymbol{n} \rho`, for some normal :math:`\boldsymbol{n}` and
-        some scalar :math:`\rho` can be done across normals and scalars that exist on multiple surfaces. Essentially,
-        imagine that :math:`\bar{\rho} = [\rho_1, \cdots, \rho_m]`, where :math:`\rho_k` represents a scalar density
-        defined on the :math:`k^{th}` disjoint object. Also imagine that :math:`bar{n} = [\boldsymbol{n}_1, \cdots, \boldsymbol{n}_m]`,
-        where :math:`n_k` represents a normal that exists on the :math:`k^{th}` disjoint object. The goal, then,
-        is to have an operator that does element-wise products.. ie:
+        r"""This method is such that an :math:`\boldsymbol{n} \rho`, for some
+        normal :math:`\boldsymbol{n}` and some scalar :math:`\rho` can be done
+        across normals and scalars that exist on multiple surfaces.
+        Essentially, imagine that :math:`\bar{\rho} = [\rho_1, \cdots,
+        \rho_m]`, where :math:`\rho_k` represents a scalar density defined on
+        the :math:`k^{th}` disjoint object. Also imagine that :math:`\bar{n} =
+        [\boldsymbol{n}_1, \cdots, \boldsymbol{n}_m]`, where :math:`n_k`
+        represents a normal that exists on the :math:`k^{th}` disjoint object.
+        The goal, then, is to have an operator that does element-wise
+        products, i.e.:
 
        ..
math:: - \bar{n}\bar{\rho} = [ \left(\boldsymbol{n}_1 \rho_1\right), ..., \left(\boldsymbol{n}_m \rho_m \right)] + + \bar{n}\bar{\rho} = [ \left(\boldsymbol{n}_1 \rho_1\right), \dots, + \left(\boldsymbol{n}_m \rho_m \right)] """ # specify the sources to be evaluated at @@ -270,69 +282,77 @@ class DPIEOperator: assert ndim == 1 # init output symbolic quantity with zeros - output = np.zeros((3,nobj), dtype=self.stype) + output = np.zeros((3, nobj), dtype=self.stype) # loop through the density and sources to construct the appropriate # element-wise cross product operation - for k in range(0,nobj): - output[:,k] = sym.normal(3,where=sources[k]).as_vector() * density_vec[0,k] + for k in range(0, nobj): + output[:, k] = \ + sym.normal(3, where=sources[k]).as_vector() * density_vec[0, k] # return result from element-wise cross product return output - def _extract_phi_densities(self,phi_densities): - return (phi_densities[:self.nobjs],phi_densities[:self.nobjs].reshape((1,self.nobjs)),phi_densities[self.nobjs:]) + def _extract_phi_densities(self, phi_densities): + return (phi_densities[:self.nobjs], + phi_densities[:self.nobjs].reshape((1, self.nobjs)), + phi_densities[self.nobjs:]) - def _extract_tau_densities(self,tau_densities): - return (tau_densities,tau_densities.reshape((1,self.nobjs))) + def _extract_tau_densities(self, tau_densities): + return (tau_densities, tau_densities.reshape((1, self.nobjs))) - def _extract_a_densities(self,A_densities): + def _extract_a_densities(self, A_densities): a0 = A_densities[:(2*self.nobjs)] - a = np.zeros((3,self.nobjs),dtype=self.stype) + a = np.zeros((3, self.nobjs), dtype=self.stype) rho0 = A_densities[(2*self.nobjs):(3*self.nobjs)] - rho = rho0.reshape((1,self.nobjs)) + rho = rho0.reshape((1, self.nobjs)) v = A_densities[(3*self.nobjs):] - for n in range(0,self.nobjs): - a[:,n] = cse(sym.tangential_to_xyz(a0[2*n:2*(n+1)],where=self.geometry_list[n]),"axyz_{0}".format(n)) + for n in range(0, self.nobjs): + a[:, n] = cse(sym.tangential_to_xyz(a0[2*n:2*(n+1)], + where=self.geometry_list[n]), "axyz_{0}".format(n)) return (a0, a, rho0, rho, v) def _L(self, a, rho, where): # define some useful common sub expressions - Sa = cse(self.S(a,where),"Sa_"+where) - Srho = cse(self.S(rho,where),"Srho_"+where) - Sn_times_rho = cse(self.S(self.n_times(rho),where),"Sn_times_rho_"+where) - Sn_cross_a = cse(self.S(self.n_cross(a),where),"Sn_cross_a_"+where) - Drho = cse(self.D(rho,where),"Drho_"+where) + # Sa = cse(self.S(a, where), "Sa_"+where) + # Srho = cse(self.S(rho, where), "Srho_"+where) + Sn_times_rho = cse(self.S(self.n_times(rho), where), "Sn_times_rho_"+where) + # Sn_cross_a = cse(self.S(self.n_cross(a), where), "Sn_cross_a_"+where) + Drho = cse(self.D(rho, where), "Drho_"+where) return sym.join_fields( - sym.n_cross(sym.curl(self.S(a,where)) - self.k * Sn_times_rho,where=where), + sym.n_cross( + sym.curl(self.S(a, where)) - self.k * Sn_times_rho, + where=where), Drho) def _R(self, a, rho, where): # define some useful common sub expressions - Sa = cse(self.S(a,where),"Sa_"+where) - Srho = cse(self.S(rho,where),"Srho_"+where) - Sn_times_rho = cse(self.S(self.n_times(rho),where),"Sn_times_rho_"+where) - Sn_cross_a = cse(self.S(self.n_cross(a),where),"Sn_cross_a_"+where) - Drho = cse(self.D(rho,where),"Drho_"+where) + # Sa = cse(self.S(a, where), "Sa_"+where) + Srho = cse(self.S(rho, where), "Srho_"+where) + # Sn_times_rho = cse(self.S(self.n_times(rho), where), "Sn_times_rho_"+where) + Sn_cross_a = cse(self.S(self.n_cross(a), where), "Sn_cross_a_"+where) + # Drho 
= cse(self.D(rho, where), "Drho_"+where) return sym.join_fields( - sym.n_cross( self.k * Sn_cross_a + sym.grad(ambient_dim=3,operand=self.S(rho,where)),where=where), - sym.div(self.S(self.n_cross(a),where)) - self.k * Srho + sym.n_cross(self.k * Sn_cross_a + sym.grad(ambient_dim=3, + operand=self.S(rho, where)), where=where), + sym.div(self.S(self.n_cross(a), where)) - self.k * Srho ) def _scaledDPIEs_integral(self, sigma, sigma_n, where): - qfl="avg" + qfl = "avg" return sym.integral( ambient_dim=3, dim=2, - operand=(self.Dp(sigma,target=where,qfl=qfl)/self.k + 1j*0.5*sigma_n - 1j*self.Sp(sigma,target=where,qfl=qfl)), + operand=(self.Dp(sigma, target=where, qfl=qfl)/self.k + + 1j*0.5*sigma_n - 1j*self.Sp(sigma, target=where, qfl=qfl)), where=where) def _scaledDPIEv_integral(self, **kwargs): - qfl="avg" + qfl = "avg" # grab densities and domain to integrate over a = kwargs['a'] @@ -341,49 +361,55 @@ class DPIEOperator: where = kwargs['where'] # define some useful common sub expressions - Sa = cse(self.S(a,where),"Sa_"+where) - Srho = cse(self.S(rho,where),"Srho_"+where) - Sn_times_rho = cse(self.S(self.n_times(rho),where),"Sn_times_rho_"+where) - Sn_cross_a = cse(self.S(self.n_cross(a),where),"Sn_cross_a_"+where) - Drho = cse(self.D(rho,where),"Drho_"+where) + # Sa = cse(self.S(a, where), "Sa_"+where) + # Srho = cse(self.S(rho, where), "Srho_"+where) + Sn_times_rho = cse(self.S(self.n_times(rho), where), "Sn_times_rho_"+where) + Sn_cross_a = cse(self.S(self.n_cross(a), where), "Sn_cross_a_"+where) + # Drho = cse(self.D(rho, where), "Drho_"+where) return sym.integral( ambient_dim=3, dim=2, operand=( - sym.n_dot( sym.curl(self.S(a,where)),where=where) - self.k*sym.n_dot(Sn_times_rho,where=where) \ - + 1j*(self.k*sym.n_dot(Sn_cross_a,where=where) - 0.5*rho_n + self.Sp(rho,target=where,qfl=qfl)) + sym.n_dot(sym.curl(self.S(a, where)), where=where) - + self.k*sym.n_dot(Sn_times_rho, where=where) + + 1j*(self.k*sym.n_dot(Sn_cross_a, where=where) - 0.5*rho_n + + self.Sp(rho, target=where, qfl=qfl)) ), where=where) - def phi_operator(self, phi_densities): """ Integral Equation operator for obtaining scalar potential, `phi` """ # extract the densities needed to solve the system of equations - (sigma0,sigma,V) = self._extract_phi_densities(phi_densities) + (sigma0, sigma, V) = self._extract_phi_densities(phi_densities) # init output matvec vector for the phi density IE output = np.zeros((2*self.nobjs,), dtype=self.stype) # produce integral equation system over each disjoint object - for n in range(0,self.nobjs): + for n in range(0, self.nobjs): - # get nth disjoint object + # get nth disjoint object obj_n = self.geometry_list[n] # setup IE for evaluation over the nth disjoint object's surface - output[n] = 0.5*sigma0[n] + self.D(sigma,obj_n) - 1j*self.k*self.S(sigma,obj_n) - V[n] + output[n] = ( + 0.5*sigma0[n] + + self.D(sigma, obj_n) + - 1j*self.k*self.S(sigma, obj_n) - V[n] + ) - # setup equation that integrates some integral operators over the nth surface - output[self.nobjs + n] = self._scaledDPIEs_integral(sigma,sigma[0,n],where=obj_n) + # set up equation that integrates some integral operators over the + # nth surface + output[self.nobjs + n] = self._scaledDPIEs_integral(sigma, sigma[0, + n], where=obj_n) # return the resulting system of IE return output - def phi_rhs(self, phi_inc, gradphi_inc): """ The Right-Hand-Side for the Integral Equation for `phi` @@ -391,18 +417,18 @@ class DPIEOperator: # get the scalar f expression for each object f = np.zeros((self.nobjs,), dtype=self.stype) - for 
i in range(0,self.nobjs): + for i in range(0, self.nobjs): f[i] = -phi_inc[i] # get the Q_{j} terms inside RHS expression Q = np.zeros((self.nobjs,), dtype=self.stype) - for i in range(0,self.nobjs): - Q[i] = -sym.integral(3,2,sym.n_dot(gradphi_inc,where=self.geometry_list[i]),where=self.geometry_list[i]) + for i in range(0, self.nobjs): + Q[i] = -sym.integral(3, 2, sym.n_dot(gradphi_inc, + where=self.geometry_list[i]), where=self.geometry_list[i]) # return the resulting field return sym.join_fields(f, Q/self.k) - def a_operator(self, A_densities): """ Integral Equation operator for obtaining vector potential, `A` @@ -415,7 +441,7 @@ class DPIEOperator: output = np.zeros((4*self.nobjs,), dtype=self.stype) # produce integral equation system over each disjoint object - for n in range(0,self.nobjs): + for n in range(0, self.nobjs): # get the nth target geometry to have IE solved across obj_n = self.geometry_list[n] @@ -426,14 +452,17 @@ class DPIEOperator: # generate the set of equations for the vector densities, a, coupled # across the various geometries involved - output[2*n:2*(n+1)] = xyz_to_tangential(0.5*a[:,n] + L[:3] + 1j*R[:3], where=obj_n) + output[2*n:2*(n+1)] = sym.xyz_to_tangential( + 0.5*a[:, n] + L[:3] + 1j*R[:3], + where=obj_n) # generate the set of equations for the scalar densities, rho, coupled # across the various geometries involved - output[(2*self.nobjs + n)] = 0.5*rho[0,n] + L[-1] + 1j*R[-1] - v[n] + output[(2*self.nobjs + n)] = 0.5*rho[0, n] + L[-1] + 1j*R[-1] - v[n] # add the equation that integrates everything out into some constant - output[3*self.nobjs + n] = self._scaledDPIEv_integral(a=a, rho=rho, rho_n=rho[0,n], where=obj_n) + output[3*self.nobjs + n] = self._scaledDPIEv_integral( + a=a, rho=rho, rho_n=rho[0, n], where=obj_n) # return output equations return output @@ -447,16 +476,18 @@ class DPIEOperator: q = np.zeros((self.nobjs,), dtype=self.stype) h = np.zeros((self.nobjs,), dtype=self.stype) f = np.zeros((2*self.nobjs,), dtype=self.stype) - for i in range(0,self.nobjs): + for i in range(0, self.nobjs): obj_n = self.geometry_list[i] - q[i] = -sym.integral(3,2,sym.n_dot(A_inc[3*i:3*(i+1)],where=obj_n),where=obj_n) + q[i] = -sym.integral(3, 2, sym.n_dot(A_inc[3*i:3*(i+1)], where=obj_n), + where=obj_n) h[i] = -divA_inc[i] - f[2*i:2*(i+1)] = xyz_to_tangential(-sym.n_cross(A_inc[3*i:3*(i+1)],where=obj_n),where=obj_n) + f[2*i:2*(i+1)] = sym.xyz_to_tangential( + -sym.n_cross(A_inc[3*i:3*(i+1)], where=obj_n), where=obj_n) # define RHS for `A` integral equation system - return sym.join_fields( f, h/self.k, q ) + return sym.join_fields(f, h/self.k, q) - def subproblem_operator(self, tau_densities, alpha = 1j): + def subproblem_operator(self, tau_densities, alpha=1j): """ Integral Equation operator for obtaining sub problem solution """ @@ -468,13 +499,13 @@ class DPIEOperator: output = np.zeros((self.nobjs,), dtype=self.stype) # produce integral equation system over each disjoint object - for n in range(0,self.nobjs): + for n in range(0, self.nobjs): - # get nth disjoint object + # get nth disjoint object obj_n = self.geometry_list[n] # setup IE for evaluation over the nth disjoint object's surface - output[n] = 0.5*tau0[n] + self.D(tau,obj_n) - alpha*self.S(tau,obj_n) + output[n] = 0.5*tau0[n] + self.D(tau, obj_n) - alpha*self.S(tau, obj_n) # return the resulting system of IE return output @@ -490,13 +521,13 @@ class DPIEOperator: output = np.zeros((self.nobjs,), dtype=self.stype) # produce integral equation system over each disjoint object - for n in 
range(0,self.nobjs): + for n in range(0, self.nobjs): - # get nth disjoint object + # get nth disjoint object obj_n = self.geometry_list[n] # setup IE for evaluation over the nth disjoint object's surface - output[n] = sym.div(self.S(a,target=obj_n,qfl="avg")) + output[n] = sym.div(self.S(a, target=obj_n, qfl="avg")) # return the resulting system of IE return output @@ -510,9 +541,9 @@ class DPIEOperator: output = np.zeros((self.nobjs,), dtype=self.stype) # produce integral equation system over each disjoint object - for n in range(0,self.nobjs): + for n in range(0, self.nobjs): - # get nth disjoint object + # get nth disjoint object obj_n = self.geometry_list[n] # setup IE for evaluation over the nth disjoint object's surface @@ -521,7 +552,6 @@ class DPIEOperator: # return the resulting system of IE return output - def scalar_potential_rep(self, phi_densities, target=None, qfl=None): """ This method is a representation of the scalar potential, phi, @@ -529,10 +559,13 @@ class DPIEOperator: """ # extract the densities needed to solve the system of equations - (sigma0,sigma,V) = self._extract_phi_densities(phi_densities) + (sigma0, sigma, V) = self._extract_phi_densities(phi_densities) # evaluate scalar potential representation - return self.D(sigma,target,qfl=qfl) - (1j*self.k)*self.S(sigma,target,qfl=qfl) + return ( + self.D(sigma, target, qfl=qfl) + - (1j*self.k)*self.S(sigma, target, qfl=qfl) + ) def scalar_potential_constants(self, phi_densities): """ @@ -541,7 +574,7 @@ class DPIEOperator: """ # extract the densities needed to solve the system of equations - (sigma0,sigma,V) = self._extract_phi_densities(phi_densities) + (sigma0, sigma, V) = self._extract_phi_densities(phi_densities) # evaluate scalar potential representation return V @@ -553,10 +586,13 @@ class DPIEOperator: """ # extract the densities needed to solve the system of equations - (sigma0,sigma,V) = self._extract_phi_densities(phi_densities) + (sigma0, sigma, V) = self._extract_phi_densities(phi_densities) # evaluate scalar potential representation - return sym.grad(3,self.D(sigma,target,qfl=qfl)) - (1j*self.k)*sym.grad(3,self.S(sigma,target,qfl=qfl)) + return ( + sym.grad(3, self.D(sigma, target, qfl=qfl)) + - (1j*self.k)*sym.grad(3, self.S(sigma, target, qfl=qfl)) + ) def vector_potential_rep(self, A_densities, target=None, qfl=None): """ @@ -568,8 +604,12 @@ class DPIEOperator: (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) # define the vector potential representation - return (sym.curl(self.S(a,target,qfl=qfl)) - self.k*self.S(self.n_times(rho),target,qfl=qfl)) \ - + 1j*(self.k*self.S(self.n_cross(a),target,qfl=qfl) + sym.grad(3,self.S(rho,target,qfl=qfl))) + return ( + (sym.curl(self.S(a, target, qfl=qfl)) + - self.k*self.S(self.n_times(rho), target, qfl=qfl)) + + 1j*(self.k*self.S(self.n_cross(a), target, qfl=qfl) + + sym.grad(3, self.S(rho, target, qfl=qfl))) + ) def div_vector_potential_rep(self, A_densities, target=None, qfl=None): """ @@ -581,10 +621,11 @@ class DPIEOperator: (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) # define the vector potential representation - return self.k*( self.D(rho,target,qfl=qfl) \ - + 1j*(sym.div(self.S(self.n_cross(a),target,qfl=qfl)) - self.k * self.S(rho,target,qfl=qfl))) + return self.k*(self.D(rho, target, qfl=qfl) + + 1j*(sym.div(self.S(self.n_cross(a), target, qfl=qfl)) + - self.k * self.S(rho, target, qfl=qfl))) - def subproblem_rep(self, tau_densities, target=None, alpha = 1j, qfl=None): + def subproblem_rep(self, tau_densities, 
target=None, alpha=1j, qfl=None):
        """
        This method is a representation of the scalar potential, phi,
        based on the density `sigma`.
@@ -594,13 +635,14 @@ class DPIEOperator:
         (tau0, tau) = self._extract_tau_densities(tau_densities)
 
         # evaluate scalar potential representation
-        return self.D(tau,target,qfl=qfl) - alpha*self.S(tau,target,qfl=qfl)
+        return self.D(tau, target, qfl=qfl) - alpha*self.S(tau, target, qfl=qfl)
 
-    def scattered_volume_field(self, phi_densities, A_densities, tau_densities, target=None, alpha=1j,qfl=None):
+    def scattered_volume_field(self, phi_densities, A_densities, tau_densities,
+            target=None, alpha=1j, qfl=None):
         """
         This will return an object of six entries, the first three of which
         represent the electric, and the second three of which represent the
-        magnetic field.
+        magnetic field.
 
         This satisfies the time-domain Maxwell's equations
         as verified by :func:`sumpy.point_calculus.frequency_domain_maxwell`.
@@ -608,20 +650,27 @@ class DPIEOperator:
 
         # extract the densities needed
         (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities)
-        (sigma0,sigma, V) = self._extract_phi_densities(phi_densities)
+        (sigma0, sigma, V) = self._extract_phi_densities(phi_densities)
         (tau0, tau) = self._extract_tau_densities(tau_densities)
 
         # obtain expressions for scalar and vector potentials
-        A   = self.vector_potential_rep(A_densities, target=target)
-        phi = self.scalar_potential_rep(phi_densities, target=target)
+        A = self.vector_potential_rep(A_densities, target=target)
+        # phi = self.scalar_potential_rep(phi_densities, target=target)
 
         # evaluate the potential form for the electric and magnetic fields
-        E_scat = 1j*self.k*A - sym.grad(3, self.D(sigma,target,qfl=qfl)) + 1j*self.k*sym.grad(3, self.S(sigma,target,qfl=qfl))
-        H_scat = sym.grad(3,operand=(self.D(tau,target,qfl=qfl) - alpha*self.S(tau,target,qfl=qfl))) \
-            + (self.k**2) * self.S(a,target,qfl=qfl) \
-            - self.k * sym.curl(self.S(self.n_times(rho),target,qfl=qfl)) \
-            + 1j*self.k*sym.curl(self.S(self.n_cross(a),target,qfl=qfl))
-
+        E_scat = (
+            1j*self.k*A
+            - sym.grad(3, self.D(sigma, target, qfl=qfl))
+            + 1j*self.k*sym.grad(3, self.S(sigma, target, qfl=qfl))
+            )
+        H_scat = (
+            sym.grad(3, operand=(
+                self.D(tau, target, qfl=qfl)
+                - alpha*self.S(tau, target, qfl=qfl)))
+            + (self.k**2) * self.S(a, target, qfl=qfl)
+            - self.k * sym.curl(self.S(self.n_times(rho), target, qfl=qfl))
+            + 1j*self.k*sym.curl(self.S(self.n_cross(a), target, qfl=qfl))
+            )
 
         # join the fields into a vector
         return sym.join_fields(E_scat, H_scat)
@@ -629,21 +678,22 @@ class DPIEOperator:
 
 # }}}
 
-# {{{ Decoupled Potential Integral Equation Operator - Based on Journal Paper
+
 class DPIEOperatorEvanescent(DPIEOperator):
     r"""
     Decoupled Potential Integral Equation operator with PEC boundary
     conditions, defaults as scaled DPIE.
 
-    See https://onlinelibrary.wiley.com/doi/abs/10.1002/cpa.21585 for journal paper.
+    See `the journal paper
+    <https://onlinelibrary.wiley.com/doi/abs/10.1002/cpa.21585>`_ for details.
 
-    Uses :math:`E(x,t) = Re \lbrace E(x) \exp(-i \omega t) \rbrace` and
-    :math:`H(x,t) = Re \lbrace H(x) \exp(-i \omega t) \rbrace` and solves for
+    Uses :math:`E(x, t) = Re \lbrace E(x) \exp(-i \omega t) \rbrace` and
+    :math:`H(x, t) = Re \lbrace H(x) \exp(-i \omega t) \rbrace` and solves for
     the :math:`E(x)`, :math:`H(x)` fields using vector and scalar potentials via
-    the Lorenz Gauage. The DPIE formulates the problem purely in terms of the
-    vector and scalar potentials, :math:`\boldsymbol{A}` and :math:`\phi`,
-    and then backs out :math:`E(x)` and :math:`H(x)` via relationships to
+    the Lorenz gauge.
The DPIE formulates the problem purely in terms of the + vector and scalar potentials, :math:`\boldsymbol{A}` and :math:`\phi`, + and then backs out :math:`E(x)` and :math:`H(x)` via relationships to the vector and scalar potentials. """ @@ -652,83 +702,102 @@ class DPIEOperatorEvanescent(DPIEOperator): from sumpy.kernel import LaplaceKernel # specify the frequency variable that will be tuned - self.k = k - self.ik = 1j*k - self.stype = type(self.k) + self.k = k + self.ik = 1j*k + self.stype = type(self.k) - # specify the 3-D Helmholtz kernel - self.kernel = HelmholtzKernel(3) - self.kernel_ik = HelmholtzKernel(3, allow_evanescent=True) + # specify the 3-D Helmholtz kernel + self.kernel = HelmholtzKernel(3) + self.kernel_ik = HelmholtzKernel(3, allow_evanescent=True) self.kernel_laplace = LaplaceKernel(3) # specify a list of strings representing geometry objects - self.geometry_list = geometry_list - self.nobjs = len(geometry_list) + self.geometry_list = geometry_list + self.nobjs = len(geometry_list) def _eval_all_objects(self, density_vec, int_op, qfl="avg", k=None, kernel=None): - """ - This private method is so some input integral operator and input density can be used to - evaluate the set of locations defined by the geometry list + """This private method is so some input integral operator and input + density can be used to evaluate the set of locations defined by the + geometry list """ output = np.zeros(density_vec.shape, dtype=self.stype) (ndim, nobj) = density_vec.shape - for i in range(0,nobj): - output[:,i] = int_op(density_vec=density_vec, target=self.geometry_list[i], qfl=qfl, k=k, kernel=kernel) + for i in range(0, nobj): + output[:, i] = int_op(density_vec=density_vec, + target=self.geometry_list[i], qfl=qfl, k=k, kernel=kernel) return output def _L(self, a, rho, where): # define some useful common sub expressions - Sn_times_rho = cse(self.S(self.n_times(rho),where),"Sn_times_rho_"+where) - Drho = cse(self.D(rho,where),"Drho_"+where) + Sn_times_rho = cse(self.S(self.n_times(rho), where), "Sn_times_rho_"+where) + Drho = cse(self.D(rho, where), "Drho_"+where) return sym.join_fields( - sym.n_cross(sym.curl(self.S(a,where)) - self.k * Sn_times_rho,where=where), + sym.n_cross( + sym.curl(self.S(a, where)) - self.k * Sn_times_rho, + where=where), Drho) def _R(self, a, rho, where): # define some useful common sub expressions - Sa_ik_nest = cse(self._eval_all_objects(a, self.S, k=self.ik, kernel=self.kernel_ik), "Sa_ik_nest") - Srho_ik_nest = cse(self._eval_all_objects(rho,self.S, k=self.ik, kernel=self.kernel_ik),"Srho_ik_nest") - Srho = cse(self.S(Srho_ik_nest,where),"Srho_"+where) - Sn_cross_a = cse(self.S(self.n_cross(Sa_ik_nest),where),"Sn_cross_a_"+where) + Sa_ik_nest = cse(self._eval_all_objects(a, self.S, k=self.ik, + kernel=self.kernel_ik), "Sa_ik_nest") + Srho_ik_nest = cse(self._eval_all_objects(rho, self.S, k=self.ik, + kernel=self.kernel_ik), "Srho_ik_nest") + Srho = cse(self.S(Srho_ik_nest, where), "Srho_"+where) + Sn_cross_a = cse(self.S(self.n_cross(Sa_ik_nest), where), + "Sn_cross_a_"+where) return self.k*sym.join_fields( - sym.n_cross( self.k * Sn_cross_a + sym.grad(ambient_dim=3,operand=self.S(Srho_ik_nest,where)),where=where), - sym.div(self.S(self.n_cross(Sa_ik_nest),where)) - self.k * Srho + sym.n_cross(self.k * Sn_cross_a + sym.grad(ambient_dim=3, + operand=self.S(Srho_ik_nest, where)), where=where), + sym.div(self.S(self.n_cross(Sa_ik_nest), where)) - self.k * Srho ) def _scaledDPIEs_integral(self, sigma, sigma_n, where): - qfl="avg" + qfl = "avg" return 
sym.integral( ambient_dim=3, dim=2, - operand=( (self.Dp(sigma,target=where,qfl=qfl) - self.Dp(sigma,target=where,qfl=qfl,kernel=self.kernel_laplace,use_laplace=True))/self.k + 1j*0.5*sigma_n - 1j*self.Sp(sigma,target=where,qfl=qfl)), + operand=( + (self.Dp(sigma, target=where, qfl=qfl) + - self.Dp(sigma, target=where, qfl=qfl, + kernel=self.kernel_laplace, use_laplace=True)) + / self.k + + 1j*0.5*sigma_n + - 1j*self.Sp(sigma, target=where, qfl=qfl)), where=where) def _scaledDPIEv_integral(self, **kwargs): - qfl="avg" + qfl = "avg" # grab densities and domain to integrate over a = kwargs['a'] rho = kwargs['rho'] - rho_n = kwargs['rho_n'] + # rho_n = kwargs['rho_n'] where = kwargs['where'] # define some useful common sub expressions - Sa_ik_nest = cse(self._eval_all_objects(a, self.S, k=self.ik, kernel=self.kernel_ik), "Sa_ik_nest") - Srho_ik = cse(self.S(rho,where,k=self.ik,kernel=self.kernel_ik),"Srho_ik"+where) - Srho_ik_nest = cse(self._eval_all_objects(rho,self.S, k=self.ik, kernel=self.kernel_ik),"Srho_ik_nest") - Sn_cross_a = cse(self.S(self.n_cross(Sa_ik_nest),where),"Sn_cross_a_nest_"+where) - Sn_times_rho = cse(self.S(self.n_times(rho),where),"Sn_times_rho_"+where) + Sa_ik_nest = cse(self._eval_all_objects(a, self.S, k=self.ik, + kernel=self.kernel_ik), "Sa_ik_nest") + Srho_ik = cse(self.S(rho, where, k=self.ik, kernel=self.kernel_ik), + "Srho_ik"+where) + Srho_ik_nest = cse(self._eval_all_objects(rho, self.S, k=self.ik, + kernel=self.kernel_ik), "Srho_ik_nest") + Sn_cross_a = cse(self.S(self.n_cross(Sa_ik_nest), where), + "Sn_cross_a_nest_"+where) + Sn_times_rho = cse(self.S(self.n_times(rho), where), "Sn_times_rho_"+where) return sym.integral( ambient_dim=3, dim=2, operand=( - -self.k*sym.n_dot(Sn_times_rho,where=where) \ - + 1j*self.k*(self.k*sym.n_dot(Sn_cross_a,where=where) - 0.5*Srho_ik + self.Sp(Srho_ik_nest,target=where,qfl=qfl)) + -self.k*sym.n_dot(Sn_times_rho, where=where) + + 1j*self.k*( + self.k*sym.n_dot(Sn_cross_a, where=where) + - 0.5*Srho_ik + self.Sp(Srho_ik_nest, target=where, qfl=qfl)) ), where=where) @@ -742,12 +811,18 @@ class DPIEOperatorEvanescent(DPIEOperator): (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) # define some useful quantities - Sa_ik_nest = cse(self._eval_all_objects(a, self.S, k=self.ik, kernel=self.kernel_ik), "Sa_ik_nest") - Srho_ik_nest = cse(self._eval_all_objects(rho,self.S, k=self.ik, kernel=self.kernel_ik),"Srho_ik_nest") + Sa_ik_nest = cse(self._eval_all_objects(a, self.S, k=self.ik, + kernel=self.kernel_ik), "Sa_ik_nest") + Srho_ik_nest = cse(self._eval_all_objects(rho, self.S, k=self.ik, + kernel=self.kernel_ik), "Srho_ik_nest") # define the vector potential representation - return (sym.curl(self.S(a,target,qfl=qfl)) - self.k*self.S(self.n_times(rho),target,qfl=qfl)) \ - + 1j*self.k*(self.k*self.S(self.n_cross(Sa_ik_nest),target,qfl=qfl) + sym.grad(3,self.S(Srho_ik_nest,target,qfl=qfl))) + return ( + (sym.curl(self.S(a, target, qfl=qfl)) + - self.k*self.S(self.n_times(rho), target, qfl=qfl)) + + 1j*self.k*(self.k*self.S(self.n_cross(Sa_ik_nest), target, qfl=qfl) + + sym.grad(3, self.S(Srho_ik_nest, target, qfl=qfl))) + ) def div_vector_potential_rep(self, A_densities, target=None, qfl=None): """ @@ -759,18 +834,22 @@ class DPIEOperatorEvanescent(DPIEOperator): (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) # define some useful quantities - Sa_ik_nest = cse(self._eval_all_objects(a, self.S, k=self.ik, kernel=self.kernel_ik), "Sa_ik_nest") - Srho_ik_nest = cse(self._eval_all_objects(rho,self.S, 
k=self.ik, kernel=self.kernel_ik),"Srho_ik_nest") + Sa_ik_nest = cse(self._eval_all_objects(a, self.S, k=self.ik, + kernel=self.kernel_ik), "Sa_ik_nest") + Srho_ik_nest = cse(self._eval_all_objects(rho, self.S, k=self.ik, + kernel=self.kernel_ik), "Srho_ik_nest") # define the vector potential representation - return self.k*( self.D(rho,target,qfl=qfl) \ - + 1j*self.k*(sym.div(self.S(self.n_cross(Sa_ik_nest),target,qfl=qfl)) - self.k * self.S(Srho_ik_nest,target,qfl=qfl))) + return self.k*(self.D(rho, target, qfl=qfl) + + 1j*self.k*(sym.div(self.S(self.n_cross(Sa_ik_nest), target, + qfl=qfl)) - self.k * self.S(Srho_ik_nest, target, qfl=qfl))) - def scattered_volume_field(self, phi_densities, A_densities, tau_densities, target=None, alpha=1j,qfl=None): + def scattered_volume_field(self, phi_densities, A_densities, tau_densities, + target=None, alpha=1j, qfl=None): """ This will return an object of six entries, the first three of which represent the electric, and the second three of which represent the - magnetic field. + magnetic field. This satisfies the time-domain Maxwell's equations as verified by :func:`sumpy.point_calculus.frequency_domain_maxwell`. @@ -778,23 +857,33 @@ class DPIEOperatorEvanescent(DPIEOperator): # extract the densities needed (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) - (sigma0,sigma, V) = self._extract_phi_densities(phi_densities) + (sigma0, sigma, V) = self._extract_phi_densities(phi_densities) (tau0, tau) = self._extract_tau_densities(tau_densities) # obtain expressions for scalar and vector potentials - Sa_ik_nest = self._eval_all_objects(a, self.S, k=self.ik, kernel=self.kernel_ik) - A = self.vector_potential_rep(A_densities, target=target) - phi = self.scalar_potential_rep(phi_densities, target=target) + Sa_ik_nest = self._eval_all_objects(a, self.S, k=self.ik, + kernel=self.kernel_ik) + A = self.vector_potential_rep(A_densities, target=target) + # phi = self.scalar_potential_rep(phi_densities, target=target) # evaluate the potential form for the electric and magnetic fields - E_scat = 1j*self.k*A - sym.grad(3, self.D(sigma,target,qfl=qfl)) + 1j*self.k*sym.grad(3, self.S(sigma,target,qfl=qfl)) - H_scat = sym.grad(3,operand=(self.D(tau,target,qfl=qfl) - alpha*self.S(tau,target,qfl=qfl))) \ - + (self.k**2) * self.S(a,target,qfl=qfl) \ - - self.k * sym.curl(self.S(self.n_times(rho),target,qfl=qfl)) \ - + 1j*(self.k**2)*sym.curl(self.S(self.n_cross(Sa_ik_nest),target,qfl=qfl)) - + E_scat = ( + 1j*self.k*A + - sym.grad(3, self.D(sigma, target, qfl=qfl)) + + 1j*self.k*sym.grad(3, self.S(sigma, target, qfl=qfl))) + H_scat = ( + sym.grad(3, operand=( + self.D(tau, target, qfl=qfl) + - alpha*self.S(tau, target, qfl=qfl))) + + (self.k**2) * self.S(a, target, qfl=qfl) + - self.k * sym.curl(self.S(self.n_times(rho), target, qfl=qfl)) + + 1j*(self.k**2)*sym.curl(self.S(self.n_cross(Sa_ik_nest), + target, qfl=qfl)) + ) # join the fields into a vector return sym.join_fields(E_scat, H_scat) # }}} + +# vim: foldmethod=marker diff --git a/pytential/symbolic/primitives.py b/pytential/symbolic/primitives.py index 2556fb0351d8179d4b6fd70ad87c529a5368ff73..6092b00a21bc50556891ca1e6a62eeecd3dba1d8 100644 --- a/pytential/symbolic/primitives.py +++ b/pytential/symbolic/primitives.py @@ -78,10 +78,10 @@ visible only once evaluated.) Placeholders ^^^^^^^^^^^^ -.. autoclass:: Variable -.. autoclass:: make_sym_vector -.. autoclass:: make_sym_mv -.. autoclass:: make_sym_surface_mv +.. autoclass:: var +.. autofunction:: make_sym_vector +.. autofunction:: make_sym_mv +.. 
autofunction:: make_sym_surface_mv Functions ^^^^^^^^^ @@ -138,14 +138,18 @@ Elementary numerics .. autofunction:: mean .. autoclass:: IterativeInverse -Calculus (based on Geometric Algebra) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Geometric Calculus (based on Geometric/Clifford Algebra) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: Derivative + +Conventional Calculus +^^^^^^^^^^^^^^^^^^^^^ + .. autofunction:: dd_axis -.. autofunction:: d_dx -.. autofunction:: d_dy -.. autofunction:: d_dz +.. function:: d_dx +.. function:: d_dy +.. function:: d_dz .. autofunction:: grad_mv .. autofunction:: grad .. autofunction:: laplace diff --git a/pytential/unregularized.py b/pytential/unregularized.py index 18e8b65c3efec2e25bc81c6b87a457f238963816..a2fe4ad0ad6e941642a524c4e594718f1412c0b3 100644 --- a/pytential/unregularized.py +++ b/pytential/unregularized.py @@ -31,6 +31,7 @@ import numpy as np import loopy as lp from boxtree.tools import DeviceDataRecord +from loopy.version import MOST_RECENT_LANGUAGE_VERSION from pytential.source import LayerPotentialSourceBase from pytools import memoize_method @@ -312,7 +313,8 @@ class _FMMGeometryCodeContainer(object): """ targets[dim, i] = points[dim, i] """, - default_offset=lp.auto, name="copy_targets") + default_offset=lp.auto, name="copy_targets", + lang_version=MOST_RECENT_LANGUAGE_VERSION) knl = lp.fix_parameters(knl, ndims=self.ambient_dim) diff --git a/pytential/version.py b/pytential/version.py index d26fbc2f9341a880b1119e7e6079bd51e59e11b9..5cb9cc61161073b0cd9e9a5fad471dcc6620763d 100644 --- a/pytential/version.py +++ b/pytential/version.py @@ -1,2 +1,49 @@ +from __future__ import division, absolute_import + +__copyright__ = "Copyright (C) 2018 Andreas Kloeckner" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +# {{{ find install- or run-time git revision + +import os +if os.environ.get("AKPYTHON_EXEC_FROM_WITHIN_WITHIN_SETUP_PY") is not None: + # We're just being exec'd by setup.py. We can't import anything. + _git_rev = None + +else: + import pytential._git_rev as _git_rev_mod + _git_rev = _git_rev_mod.GIT_REVISION + + # If we're running from a dev tree, the last install (and hence the most + # recent update of the above git rev) could have taken place very long ago. 
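The net effect: `PYTENTIAL_KERNEL_VERSION`, assembled at the bottom of this file, changes whenever the release tuple or the recorded git revision changes, so persistent caches keyed on it (see the import added in `pytential/qbx/interactions.py` above) are invalidated automatically. A toy illustration of that idea, with `kernel_cache_key` a hypothetical helper:

    from pytential.version import PYTENTIAL_KERNEL_VERSION

    def kernel_cache_key(kernel_name):
        # (name, (VERSION, git_rev, counter)): a new commit changes git_rev,
        # so generated code cached under an old key is never reused.
        return (kernel_name, PYTENTIAL_KERNEL_VERSION)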
+ from pytools import find_module_git_revision + _runtime_git_rev = find_module_git_revision(__file__, n_levels_up=1) + if _runtime_git_rev is not None: + _git_rev = _runtime_git_rev + +# }}} + + VERSION = (2016, 1) VERSION_TEXT = ".".join(str(i) for i in VERSION) + +PYTENTIAL_KERNEL_VERSION = (VERSION, _git_rev, 0) diff --git a/setup.cfg b/setup.cfg index 42291e82433c3c7522f8e267e41321c4d19db099..a353f3f7242d42f689d5721b8f4a104aaf3e4e6b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,3 +1,4 @@ + [flake8] ignore = E126,E127,E128,E123,E226,E241,E242,E265,E402,W503,N803,N806,N802,D102,D103 max-line-length=85 @@ -5,3 +6,6 @@ exclude= pytential/symbolic/old_diffop_primitives.py, pytential/symbolic/pde/maxwell/generalized_debye.py, +[tool:pytest] +markers= + slowtest: mark a test as slow diff --git a/setup.py b/setup.py index d8d49a9cb1d30b0ac134f308ce983cd8dee0e7b1..84438494a9bf49fd79df6dab0513e763833c3ae4 100644 --- a/setup.py +++ b/setup.py @@ -1,63 +1,110 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +import os +from setuptools import setup, find_packages -def main(): - from setuptools import setup, find_packages - - version_dict = {} - init_filename = "pytential/version.py" - exec(compile(open(init_filename, "r").read(), init_filename, "exec"), - version_dict) - - setup(name="pytential", - version=version_dict["VERSION_TEXT"], - description="Evaluate layer and volume potentials accurately. " - "Solve integral equations.", - long_description=open("README.rst", "rt").read(), - author="Andreas Kloeckner", - author_email="inform@tiker.net", - license="MIT", - url="http://wiki.tiker.net/Pytential", - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Intended Audience :: Other Audience', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: MIT License', - 'Natural Language :: English', - 'Programming Language :: Python', - - 'Programming Language :: Python :: 2.6', - 'Programming Language :: Python :: 2.7', - # 3.x has not yet been tested. - 'Topic :: Scientific/Engineering', - 'Topic :: Scientific/Engineering :: Information Analysis', - 'Topic :: Scientific/Engineering :: Mathematics', - 'Topic :: Scientific/Engineering :: Visualization', - 'Topic :: Software Development :: Libraries', - 'Topic :: Utilities', - ], - - packages=find_packages(), - - install_requires=[ - "pytest>=2.3", - # FIXME leave out for now - # https://code.google.com/p/sympy/issues/detail?id=3874 - #"sympy>=0.7.2", - - "modepy>=2013.3", - "pyopencl>=2013.1", - "boxtree>=2013.1", - "pymbolic>=2013.2", - "loo.py>=2017.2", - "sumpy>=2013.1", - "cgen>=2013.1.2", - - "six", - ]) - - -if __name__ == '__main__': - main() + +# {{{ capture git revision at install time + +# authoritative version in pytools/__init__.py +def find_git_revision(tree_root): + # Keep this routine self-contained so that it can be copy-pasted into + # setup.py. 
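For concreteness, the file that `write_git_revision("pytential")` (defined just below) emits consists of a single assignment; the hash shown here is illustrative:

    # pytential/_git_rev.py, generated at install time
    GIT_REVISION = '0123456789abcdef0123456789abcdef01234567'
    # ... or GIT_REVISION = None when building outside a git checkout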
+
+    from os.path import join, exists, abspath
+    tree_root = abspath(tree_root)
+
+    if not exists(join(tree_root, ".git")):
+        return None
+
+    from subprocess import Popen, PIPE, STDOUT
+    p = Popen(["git", "rev-parse", "HEAD"], shell=False,
+              stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True,
+              cwd=tree_root)
+    (git_rev, _) = p.communicate()
+
+    import sys
+    if sys.version_info >= (3,):
+        git_rev = git_rev.decode()
+
+    git_rev = git_rev.rstrip()
+
+    retcode = p.returncode
+    assert retcode is not None
+    if retcode != 0:
+        from warnings import warn
+        warn("unable to find git revision")
+        return None
+
+    return git_rev
+
+
+def write_git_revision(package_name):
+    from os.path import dirname, join
+    dn = dirname(__file__)
+    git_rev = find_git_revision(dn)
+
+    with open(join(dn, package_name, "_git_rev.py"), "w") as outf:
+        outf.write("GIT_REVISION = %s\n" % repr(git_rev))
+
+
+write_git_revision("pytential")
+
+# }}}
+
+
+version_dict = {}
+init_filename = "pytential/version.py"
+os.environ["AKPYTHON_EXEC_FROM_WITHIN_WITHIN_SETUP_PY"] = "1"
+exec(compile(open(init_filename, "r").read(), init_filename, "exec"),
+        version_dict)
+
+setup(name="pytential",
+      version=version_dict["VERSION_TEXT"],
+      description="Evaluate layer and volume potentials accurately. "
+      "Solve integral equations.",
+      long_description=open("README.rst", "rt").read(),
+      author="Andreas Kloeckner",
+      author_email="inform@tiker.net",
+      license="MIT",
+      url="http://wiki.tiker.net/Pytential",
+      classifiers=[
+          'Development Status :: 3 - Alpha',
+          'Intended Audience :: Developers',
+          'Intended Audience :: Other Audience',
+          'Intended Audience :: Science/Research',
+          'License :: OSI Approved :: MIT License',
+          'Natural Language :: English',
+          'Programming Language :: Python',
+
+          'Programming Language :: Python :: 2.7',
+          'Programming Language :: Python :: 3.5',
+          'Programming Language :: Python :: 3.6',
+          'Topic :: Scientific/Engineering',
+          'Topic :: Scientific/Engineering :: Information Analysis',
+          'Topic :: Scientific/Engineering :: Mathematics',
+          'Topic :: Scientific/Engineering :: Visualization',
+          'Topic :: Software Development :: Libraries',
+          'Topic :: Utilities',
+          ],
+
+      packages=find_packages(),
+
+      install_requires=[
+          "pytest>=2.3",
+          # FIXME leave out for now
+          # https://code.google.com/p/sympy/issues/detail?id=3874
+          #"sympy>=0.7.2",
+
+          "pytools>=2018.2",
+          "modepy>=2013.3",
+          "pyopencl>=2013.1",
+          "boxtree>=2013.1",
+          "pymbolic>=2013.2",
+          "loo.py>=2017.2",
+          "sumpy>=2013.1",
+          "cgen>=2013.1.2",
+
+          "six",
+          ])
diff --git a/test/test_global_qbx.py b/test/test_global_qbx.py
index cc0f26f0ebbce0cee1ab5c423fb92f6ed94b9c66..6fbd78d1082ed668eaa329afc23792dd210d420b 100644
--- a/test/test_global_qbx.py
+++ b/test/test_global_qbx.py
@@ -121,7 +121,7 @@ def run_source_refinement_test(ctx_getter, mesh, order, helmholtz_k=None):
     int_centers = np.array([axis.get(queue) for axis in int_centers])
     ext_centers = get_centers_on_side(lpot_source, +1)
     ext_centers = np.array([axis.get(queue) for axis in ext_centers])
-    expansion_radii = lpot_source._expansion_radii("npanels").get(queue)
+    expansion_radii = lpot_source._expansion_radii("nsources").get(queue)
     panel_sizes = lpot_source._panel_sizes("npanels").get(queue)
     fine_panel_sizes = lpot_source._fine_panel_sizes("npanels").get(queue)

@@ -150,8 +150,8 @@ def run_source_refinement_test(ctx_getter, mesh, order, helmholtz_k=None):
         # A center cannot be closer to another panel than to its originating
         # panel.
-        rad = expansion_radii[centers_panel.element_nr]
-        assert dist >= rad * (1-expansion_disturbance_tolerance), \
+        rad = expansion_radii[centers_panel.discr_slice]
+        assert (dist >= rad * (1-expansion_disturbance_tolerance)).all(), \
                 (dist, rad, centers_panel.element_nr, sources_panel.element_nr)

     def check_sufficient_quadrature_resolution(centers_panel, sources_panel):
@@ -431,7 +431,7 @@ if __name__ == "__main__":
     if len(sys.argv) > 1:
         exec(sys.argv[1])
     else:
-        from py.test.cmdline import main
+        from pytest import main
         main([__file__])

 # vim: fdm=marker
diff --git a/test/test_layer_pot.py b/test/test_layer_pot.py
index cb8669e4eae4127510b45712b111a3d9fdd67b12..b0766375186a5ad37f9fcd5e3ef4a04fa907c964 100644
--- a/test/test_layer_pot.py
+++ b/test/test_layer_pot.py
@@ -540,7 +540,7 @@ if __name__ == "__main__":
     if len(sys.argv) > 1:
         exec(sys.argv[1])
     else:
-        from py.test.cmdline import main
+        from pytest import main
         main([__file__])

 # vim: fdm=marker
diff --git a/test/test_layer_pot_eigenvalues.py b/test/test_layer_pot_eigenvalues.py
index 2da95fcbd473220475719c476e4d4c50bac055ac..e60b72bfc4108cf229114ff5d9c3563afa879d23 100644
--- a/test/test_layer_pot_eigenvalues.py
+++ b/test/test_layer_pot_eigenvalues.py
@@ -382,7 +382,7 @@ if __name__ == "__main__":
     if len(sys.argv) > 1:
         exec(sys.argv[1])
     else:
-        from py.test.cmdline import main
+        from pytest import main
         main([__file__])

 # vim: fdm=marker
diff --git a/test/test_layer_pot_identity.py b/test/test_layer_pot_identity.py
index fe001342de665dc58cfdee0d8f9047f74f7c7c3c..c019f9309f2d23b5fe910888e1229cc850dfb51f 100644
--- a/test/test_layer_pot_identity.py
+++ b/test/test_layer_pot_identity.py
@@ -410,7 +410,7 @@ if __name__ == "__main__":
     if len(sys.argv) > 1:
         exec(sys.argv[1])
     else:
-        from py.test.cmdline import main
+        from pytest import main
         main([__file__])

 # vim: fdm=marker
diff --git a/test/test_matrix.py b/test/test_matrix.py
index d1558bcc03fcd42c64c4cde780557721a40e3131..5485c50266bf86238e246dc900211acb9ae2f290 100644
--- a/test/test_matrix.py
+++ b/test/test_matrix.py
@@ -178,7 +178,7 @@ if __name__ == "__main__":
     if len(sys.argv) > 1:
         exec(sys.argv[1])
     else:
-        from py.test.cmdline import main
+        from pytest import main
         main([__file__])

 # vim: fdm=marker
diff --git a/test/test_maxwell.py b/test/test_maxwell.py
index a3ceca6ef2c6dc18b65596e615d4d8aecdfc2ae9..a96753ea56ef6ddbf618a7fc3c3c96c89556e1cf 100644
--- a/test/test_maxwell.py
+++ b/test/test_maxwell.py
@@ -213,6 +213,7 @@ class EHField(object):

 # {{{ driver

+@pytest.mark.slowtest
 @pytest.mark.parametrize("case", [
     #tc_int,
     tc_ext,
@@ -506,7 +507,7 @@ if __name__ == "__main__":
     if len(sys.argv) > 1:
         exec(sys.argv[1])
     else:
-        from py.test.cmdline import main
+        from pytest import main
         main([__file__])

 # vim: fdm=marker
diff --git a/test/test_maxwell_dpie.py b/test/test_maxwell_dpie.py
index 27d0a9d0d4cc8806e9e5f57bdb54bdee6fa074ba..cee57c0bb8397142d0f20cd124381e0cc3077460 100644
--- a/test/test_maxwell_dpie.py
+++ b/test/test_maxwell_dpie.py
@@ -39,6 +39,17 @@ from sumpy.point_calculus import CalculusPatch, frequency_domain_maxwell
 from sumpy.tools import vector_from_device
 from pytential.target import PointsTarget
 from meshmode.mesh.processing import find_bounding_box
+from pytools.convergence import EOCRecorder
+from pytential.solve import gmres
+
+import pytential.symbolic.pde.maxwell as mw
+import pytential.symbolic.pde.maxwell.dpie as mw_dpie
+from pytential.qbx import QBXLayerPotentialSource
+from meshmode.discretization import Discretization
+from meshmode.discretization.poly_element import \
+        InterpolatoryQuadratureSimplexGroupFactory
+from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder
+

 import logging
 logger = logging.getLogger(__name__)
@@ -112,7 +123,7 @@ class RoundedCubeTestCase(MaxwellTestCase):
         mesh = affine_map(mesh, b=np.array([-0.5, -0.5, -0.5]))
         mesh = affine_map(mesh, A=np.eye(3)*2)

-        # now centered at origin and extends to -1,1
+        # now centered at origin and extends to -1, 1

         # Flip elements--gmsh generates inside-out geometry.
         return perform_flips(mesh, np.ones(mesh.nelements))
@@ -158,7 +169,7 @@ class ElliptiPlaneTestCase(MaxwellTestCase):
                 "-string", "Mesh.CharacteristicLengthMax = %g;" % resolution])

-        # now centered at origin and extends to -1,1
+        # now centered at origin and extends to -1, 1

         # Flip elements--gmsh generates inside-out geometry.
         from meshmode.mesh.processing import perform_flips
@@ -192,7 +203,7 @@ class ElliptiPlaneTestCase(MaxwellTestCase):
 tc_int = SphereTestCase(k=1.2, is_interior=True, resolutions=[0, 1],
         qbx_order=3, fmm_tolerance=1e-4)
-tc_ext = SphereTestCase(k=1.2, is_interior=False, resolutions=[0],
+tc_ext = SphereTestCase(k=1.2, is_interior=False, resolutions=[0, 1],
         qbx_order=7, fmm_tolerance=1e-4)

 tc_rc_ext = RoundedCubeTestCase(k=6.4, is_interior=False, resolutions=[0.1],
@@ -216,51 +227,183 @@ class EHField(object):
         return self.field[3:]


-# {{{ driver
+# {{{ test_dpie_auxiliary

 @pytest.mark.parametrize("case", [
     #tc_int,
     tc_ext,
     ])
-def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
-    """For (say) is_interior=False (the 'exterior' MFIE), this test verifies
+def test_dpie_auxiliary(ctx_factory, case):
+    logging.basicConfig(level=logging.INFO)
+
+    cl_ctx = ctx_factory()
+    queue = cl.CommandQueue(cl_ctx)
+
+    pytest.importorskip("pyfmmlib")
+
+    np.random.seed(12)
+
+    knl_kwargs = {"k": case.k, "ik": 1j*case.k}
+
+    geom_list = ["obj0"]
+
+    dpie = mw_dpie.DPIEOperatorEvanescent(geometry_list=geom_list)
+    tau_densities = sym.make_sym_vector("tau_densities", dpie.num_distinct_objects())
+
+    calc_patch = CalculusPatch(np.array([-3, 0, 0]), h=0.01)
+
+    # {{{ test the auxiliary problem
+
+    # test that the aux problem is capable of computing the desired derivatives
+    # of an appropriate input field
+
+    # define method to get locations to evaluate representation
+    def epsilon_off_boundary(where=None, epsilon=1e-4):
+        x = sym.nodes(3, where).as_vector()
+        return x + sym.normal(3, 2, where).as_vector()*epsilon
+
+    # loop through the case's resolutions and compute the scattered field
+    # solution
+
+    eoc_rec = EOCRecorder()
+
+    for resolution in case.resolutions:
+
+        # get the scattered and observation mesh
+        scat_mesh = case.get_mesh(resolution, case.target_order)
+        # observation_mesh = case.get_observation_mesh(case.target_order)
+
+        # define the pre-scattered discretization
+        pre_scat_discr = Discretization(
+                cl_ctx, scat_mesh,
+                InterpolatoryQuadratureSimplexGroupFactory(case.target_order))
+
+        # use OpenCL random number generator to create a set of random
+        # source locations for various variables being solved for
+        dpie0 = mw_dpie.DPIEOperator(geometry_list=geom_list)
+        qbx0, _ = QBXLayerPotentialSource(
+                pre_scat_discr, fine_order=4*case.target_order,
+                #fmm_order=False,
+                qbx_order=case.qbx_order,
+                fmm_level_to_order=SimpleExpansionOrderFinder(
+                    case.fmm_tolerance),
+                fmm_backend=case.fmm_backend
+                ).with_refinement(_expansion_disturbance_tolerance=0.05)
+
+        # define the geometry dictionary
+        geom_map = {
+                "obj0": qbx0,
+                "obj0t": qbx0.density_discr,
+                "scat": qbx0.density_discr}
+
+        # define points to evaluate the gradient at
+        tgt_n = PointsTarget(bind(geom_map,
+            epsilon_off_boundary(where='obj0', epsilon=1.0))(queue))
+        geom_map['tgt'] = tgt_n
+
+        # define the quantity that will have a derivative taken of it and
+        # its associated derivative
+        def getTestFunction(where=None):
+            z = sym.nodes(3, where).as_vector()
+            z2 = sym.cse(np.dot(z, z), "z_mag_squared")
+            g = sym.exp(1j*dpie0.k*sym.sqrt(z2))/(4.0*np.pi*sym.sqrt(z2))
+            return g
+
+        def getTestGradient(where=None):
+            z = sym.nodes(3, where).as_vector()
+            z2 = sym.cse(np.dot(z, z), "z_mag_squared")
+            grad_g = z*sym.exp(1j*dpie0.k*sym.sqrt(z2))*(1j*dpie0.k -
+                    1.0/sym.sqrt(z2))/(4*np.pi*z2)
+            return grad_g
+
+        # compute output gradient evaluated at the desired object
+        tgrad = bind(geom_map, getTestGradient(where="tgt"))(queue, **knl_kwargs)
+        test_func_d = vector_from_device(queue, tgrad)
+
+        # define the problem that will be solved
+        test_tau_op = bind(geom_map,
+                dpie0.subproblem_operator(tau_densities=tau_densities))
+        test_tau_rhs = bind(geom_map,
+                dpie0.subproblem_rhs_func(function=getTestFunction))(queue,
+                        **knl_kwargs)
+
+        # set GMRES settings for solving
+        gmres_settings = dict(
+                tol=case.gmres_tol,
+                progress=True,
+                hard_failure=True,
+                stall_iterations=50, no_progress_factor=1.05)
+
+        subprob_result = gmres(
+                test_tau_op.scipy_op(queue, "tau_densities", np.complex128,
+                    domains=dpie0.get_subproblem_domain_list(),
+                    **knl_kwargs),
+                test_tau_rhs, **gmres_settings)
+        dummy_tau = subprob_result.solution
+
+        # compute the error between the associated derivative quantities
+        tgrad = bind(geom_map, sym.grad(3,
+            dpie0.subproblem_rep(tau_densities=tau_densities,
+                target='tgt')))(queue, tau_densities=dummy_tau,
+                        **knl_kwargs)
+        approx_d = vector_from_device(queue, tgrad)
+        err = (
+                calc_patch.norm(test_func_d - approx_d, np.inf)
+                /
+                calc_patch.norm(approx_d, np.inf))
+
+        # append error to the error list
+        eoc_rec.add_data_point(qbx0.h_max, err)
+
+    print(eoc_rec)
+
+    assert eoc_rec.order_estimate() >= case.qbx_order - 0.5
+
+    # }}}
+
+# }}}
+
+
+@pytest.mark.parametrize("case", [
+    #tc_int,
+    tc_ext,
+    ])
+def test_pec_dpie_extinction(
+        ctx_factory, case,
+        visualize=False,
+        test_representations=False,
+        test_operators=False,
+        ):
+
+    """For (say) is_interior=False (the 'exterior' BVP), this test verifies
     extinction of the combined (incoming + scattered) field on the interior of
     the scatterer.
     """

-    # setup the basic config for logging
     logging.basicConfig(level=logging.INFO)

-    # setup the OpenCL context and queue
-    cl_ctx = ctx_getter()
+    cl_ctx = ctx_factory()
     queue = cl.CommandQueue(cl_ctx)

-    # import or skip pyfmmlib
     pytest.importorskip("pyfmmlib")

-    # initialize the random seed
     np.random.seed(12)

-    # specify a dictionary with some useful arguments
     knl_kwargs = {"k": case.k, "ik": 1j*case.k}

-    # specify the list of geometry objects being used
     geom_list = ["obj0"]

     # {{{ come up with a solution to Maxwell's equations

-    # import some functionality from maxwell into this
-    # local scope environment
-    import pytential.symbolic.pde.maxwell as mw
-    import pytential.symbolic.pde.maxwell.dpie as mw_dpie
-
     # initialize the DPIE operator based on the geometry list
     dpie = mw_dpie.DPIEOperatorEvanescent(geometry_list=geom_list)

     # specify some symbolic variables that will be used
     # in the process to solve integral equations for the DPIE
-    phi_densities = sym.make_sym_vector("phi_densities", dpie.num_scalar_potential_densities())
-    A_densities = sym.make_sym_vector("A_densities", dpie.num_vector_potential_densities())
+    phi_densities = sym.make_sym_vector("phi_densities",
+            dpie.num_scalar_potential_densities())
+    A_densities = sym.make_sym_vector("A_densities",
+            dpie.num_vector_potential_densities())
     tau_densities = sym.make_sym_vector("tau_densities", dpie.num_distinct_objects())

     # get test source locations from the passed in case's queue
@@ -270,45 +413,51 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
     calc_patch = CalculusPatch(np.array([-3, 0, 0]), h=0.01)
     calc_patch_tgt = PointsTarget(cl.array.to_device(queue, calc_patch.points))

-    # define a random number generator based on OpenCL
-    rng = cl.clrandom.PhiloxGenerator(cl_ctx, seed=12)
-
     # define some parameters for the incident wave
     # direction for the wave
-    u_dir = np.array([1, 0, 0],dtype=np.complex128)
+    u_dir = np.array([1, 0, 0], dtype=np.complex128)

     # polarization vector
-    Ep = np.array([0, 1, 1],dtype=np.complex128)
+    Ep = np.array([0, 1, 1], dtype=np.complex128)

     # define symbolic vectors for use
     uvar = sym.make_sym_vector("u", 3)
-    Evar = sym.make_sym_vector("Ep",3)
+    Evar = sym.make_sym_vector("Ep", 3)

-    # define functions that can be used to generate incident fields for an input discretization
+    # define functions that can be used to generate incident fields for an
+    # input discretization

     # define potentials based on incident plane wave
     def get_incident_plane_wave_EHField(tgt):
-        return bind((test_source,tgt),mw.get_sym_maxwell_plane_wave(amplitude_vec=Evar, v=uvar, omega=dpie.k))(queue,u=u_dir,Ep=Ep,**knl_kwargs)
+        return bind((test_source, tgt),
+                mw.get_sym_maxwell_plane_wave(amplitude_vec=Evar, v=uvar,
+                    omega=dpie.k))(queue, u=u_dir, Ep=Ep, **knl_kwargs)

     # get the gradphi_inc field evaluated at some source locations
     def get_incident_gradphi(objects, target=None):
-        return bind(objects,mw.get_sym_maxwell_planewave_gradphi(u=uvar, Ep=Evar, k=dpie.k,where=target))(queue,u=u_dir,Ep=Ep,**knl_kwargs)
+        return bind(objects, mw.get_sym_maxwell_planewave_gradphi(u=uvar,
+            Ep=Evar, k=dpie.k, where=target))(queue, u=u_dir,
+                Ep=Ep, **knl_kwargs)

     # get the incident plane wave div(A)
     def get_incident_divA(objects, target=None):
-        return bind(objects,mw.get_sym_maxwell_planewave_divA(u=uvar, Ep=Evar, k=dpie.k,where=target))(queue,u=u_dir,Ep=Ep,**knl_kwargs)
+        return bind(objects, mw.get_sym_maxwell_planewave_divA(u=uvar, Ep=Evar,
+            k=dpie.k, where=target))(queue, u=u_dir, Ep=Ep, **knl_kwargs)

-    # method to get  vector potential and scalar potential for incident
+    # method to get vector potential and scalar potential for incident
     # E-M fields
     def get_incident_potentials(objects, target=None):
-        return bind(objects,mw.get_sym_maxwell_planewave_potentials(u=uvar, Ep=Evar, k=dpie.k,where=target))(queue,u=u_dir,Ep=Ep,**knl_kwargs)
+        return bind(objects, mw.get_sym_maxwell_planewave_potentials(u=uvar,
+            Ep=Evar, k=dpie.k, where=target))(queue, u=u_dir,
+                Ep=Ep, **knl_kwargs)

     # define a smooth function to represent the density
-    def dummy_density(omega = 1.0, where=None):
+    def dummy_density(omega=1.0, where=None):
         x = sym.nodes(3, where).as_vector()
-        return sym.sin(omega*sym.n_dot(x,where))
+        return sym.sin(omega*sym.n_dot(x, where))

     # get the Electromagnetic field evaluated at the target calculus patch
-    pde_test_inc = EHField(vector_from_device(queue, get_incident_plane_wave_EHField(calc_patch_tgt)))
+    pde_test_inc = EHField(vector_from_device(queue,
+        get_incident_plane_wave_EHField(calc_patch_tgt)))

     # compute residuals of incident field at source points
     source_maxwell_resids = [
@@ -319,132 +468,29 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
     # make sure Maxwell residuals are small so we know the incident field
     # properly satisfies the maxwell equations
     print("Source Maxwell residuals:", source_maxwell_resids)
-    assert max(source_maxwell_resids) < 1e-6
+    assert max(source_maxwell_resids) < 1e-4

     # }}}

-
-    # {{{ Test the auxiliary problem is capable of computing the desired derivatives of an appropriate input field
-    test_auxiliary = False
-
-    if test_auxiliary:
-        # import a bunch of stuff that will be useful
-        from pytools.convergence import EOCRecorder
-        from pytential.qbx import QBXLayerPotentialSource
-        from meshmode.discretization import Discretization
-        from meshmode.discretization.poly_element import \
-            InterpolatoryQuadratureSimplexGroupFactory
-        from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder
-
-
-        # define method to get locations to evaluate representation
-        def epsilon_off_boundary(where=None, epsilon=1e-4):
-            x = sym.nodes(3, where).as_vector()
-            return x + sym.normal(3,2,where).as_vector()*epsilon
-
-        # # loop through the case's resolutions and compute the scattered field solution
-        deriv_error = []
-        for resolution in case.resolutions:
-
-            # get the scattered and observation mesh
-            scat_mesh = case.get_mesh(resolution, case.target_order)
-            observation_mesh = case.get_observation_mesh(case.target_order)
-
-            # define the pre-scattered discretization
-            pre_scat_discr = Discretization(
-                cl_ctx, scat_mesh,
-                InterpolatoryQuadratureSimplexGroupFactory(case.target_order))
-
-            # use OpenCL random number generator to create a set of random
-            # source locations for various variables being solved for
-            dpie0 = mw_dpie.DPIEOperator(geometry_list=geom_list)
-            qbx0, _ = QBXLayerPotentialSource(
-                pre_scat_discr, fine_order=4*case.target_order,
-                #fmm_order=False,
-                qbx_order=case.qbx_order,
-                fmm_level_to_order=SimpleExpansionOrderFinder(case.fmm_tolerance),
-                fmm_backend=case.fmm_backend
-                ).with_refinement(_expansion_disturbance_tolerance=0.05)
-
-            # define the geometry dictionary
-            geom_map = {"obj0":qbx0, "obj0t":qbx0.density_discr, "scat":qbx0.density_discr}
-
-            # define points to evaluate the gradient at
-            tgt_n = PointsTarget(bind(geom_map, epsilon_off_boundary(where='obj0',epsilon=1.0))(queue))
-            geom_map['tgt'] = tgt_n
-
-            # define the quantity that will have a derivative taken of it and its associated derivative
-            def getTestFunction(where=None):
-                z = sym.nodes(3, where).as_vector()
-                z2 = sym.cse(np.dot(z, z), "z_mag_squared")
-                g = sym.exp(1j*dpie0.k*sym.sqrt(z2))/(4.0*np.pi*sym.sqrt(z2))
-                return g
-
-            def getTestGradient(where=None):
-                z = sym.nodes(3, where).as_vector()
-                z2 = sym.cse(np.dot(z, z), "z_mag_squared")
-                grad_g = z*sym.exp(1j*dpie0.k*sym.sqrt(z2))*(1j*dpie.k - 1.0/sym.sqrt(z2))/(4*np.pi*z2)
-                return grad_g
-
-            # compute output gradient evaluated at the desired object
-            tgrad = bind(geom_map,getTestGradient(where="tgt"))(queue,**knl_kwargs)
-            test_func_d = vector_from_device(queue,tgrad)
-
-            # define the problem that will be solved
-            test_tau_op= bind(geom_map,dpie0.subproblem_operator(tau_densities=tau_densities))
-            test_tau_rhs= bind(geom_map,dpie0.subproblem_rhs_func(function=getTestFunction))(queue,**knl_kwargs)
-
-            # set GMRES settings for solving
-            gmres_settings = dict(
-                tol=case.gmres_tol,
-                progress=True,
-                hard_failure=True,
-                stall_iterations=50, no_progress_factor=1.05)
-
-            # get the GMRES functionality
-            from pytential.solve import gmres
-
-            subprob_result = gmres(
-                    test_tau_op.scipy_op(queue, "tau_densities", np.complex128, domains=dpie0.get_subproblem_domain_list(), **knl_kwargs),
-                    test_tau_rhs, **gmres_settings)
-            dummy_tau = subprob_result.solution
-
-            # compute the error between the associated derivative quantities
-            tgrad = bind(geom_map,sym.grad(3,dpie0.subproblem_rep(tau_densities=tau_densities,target='tgt')))(queue,tau_densities=dummy_tau,**knl_kwargs)
-            approx_d = vector_from_device(queue,tgrad)
-            err = calc_patch.norm(test_func_d - approx_d, np.inf)
-
-            # append error to the error list
-            deriv_error.append(err)
-
-        print("Auxiliary Error Results:")
-        for n in range(0,len(deriv_error)):
-            print("Case {0}: {1}".format(n+1,deriv_error[n]))
-
-    # }}}
-
-
-    # # {{{ Test the representations
-    test_representations = False
+    # {{{ test the representations

     if test_representations:

         # import a bunch of stuff that will be useful
-        from pytools.convergence import EOCRecorder
         from pytential.qbx import QBXLayerPotentialSource
         from meshmode.discretization import Discretization
         from meshmode.discretization.poly_element import \
             InterpolatoryQuadratureSimplexGroupFactory
         from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder

-
-        # # loop through the case's resolutions and compute the scattered field solution
+        # loop through the case's resolutions and compute the scattered field
+        # solution
        rep_error = []

         for resolution in case.resolutions:

             # get the scattered and observation mesh
-            scat_mesh        = case.get_mesh(resolution, case.target_order)
-            observation_mesh = case.get_observation_mesh(case.target_order)
+            scat_mesh = case.get_mesh(resolution, case.target_order)
+            # observation_mesh = case.get_observation_mesh(case.target_order)

             # define the pre-scattered discretization
             pre_scat_discr = Discretization(
@@ -458,24 +504,36 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
                 pre_scat_discr, fine_order=4*case.target_order,
                 #fmm_order=False,
                 qbx_order=case.qbx_order,
-                fmm_level_to_order=SimpleExpansionOrderFinder(case.fmm_tolerance),
+                fmm_level_to_order=SimpleExpansionOrderFinder(
+                    case.fmm_tolerance),
                 fmm_backend=case.fmm_backend
                 ).with_refinement(_expansion_disturbance_tolerance=0.05)

             # define the geometry dictionary
-            geom_map = {"obj0":qbx0, "obj0t":qbx0.density_discr, "scat":qbx0.density_discr}
-            dummy_phi = np.array([None]*dpie0.num_scalar_potential_densities(),dtype=dpie0.stype)
-            dummy_A = np.array([None]*dpie0.num_vector_potential_densities(),dtype=dpie0.stype)
-            v = rng.normal(queue, (qbx0.density_discr.nnodes,), dtype=np.float64)
-            s = 0*rng.normal(queue, (), dtype=np.float64)
+            geom_map = {
+                "obj0": qbx0,
+                "obj0t": qbx0.density_discr,
+                "scat": qbx0.density_discr
+                }
+
+            dummy_phi = np.array([None]*dpie0.num_scalar_potential_densities(),
+                    dtype=dpie0.stype)
+            dummy_A = np.array([None]*dpie0.num_vector_potential_densities(),
+                    dtype=dpie0.stype)
+
+            # v = rng.normal(queue, (qbx0.density_discr.nnodes,), dtype=np.float64)
+            # s = 0*rng.normal(queue, (), dtype=np.float64)

             n1 = len(dummy_phi)
             n2 = len(dummy_A)

-            for i in range(0,n1):
-                dummy_phi[i] = bind(geom_map,dummy_density(where='obj0'))(queue)
-            for i in range(0,n2):
-                dummy_A[i] = bind(geom_map,dummy_density(where='obj0'))(queue)
-            test_tau_op= bind(geom_map,dpie0.subproblem_operator(tau_densities=tau_densities))
-            test_tau_rhs= bind(geom_map,dpie0.subproblem_rhs(A_densities=A_densities))(queue,A_densities=dummy_A,**knl_kwargs)
+            for i in range(0, n1):
+                dummy_phi[i] = bind(geom_map, dummy_density(where='obj0'))(queue)
+            for i in range(0, n2):
+                dummy_A[i] = bind(geom_map, dummy_density(where='obj0'))(queue)
+            test_tau_op = bind(geom_map,
+                    dpie0.subproblem_operator(tau_densities=tau_densities))
+            test_tau_rhs = bind(geom_map,
+                    dpie0.subproblem_rhs(A_densities=A_densities))(queue,
+                            A_densities=dummy_A, **knl_kwargs)

             # set GMRES settings for solving
             gmres_settings = dict(
@@ -484,38 +542,42 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
                 hard_failure=True,
                 stall_iterations=50, no_progress_factor=1.05)

-            # get the GMRES functionality
-            from pytential.solve import gmres
-
             subprob_result = gmres(
-                    test_tau_op.scipy_op(queue, "tau_densities", np.complex128, domains=dpie0.get_subproblem_domain_list(), **knl_kwargs),
+                    test_tau_op.scipy_op(queue, "tau_densities", np.complex128,
+                        domains=dpie0.get_subproblem_domain_list(),
+                        **knl_kwargs),
                     test_tau_rhs, **gmres_settings)
-            dummy_tau   = subprob_result.solution
+            dummy_tau = subprob_result.solution

-            sym_repr0 = dpie.scattered_volume_field(phi_densities,A_densities,tau_densities,target='tgt')
+            sym_repr0 = dpie.scattered_volume_field(phi_densities, A_densities,
+                    tau_densities, target='tgt')

             def eval_test_repr_at(tgt):
-                map = geom_map
+                map = geom_map.copy()
                 map['tgt'] = tgt
-                return bind(map, sym_repr0)(queue, phi_densities=dummy_phi, A_densities=dummy_A, tau_densities=dummy_tau, **knl_kwargs)
+                return bind(map, sym_repr0)(queue, phi_densities=dummy_phi,
+                        A_densities=dummy_A, tau_densities=dummy_tau,
+                        **knl_kwargs)

-            pde_test_repr = EHField(vector_from_device(queue, eval_test_repr_at(calc_patch_tgt)))
+            pde_test_repr = EHField(vector_from_device(queue,
+                eval_test_repr_at(calc_patch_tgt)))

             maxwell_residuals = [
-                calc_patch.norm(x, np.inf) / calc_patch.norm(pde_test_repr.e, np.inf)
-                for x in frequency_domain_maxwell(calc_patch, pde_test_repr.e, pde_test_repr.h, case.k)]
+                calc_patch.norm(x, np.inf) /
+                calc_patch.norm(pde_test_repr.e, np.inf)
+                for x in frequency_domain_maxwell(calc_patch,
+                    pde_test_repr.e, pde_test_repr.h, case.k)]
             print("Maxwell residuals:", maxwell_residuals)

             rep_error.append(maxwell_residuals)

         print("Representation Error Results:")
-        for n in range(0,len(rep_error)):
-            print("Case {0}: {1}".format(n+1,rep_error[n]))
+        for n in range(0, len(rep_error)):
+            print("Case {0}: {1}".format(n+1, rep_error[n]))

-    # #}}}
+    # }}}

+    # {{{ test the operators

-    # # {{{ Test the operators
-    test_operators = False

     if test_operators:

         # define error array
@@ -524,23 +586,22 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
         # define method to get locations to evaluate representation
         def epsilon_off_boundary(where=None, epsilon=1e-4):
             x = sym.nodes(3, where).as_vector()
-            return x + sym.normal(3,2,where).as_vector()*epsilon
+            return x + sym.normal(3, 2, where).as_vector()*epsilon

         # import a bunch of stuff that will be useful
-        from pytools.convergence import EOCRecorder
         from pytential.qbx import QBXLayerPotentialSource
         from meshmode.discretization import Discretization
         from meshmode.discretization.poly_element import \
             InterpolatoryQuadratureSimplexGroupFactory
         from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder

-
-        # loop through the case's resolutions and compute the scattered field solution
+        # loop through the case's resolutions and compute the scattered field
+        # solution
         for resolution in case.resolutions:

             # get the scattered and observation mesh
-            scat_mesh        = case.get_mesh(resolution, case.target_order)
-            observation_mesh = case.get_observation_mesh(case.target_order)
+            scat_mesh = case.get_mesh(resolution, case.target_order)
+            # observation_mesh = case.get_observation_mesh(case.target_order)

             # define the pre-scattered discretization
             pre_scat_discr = Discretization(
@@ -554,18 +615,27 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
                 pre_scat_discr, fine_order=4*case.target_order,
                 #fmm_order=False,
                 qbx_order=case.qbx_order,
-                fmm_level_to_order=SimpleExpansionOrderFinder(case.fmm_tolerance),
+                fmm_level_to_order=SimpleExpansionOrderFinder(
+                    case.fmm_tolerance),
                 fmm_backend=case.fmm_backend
                 ).with_refinement(_expansion_disturbance_tolerance=0.05)

             # define the geometry dictionary
-            geom_map = {"obj0":qbx0, "obj0t":qbx0.density_discr, "scat":qbx0.density_discr}
-
-            # compute off-boundary locations that the representation will need to be evaluated at
-            tgt_n = PointsTarget(bind(geom_map, epsilon_off_boundary(where='obj0',epsilon=1e-4))(queue))
+            geom_map = {
+                "obj0": qbx0,
+                "obj0t": qbx0.density_discr,
+                "scat": qbx0.density_discr
+                }
+
+            # compute off-boundary locations that the representation will need
+            # to be evaluated at
+            tgt_n = PointsTarget(
+                bind(geom_map,
+                    epsilon_off_boundary(where='obj0', epsilon=1e-4))(queue))
             geom_map['tgt'] = tgt_n

-            # define a dummy density, specifically to be used for the vector potential A densities
+            # define a dummy density, specifically to be used for the vector
+            # potential A densities
             x, y, z = qbx0.density_discr.nodes().with_queue(queue)

             m = cl.clmath
@@ -586,66 +656,99 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
                 ]))

             # redefine a_densities
-            #A_densities = sym.make_sym_vector("A_densities", dpie.num_vector_potential_densities2())
+            #A_densities = sym.make_sym_vector("A_densities", dpie.num_vector_potential_densities2())  # noqa

             # init random dummy densities for the vector and scalar potentials
-            dummy_phi = np.array([None]*dpie0.num_scalar_potential_densities(),dtype=dpie0.stype)
-            dummy_A = np.array([None]*dpie0.num_vector_potential_densities(),dtype=dpie0.stype)
-            dummy_tau = np.array([None]*dpie0.num_distinct_objects(),dtype=dpie0.stype)
-
-            # Compute zero scalar for use in extra constants that are usually solved for in operators
+            dummy_phi = np.array([None]*dpie0.num_scalar_potential_densities(),
+                    dtype=dpie0.stype)
+            dummy_A = np.array([None]*dpie0.num_vector_potential_densities(),
+                    dtype=dpie0.stype)
+            dummy_tau = np.array([None]*dpie0.num_distinct_objects(),
+                    dtype=dpie0.stype)
+
+            # compute zero scalar for use in extra constants that are usually
+            # solved for in operators
             n1 = len(dummy_phi)
             n2 = len(dummy_A)
             n3 = len(dummy_tau)
-            for i in range(0,n1):
+            for i in range(0, n1):
                 if i < (n1-1):
-                    dummy_phi[i] = bind(geom_map,dummy_density(where='obj0'))(queue)
+                    dummy_phi[i] = bind(geom_map, dummy_density(where='obj0'))(queue)
                 else:
                     dummy_phi[i] = 0.0
-            for i in range(0,n2):
+            for i in range(0, n2):
                 if i < 2:
                     dummy_A[i] = density[i]
                 elif i < (n2-1):
-                    dummy_A[i] = bind(geom_map,dummy_density(where='obj0'))(queue)
+                    dummy_A[i] = bind(geom_map, dummy_density(where='obj0'))(queue)
                 else:
                     dummy_A[i] = 0.0
-            for i in range(0,n3):
-                dummy_tau[i] = bind(geom_map,dummy_density(where='obj0'))(queue)
+            for i in range(0, n3):
+                dummy_tau[i] = bind(geom_map, dummy_density(where='obj0'))(queue)

             # check that the scalar density operator and representation are similar
             def vector_op_transform(vec_op_out):
                 a = sym.tangential_to_xyz(vec_op_out[:2], where='obj0')
-                return sym.join_fields(a,vec_op_out[2:])
+                return sym.join_fields(a, vec_op_out[2:])

             scalar_op = dpie0.phi_operator(phi_densities=phi_densities)
-            #vector_op = vector_op_transform(dpie0.a_operator0(A_densities=A_densities)[:-1])
-            vector_op = vector_op_transform(dpie0.a_operator(A_densities=A_densities))
+            #vector_op = vector_op_transform(dpie0.a_operator0(A_densities=A_densities)[:-1])  # noqa
+            vector_op = vector_op_transform(
+                    dpie0.a_operator(A_densities=A_densities))
             #vector_op = dpie0.a_operator2(A_densities=A_densities)[:-1]
             tau_op = dpie0.subproblem_operator(tau_densities=tau_densities)

             # evaluate operators at the dummy densities
-            scalar_op_eval = vector_from_device(queue,bind(geom_map, scalar_op)(queue, phi_densities=dummy_phi, **knl_kwargs))
-            vector_op_eval = vector_from_device(queue,bind(geom_map, vector_op)(queue, A_densities=dummy_A, **knl_kwargs))
-            tau_op_eval = vector_from_device(queue,bind(geom_map, tau_op)(queue, tau_densities=dummy_tau, **knl_kwargs))
+            scalar_op_eval = vector_from_device(queue,
+                    bind(geom_map, scalar_op)(
+                        queue, phi_densities=dummy_phi, **knl_kwargs))
+            vector_op_eval = vector_from_device(queue,
+                    bind(geom_map, vector_op)(
+                        queue, A_densities=dummy_A, **knl_kwargs))
+            tau_op_eval = vector_from_device(queue,
+                    bind(geom_map, tau_op)(
+                        queue, tau_densities=dummy_tau, **knl_kwargs))

             # define the vector operator equivalent representations
             #def vec_op_repr(A_densities, target):
-            #    return sym.join_fields(sym.n_cross(dpie0.vector_potential_rep0(A_densities=A_densities, target=target),where='obj0'),
-            #        dpie0.div_vector_potential_rep0(A_densities=A_densities, target=target)/dpie0.k)
+            #    return sym.join_fields(sym.n_cross(dpie0.vector_potential_rep0(A_densities=A_densities, target=target), where='obj0'),  # noqa: E501
+            #        dpie0.div_vector_potential_rep0(A_densities=A_densities, target=target)/dpie0.k)  # noqa: E501

             def vec_op_repr(A_densities, target):
-                return sym.join_fields(sym.n_cross(dpie0.vector_potential_rep(A_densities=A_densities, target=target),where='obj0'),
-                    dpie0.div_vector_potential_rep(A_densities=A_densities, target=target)/dpie0.k)
-
-            scalar_rep_eval = vector_from_device(queue,bind(geom_map, dpie0.scalar_potential_rep(phi_densities=phi_densities, target='tgt'))(queue, phi_densities=dummy_phi, **knl_kwargs))
-            vector_rep_eval = vector_from_device(queue,bind(geom_map, vec_op_repr(A_densities=A_densities,target='tgt'))(queue, A_densities=dummy_A, **knl_kwargs))
-            tau_rep_eval = vector_from_device(queue,bind(geom_map, dpie0.subproblem_rep(tau_densities=tau_densities,target='tgt'))(queue, tau_densities=dummy_tau, **knl_kwargs))
+            def vec_op_repr(A_densities, target):
+                return sym.join_fields(
+                        sym.n_cross(
+                            dpie0.vector_potential_rep(
+                                A_densities=A_densities,
+                                target=target),
+                            where='obj0'),
+                        dpie0.div_vector_potential_rep(
+                            A_densities=A_densities,
+                            target=target)/dpie0.k)
+
+            scalar_rep_eval = vector_from_device(queue, bind(geom_map,
+                dpie0.scalar_potential_rep(phi_densities=phi_densities,
+                    target='tgt'))(queue, phi_densities=dummy_phi,
+                        **knl_kwargs))
+            vector_rep_eval = vector_from_device(queue, bind(geom_map,
+                vec_op_repr(A_densities=A_densities, target='tgt'))(queue,
+                    A_densities=dummy_A, **knl_kwargs))
+            tau_rep_eval = vector_from_device(queue, bind(geom_map,
+                dpie0.subproblem_rep(tau_densities=tau_densities,
+                    target='tgt'))(queue, tau_densities=dummy_tau,
+                        **knl_kwargs))
+
+            axyz = sym.tangential_to_xyz(density_sym, where='obj0')

-            axyz = sym.tangential_to_xyz(density_sym,where='obj0')
             def nxcurlS0(qbx_forced_limit):
-                return sym.n_cross(sym.curl(dpie0.S(axyz.reshape(3,1),target='obj0t',qfl=qbx_forced_limit)),where='obj0')
-            test_op_err = vector_from_device(queue,bind(geom_map, 0.5*axyz + nxcurlS0("avg") - nxcurlS0(+1))(queue,density=density,**knl_kwargs))
+                return sym.n_cross(sym.curl(dpie0.S(axyz.reshape(3, 1),
+                    target='obj0t', qfl=qbx_forced_limit)), where='obj0')
+
+            test_op_err = vector_from_device(queue,
+                    bind(
+                        geom_map,
+                        0.5*axyz
+                        + nxcurlS0("avg") - nxcurlS0(+1)
+                        )(queue, density=density, **knl_kwargs))

             from sumpy.kernel import LaplaceKernel
             knl = LaplaceKernel(3)
@@ -655,15 +758,21 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):

             def nxcurlS(qbx_forced_limit):

-                return sym.n_cross(sym.curl(sym.S(
-                    knl,
-                    sym.cse(sym.tangential_to_xyz(density_sym, where='obj0'), "jxyz"),
-                    k=dpie0.k,
-                    qbx_forced_limit=qbx_forced_limit,source='obj0', target='obj0t')),where='obj0')
+                return sym.n_cross(
+                        sym.curl(sym.S(
+                            knl,
+                            sym.cse(
+                                sym.tangential_to_xyz(density_sym, where='obj0'),
+                                "jxyz"),
+                            k=dpie0.k,
+                            qbx_forced_limit=qbx_forced_limit,
+                            source='obj0', target='obj0t')),
+                        where='obj0')

             jump_identity_sym = (
                     nxcurlS(+1)
-                    - (nxcurlS("avg") + 0.5*sym.tangential_to_xyz(density_sym,where='obj0')))
+                    - (nxcurlS("avg")
+                        + 0.5*sym.tangential_to_xyz(density_sym, where='obj0')))

             bound_jump_identity = bind(geom_map, jump_identity_sym)
             jump_identity = bound_jump_identity(queue, density=density, **knl_kwargs)
@@ -671,35 +780,36 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
             err = (norm(qbx0, queue, jump_identity, np.inf))
             print("ERROR", qbx0.h_max, err)

-            # compute the error between the operator values and the representation values
-            def error_diff(u,v):
-                return np.linalg.norm(u-v,np.inf)
-            error_v = [error_diff(scalar_op_eval[0],scalar_rep_eval),
-                error_diff(vector_op_eval[0],vector_rep_eval[0]),
-                error_diff(vector_op_eval[1],vector_rep_eval[1]),
-                error_diff(vector_op_eval[2],vector_rep_eval[2]),
-                error_diff(vector_op_eval[3],vector_rep_eval[3]),
-                error_diff(tau_op_eval[0],tau_rep_eval),
-                np.linalg.norm(test_op_err[0],np.inf),
-                np.linalg.norm(test_op_err[1],np.inf),
-                np.linalg.norm(test_op_err[2],np.inf)]
+            # compute the error between the operator values and the
+            # representation values
+
+            def error_diff(u, v):
+                return np.linalg.norm(u-v, np.inf)
+            error_v = [error_diff(scalar_op_eval[0], scalar_rep_eval),
+                error_diff(vector_op_eval[0], vector_rep_eval[0]),
+                error_diff(vector_op_eval[1], vector_rep_eval[1]),
+                error_diff(vector_op_eval[2], vector_rep_eval[2]),
+                error_diff(vector_op_eval[3], vector_rep_eval[3]),
+                error_diff(tau_op_eval[0], tau_rep_eval),
+                np.linalg.norm(test_op_err[0], np.inf),
+                np.linalg.norm(test_op_err[1], np.inf),
+                np.linalg.norm(test_op_err[2], np.inf)]

             op_error.append(error_v)

         # print the resulting error results
         print("Operator Error Results:")
-        for n in range(0,len(op_error)):
-            print("Case {0}: {1}".format(n+1,op_error[n]))
+        for n in range(0, len(op_error)):
+            print("Case {0}: {1}".format(n+1, op_error[n]))

-    # #}}}
+    # }}}

+    # {{{ solve for the scattered field

-    # {{{ Solve for the scattered field
     solve_scattered_field = True
+
     if solve_scattered_field:
         loc_sign = -1 if case.is_interior else +1

         # import a bunch of stuff that will be useful
-        from pytools.convergence import EOCRecorder
         from pytential.qbx import QBXLayerPotentialSource
         from meshmode.discretization import Discretization
         from meshmode.discretization.poly_element import \
@@ -707,10 +817,8 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
         from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder

         # setup an EOC Recorder
-        eoc_rec_repr_maxwell = EOCRecorder()
-        eoc_pec_bc           = EOCRecorder()
-        eoc_rec_e            = EOCRecorder()
-        eoc_rec_h            = EOCRecorder()
+        eoc_rec_repr_maxwell = EOCRecorder()
+        eoc_pec_bc = EOCRecorder()

         def frequency_domain_gauge_condition(cpatch, A, phi, k):
             # define constants used for the computation
@@ -726,17 +834,18 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
             # return the residual for the gauge condition
             return resid_gauge_cond

-        def gauge_check(divA,phi,k):
+        def gauge_check(divA, phi, k):
             return divA - 1j*k*phi

-        # loop through the case's resolutions and compute the scattered field solution
+        # loop through the case's resolutions and compute the scattered field
+        # solution
         gauge_err = []
         maxwell_err = []
         for resolution in case.resolutions:

             # get the scattered and observation mesh
-            scat_mesh        = case.get_mesh(resolution, case.target_order)
-            observation_mesh = case.get_observation_mesh(case.target_order)
+            scat_mesh = case.get_mesh(resolution, case.target_order)
+            # observation_mesh = case.get_observation_mesh(case.target_order)

             # define the pre-scattered discretization
             pre_scat_discr = Discretization(
@@ -748,21 +857,27 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
                 pre_scat_discr, fine_order=4*case.target_order,
                 #fmm_order=False,
                 qbx_order=case.qbx_order,
-                fmm_level_to_order=SimpleExpansionOrderFinder(case.fmm_tolerance),
+                fmm_level_to_order=SimpleExpansionOrderFinder(
+                    case.fmm_tolerance),
                 fmm_backend=case.fmm_backend
                 ).with_refinement(_expansion_disturbance_tolerance=0.05)

             # define the geometry dictionary
             #geom_map = {"g0": qbx}
-            geom_map = {"obj0":qbx, "obj0t":qbx.density_discr, "scat":qbx.density_discr}
+            geom_map = {
+                "obj0": qbx,
+                "obj0t": qbx.density_discr,
+                "scat": qbx.density_discr
+                }

             # get the maximum mesh element edge length
             h_max = qbx.h_max

             # define the scattered and observation discretization
-            scat_discr = qbx.density_discr
-            obs_discr = Discretization(cl_ctx, observation_mesh,
-                    InterpolatoryQuadratureSimplexGroupFactory(case.target_order))
+            scat_discr = qbx.density_discr
+            # obs_discr = Discretization(
+            #         cl_ctx, observation_mesh,
+            #         InterpolatoryQuadratureSimplexGroupFactory(case.target_order))

             # get the incident field at the scatter and observation locations
             #inc_EM_field_scat = EHField(eval_inc_field_at(scat_discr))
@@ -772,25 +887,34 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
             # {{{ solve the system of integral equations

             inc_A = sym.make_sym_vector("inc_A", 3)
-            inc_phi = sym.make_sym_vector("inc_phi",1)
-            inc_divA = sym.make_sym_vector("inc_divA",1)
+            inc_phi = sym.make_sym_vector("inc_phi", 1)
+            inc_divA = sym.make_sym_vector("inc_divA", 1)
             inc_gradPhi = sym.make_sym_vector("inc_gradPhi", 3)

             # get the incident fields used for boundary conditions
-            (phi_inc, A_inc) = get_incident_potentials(geom_map,'scat')
-            inc_divA_scat = get_incident_divA(geom_map,'scat')
-            inc_gradPhi_scat = get_incident_gradphi(geom_map,'scat')
+            (phi_inc, A_inc) = get_incident_potentials(geom_map, 'scat')
+            inc_divA_scat = get_incident_divA(geom_map, 'scat')
+            inc_gradPhi_scat = get_incident_gradphi(geom_map, 'scat')

             # check that the boundary conditions satisfy gauge condition
-            resid = bind(geom_map,gauge_check(inc_divA, inc_phi, dpie.k))(queue,inc_divA=inc_divA_scat,inc_phi=phi_inc,**knl_kwargs)
+            # resid = bind(geom_map, gauge_check(inc_divA, inc_phi,
+            #     dpie.k))(queue, inc_divA=inc_divA_scat, inc_phi=phi_inc,
+            #     **knl_kwargs)

             # setup operators that will be solved
-            phi_op = bind(geom_map,dpie.phi_operator(phi_densities=phi_densities))
-            A_op = bind(geom_map,dpie.a_operator(A_densities=A_densities))
+            phi_op = bind(geom_map, dpie.phi_operator(phi_densities=phi_densities))
+            A_op = bind(geom_map, dpie.a_operator(A_densities=A_densities))

-            # setup the RHS with provided data so we can solve for density values across the domain
-            phi_rhs = bind(geom_map,dpie.phi_rhs(phi_inc=inc_phi,gradphi_inc=inc_gradPhi))(queue,inc_phi=phi_inc,inc_gradPhi=inc_gradPhi_scat,**knl_kwargs)
-            A_rhs = bind(geom_map,dpie.a_rhs(A_inc=inc_A,divA_inc=inc_divA))(queue,inc_A=A_inc,inc_divA=inc_divA_scat,**knl_kwargs)
+            # setup the RHS with provided data so we can solve for density
+            # values across the domain
+
+            phi_rhs = bind(geom_map, dpie.phi_rhs(phi_inc=inc_phi,
+                gradphi_inc=inc_gradPhi))(queue, inc_phi=phi_inc,
+                    inc_gradPhi=inc_gradPhi_scat, **knl_kwargs)
+            A_rhs = bind(geom_map, dpie.a_rhs(A_inc=inc_A,
+                divA_inc=inc_divA))(
+                        queue, inc_A=A_inc, inc_divA=inc_divA_scat,
+                        **knl_kwargs)

             # set GMRES settings for solving
             gmres_settings = dict(
@@ -799,26 +923,32 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
                 hard_failure=True,
                 stall_iterations=50, no_progress_factor=1.05)

-            # get the GMRES functionality
-            from pytential.solve import gmres
-
             # solve for the scalar potential densities
             gmres_result = gmres(
-                    phi_op.scipy_op(queue, "phi_densities", np.complex128, domains=dpie.get_scalar_domain_list(),**knl_kwargs),
-                    phi_rhs, **gmres_settings)
+                    phi_op.scipy_op(queue, "phi_densities",
+                        np.complex128,
+                        domains=dpie.get_scalar_domain_list(),
+                        **knl_kwargs),
+                    phi_rhs, **gmres_settings)
             phi_dens = gmres_result.solution

             # solve for the vector potential densities
             gmres_result = gmres(
-                    A_op.scipy_op(queue, "A_densities", np.complex128, domains=dpie.get_vector_domain_list(), **knl_kwargs),
+                    A_op.scipy_op(queue, "A_densities", np.complex128,
+                        domains=dpie.get_vector_domain_list(), **knl_kwargs),
                     A_rhs, **gmres_settings)
             A_dens = gmres_result.solution

             # solve sub problem for sigma densities
-            tau_op= bind(geom_map,dpie.subproblem_operator(tau_densities=tau_densities))
-            tau_rhs= bind(geom_map,dpie.subproblem_rhs(A_densities=A_densities))(queue,A_densities=A_dens,**knl_kwargs)
+            tau_op = bind(geom_map,
+                    dpie.subproblem_operator(tau_densities=tau_densities))
+            tau_rhs = bind(geom_map,
+                    dpie.subproblem_rhs(A_densities=A_densities))(queue,
+                        A_densities=A_dens, **knl_kwargs)
             gmres_result = gmres(
-                    tau_op.scipy_op(queue, "tau_densities", np.complex128, domains=dpie.get_subproblem_domain_list(), **knl_kwargs),
+                    tau_op.scipy_op(queue, "tau_densities", np.complex128,
+                        domains=dpie.get_subproblem_domain_list(),
+                        **knl_kwargs),
                     tau_rhs, **gmres_settings)
             tau_dens = gmres_result.solution
@@ -826,32 +956,50 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
             def eval_potentials(tgt):
                 tmap = geom_map
                 tmap['tgt'] = tgt
-                phi = vector_from_device(queue,bind(tmap, dpie.scalar_potential_rep(phi_densities=phi_densities,target='tgt'))(queue, phi_densities=phi_dens, **knl_kwargs))
-                Axyz = vector_from_device(queue,bind(tmap, dpie.vector_potential_rep(A_densities=A_densities,target='tgt'))(queue, A_densities=A_dens, **knl_kwargs))
-                return (phi,Axyz)
-
-            (phi,A) = eval_potentials(calc_patch_tgt)
-            gauge_residual = frequency_domain_gauge_condition(calc_patch, A, phi, case.k)
-            err = calc_patch.norm(gauge_residual,np.inf)
+                phi = vector_from_device(queue, bind(tmap,
+                    dpie.scalar_potential_rep(phi_densities=phi_densities,
+                        target='tgt'))(queue, phi_densities=phi_dens,
+                            **knl_kwargs))
+                Axyz = vector_from_device(queue, bind(tmap,
+                    dpie.vector_potential_rep(A_densities=A_densities,
+                        target='tgt'))(queue, A_densities=A_dens,
+                            **knl_kwargs))
+                return (phi, Axyz)
+
+            (phi, A) = eval_potentials(calc_patch_tgt)
+            gauge_residual = frequency_domain_gauge_condition(
+                    calc_patch, A, phi, case.k)
+            err = calc_patch.norm(gauge_residual, np.inf)
             gauge_err.append(err)

-
             # }}}

             # {{{ volume eval

-            sym_repr = dpie.scattered_volume_field(phi_densities,A_densities,tau_densities,target='tgt')
+            sym_repr = dpie.scattered_volume_field(
+                    phi_densities, A_densities, tau_densities, target='tgt')

-            def eval_repr_at(tgt):
-                map = geom_map
+            def eval_repr_at(tgt, source=None):
+                map = geom_map.copy()
+                if source:
+                    map['obj0'] = source
+                    map['obj0t'] = source.density_discr
                 map['tgt'] = tgt
-                return bind(map, sym_repr)(queue, phi_densities=phi_dens, A_densities=A_dens, tau_densities=tau_dens, **knl_kwargs)
+                return bind(map, sym_repr)(queue, phi_densities=phi_dens,
+                        A_densities=A_dens, tau_densities=tau_dens,
+                        **knl_kwargs)

-            pde_test_repr = EHField(vector_from_device(queue, eval_repr_at(calc_patch_tgt)))
+            pde_test_repr = EHField(vector_from_device(queue,
+                eval_repr_at(calc_patch_tgt)))

             maxwell_residuals = [
-                calc_patch.norm(x, np.inf) / calc_patch.norm(pde_test_repr.e, np.inf)
-                for x in frequency_domain_maxwell(calc_patch, pde_test_repr.e, pde_test_repr.h, case.k)]
+                calc_patch.norm(x, np.inf) /
+                calc_patch.norm(pde_test_repr.e, np.inf)
+                for x in frequency_domain_maxwell(
+                    calc_patch, pde_test_repr.e,
+                    pde_test_repr.h, case.k)]
+
             print("Maxwell residuals:", maxwell_residuals)

             maxwell_err.append(maxwell_residuals)
@@ -863,33 +1011,44 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):

             def scalar_pot_PEC_residual(phi, inc_phi, where=None):
                 V = dpie.scalar_potential_constants(phi_densities=phi)
-                return dpie.scalar_potential_rep(phi_densities=phi,target=where, qfl=loc_sign) + inc_phi - V[0]
+                return dpie.scalar_potential_rep(
+                        phi_densities=phi, target=where, qfl=loc_sign
+                        ) + inc_phi - V[0]

             def vector_pot_PEC_residual(a_densities, inc_a, where=None):
-                return sym.n_cross( dpie.vector_potential_rep(A_densities=a_densities, target=where, qfl=loc_sign) + inc_a, where=where)
+                return sym.n_cross(
+                        dpie.vector_potential_rep(
+                            A_densities=a_densities, target=where, qfl=loc_sign)
+                        + inc_a, where=where)

-            phi_pec_bc_resid = scalar_pot_PEC_residual(phi_densities, inc_phi, where="obj0")
-            A_pec_bc_resid = vector_pot_PEC_residual(A_densities, inc_A, where="obj0")
+            phi_pec_bc_resid = scalar_pot_PEC_residual(
+                    phi_densities, inc_phi, where="obj0")
+            A_pec_bc_resid = vector_pot_PEC_residual(
+                    A_densities, inc_A, where="obj0")

-            scalar_bc_values = bind(geom_map, phi_pec_bc_resid)(queue, phi_densities=phi_dens, inc_phi=phi_inc,**knl_kwargs)
-            vector_bc_values = bind(geom_map, A_pec_bc_resid)(queue, A_densities=A_dens, inc_A=A_inc,**knl_kwargs)
+            scalar_bc_values = bind(geom_map, phi_pec_bc_resid)(
+                    queue, phi_densities=phi_dens, inc_phi=phi_inc, **knl_kwargs)
+            vector_bc_values = bind(geom_map, A_pec_bc_resid)(
+                    queue, A_densities=A_dens, inc_A=A_inc, **knl_kwargs)

             def scat_norm(f):
-                return norm(qbx, queue, f, p=np.inf)
+                return norm(qbx, queue, f, p=np.inf)

-            scalar_bc_residual = scat_norm(scalar_bc_values) #/ scat_norm(phi_inc)
-            vector_bc_residual = scat_norm(vector_bc_values) #/ scat_norm(A_inc)
+            scalar_bc_residual = scat_norm(scalar_bc_values)  # / scat_norm(phi_inc)
+            vector_bc_residual = scat_norm(vector_bc_values)  # / scat_norm(A_inc)

-            print("Potential PEC BC residuals:", h_max, scalar_bc_residual, vector_bc_residual)
+            print(
+                    "Potential PEC BC residuals:",
+                    h_max, scalar_bc_residual, vector_bc_residual)

-            eoc_pec_bc.add_data_point(h_max, max(scalar_bc_residual, vector_bc_residual))
+            eoc_pec_bc.add_data_point(
+                    h_max, max(scalar_bc_residual, vector_bc_residual))

             # }}}

-            # {{{ check if DPIE helmholtz BCs are satisfied
+            # {{{ check if dpie helmholtz bcs are satisfied

-
-            #}}}
+            # }}}

             # {{{ visualization

@@ -897,17 +1056,16 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):
                 from meshmode.discretization.visualization import make_visualizer
                 bdry_vis = make_visualizer(queue, scat_discr, case.target_order+3)

-                bdry_normals = bind(scat_discr, sym.normal(3))(queue)\
-                        .as_vector(dtype=object)
-
                 bdry_vis.write_vtk_file("source-%s.vtu" % resolution, [
-                    ("phi", phi),
-                    ("Axyz", Axyz),
-                    ("Einc", inc_EM_field_scat.e),
-                    ("Hinc", inc_EM_field_scat.h),
-                    ("bdry_normals", bdry_normals),
-                    ("e_bc_residual", eh_bc_values[:3]),
-                    ("h_bc_residual", eh_bc_values[3]),
+                    ("phi", phi_dens),
+                    ("A_dens", A_dens),
+                    ("tau_dens", tau_dens),
+
+                    ("A_inc", A_inc),
+                    ("phi_inc", phi_inc),
+
+                    ("scalar_bc_residual", scalar_bc_residual),
+                    ("vector_bc_residual", vector_bc_residual),
                     ])

                 fplot = make_field_plotter_from_bbox(
@@ -931,16 +1089,19 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):

                 fplot_repr = EHField(vector_from_device(queue, fplot_repr))

-                fplot_inc = EHField(
-                        vector_from_device(queue, eval_inc_field_at(fplot_tgt)))
+                # fplot_inc = EHField(
+                #         vector_from_device(queue, eval_inc_field_at(fplot_tgt)))
+
+                fplot_inc = EHField(vector_from_device(queue,
+                    get_incident_plane_wave_EHField(fplot_tgt)))

                 fplot.write_vtk_file(
                         "potential-%s.vts" % resolution,
                         [
                             ("E", fplot_repr.e),
                             ("H", fplot_repr.h),
-                            ("Einc", fplot_inc.e),
-                            ("Hinc", fplot_inc.h),
+                            ("E_inc", fplot_inc.e),
+                            ("H_inc", fplot_inc.h),
                             ]
                         )

@@ -991,6 +1152,8 @@ def test_pec_dpie_extinction(ctx_getter, case, visualize=False):

         assert good

+        # }}}
+
 # }}}


diff --git a/test/test_muller.py b/test/test_muller.py
index fb23e911d32c1c720a3284004014cb83088d75d7..1516bb3c51ae9f6b7fba9e048dc15c330f828dcd 100644
--- a/test/test_muller.py
+++ b/test/test_muller.py
@@ -82,5 +82,5 @@ if __name__ == "__main__":
     if len(sys.argv) > 1:
         exec(sys.argv[1])
     else:
-        from py.test.cmdline import main
+        from pytest import main
         main([__file__])
diff --git a/test/test_scalar_int_eq.py b/test/test_scalar_int_eq.py
index 2fd80990f33878d16accad59d180515b1749b936..f10f471e7c307e752e8a0fe1715fad7c958c132d 100644
--- a/test/test_scalar_int_eq.py
+++ b/test/test_scalar_int_eq.py
@@ -817,7 +817,7 @@ if __name__ == "__main__":
     if len(sys.argv) > 1:
         exec(sys.argv[1])
     else:
-        from py.test.cmdline import main
+        from pytest import main
         main([__file__])

 # vim: fdm=marker
diff --git a/test/test_stokes.py b/test/test_stokes.py
index 1b85080fb5343c0364d4851f28e92ca11ad715a0..d8d5c821b775ccc3e8057d7f399cfc79452e5bd9 100644
--- a/test/test_stokes.py
+++ b/test/test_stokes.py
@@ -26,6 +26,7 @@ THE SOFTWARE.
 import numpy as np
 import pyopencl as cl
 import pyopencl.clmath  # noqa
+import pytest

 from meshmode.discretization import Discretization
 from meshmode.discretization.poly_element import \
@@ -270,6 +271,7 @@ def run_exterior_stokes_2d(ctx_factory, nelements,
     return qbx.h_max, l2_err


+@pytest.mark.slowtest
 def test_exterior_stokes_2d(ctx_factory, qbx_order=3):
     from pytools.convergence import EOCRecorder
     eoc_rec = EOCRecorder()
@@ -290,7 +292,7 @@ if __name__ == "__main__":
     if len(sys.argv) > 1:
         exec(sys.argv[1])
     else:
-        from py.test.cmdline import main
+        from pytest import main
         main([__file__])

 # vim: fdm=marker
diff --git a/test/test_symbolic.py b/test/test_symbolic.py
index 6894e15e1a5be78324fcae96edabc7f4d32f8420..8b2e7cb409766d522447cf2da6e1f1d81af03bec 100644
--- a/test/test_symbolic.py
+++ b/test/test_symbolic.py
@@ -177,7 +177,7 @@ if __name__ == "__main__":
     if len(sys.argv) > 1:
         exec(sys.argv[1])
     else:
-        from py.test.cmdline import main
+        from pytest import main
         main([__file__])

 # vim: fdm=marker
diff --git a/test/test_tools.py b/test/test_tools.py
index a0f2ea8e01804684065223fc0fd7a943dd5d7d01..af7740953a36088b77011aeef70e174d4f36cebf 100644
--- a/test/test_tools.py
+++ b/test/test_tools.py
@@ -101,7 +101,7 @@ if __name__ == "__main__":
     if len(sys.argv) > 1:
         exec(sys.argv[1])
     else:
-        from py.test.cmdline import main
+        from pytest import main
         main([__file__])

 # vim: fdm=marker
diff --git a/test/too_slow_test_helmholtz.py b/test/too_slow_test_helmholtz.py
index 9add5d0522b35a05852b3fb1fe9c983286f1a932..25eeb075ee2091110a6de0199938835d68bf2b2f 100644
--- a/test/too_slow_test_helmholtz.py
+++ b/test/too_slow_test_helmholtz.py
@@ -421,7 +421,7 @@ if __name__ == "__main__":
     if len(sys.argv) > 1:
         exec(sys.argv[1])
    else:
-        from py.test.cmdline import main
+        from pytest import main
         main([__file__])

 # vim: fdm=marker
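
A note on the git-revision stamping added to setup.py above: ``write_git_revision("pytential")`` runs at build/install time and writes the output of ``git rev-parse HEAD`` into ``pytential/_git_rev.py``, so an installed copy of the package can report exactly which revision it was built from. The following is only a sketch of how a ``pytential/version.py`` might pick that value up; apart from the generated ``GIT_REVISION`` name and the ``VERSION_TEXT`` name consumed by setup.py, everything here is a hypothetical illustration, not the actual file contents::

    # hypothetical pytential/version.py fragment
    VERSION = (2018, 1)
    VERSION_TEXT = ".".join(str(n) for n in VERSION)

    try:
        # _git_rev.py is generated by write_git_revision() in setup.py
        from pytential._git_rev import GIT_REVISION
    except ImportError:
        # e.g. running from a plain source checkout before installation
        GIT_REVISION = None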
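The refactored auxiliary-problem test (``test_dpie_auxiliary``) also replaces the old hand-collected ``deriv_error`` list with ``pytools.convergence.EOCRecorder``, which estimates an empirical order of convergence from (h_max, error) pairs; the closing ``assert eoc_rec.order_estimate() >= case.qbx_order - 0.5`` then enforces the expected QBX accuracy with half an order of slack. A minimal usage sketch with synthetic third-order data::

    from pytools.convergence import EOCRecorder

    eoc = EOCRecorder()
    for h in [0.1, 0.05, 0.025]:
        eoc.add_data_point(h, 2.0 * h**3)   # error decaying at third order

    print(eoc)  # tabulates the data points and the estimated orders
    assert eoc.order_estimate() >= 3 - 0.5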
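In the operator-testing block of ``test_pec_dpie_extinction``, both ``test_op_err`` (via ``nxcurlS0``) and ``jump_identity_sym`` (via ``nxcurlS``) exercise the same jump relation for the curl of the single-layer potential S_k applied to a tangential density j, as the code encodes it:

    n x curl S_k[j] |_(qfl=+1)  =  n x curl S_k[j] |_(qfl="avg")  +  j/2

so both residual expressions should tend to zero as the geometry is refined; the printed ``("ERROR", qbx0.h_max, err)`` line tracks exactly that residual against the mesh size.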
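Finally, the ``gauge_check(divA, phi, k)`` helper encodes the frequency-domain Lorenz gauge condition div A - i k phi = 0, which the incident potential data must satisfy for the DPIE solve to be meaningful. Below is a self-contained numpy sanity check of that identity for a plane-wave vector potential using the same sign convention; ``k`` and ``u`` mirror ``case.k`` and ``u_dir`` above, while the amplitude ``A0`` is a deliberately generic (non-transverse) choice so the divergence does not vanish trivially::

    import numpy as np

    k = 1.2                            # wavenumber, as in the sphere test case
    u = np.array([1.0, 0.0, 0.0])      # propagation direction
    A0 = np.array([0.5, 1.0, 1.0])     # generic amplitude with u . A0 != 0

    def vec_pot(x):
        # plane-wave vector potential A(x) = A0 exp(i k u.x)
        return A0 * np.exp(1j * k * np.dot(u, x))

    def phi(x):
        # scalar potential chosen to satisfy the Lorenz gauge:
        # div A = i k (u . A0) exp(i k u.x) = i k phi
        return np.dot(u, A0) * np.exp(1j * k * np.dot(u, x))

    def div_fd(f, x, h=1e-6):
        # central finite-difference divergence, to keep the check honest
        return sum(
            (f(x + h*e)[i] - f(x - h*e)[i]) / (2*h)
            for i, e in enumerate(np.eye(3)))

    x = np.array([0.3, -0.2, 0.7])
    # the same residual that gauge_check() forms symbolically
    resid = div_fd(vec_pot, x) - 1j * k * phi(x)
    assert abs(resid) < 1e-6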