From b24e1949eab1a8ae2a03a29229bed520c3eea1cb Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 16 Sep 2019 10:49:16 -0500 Subject: [PATCH 01/16] Add biharmonic test case --- pytential/solve.py | 5 +- test/test_biharmonic_int_eq.py | 788 +++++++++++++++++++++++++++++++++ 2 files changed, 791 insertions(+), 2 deletions(-) create mode 100644 test/test_biharmonic_int_eq.py diff --git a/pytential/solve.py b/pytential/solve.py index 82d6f68f..47d5b907 100644 --- a/pytential/solve.py +++ b/pytential/solve.py @@ -301,7 +301,7 @@ def gmres(op, rhs, restart=None, tol=None, x0=None, inner_product=None, maxiter=None, hard_failure=None, no_progress_factor=None, stall_iterations=None, - callback=None, progress=False): + callback=None, progress=False, require_monotonicity=True): """Solve a linear system Ax=b by means of GMRES with restarts. @@ -338,7 +338,8 @@ def gmres(op, rhs, restart=None, tol=None, x0=None, dot=inner_product, maxiter=maxiter, hard_failure=hard_failure, no_progress_factor=no_progress_factor, - stall_iterations=stall_iterations, callback=callback) + stall_iterations=stall_iterations, callback=callback, + require_monotonicity=require_monotonicity) return result.copy(solution=chopper.chop(result.solution)) diff --git a/test/test_biharmonic_int_eq.py b/test/test_biharmonic_int_eq.py new file mode 100644 index 00000000..1672d5ed --- /dev/null +++ b/test/test_biharmonic_int_eq.py @@ -0,0 +1,788 @@ +from __future__ import division, absolute_import, print_function + +__copyright__ = "Copyright (C) 2014 Andreas Kloeckner" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + + +import numpy as np +import numpy.linalg as la +import pyopencl as cl +import pyopencl.clmath # noqa +import pytest +from pytools import Record +from pyopencl.tools import ( # noqa + pytest_generate_tests_for_pyopencl as pytest_generate_tests) + +from functools import partial +from meshmode.mesh.generation import ( # noqa + ellipse, cloverleaf, starfish, drop, n_gon, qbx_peanut, WobblyCircle, + make_curve_mesh) +from meshmode.discretization.visualization import make_visualizer +from sumpy.symbolic import USE_SYMENGINE +from pytential import bind, sym +from pytential.qbx import QBXTargetAssociationFailedException + +import logging +logger = logging.getLogger(__name__) + +circle = partial(ellipse, 1) + +try: + import matplotlib.pyplot as pt +except ImportError: + pass + + +def make_circular_point_group(ambient_dim, npoints, radius, + center=np.array([0., 0.]), func=lambda x: x): + t = func(np.linspace(0, 1, npoints, endpoint=False)) * (2 * np.pi) + center = np.asarray(center) + result = np.zeros((ambient_dim, npoints)) + result[:2, :] = center[:, np.newaxis] + radius*np.vstack((np.cos(t), np.sin(t))) + return result + + +# {{{ test cases + +class IntEqTestCase: + + @property + def default_helmholtz_k(self): + raise NotImplementedError + + @property + def name(self): + raise NotImplementedError + + @property + def qbx_order(self): + raise NotImplementedError + + @property + def target_order(self): + raise NotImplementedError + + def __init__(self, helmholtz_k, bc_type, prob_side): + """ + :arg prob_side: may be -1, +1, or ``'scat'`` for a scattering problem + """ + + if helmholtz_k is None: + helmholtz_k = self.default_helmholtz_k + + self.helmholtz_k = helmholtz_k + self.bc_type = bc_type + self.prob_side = prob_side + + @property + def k(self): + return self.helmholtz_k + + def __str__(self): + return ("name: %s, bc_type: %s, prob_side: %s, " + "helmholtz_k: %s, qbx_order: %d, target_order: %d" + % (self.name, self.bc_type, self.prob_side, self.helmholtz_k, + self.qbx_order, self.target_order)) + + fmm_backend = "sumpy" + gmres_tol = 1e-12 + + +class CurveIntEqTestCase(IntEqTestCase): + resolutions = [40, 50, 60] + + def curve_func(self, *args, **kwargs): + raise NotImplementedError + + def get_mesh(self, resolution, target_order): + return make_curve_mesh( + self.curve_func, + np.linspace(0, 1, resolution+1), + target_order) + + use_refinement = True + + inner_radius = 0.1 + outer_radius = 2 + + qbx_order = 5 + target_order = qbx_order + fmm_backend = None + + check_tangential_deriv = True + check_gradient = False + + +class EllipseIntEqTestCase(CurveIntEqTestCase): + name = "3-to-1 ellipse" + + def curve_func(self, x): + return ellipse(3, x) + + +class Helmholtz3DIntEqTestCase(IntEqTestCase): + fmm_backend = "fmmlib" + use_refinement = False + + @property + def target_order(self): + return self.qbx_order + + check_tangential_deriv = False + + gmres_tol = 1e-7 + + +class EllipsoidIntEqTestCase(Helmholtz3DIntEqTestCase): + resolutions = [2, 0.8] + name = "ellipsoid" + + def get_mesh(self, resolution, target_order): + from meshmode.mesh.io import generate_gmsh, FileSource + mesh = generate_gmsh( + FileSource("ellipsoid.step"), 2, order=2, + other_options=[ + "-string", + "Mesh.CharacteristicLengthMax = %g;" % resolution]) + + from meshmode.mesh.processing import perform_flips + # Flip elements--gmsh generates inside-out geometry. 
+ return perform_flips(mesh, np.ones(mesh.nelements)) + + qbx_order = 5 + fmm_order = 13 + + inner_radius = 0.4 + outer_radius = 5 + + check_gradient = True + + +class SphereIntEqTestCase(IntEqTestCase): + resolutions = [1, 2] + name = "sphere" + + def get_mesh(self, resolution, target_order): + from meshmode.mesh.generation import generate_icosphere + from meshmode.mesh.refinement import refine_uniformly + mesh = refine_uniformly( + generate_icosphere(1, target_order), + resolution) + + return mesh + + fmm_backend = "fmmlib" + use_refinement = False + + fmm_tol = 1e-4 + + inner_radius = 0.4 + outer_radius = 5 + + qbx_order = 5 + target_order = 8 + check_gradient = False + check_tangential_deriv = False + + gmres_tol = 1e-7 + + +class MergedCubesIntEqTestCase(Helmholtz3DIntEqTestCase): + resolutions = [1.4] + name = "merged-cubes" + + def get_mesh(self, resolution, target_order): + from meshmode.mesh.io import generate_gmsh, FileSource + mesh = generate_gmsh( + FileSource("merged-cubes.step"), 2, order=2, + other_options=[ + "-string", + "Mesh.CharacteristicLengthMax = %g;" % resolution]) + + from meshmode.mesh.processing import perform_flips + # Flip elements--gmsh generates inside-out geometry. + mesh = perform_flips(mesh, np.ones(mesh.nelements)) + + return mesh + + use_refinement = True + + inner_radius = 0.4 + outer_radius = 12 + + +class ManyEllipsoidIntEqTestCase(Helmholtz3DIntEqTestCase): + resolutions = [2, 1] + name = "ellipsoid" + + nx = 2 + ny = 2 + nz = 2 + + def get_mesh(self, resolution, target_order): + from meshmode.mesh.io import generate_gmsh, FileSource + base_mesh = generate_gmsh( + FileSource("ellipsoid.step"), 2, order=2, + other_options=[ + "-string", + "Mesh.CharacteristicLengthMax = %g;" % resolution]) + + from meshmode.mesh.processing import perform_flips + # Flip elements--gmsh generates inside-out geometry. + base_mesh = perform_flips(base_mesh, np.ones(base_mesh.nelements)) + + from meshmode.mesh.processing import affine_map, merge_disjoint_meshes + from meshmode.mesh.tools import rand_rotation_matrix + pitch = 10 + meshes = [ + affine_map( + base_mesh, + A=rand_rotation_matrix(3), + b=pitch*np.array([ + (ix-self.nx//2), + (iy-self.ny//2), + (iz-self.ny//2)])) + for ix in range(self.nx) + for iy in range(self.ny) + for iz in range(self.nz) + ] + + mesh = merge_disjoint_meshes(meshes, single_group=True) + return mesh + + inner_radius = 0.4 + # This should sit in the area just outside the middle ellipsoid + outer_radius = 5 + + +class ElliptiplaneIntEqTestCase(IntEqTestCase): + name = "elliptiplane" + + resolutions = [0.1] + + fmm_backend = "fmmlib" + use_refinement = True + + qbx_order = 3 + fmm_tol = 1e-4 + target_order = qbx_order + check_gradient = False + check_tangential_deriv = False + + # We're only expecting three digits based on FMM settings. Who are we + # kidding? + gmres_tol = 1e-5 + + # to match the scheme given in the GIGAQBX3D paper + box_extent_norm = "l2" + from_sep_smaller_crit = "static_l2" + + def get_mesh(self, resolution, target_order): + from pytools import download_from_web_if_not_present + + download_from_web_if_not_present( + "https://raw.githubusercontent.com/inducer/geometries/master/" + "surface-3d/elliptiplane.brep") + + from meshmode.mesh.io import generate_gmsh, FileSource + mesh = generate_gmsh( + FileSource("elliptiplane.brep"), 2, order=2, + other_options=[ + "-string", + "Mesh.CharacteristicLengthMax = %g;" % resolution]) + + # now centered at origin and extends to -1,1 + + # Flip elements--gmsh generates inside-out geometry. 
+ from meshmode.mesh.processing import perform_flips + return perform_flips(mesh, np.ones(mesh.nelements)) + + inner_radius = 0.2 + outer_radius = 12 # was '-13' in some large-scale run (?) + + +class BetterplaneIntEqTestCase(IntEqTestCase): + name = "betterplane" + + default_helmholtz_k = 20 + resolutions = [0.2] + # refine_on_helmholtz_k = False + + fmm_backend = "fmmlib" + use_refinement = True + + qbx_order = 3 + fmm_tol = 1e-4 + target_order = 6 + check_gradient = False + check_tangential_deriv = False + + visualize_geometry = True + + #scaled_max_curvature_threshold = 1 + expansion_disturbance_tolerance = 0.3 + + # We're only expecting three digits based on FMM settings. Who are we + # kidding? + gmres_tol = 1e-5 + + vis_grid_spacing = (0.025, 0.2, 0.025) + vis_extend_factor = 0.2 + + def get_mesh(self, resolution, target_order): + from pytools import download_from_web_if_not_present + + download_from_web_if_not_present( + "https://raw.githubusercontent.com/inducer/geometries/a869fc3/" + "surface-3d/betterplane.brep") + + from meshmode.mesh.io import generate_gmsh, ScriptWithFilesSource + mesh = generate_gmsh( + ScriptWithFilesSource(""" + Merge "betterplane.brep"; + + Mesh.CharacteristicLengthMax = %(lcmax)f; + Mesh.ElementOrder = 2; + Mesh.CharacteristicLengthExtendFromBoundary = 0; + + // 2D mesh optimization + // Mesh.Lloyd = 1; + + l_superfine() = Unique(Abs(Boundary{ Surface{ + 27, 25, 17, 13, 18 }; })); + l_fine() = Unique(Abs(Boundary{ Surface{ 2, 6, 7}; })); + l_coarse() = Unique(Abs(Boundary{ Surface{ 14, 16 }; })); + + // p() = Unique(Abs(Boundary{ Line{l_fine()}; })); + // Characteristic Length{p()} = 0.05; + + Field[1] = Attractor; + Field[1].NNodesByEdge = 100; + Field[1].EdgesList = {l_superfine()}; + + Field[2] = Threshold; + Field[2].IField = 1; + Field[2].LcMin = 0.075; + Field[2].LcMax = %(lcmax)f; + Field[2].DistMin = 0.1; + Field[2].DistMax = 0.4; + + Field[3] = Attractor; + Field[3].NNodesByEdge = 100; + Field[3].EdgesList = {l_fine()}; + + Field[4] = Threshold; + Field[4].IField = 3; + Field[4].LcMin = 0.1; + Field[4].LcMax = %(lcmax)f; + Field[4].DistMin = 0.15; + Field[4].DistMax = 0.4; + + Field[5] = Attractor; + Field[5].NNodesByEdge = 100; + Field[5].EdgesList = {l_coarse()}; + + Field[6] = Threshold; + Field[6].IField = 5; + Field[6].LcMin = 0.15; + Field[6].LcMax = %(lcmax)f; + Field[6].DistMin = 0.2; + Field[6].DistMax = 0.4; + + Field[7] = Min; + Field[7].FieldsList = {2, 4, 6}; + + Background Field = 7; + """ % { + "lcmax": resolution, + }, ["betterplane.brep"]), 2) + + # Flip elements--gmsh generates inside-out geometry. 
+ from meshmode.mesh.processing import perform_flips + return perform_flips(mesh, np.ones(mesh.nelements)) + + inner_radius = 0.2 + outer_radius = 15 + +# }}} + + +# {{{ test backend +# }}} + + +ctx_factory = cl._csc +case = EllipseIntEqTestCase(helmholtz_k=0, bc_type="clamped_plate", + prob_side=-1) +visualize=False + + +logging.basicConfig(level=logging.INFO) + +cl_ctx = ctx_factory() +queue = cl.CommandQueue(cl_ctx) +resolution = 30 + +if USE_SYMENGINE and case.fmm_backend is None: + pytest.skip("https://gitlab.tiker.net/inducer/sumpy/issues/25") + +# prevent cache 'splosion +from sympy.core.cache import clear_cache +clear_cache() + + +mesh = case.get_mesh(resolution, case.target_order) +print("%d elements" % mesh.nelements) + +from pytential.qbx import QBXLayerPotentialSource +from meshmode.discretization import Discretization +from meshmode.discretization.poly_element import \ + InterpolatoryQuadratureSimplexGroupFactory +pre_density_discr = Discretization( + cl_ctx, mesh, + InterpolatoryQuadratureSimplexGroupFactory(case.target_order)) + +source_order = 4*case.target_order + +refiner_extra_kwargs = {} + +qbx_lpot_kwargs = {} +if case.fmm_backend is None: + qbx_lpot_kwargs["fmm_order"] = False +else: + if hasattr(case, "fmm_tol"): + from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder + qbx_lpot_kwargs["fmm_level_to_order"] = SimpleExpansionOrderFinder( + case.fmm_tol) + + elif hasattr(case, "fmm_order"): + qbx_lpot_kwargs["fmm_order"] = case.fmm_order + else: + qbx_lpot_kwargs["fmm_order"] = case.qbx_order + 5 + +qbx = QBXLayerPotentialSource( + pre_density_discr, + fine_order=source_order, + qbx_order=case.qbx_order, + + _box_extent_norm=getattr(case, "box_extent_norm", None), + _from_sep_smaller_crit=getattr(case, "from_sep_smaller_crit", None), + _from_sep_smaller_min_nsources_cumul=30, + fmm_backend=case.fmm_backend, **qbx_lpot_kwargs) + +if case.use_refinement: + if case.k != 0 and getattr(case, "refine_on_helmholtz_k", True): + refiner_extra_kwargs["kernel_length_scale"] = 5/case.k + + if hasattr(case, "scaled_max_curvature_threshold"): + refiner_extra_kwargs["_scaled_max_curvature_threshold"] = \ + case.scaled_max_curvature_threshold + + if hasattr(case, "expansion_disturbance_tolerance"): + refiner_extra_kwargs["_expansion_disturbance_tolerance"] = \ + case.expansion_disturbance_tolerance + + if hasattr(case, "refinement_maxiter"): + refiner_extra_kwargs["maxiter"] = case.refinement_maxiter + + #refiner_extra_kwargs["visualize"] = True + + print("%d elements before refinement" % pre_density_discr.mesh.nelements) + qbx, _ = qbx.with_refinement(**refiner_extra_kwargs) + print("%d stage-1 elements after refinement" + % qbx.density_discr.mesh.nelements) + print("%d stage-2 elements after refinement" + % qbx.stage2_density_discr.mesh.nelements) + print("quad stage-2 elements have %d nodes" + % qbx.quad_stage2_density_discr.groups[0].nunit_nodes) + +density_discr = qbx.density_discr + +if hasattr(case, "visualize_geometry") and case.visualize_geometry: + bdry_normals = bind( + density_discr, sym.normal(mesh.ambient_dim) + )(queue).as_vector(dtype=object) + + bdry_vis = make_visualizer(queue, density_discr, case.target_order) + bdry_vis.write_vtk_file("geometry.vtu", [ + ("normals", bdry_normals) + ]) + +# {{{ plot geometry + +if 0: + if mesh.ambient_dim == 2: + # show geometry, centers, normals + nodes_h = density_discr.nodes().get(queue=queue) + pt.plot(nodes_h[0], nodes_h[1], "x-") + normal = bind(density_discr, sym.normal(2))(queue).as_vector(np.object) + 
pt.quiver(nodes_h[0], nodes_h[1], + normal[0].get(queue), normal[1].get(queue)) + pt.gca().set_aspect("equal") + pt.show() + + elif mesh.ambient_dim == 3: + bdry_vis = make_visualizer(queue, density_discr, case.target_order+3) + + bdry_normals = bind(density_discr, sym.normal(3))(queue)\ + .as_vector(dtype=object) + + bdry_vis.write_vtk_file("pre-solve-source-%s.vtu" % resolution, [ + ("bdry_normals", bdry_normals), + ]) + + else: + raise ValueError("invalid mesh dim") + +# }}} + +# {{{ set up operator + +from pytential.symbolic.pde.scalar import ( + DirichletOperator, + NeumannOperator) + +from sumpy.kernel import LaplaceKernel, HelmholtzKernel, BiharmonicKernel + +knl = BiharmonicKernel(mesh.ambient_dim) +knl_kwargs = {} +concrete_knl_kwargs = {} + +sigma_sym = sym.make_sym_vector("sigma", 2) + +if knl.is_complex_valued: + dtype = np.complex128 +else: + dtype = np.float64 + +loc_sign = +1 if case.prob_side in [+1, "scat"] else -1 + +if case.bc_type == "dirichlet": + op = DirichletOperator(knl, loc_sign, use_l2_weighting=True, + kernel_arguments=knl_kwargs) +elif case.bc_type == "neumann": + op = NeumannOperator(knl, loc_sign, use_l2_weighting=True, + use_improved_operator=False, kernel_arguments=knl_kwargs) +elif case.bc_type == "clamped_plate": + pass +else: + assert False + +#op_u = op.operator(sym.var("u")) + +# }}} + +# {{{ set up test data + +if case.prob_side == -1: + test_src_geo_radius = case.outer_radius + test_tgt_geo_radius = case.inner_radius +elif case.prob_side == +1: + test_src_geo_radius = case.inner_radius + test_tgt_geo_radius = case.outer_radius +elif case.prob_side == "scat": + test_src_geo_radius = case.outer_radius + test_tgt_geo_radius = case.outer_radius +else: + raise ValueError("unknown problem_side") + +point_sources = make_circular_point_group( + mesh.ambient_dim, 10, test_src_geo_radius, + func=lambda x: x**1.5) +test_targets = make_circular_point_group( + mesh.ambient_dim, 20, test_tgt_geo_radius) + +np.random.seed(22) +source_charges = np.random.randn(point_sources.shape[1]) +source_charges[-1] = -np.sum(source_charges[:-1]) +source_charges = source_charges.astype(dtype) +assert np.sum(source_charges) < 1e-15 + +source_charges_dev = cl.array.to_device(queue, source_charges) + +# }}} + +# {{{ establish BCs + +from pytential.source import PointPotentialSource +from pytential.target import PointsTarget + +point_source = PointPotentialSource(cl_ctx, point_sources) + +pot_src = sym.IntG( + # FIXME: qbx_forced_limit--really? 
+ knl, sym.var("charges"), qbx_forced_limit=None, **knl_kwargs) + +test_direct = bind((point_source, PointsTarget(test_targets)), pot_src)( + queue, charges=source_charges_dev, **concrete_knl_kwargs) + +if case.bc_type != "clamped_plate": + raise RuntimeError("Not Implemented") +bc1 = bind((point_source, density_discr), pot_src)( + queue, charges=source_charges_dev, **concrete_knl_kwargs) +bc2 = bind( + (point_source, density_discr), + sym.normal_derivative( + qbx.ambient_dim, pot_src, where=sym.DEFAULT_TARGET) + )(queue, charges=source_charges_dev, **concrete_knl_kwargs) + + +dv = sym.normal(2).as_vector() +dt = [-dv[1], dv[0]] + +from sumpy.kernel import DirectionalSourceDerivative, DirectionalTargetDerivative, AxisTargetDerivative +def dv(knl): + return DirectionalSourceDerivative(knl, "normal_dir") +def dt(knl): + return DirectionalSourceDerivative(knl, "tangent_dir") + +normal_dir = sym.normal(2).as_vector() +tangent_dir = np.array([-normal_dir[1], normal_dir[0]]) + +def curvature(ambient_dim, dim=None, where=None): + from pymbolic.primitives import make_common_subexpression as cse + + if dim is None: + dim = ambient_dim - 1 + + xp, yp = sym.parametrization_derivative_matrix(ambient_dim, dim, where) + + xpp, ypp = cse( + sym.reference_jacobian([xp[0], yp[0]], ambient_dim, dim, where), + "p2d_matrix", sym.cse_scope.DISCRETIZATION) + + return xp[0]*ypp[0] - yp[0]*xpp[0] + + +if 1: + + k1 = sym.S(dv(dv(dv(knl))), sigma_sym[0], kernel_arguments={"normal_dir": normal_dir}) + \ + 3*sym.S(dv(dt(dt(knl))), sigma_sym[0], kernel_arguments={"normal_dir": normal_dir, "tangent_dir": tangent_dir}) + + k2 = -sym.S(dv(dv(knl)), sigma_sym[1], kernel_arguments={"normal_dir": normal_dir}) + \ + sym.S(dt(dt(knl)), sigma_sym[1], kernel_arguments={"tangent_dir": tangent_dir}) + + k3 = sym.normal_derivative(2, k1) + k4 = sym.normal_derivative(2, k2) + + int1 = sigma_sym[0]/2 + k1 + k2 + int2 = -curvature(qbx.ambient_dim)*sigma_sym[0] + sigma_sym[1]/2 + k3 + k4 + + bound_op = bind(qbx, np.array([int1, int2])) + +else: + # Only k11 + sigma = sym.var("sigma") + k1 = sym.S(dv(dv(dv(knl))), sigma, kernel_arguments={"normal_dir": normal_dir}) + \ + 3*sym.S(dv(dt(dt(knl))), sigma, kernel_arguments={"normal_dir": normal_dir, "tangent_dir": tangent_dir}) + + bound_op = bind(qbx, k1) + +# }}} + +# {{{ solve + +bvp_rhs = bind(qbx, sym.make_sym_vector("bc",qbx.ambient_dim))(queue, bc=[bc1,bc2]) + +try: + from pytential.solve import gmres + gmres_result = gmres( + bound_op.scipy_op(queue, "sigma", dtype, **concrete_knl_kwargs), + bvp_rhs, + tol=case.gmres_tol, + progress=True, + hard_failure=True, + stall_iterations=50, no_progress_factor=1.05) +except QBXTargetAssociationFailedException as e: + bdry_vis = make_visualizer(queue, density_discr, case.target_order+3) + + bdry_vis.write_vtk_file("failed-targets-%s.vtu" % resolution, [ + ("failed_targets", e.failed_target_flags), + ]) + raise + +print("gmres state:", gmres_result.state) +weighted_u = gmres_result.solution + + +points_target = PointsTarget(test_targets) + +_k1 = sym.S(dv(dv(dv(knl))), sigma_sym[0], kernel_arguments={"normal_dir": normal_dir}, qbx_forced_limit=None) + \ + 3*sym.S(dv(dt(dt(knl))), sigma_sym[0], kernel_arguments={"normal_dir": normal_dir, "tangent_dir": tangent_dir}, qbx_forced_limit=None) + +_k2 = -sym.S(dv(dv(knl)), sigma_sym[1], kernel_arguments={"normal_dir": normal_dir}, qbx_forced_limit=None) + \ + sym.S(dt(dt(knl)), sigma_sym[1], kernel_arguments={"tangent_dir": tangent_dir}, qbx_forced_limit=None) + + +# {{{ build matrix for spectrum 
check + +if 0: + from sumpy.tools import build_matrix + mat = build_matrix( + bound_op.scipy_op( + queue, arg_name="sigma", dtype=dtype, k=case.k)) + w, v = la.eig(mat) + pt.scatter(w.real, w.imag) + pt.show() + if 0: + pt.imshow(np.log10(1e-20+np.abs(mat))) + pt.colorbar() + pt.show() + + #assert abs(s[-1]) < 1e-13, "h + #assert abs(s[-2]) > 1e-7 + #from pudb import set_trace; set_trace() + +# }}} + + # {{{ error check + +points_target = PointsTarget(test_targets) +bound_tgt_op = bind((qbx, points_target), _k1 + _k2) + +test_via_bdry = bound_tgt_op(queue, sigma=weighted_u, k=case.k) + +err = test_via_bdry - test_direct + +err = err.get() +test_direct = test_direct.get() +test_via_bdry = test_via_bdry.get() + +# {{{ remove effect of net source charge + +if case.k == 0 and case.bc_type == "neumann" and loc_sign == -1: + + # remove constant offset in interior Laplace Neumann error + tgt_ones = np.ones_like(test_direct) + tgt_ones = tgt_ones/la.norm(tgt_ones) + err = err - np.vdot(tgt_ones, err)*tgt_ones + +# }}} + +rel_err_2 = la.norm(err)/la.norm(test_direct) +rel_err_inf = la.norm(err, np.inf)/la.norm(test_direct, np.inf) + +# }}} + +print("rel_err_2: %g rel_err_inf: %g" % (rel_err_2, rel_err_inf)) + + -- GitLab From dd8f52ed728ffd2300ed4b1e7935703c8345ade0 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 25 Nov 2019 01:00:48 -0600 Subject: [PATCH 02/16] Add test for biharmonic --- pytential/symbolic/pde/scalar.py | 53 ++ test/test_biharmonic_int_eq.py | 878 ++++++++++++------------------- test/test_scalar_int_eq.py | 120 +++-- 3 files changed, 456 insertions(+), 595 deletions(-) diff --git a/pytential/symbolic/pde/scalar.py b/pytential/symbolic/pde/scalar.py index 840d8044..865a7f5b 100644 --- a/pytential/symbolic/pde/scalar.py +++ b/pytential/symbolic/pde/scalar.py @@ -33,6 +33,7 @@ from pytential import sym from pytential.symbolic.primitives import ( cse, sqrt_jac_q_weight, QWeight, area_element) +from sumpy.kernel import DirectionalSourceDerivative, DirectionalTargetDerivative import numpy as np # noqa @@ -304,6 +305,58 @@ class NeumannOperator(L2WeightedPDEOperator): + ones_contribution )) + +class BiharmonicClampedPlateOperator: + + def __init__(self, knl): + self.knl = knl + + def prepare_rhs(self, b): + return sym.make_sym_vector("bc", 2) + + def representation(self, + u, map_potentials=None, qbx_forced_limit=None): + + if map_potentials is None: + def map_potentials(x): # pylint:disable=function-redefined + return x + + def dv(knl): + return DirectionalSourceDerivative(knl, "normal_dir") + + def dt(knl): + return DirectionalSourceDerivative(knl, "tangent_dir") + + sigma_sym = sym.make_sym_vector(str(u), 2) + normal_dir = sym.normal(2).as_vector() + tangent_dir = np.array([-normal_dir[1], normal_dir[0]]) + + k1 = sym.S(dv(dv(dv(self.knl))), sigma_sym[0], + kernel_arguments={"normal_dir": normal_dir}, + qbx_forced_limit=qbx_forced_limit) + \ + 3*sym.S(dv(dt(dt(self.knl))), sigma_sym[0], + kernel_arguments={"normal_dir": normal_dir, + "tangent_dir": tangent_dir}, + qbx_forced_limit=qbx_forced_limit) + + k2 = -sym.S(dv(dv(self.knl)), sigma_sym[1], + kernel_arguments={"normal_dir": normal_dir}, + qbx_forced_limit=qbx_forced_limit) + \ + sym.S(dt(dt(self.knl)), sigma_sym[1], + kernel_arguments={"tangent_dir": tangent_dir}, + qbx_forced_limit=qbx_forced_limit) + + return map_potentials(k1 + k2) + + + def operator(self, u): + sigma_sym = sym.make_sym_vector(str(u), 2) + rep = self.representation(u, qbx_forced_limit='avg') + rep_diff = sym.normal_derivative(2, rep) + int_eq1 = 
sigma_sym[0]/2 + rep + int_eq2 = -sym.mean_curvature(2)*sigma_sym[0] + sigma_sym[1]/2 + rep_diff + return np.array([int_eq1, int_eq2]) + # }}} diff --git a/test/test_biharmonic_int_eq.py b/test/test_biharmonic_int_eq.py index 1672d5ed..29edb07e 100644 --- a/test/test_biharmonic_int_eq.py +++ b/test/test_biharmonic_int_eq.py @@ -21,8 +21,6 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ - - import numpy as np import numpy.linalg as la import pyopencl as cl @@ -38,6 +36,8 @@ from meshmode.mesh.generation import ( # noqa make_curve_mesh) from meshmode.discretization.visualization import make_visualizer from sumpy.symbolic import USE_SYMENGINE +from sumpy.tools import build_matrix +from sumpy.visualization import FieldPlotter from pytential import bind, sym from pytential.qbx import QBXTargetAssociationFailedException @@ -125,664 +125,448 @@ class CurveIntEqTestCase(IntEqTestCase): outer_radius = 2 qbx_order = 5 - target_order = qbx_order - fmm_backend = None + target_order = 5 + fmm_order = 15 + fmm_backend = "sumpy" - check_tangential_deriv = True - check_gradient = False class EllipseIntEqTestCase(CurveIntEqTestCase): name = "3-to-1 ellipse" - def curve_func(self, x): - return ellipse(3, x) - + def __init__(self, ratio=3, *args, **kwargs): + super(EllipseIntEqTestCase, self).__init__(*args, **kwargs) + self.ratio = ratio -class Helmholtz3DIntEqTestCase(IntEqTestCase): - fmm_backend = "fmmlib" - use_refinement = False + def curve_func(self, x): + return ellipse(self.ratio, x) - @property - def target_order(self): - return self.qbx_order +# }}} - check_tangential_deriv = False - gmres_tol = 1e-7 +# {{{ test backend +# }}} -class EllipsoidIntEqTestCase(Helmholtz3DIntEqTestCase): - resolutions = [2, 0.8] - name = "ellipsoid" +ctx_factory = cl._csc +case = EllipseIntEqTestCase(helmholtz_k=0, bc_type="clamped_plate", + prob_side=-1, ratio=3) +visualize=False - def get_mesh(self, resolution, target_order): - from meshmode.mesh.io import generate_gmsh, FileSource - mesh = generate_gmsh( - FileSource("ellipsoid.step"), 2, order=2, - other_options=[ - "-string", - "Mesh.CharacteristicLengthMax = %g;" % resolution]) - from meshmode.mesh.processing import perform_flips - # Flip elements--gmsh generates inside-out geometry. 
- return perform_flips(mesh, np.ones(mesh.nelements)) +logging.basicConfig(level=logging.INFO) - qbx_order = 5 - fmm_order = 13 +cl_ctx = ctx_factory() +queue = cl.CommandQueue(cl_ctx) +resolution = 30 +max_err = [] +x = [] +for resolution in [10]: + x.append(resolution) - inner_radius = 0.4 - outer_radius = 5 + if USE_SYMENGINE and case.fmm_backend is None: + pytest.skip("https://gitlab.tiker.net/inducer/sumpy/issues/25") - check_gradient = True + # prevent cache 'splosion + from sympy.core.cache import clear_cache + clear_cache() -class SphereIntEqTestCase(IntEqTestCase): - resolutions = [1, 2] - name = "sphere" + mesh = case.get_mesh(resolution, case.target_order) + print("%d elements" % mesh.nelements) - def get_mesh(self, resolution, target_order): - from meshmode.mesh.generation import generate_icosphere - from meshmode.mesh.refinement import refine_uniformly - mesh = refine_uniformly( - generate_icosphere(1, target_order), - resolution) + from pytential.qbx import QBXLayerPotentialSource + from meshmode.discretization import Discretization + from meshmode.discretization.poly_element import \ + InterpolatoryQuadratureSimplexGroupFactory + pre_density_discr = Discretization( + cl_ctx, mesh, + InterpolatoryQuadratureSimplexGroupFactory(case.target_order)) - return mesh + source_order = 4*case.target_order - fmm_backend = "fmmlib" - use_refinement = False + refiner_extra_kwargs = {} - fmm_tol = 1e-4 + qbx_lpot_kwargs = {} + if case.fmm_backend is None: + qbx_lpot_kwargs["fmm_order"] = False + else: + if hasattr(case, "fmm_tol"): + from sumpy.expansion.flevel_to_order import SimpleExpansionOrderFinder + qbx_lpot_kwargs["fmm_level_to_order"] = SimpleExpansionOrderFinder( + case.fmm_tol) + + elif hasattr(case, "fmm_order"): + qbx_lpot_kwargs["fmm_order"] = case.fmm_order + else: + qbx_lpot_kwargs["fmm_order"] = case.qbx_order + 5 + + case.qbx_order = 5 + + qbx = QBXLayerPotentialSource( + pre_density_discr, + fine_order=source_order, + qbx_order=case.qbx_order, + target_association_tolerance=0.05, + _box_extent_norm=getattr(case, "box_extent_norm", None), + _from_sep_smaller_crit=getattr(case, "from_sep_smaller_crit", None), + _from_sep_smaller_min_nsources_cumul=30, + fmm_backend=case.fmm_backend, **qbx_lpot_kwargs) + + if case.use_refinement: + if case.k != 0 and getattr(case, "refine_on_helmholtz_k", True): + refiner_extra_kwargs["kernel_length_scale"] = 5/case.k + + if hasattr(case, "scaled_max_curvature_threshold"): + refiner_extra_kwargs["_scaled_max_curvature_threshold"] = \ + case.scaled_max_curvature_threshold + + if hasattr(case, "expansion_disturbance_tolerance"): + refiner_extra_kwargs["_expansion_disturbance_tolerance"] = \ + case.expansion_disturbance_tolerance + + if hasattr(case, "refinement_maxiter"): + refiner_extra_kwargs["maxiter"] = case.refinement_maxiter + + #refiner_extra_kwargs["visualize"] = True + + print("%d elements before refinement" % pre_density_discr.mesh.nelements) + qbx, _ = qbx.with_refinement(**refiner_extra_kwargs) + print("%d stage-1 elements after refinement" + % qbx.density_discr.mesh.nelements) + print("%d stage-2 elements after refinement" + % qbx.stage2_density_discr.mesh.nelements) + print("quad stage-2 elements have %d nodes" + % qbx.quad_stage2_density_discr.groups[0].nunit_nodes) + + density_discr = qbx.density_discr + + if hasattr(case, "visualize_geometry") and case.visualize_geometry: + bdry_normals = bind( + density_discr, sym.normal(mesh.ambient_dim) + )(queue).as_vector(dtype=object) + + bdry_vis = make_visualizer(queue, density_discr, 
case.target_order) + bdry_vis.write_vtk_file("geometry.vtu", [ + ("normals", bdry_normals) + ]) - inner_radius = 0.4 - outer_radius = 5 + # {{{ plot geometry - qbx_order = 5 - target_order = 8 - check_gradient = False - check_tangential_deriv = False + if 0: + if mesh.ambient_dim == 2: + # show geometry, centers, normals + nodes_h = density_discr.nodes().get(queue=queue) + pt.plot(nodes_h[0], nodes_h[1], "x-") + normal = bind(density_discr, sym.normal(2))(queue).as_vector(np.object) + pt.quiver(nodes_h[0], nodes_h[1], + normal[0].get(queue), normal[1].get(queue)) + pt.gca().set_aspect("equal") + pt.show() - gmres_tol = 1e-7 + elif mesh.ambient_dim == 3: + bdry_vis = make_visualizer(queue, density_discr, case.target_order+3) + bdry_normals = bind(density_discr, sym.normal(3))(queue)\ + .as_vector(dtype=object) -class MergedCubesIntEqTestCase(Helmholtz3DIntEqTestCase): - resolutions = [1.4] - name = "merged-cubes" + bdry_vis.write_vtk_file("pre-solve-source-%s.vtu" % resolution, [ + ("bdry_normals", bdry_normals), + ]) - def get_mesh(self, resolution, target_order): - from meshmode.mesh.io import generate_gmsh, FileSource - mesh = generate_gmsh( - FileSource("merged-cubes.step"), 2, order=2, - other_options=[ - "-string", - "Mesh.CharacteristicLengthMax = %g;" % resolution]) + else: + raise ValueError("invalid mesh dim") - from meshmode.mesh.processing import perform_flips - # Flip elements--gmsh generates inside-out geometry. - mesh = perform_flips(mesh, np.ones(mesh.nelements)) + # }}} - return mesh + # {{{ set up operator - use_refinement = True + from pytential.symbolic.pde.scalar import ( + DirichletOperator, + NeumannOperator, + BiharmonicClampedPlateOperator, + ) - inner_radius = 0.4 - outer_radius = 12 + from sumpy.kernel import LaplaceKernel, HelmholtzKernel, BiharmonicKernel + knl = BiharmonicKernel(mesh.ambient_dim) + knl_kwargs = {} + concrete_knl_kwargs = {} -class ManyEllipsoidIntEqTestCase(Helmholtz3DIntEqTestCase): - resolutions = [2, 1] - name = "ellipsoid" + sigma_sym = sym.make_sym_vector("sigma", 2) - nx = 2 - ny = 2 - nz = 2 + if knl.is_complex_valued: + dtype = np.complex128 + else: + dtype = np.float64 + + loc_sign = +1 if case.prob_side in [+1, "scat"] else -1 + + if case.bc_type == "dirichlet": + op = DirichletOperator(knl, loc_sign, use_l2_weighting=True, + kernel_arguments=knl_kwargs) + elif case.bc_type == "neumann": + op = NeumannOperator(knl, loc_sign, use_l2_weighting=True, + use_improved_operator=False, kernel_arguments=knl_kwargs) + elif case.bc_type == "clamped_plate": + op = BiharmonicClampedPlateOperator(knl) + else: + assert False - def get_mesh(self, resolution, target_order): - from meshmode.mesh.io import generate_gmsh, FileSource - base_mesh = generate_gmsh( - FileSource("ellipsoid.step"), 2, order=2, - other_options=[ - "-string", - "Mesh.CharacteristicLengthMax = %g;" % resolution]) - - from meshmode.mesh.processing import perform_flips - # Flip elements--gmsh generates inside-out geometry. 
- base_mesh = perform_flips(base_mesh, np.ones(base_mesh.nelements)) - - from meshmode.mesh.processing import affine_map, merge_disjoint_meshes - from meshmode.mesh.tools import rand_rotation_matrix - pitch = 10 - meshes = [ - affine_map( - base_mesh, - A=rand_rotation_matrix(3), - b=pitch*np.array([ - (ix-self.nx//2), - (iy-self.ny//2), - (iz-self.ny//2)])) - for ix in range(self.nx) - for iy in range(self.ny) - for iz in range(self.nz) - ] - - mesh = merge_disjoint_meshes(meshes, single_group=True) - return mesh - - inner_radius = 0.4 - # This should sit in the area just outside the middle ellipsoid - outer_radius = 5 - - -class ElliptiplaneIntEqTestCase(IntEqTestCase): - name = "elliptiplane" - - resolutions = [0.1] - - fmm_backend = "fmmlib" - use_refinement = True + # }}} - qbx_order = 3 - fmm_tol = 1e-4 - target_order = qbx_order - check_gradient = False - check_tangential_deriv = False + # {{{ set up test data - # We're only expecting three digits based on FMM settings. Who are we - # kidding? - gmres_tol = 1e-5 + if case.prob_side == -1: + test_src_geo_radius = case.outer_radius + test_tgt_geo_radius = case.inner_radius + elif case.prob_side == +1: + test_src_geo_radius = case.inner_radius + test_tgt_geo_radius = case.outer_radius + elif case.prob_side == "scat": + test_src_geo_radius = case.outer_radius + test_tgt_geo_radius = case.outer_radius + else: + raise ValueError("unknown problem_side") - # to match the scheme given in the GIGAQBX3D paper - box_extent_norm = "l2" - from_sep_smaller_crit = "static_l2" + point_sources = make_circular_point_group( + mesh.ambient_dim, 10, test_src_geo_radius, + func=lambda x: x**1.5) + test_targets = make_circular_point_group( + mesh.ambient_dim, 20, test_tgt_geo_radius) - def get_mesh(self, resolution, target_order): - from pytools import download_from_web_if_not_present + np.random.seed(22) + source_charges = np.random.randn(point_sources.shape[1]) + source_charges[-1] = -np.sum(source_charges[:-1]) + source_charges = source_charges.astype(dtype) + assert np.sum(source_charges) < 1e-15 - download_from_web_if_not_present( - "https://raw.githubusercontent.com/inducer/geometries/master/" - "surface-3d/elliptiplane.brep") + source_charges_dev = cl.array.to_device(queue, source_charges) - from meshmode.mesh.io import generate_gmsh, FileSource - mesh = generate_gmsh( - FileSource("elliptiplane.brep"), 2, order=2, - other_options=[ - "-string", - "Mesh.CharacteristicLengthMax = %g;" % resolution]) + # }}} - # now centered at origin and extends to -1,1 + # {{{ establish BCs - # Flip elements--gmsh generates inside-out geometry. - from meshmode.mesh.processing import perform_flips - return perform_flips(mesh, np.ones(mesh.nelements)) + from pytential.source import PointPotentialSource + from pytential.target import PointsTarget - inner_radius = 0.2 - outer_radius = 12 # was '-13' in some large-scale run (?) + point_source = PointPotentialSource(cl_ctx, point_sources) + pot_src = sym.IntG( + # FIXME: qbx_forced_limit--really? 
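        # Note on the FIXME above (assuming pytential's usual meaning of this
        # flag): qbx_forced_limit=None forces no one-sided QBX limit. That is
        # what is wanted here, since pot_src is driven by an off-surface
        # PointPotentialSource, so no layer-potential limit is taken when it is
        # evaluated on the boundary or at the test targets.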
+ knl, sym.var("charges"), qbx_forced_limit=None, **knl_kwargs) -class BetterplaneIntEqTestCase(IntEqTestCase): - name = "betterplane" + if case.bc_type != "clamped_plate": + raise RuntimeError("Not Implemented") + bc1 = bind((point_source, density_discr), pot_src)( + queue, charges=source_charges_dev, **concrete_knl_kwargs) + bc2 = bind( + (point_source, density_discr), + sym.normal_derivative( + qbx.ambient_dim, pot_src, where=sym.DEFAULT_TARGET) + )(queue, charges=source_charges_dev, **concrete_knl_kwargs) - default_helmholtz_k = 20 - resolutions = [0.2] - # refine_on_helmholtz_k = False - fmm_backend = "fmmlib" - use_refinement = True + dv = sym.normal(2).as_vector() + dt = [-dv[1], dv[0]] - qbx_order = 3 - fmm_tol = 1e-4 - target_order = 6 - check_gradient = False - check_tangential_deriv = False + from sumpy.kernel import DirectionalSourceDerivative, DirectionalTargetDerivative, AxisTargetDerivative + def dv(knl): + return DirectionalSourceDerivative(knl, "normal_dir") + def dt(knl): + return DirectionalSourceDerivative(knl, "tangent_dir") - visualize_geometry = True + normal_dir = sym.normal(2).as_vector() + tangent_dir = np.array([-normal_dir[1], normal_dir[0]]) - #scaled_max_curvature_threshold = 1 - expansion_disturbance_tolerance = 0.3 - # We're only expecting three digits based on FMM settings. Who are we - # kidding? - gmres_tol = 1e-5 + if 1: - vis_grid_spacing = (0.025, 0.2, 0.025) - vis_extend_factor = 0.2 + k1 = sym.S(dv(dv(dv(knl))), sigma_sym[0], kernel_arguments={"normal_dir": normal_dir}, qbx_forced_limit='avg') + \ + 3*sym.S(dv(dt(dt(knl))), sigma_sym[0], kernel_arguments={"normal_dir": normal_dir, "tangent_dir": tangent_dir}, qbx_forced_limit='avg') - def get_mesh(self, resolution, target_order): - from pytools import download_from_web_if_not_present - - download_from_web_if_not_present( - "https://raw.githubusercontent.com/inducer/geometries/a869fc3/" - "surface-3d/betterplane.brep") - - from meshmode.mesh.io import generate_gmsh, ScriptWithFilesSource - mesh = generate_gmsh( - ScriptWithFilesSource(""" - Merge "betterplane.brep"; - - Mesh.CharacteristicLengthMax = %(lcmax)f; - Mesh.ElementOrder = 2; - Mesh.CharacteristicLengthExtendFromBoundary = 0; - - // 2D mesh optimization - // Mesh.Lloyd = 1; - - l_superfine() = Unique(Abs(Boundary{ Surface{ - 27, 25, 17, 13, 18 }; })); - l_fine() = Unique(Abs(Boundary{ Surface{ 2, 6, 7}; })); - l_coarse() = Unique(Abs(Boundary{ Surface{ 14, 16 }; })); - - // p() = Unique(Abs(Boundary{ Line{l_fine()}; })); - // Characteristic Length{p()} = 0.05; - - Field[1] = Attractor; - Field[1].NNodesByEdge = 100; - Field[1].EdgesList = {l_superfine()}; - - Field[2] = Threshold; - Field[2].IField = 1; - Field[2].LcMin = 0.075; - Field[2].LcMax = %(lcmax)f; - Field[2].DistMin = 0.1; - Field[2].DistMax = 0.4; - - Field[3] = Attractor; - Field[3].NNodesByEdge = 100; - Field[3].EdgesList = {l_fine()}; - - Field[4] = Threshold; - Field[4].IField = 3; - Field[4].LcMin = 0.1; - Field[4].LcMax = %(lcmax)f; - Field[4].DistMin = 0.15; - Field[4].DistMax = 0.4; - - Field[5] = Attractor; - Field[5].NNodesByEdge = 100; - Field[5].EdgesList = {l_coarse()}; - - Field[6] = Threshold; - Field[6].IField = 5; - Field[6].LcMin = 0.15; - Field[6].LcMax = %(lcmax)f; - Field[6].DistMin = 0.2; - Field[6].DistMax = 0.4; - - Field[7] = Min; - Field[7].FieldsList = {2, 4, 6}; - - Background Field = 7; - """ % { - "lcmax": resolution, - }, ["betterplane.brep"]), 2) - - # Flip elements--gmsh generates inside-out geometry. 
- from meshmode.mesh.processing import perform_flips - return perform_flips(mesh, np.ones(mesh.nelements)) - - inner_radius = 0.2 - outer_radius = 15 + k2 = -sym.S(dv(dv(knl)), sigma_sym[1], kernel_arguments={"normal_dir": normal_dir}, qbx_forced_limit='avg') + \ + sym.S(dt(dt(knl)), sigma_sym[1], kernel_arguments={"tangent_dir": tangent_dir}, qbx_forced_limit='avg') -# }}} + k3 = sym.normal_derivative(2, k1) + k4 = sym.normal_derivative(2, k2) + int1 = sigma_sym[0]/2 + k1 + k2 + int2 = -sym.mean_curvature(qbx.ambient_dim)*sigma_sym[0] + sigma_sym[1]/2 + k3 + k4 -# {{{ test backend -# }}} + bound_op = bind(qbx, np.array([int1, int2])) -ctx_factory = cl._csc -case = EllipseIntEqTestCase(helmholtz_k=0, bc_type="clamped_plate", - prob_side=-1) -visualize=False + def dv2(knl): + return DirectionalSourceDerivative(knl, "normal_dir_a") + def dt2(knl): + return DirectionalSourceDerivative(knl, "tangent_dir_a") -logging.basicConfig(level=logging.INFO) + sigma_sym2 = sym.make_sym_vector("sigma2", 2) + _k1 = sym.S(dv2(dv2(dv2(knl))), sigma_sym2[0], kernel_arguments={"normal_dir_a": normal_dir}, qbx_forced_limit=None) + \ + 3*sym.S(dv2(dt2(dt2(knl))), sigma_sym2[0], kernel_arguments={"normal_dir_a": normal_dir, "tangent_dir_a": tangent_dir}, qbx_forced_limit=None) -cl_ctx = ctx_factory() -queue = cl.CommandQueue(cl_ctx) -resolution = 30 + _k2 = -sym.S(dv2(dv2(knl)), sigma_sym2[1], kernel_arguments={"normal_dir_a": normal_dir}, qbx_forced_limit=None) + \ + sym.S(dt2(dt2(knl)), sigma_sym2[1], kernel_arguments={"tangent_dir_a": tangent_dir}, qbx_forced_limit=None) -if USE_SYMENGINE and case.fmm_backend is None: - pytest.skip("https://gitlab.tiker.net/inducer/sumpy/issues/25") + _k3 = sym.S(AxisTargetDerivative(0, dv2(dv2(dv2(knl)))), sigma_sym2[0], kernel_arguments={"normal_dir_a": normal_dir}, qbx_forced_limit=None) + \ + 3*sym.S(AxisTargetDerivative(0, dv2(dt2(dt2(knl)))), sigma_sym2[0], kernel_arguments={"normal_dir_a": normal_dir, "tangent_dir_a": tangent_dir}, qbx_forced_limit=None) -# prevent cache 'splosion -from sympy.core.cache import clear_cache -clear_cache() - - -mesh = case.get_mesh(resolution, case.target_order) -print("%d elements" % mesh.nelements) - -from pytential.qbx import QBXLayerPotentialSource -from meshmode.discretization import Discretization -from meshmode.discretization.poly_element import \ - InterpolatoryQuadratureSimplexGroupFactory -pre_density_discr = Discretization( - cl_ctx, mesh, - InterpolatoryQuadratureSimplexGroupFactory(case.target_order)) - -source_order = 4*case.target_order - -refiner_extra_kwargs = {} - -qbx_lpot_kwargs = {} -if case.fmm_backend is None: - qbx_lpot_kwargs["fmm_order"] = False -else: - if hasattr(case, "fmm_tol"): - from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder - qbx_lpot_kwargs["fmm_level_to_order"] = SimpleExpansionOrderFinder( - case.fmm_tol) - - elif hasattr(case, "fmm_order"): - qbx_lpot_kwargs["fmm_order"] = case.fmm_order else: - qbx_lpot_kwargs["fmm_order"] = case.qbx_order + 5 - -qbx = QBXLayerPotentialSource( - pre_density_discr, - fine_order=source_order, - qbx_order=case.qbx_order, - - _box_extent_norm=getattr(case, "box_extent_norm", None), - _from_sep_smaller_crit=getattr(case, "from_sep_smaller_crit", None), - _from_sep_smaller_min_nsources_cumul=30, - fmm_backend=case.fmm_backend, **qbx_lpot_kwargs) - -if case.use_refinement: - if case.k != 0 and getattr(case, "refine_on_helmholtz_k", True): - refiner_extra_kwargs["kernel_length_scale"] = 5/case.k - - if hasattr(case, "scaled_max_curvature_threshold"): - 
refiner_extra_kwargs["_scaled_max_curvature_threshold"] = \ - case.scaled_max_curvature_threshold - - if hasattr(case, "expansion_disturbance_tolerance"): - refiner_extra_kwargs["_expansion_disturbance_tolerance"] = \ - case.expansion_disturbance_tolerance - - if hasattr(case, "refinement_maxiter"): - refiner_extra_kwargs["maxiter"] = case.refinement_maxiter - - #refiner_extra_kwargs["visualize"] = True - - print("%d elements before refinement" % pre_density_discr.mesh.nelements) - qbx, _ = qbx.with_refinement(**refiner_extra_kwargs) - print("%d stage-1 elements after refinement" - % qbx.density_discr.mesh.nelements) - print("%d stage-2 elements after refinement" - % qbx.stage2_density_discr.mesh.nelements) - print("quad stage-2 elements have %d nodes" - % qbx.quad_stage2_density_discr.groups[0].nunit_nodes) - -density_discr = qbx.density_discr - -if hasattr(case, "visualize_geometry") and case.visualize_geometry: - bdry_normals = bind( - density_discr, sym.normal(mesh.ambient_dim) - )(queue).as_vector(dtype=object) - - bdry_vis = make_visualizer(queue, density_discr, case.target_order) - bdry_vis.write_vtk_file("geometry.vtu", [ - ("normals", bdry_normals) - ]) - -# {{{ plot geometry - -if 0: - if mesh.ambient_dim == 2: - # show geometry, centers, normals - nodes_h = density_discr.nodes().get(queue=queue) - pt.plot(nodes_h[0], nodes_h[1], "x-") - normal = bind(density_discr, sym.normal(2))(queue).as_vector(np.object) - pt.quiver(nodes_h[0], nodes_h[1], - normal[0].get(queue), normal[1].get(queue)) - pt.gca().set_aspect("equal") - pt.show() - - elif mesh.ambient_dim == 3: - bdry_vis = make_visualizer(queue, density_discr, case.target_order+3) + # Only k11 + sigma = sym.var("sigma") + k1 = 0 + k1 = k1 + sym.S(dv(dv(dv(knl))), sigma, kernel_arguments={"normal_dir": normal_dir}, qbx_forced_limit='avg') + k1 = k1 + 3*sym.S(dv(dt(dt(knl))), sigma, kernel_arguments={"normal_dir": normal_dir, "tangent_dir": tangent_dir}, qbx_forced_limit='avg') - bdry_normals = bind(density_discr, sym.normal(3))(queue)\ - .as_vector(dtype=object) + k2 = -sym.S(dv(dv(knl)), sigma, kernel_arguments={"normal_dir": normal_dir}, qbx_forced_limit='avg') + \ + sym.S(dt(dt(knl)), sigma, kernel_arguments={"tangent_dir": tangent_dir}, qbx_forced_limit='avg') - bdry_vis.write_vtk_file("pre-solve-source-%s.vtu" % resolution, [ - ("bdry_normals", bdry_normals), - ]) + #k1 = sym.normal_derivative(2, sym.S(LaplaceKernel(2), sigma))#, kernel_arguments={"normal_dir": normal_dir}) + bound_op = bind(qbx, sym.normal_derivative(2, k1)) - else: - raise ValueError("invalid mesh dim") + #A = build_matrix(bound_op.scipy_op(queue, "sigma", dtype)) -# }}} + #eigs = np.linalg.eig(A)[0] + #pt.scatter(eigs.real, eigs.imag) -# {{{ set up operator + op_u = op.operator(sym.var("sigma")) + bound_op = bind(qbx, op_u) + bvp_rhs = bind(density_discr, op.prepare_rhs(sym.var("bc")))(queue, bc=[bc1,bc2]) -from pytential.symbolic.pde.scalar import ( - DirichletOperator, - NeumannOperator) + #bvp_rhs = bind(qbx, sym.make_sym_vector("bc",qbx.ambient_dim))(queue, bc=[bc1,bc2]) -from sumpy.kernel import LaplaceKernel, HelmholtzKernel, BiharmonicKernel + """ + scipy_op = bound_op.scipy_op(queue, "sigma", dtype) + #rand_charges = cl.array.to_device(queue, 1.5 + np.sin(np.linspace(0, 2*np.pi, scipy_op.shape[0]))) + one_charges = cl.array.to_device(queue, np.ones(scipy_op.shape[0])) + bdry = scipy_op.matvec(one_charges) + h = 1/1000.0 -knl = BiharmonicKernel(mesh.ambient_dim) -knl_kwargs = {} -concrete_knl_kwargs = {} - -sigma_sym = sym.make_sym_vector("sigma", 
2) - -if knl.is_complex_valued: - dtype = np.complex128 -else: - dtype = np.float64 - -loc_sign = +1 if case.prob_side in [+1, "scat"] else -1 - -if case.bc_type == "dirichlet": - op = DirichletOperator(knl, loc_sign, use_l2_weighting=True, - kernel_arguments=knl_kwargs) -elif case.bc_type == "neumann": - op = NeumannOperator(knl, loc_sign, use_l2_weighting=True, - use_improved_operator=False, kernel_arguments=knl_kwargs) -elif case.bc_type == "clamped_plate": - pass -else: - assert False - -#op_u = op.operator(sym.var("u")) - -# }}} - -# {{{ set up test data - -if case.prob_side == -1: - test_src_geo_radius = case.outer_radius - test_tgt_geo_radius = case.inner_radius -elif case.prob_side == +1: - test_src_geo_radius = case.inner_radius - test_tgt_geo_radius = case.outer_radius -elif case.prob_side == "scat": - test_src_geo_radius = case.outer_radius - test_tgt_geo_radius = case.outer_radius -else: - raise ValueError("unknown problem_side") - -point_sources = make_circular_point_group( - mesh.ambient_dim, 10, test_src_geo_radius, - func=lambda x: x**1.5) -test_targets = make_circular_point_group( - mesh.ambient_dim, 20, test_tgt_geo_radius) - -np.random.seed(22) -source_charges = np.random.randn(point_sources.shape[1]) -source_charges[-1] = -np.sum(source_charges[:-1]) -source_charges = source_charges.astype(dtype) -assert np.sum(source_charges) < 1e-15 - -source_charges_dev = cl.array.to_device(queue, source_charges) - -# }}} + bdry_limit_targets = ellipse(1, np.linspace(0, 1, 40))* (1-h) + bdry_limit_op = bind((qbx, PointsTarget(bdry_limit_targets)), (_k1 + _k2)) + bdry_limit = bdry_limit_op(queue, sigma2=one_charges.reshape(2, scipy_op.shape[0]//2)) -# {{{ establish BCs + test_normal = sym.make_sym_vector("nt",qbx.ambient_dim) + representation_normal = sym.dd_axis(0, qbx.ambient_dim, _k1 + _k2)*test_normal[0] + sym.dd_axis(1, qbx.ambient_dim, _k1+_k2)*test_normal[1] + representation_normal2 = sym.dd_axis(0, qbx.ambient_dim, _k1 + _k2) -from pytential.source import PointPotentialSource -from pytential.target import PointsTarget + bdry_limit_op = bind((qbx, PointsTarget(bdry_limit_targets)), representation_normal) + bdry_limit = bdry_limit_op(queue, sigma2=one_charges.reshape(2, scipy_op.shape[0]//2), nt=cl.array.to_device(queue, bdry_limit_targets/np.linalg.norm(bdry_limit_targets, 2, axis=0))) -point_source = PointPotentialSource(cl_ctx, point_sources) + N = 200 + fplot = FieldPlotter(np.zeros(2), extent=2.0, npoints=N) + fld_in_vol = bind((qbx, PointsTarget(fplot.points)), _k1+_k2)(queue, sigma2=one_charges.reshape(2, scipy_op.shape[0]//2)) -pot_src = sym.IntG( - # FIXME: qbx_forced_limit--really? 
- knl, sym.var("charges"), qbx_forced_limit=None, **knl_kwargs) - -test_direct = bind((point_source, PointsTarget(test_targets)), pot_src)( - queue, charges=source_charges_dev, **concrete_knl_kwargs) - -if case.bc_type != "clamped_plate": - raise RuntimeError("Not Implemented") -bc1 = bind((point_source, density_discr), pot_src)( - queue, charges=source_charges_dev, **concrete_knl_kwargs) -bc2 = bind( - (point_source, density_discr), - sym.normal_derivative( - qbx.ambient_dim, pot_src, where=sym.DEFAULT_TARGET) - )(queue, charges=source_charges_dev, **concrete_knl_kwargs) - - -dv = sym.normal(2).as_vector() -dt = [-dv[1], dv[0]] - -from sumpy.kernel import DirectionalSourceDerivative, DirectionalTargetDerivative, AxisTargetDerivative -def dv(knl): - return DirectionalSourceDerivative(knl, "normal_dir") -def dt(knl): - return DirectionalSourceDerivative(knl, "tangent_dir") - -normal_dir = sym.normal(2).as_vector() -tangent_dir = np.array([-normal_dir[1], normal_dir[0]]) - -def curvature(ambient_dim, dim=None, where=None): - from pymbolic.primitives import make_common_subexpression as cse - - if dim is None: - dim = ambient_dim - 1 - - xp, yp = sym.parametrization_derivative_matrix(ambient_dim, dim, where) - - xpp, ypp = cse( - sym.reference_jacobian([xp[0], yp[0]], ambient_dim, dim, where), - "p2d_matrix", sym.cse_scope.DISCRETIZATION) - - return xp[0]*ypp[0] - yp[0]*xpp[0] + points = fplot.points + #points = np.array([[0]*N, list(np.linspace(-1, 1, N))]) + fld_in_vol = bind((qbx, PointsTarget(points)), representation_normal)(queue, sigma2=one_charges.reshape(2, scipy_op.shape[0]//2), nt=cl.array.to_device(queue, points/np.linalg.norm(points, 2, axis=0))) + fplot.show_scalar_in_matplotlib(fld_in_vol.get()) + import matplotlib.pyplot as pt + pt.colorbar() + pt.show() + pt.title("normal derivative of u(x)") -if 1: - k1 = sym.S(dv(dv(dv(knl))), sigma_sym[0], kernel_arguments={"normal_dir": normal_dir}) + \ - 3*sym.S(dv(dt(dt(knl))), sigma_sym[0], kernel_arguments={"normal_dir": normal_dir, "tangent_dir": tangent_dir}) + fig = plt.figure() + ax = fig.add_subplot(111, projection='3d') + ax.scatter(xs=bdry_limit_targets[0,:], ys=bdry_limit_targets[1,:], zs=bdry_limit.get()) - k2 = -sym.S(dv(dv(knl)), sigma_sym[1], kernel_arguments={"normal_dir": normal_dir}) + \ - sym.S(dt(dt(knl)), sigma_sym[1], kernel_arguments={"tangent_dir": tangent_dir}) + ax.set_xlabel('X Label') + ax.set_ylabel('Y Label') + ax.set_zlabel('Z Label') - k3 = sym.normal_derivative(2, k1) - k4 = sym.normal_derivative(2, k2) + A = build_matrix(bound_op.scipy_op(queue, "sigma", dtype)) - int1 = sigma_sym[0]/2 + k1 + k2 - int2 = -curvature(qbx.ambient_dim)*sigma_sym[0] + sigma_sym[1]/2 + k3 + k4 + eigs = np.linalg.eig(A)[0] + pt.scatter(eigs.real, eigs.imag) - bound_op = bind(qbx, np.array([int1, int2])) + # }}} -else: - # Only k11 - sigma = sym.var("sigma") - k1 = sym.S(dv(dv(dv(knl))), sigma, kernel_arguments={"normal_dir": normal_dir}) + \ - 3*sym.S(dv(dt(dt(knl))), sigma, kernel_arguments={"normal_dir": normal_dir, "tangent_dir": tangent_dir}) + # {{{ solve - bound_op = bind(qbx, k1) + b = np.concatenate((bvp_rhs[0].get(), bvp_rhs[1].get())) -# }}} + u = np.linalg.solve(A, b) -# {{{ solve + u0 = cl.array.to_device(queue, u[:len(u)//2]) + u1 = cl.array.to_device(queue, u[len(u)//2:]) + u_d = [u0, u1] -bvp_rhs = bind(qbx, sym.make_sym_vector("bc",qbx.ambient_dim))(queue, bc=[bc1,bc2]) -try: - from pytential.solve import gmres - gmres_result = gmres( - bound_op.scipy_op(queue, "sigma", dtype, **concrete_knl_kwargs), - bvp_rhs, - 
tol=case.gmres_tol, - progress=True, - hard_failure=True, - stall_iterations=50, no_progress_factor=1.05) -except QBXTargetAssociationFailedException as e: - bdry_vis = make_visualizer(queue, density_discr, case.target_order+3) - bdry_vis.write_vtk_file("failed-targets-%s.vtu" % resolution, [ - ("failed_targets", e.failed_target_flags), - ]) - raise + point_source = PointPotentialSource(cl_ctx, [[0],[0]]) + log_points = list(np.logspace(-20, 0)) + test_targets = np.array([log_points, [0]*len(log_points)]) + pot_src = sym.IntG( + # FIXME: qbx_forced_limit--really? + knl, sym.var("sigma"), qbx_forced_limit=None, **knl_kwargs) + """ -print("gmres state:", gmres_result.state) -weighted_u = gmres_result.solution + try: + from pytential.solve import gmres + gmres_result = gmres( + bound_op.scipy_op(queue, "sigma", dtype, **concrete_knl_kwargs), + bvp_rhs, + tol=case.gmres_tol, + progress=True, + hard_failure=True, + stall_iterations=50, no_progress_factor=1.05, require_monotonicity=True) + except QBXTargetAssociationFailedException as e: + bdry_vis = make_visualizer(queue, density_discr, case.target_order+3) + bdry_vis.write_vtk_file("failed-targets-%s.vtu" % resolution, [ + ("failed_targets", e.failed_target_flags), + ]) + raise -points_target = PointsTarget(test_targets) + print("gmres state:", gmres_result.state) + weighted_u = gmres_result.solution -_k1 = sym.S(dv(dv(dv(knl))), sigma_sym[0], kernel_arguments={"normal_dir": normal_dir}, qbx_forced_limit=None) + \ - 3*sym.S(dv(dt(dt(knl))), sigma_sym[0], kernel_arguments={"normal_dir": normal_dir, "tangent_dir": tangent_dir}, qbx_forced_limit=None) -_k2 = -sym.S(dv(dv(knl)), sigma_sym[1], kernel_arguments={"normal_dir": normal_dir}, qbx_forced_limit=None) + \ - sym.S(dt(dt(knl)), sigma_sym[1], kernel_arguments={"tangent_dir": tangent_dir}, qbx_forced_limit=None) + #test_targets = ellipse(1, np.linspace(0, 1, 100))* (1-h) + fplot = FieldPlotter(np.zeros(2), extent=2.0, npoints=1000) + test_targets = fplot.points -# {{{ build matrix for spectrum check + test_direct = bind((point_source, PointsTarget(test_targets)), pot_src)( + queue, charges=source_charges_dev, **concrete_knl_kwargs) -if 0: - from sumpy.tools import build_matrix - mat = build_matrix( - bound_op.scipy_op( - queue, arg_name="sigma", dtype=dtype, k=case.k)) - w, v = la.eig(mat) - pt.scatter(w.real, w.imag) - pt.show() - if 0: - pt.imshow(np.log10(1e-20+np.abs(mat))) - pt.colorbar() - pt.show() + points_target = PointsTarget(test_targets) - #assert abs(s[-1]) < 1e-13, "h - #assert abs(s[-2]) > 1e-7 - #from pudb import set_trace; set_trace() + bound_tgt_op = bind((qbx, points_target), _k1 + _k2) -# }}} + test_via_bdry = bound_tgt_op(queue, sigma2=weighted_u) - # {{{ error check + err = test_via_bdry - test_direct -points_target = PointsTarget(test_targets) -bound_tgt_op = bind((qbx, points_target), _k1 + _k2) + err = np.abs(err.get()) -test_via_bdry = bound_tgt_op(queue, sigma=weighted_u, k=case.k) + print(err/test_direct.get()) -err = test_via_bdry - test_direct + points = [] + for i, point in enumerate(fplot.points.T): + if point[0]**2+3*point[1]**2>=1: + err[i] = 1e-12 + max_err.append(np.max(np.abs(err))) + print("====================================================================") + print(max_err) -err = err.get() -test_direct = test_direct.get() -test_via_bdry = test_via_bdry.get() +pt.imshow(np.abs(err).reshape(int(np.sqrt(len(err))), int(np.sqrt(len(err)))), norm=LogNorm()) +pt.colorbar() -# {{{ remove effect of net source charge +test_targets = np.array(points).T.copy() 
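# The error plot above uses matplotlib's LogNorm, which is not imported anywhere
# in this file; it would need, e.g.:
#
#     from matplotlib.colors import LogNorm
#
# A minimal convergence-rate sketch building on the max_err/x lists filled in
# the resolution loop above (assuming the mesh spacing scales like
# h ~ 1/resolution for these curve meshes):

import numpy as np  # already imported at the top of this file

h_vals = 1.0/np.asarray(x, dtype=np.float64)
errs = np.asarray(max_err, dtype=np.float64)
if len(errs) > 1:
    # slope of log(err) vs log(h) between successive resolutions
    eoc = np.diff(np.log(errs))/np.diff(np.log(h_vals))
    print("empirical orders of convergence:", eoc)
else:
    # with the single entry [10] in the resolution loop there is only one data
    # point, so no rate can be estimated yet
    print("max errors so far:", errs)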
-if case.k == 0 and case.bc_type == "neumann" and loc_sign == -1: +fld_in_vol = bind((qbx, PointsTarget(fplot.points)), _k1+_k2)(queue, sigma2=one_charges.reshape(2, scipy_op.shape[0]//2)) - # remove constant offset in interior Laplace Neumann error - tgt_ones = np.ones_like(test_direct) - tgt_ones = tgt_ones/la.norm(tgt_ones) - err = err - np.vdot(tgt_ones, err)*tgt_ones -# }}} +fplot.show_scalar_in_matplotlib(fld_in_vol.get()) -rel_err_2 = la.norm(err)/la.norm(test_direct) -rel_err_inf = la.norm(err, np.inf)/la.norm(test_direct, np.inf) - -# }}} -print("rel_err_2: %g rel_err_inf: %g" % (rel_err_2, rel_err_inf)) +A = build_matrix(bound_op.scipy_op(queue, "sigma", dtype)) +eigs = np.linalg.eig(A)[0] +pt.scatter(eigs.real, eigs.imag) diff --git a/test/test_scalar_int_eq.py b/test/test_scalar_int_eq.py index 811a912d..e1fa46a0 100644 --- a/test/test_scalar_int_eq.py +++ b/test/test_scalar_int_eq.py @@ -40,6 +40,7 @@ from meshmode.discretization.visualization import make_visualizer from sumpy.symbolic import USE_SYMENGINE from pytential import bind, sym from pytential.qbx import QBXTargetAssociationFailedException +from sumpy.kernel import LaplaceKernel, HelmholtzKernel, BiharmonicKernel import logging logger = logging.getLogger(__name__) @@ -65,10 +66,6 @@ def make_circular_point_group(ambient_dim, npoints, radius, class IntEqTestCase: - @property - def default_helmholtz_k(self): - raise NotImplementedError - @property def name(self): raise NotImplementedError @@ -81,27 +78,37 @@ class IntEqTestCase: def target_order(self): raise NotImplementedError - def __init__(self, helmholtz_k, bc_type, prob_side): + def __init__(self, knl_class_or_helmholtz_k, bc_type, + prob_side, knl_kwargs={}, **kwargs): """ :arg prob_side: may be -1, +1, or ``'scat'`` for a scattering problem """ - - if helmholtz_k is None: - helmholtz_k = self.default_helmholtz_k - - self.helmholtz_k = helmholtz_k self.bc_type = bc_type self.prob_side = prob_side - - @property - def k(self): - return self.helmholtz_k + if not isinstance(knl_class_or_helmholtz_k, type): + if knl_class_or_helmholtz_k == 0: + knl_class = LaplaceKernel + else: + knl_kwargs = {"k": knl_class_or_helmholtz_k} + knl_class = HelmholtzKernel + else: + knl_class = knl_class_or_helmholtz_k + self.knl_class = knl_class + self.knl_kwargs = knl_kwargs + self.knl_kwargs_syms = dict((k, sym.var(k)) for k in self.knl_kwargs.keys()) + for k, v in self.knl_kwargs.items(): + setattr(self, k, v) + for k, v in kwargs.items(): + setattr(self, k, v) def __str__(self): + kwargs = ", ".join("%s: %s" % (k, v) for k, v in self.knl_kwargs.items()) + if kwargs: + kwargs = " , " + kwargs return ("name: %s, bc_type: %s, prob_side: %s, " - "helmholtz_k: %s, qbx_order: %d, target_order: %d" - % (self.name, self.bc_type, self.prob_side, self.helmholtz_k, - self.qbx_order, self.target_order)) + "knl_class: %s, qbx_order: %d, target_order: %d%s" + % (self.name, self.bc_type, self.prob_side, self.knl_class, + self.qbx_order, self.target_order, kwargs)) fmm_backend = "sumpy" gmres_tol = 1e-14 @@ -125,8 +132,9 @@ class CurveIntEqTestCase(IntEqTestCase): outer_radius = 2 qbx_order = 5 - target_order = qbx_order - fmm_backend = None + target_order = 5 + fmm_order = 15 + fmm_backend = "sumpy" check_tangential_deriv = True check_gradient = False @@ -323,7 +331,6 @@ class ElliptiplaneIntEqTestCase(IntEqTestCase): class BetterplaneIntEqTestCase(IntEqTestCase): name = "betterplane" - default_helmholtz_k = 20 resolutions = [0.2] # refine_on_helmholtz_k = False @@ -469,7 +476,7 @@ def 
run_int_eq_test(cl_ctx, queue, case, resolution, visualize): fmm_backend=case.fmm_backend, **qbx_lpot_kwargs) if case.use_refinement: - if case.k != 0 and getattr(case, "refine_on_helmholtz_k", True): + if case.knl_class == HelmholtzKernel and getattr(case, "refine_on_helmholtz_k", True): refiner_extra_kwargs["kernel_length_scale"] = 5/case.k if hasattr(case, "scaled_max_curvature_threshold"): @@ -537,18 +544,14 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): # {{{ set up operator from pytential.symbolic.pde.scalar import ( - DirichletOperator, - NeumannOperator) - - from sumpy.kernel import LaplaceKernel, HelmholtzKernel - if case.k: - knl = HelmholtzKernel(mesh.ambient_dim) - knl_kwargs = {"k": sym.var("k")} - concrete_knl_kwargs = {"k": case.k} - else: - knl = LaplaceKernel(mesh.ambient_dim) - knl_kwargs = {} - concrete_knl_kwargs = {} + DirichletOperator, + NeumannOperator, + BiharmonicClampedPlateOperator, + ) + + knl = case.knl_class(mesh.ambient_dim) + knl_kwargs_syms = case.knl_kwargs_syms + concrete_knl_kwargs = case.knl_kwargs if knl.is_complex_valued: dtype = np.complex128 @@ -559,10 +562,12 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): if case.bc_type == "dirichlet": op = DirichletOperator(knl, loc_sign, use_l2_weighting=True, - kernel_arguments=knl_kwargs) + kernel_arguments=knl_kwargs_syms) elif case.bc_type == "neumann": op = NeumannOperator(knl, loc_sign, use_l2_weighting=True, - use_improved_operator=False, kernel_arguments=knl_kwargs) + use_improved_operator=False, kernel_arguments=knl_kwargs_syms) + elif case.bc_type == "clamped_plate": + op = BiharmonicClampedPlateOperator(knl) else: assert False @@ -609,7 +614,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): pot_src = sym.IntG( # FIXME: qbx_forced_limit--really? 
- knl, sym.var("charges"), qbx_forced_limit=None, **knl_kwargs) + knl, sym.var("charges"), qbx_forced_limit=None, **knl_kwargs_syms) test_direct = bind((point_source, PointsTarget(test_targets)), pot_src)( queue, charges=source_charges_dev, **concrete_knl_kwargs) @@ -625,6 +630,17 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): qbx.ambient_dim, pot_src, where=sym.DEFAULT_TARGET) )(queue, charges=source_charges_dev, **concrete_knl_kwargs) + elif case.bc_type == "clamped_plate": + bc_u = bind((point_source, density_discr), pot_src)( + queue, charges=source_charges_dev, **concrete_knl_kwargs) + bc_du = bind( + (point_source, density_discr), + sym.normal_derivative( + qbx.ambient_dim, pot_src, where=sym.DEFAULT_TARGET) + )(queue, charges=source_charges_dev, **concrete_knl_kwargs) + bc = [bc_u, bc_du] + + # }}} # {{{ solve @@ -660,7 +676,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): from sumpy.tools import build_matrix mat = build_matrix( bound_op.scipy_op( - queue, arg_name="u", dtype=dtype, k=case.k)) + queue, arg_name="u", dtype=dtype, **concrete_knl_kwargs)) w, v = la.eig(mat) if 0: pt.imshow(np.log10(1e-20+np.abs(mat))) @@ -680,7 +696,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): bound_tgt_op = bind((qbx, points_target), op.representation(sym.var("u"))) - test_via_bdry = bound_tgt_op(queue, u=weighted_u, k=case.k) + test_via_bdry = bound_tgt_op(queue, u=weighted_u, **concrete_knl_kwargs) err = test_via_bdry - test_direct @@ -690,7 +706,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): # {{{ remove effect of net source charge - if case.k == 0 and case.bc_type == "neumann" and loc_sign == -1: + if case.knl_class == LaplaceKernel and case.bc_type == "neumann" and loc_sign == -1: # remove constant offset in interior Laplace Neumann error tgt_ones = np.ones_like(test_direct) @@ -815,7 +831,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): solved_pot = bind( (qbx_tgt_tol, PointsTarget(fplot.points)), op.representation(sym.var("u")) - )(queue, u=weighted_u, k=case.k) + )(queue, u=weighted_u, **concrete_knl_kwargs) except QBXTargetAssociationFailedException as e: fplot.write_vtk_file( "failed-targets.vts", @@ -824,7 +840,6 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): ]) raise - from sumpy.kernel import LaplaceKernel ones_density = density_discr.zeros(queue) ones_density.fill(1) indicator = bind( @@ -873,18 +888,25 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): # }}} - -# {{{ test frontend - -@pytest.mark.parametrize("case", [ - EllipseIntEqTestCase(helmholtz_k=helmholtz_k, bc_type=bc_type, +cases = [ + EllipseIntEqTestCase(helmholtz_k, bc_type=bc_type, prob_side=prob_side) for helmholtz_k in [0, 1.2] for bc_type in ["dirichlet", "neumann"] for prob_side in [-1, +1] - ]) +] + +cases += [ + EllipseIntEqTestCase(BiharmonicKernel, bc_type="clamped_plate", + prob_side=prob_side) + for prob_side in [-1, +1] +] + +# {{{ test frontend + +@pytest.mark.parametrize("case", cases) # Sample test run: -# 'test_integral_equation(cl._csc, EllipseIntEqTestCase(0, "dirichlet", +1), visualize=True)' # noqa: E501 +# 'test_integral_equation(cl._csc, EllipseIntEqTestCase(LaplaceKernel, "dirichlet", +1), visualize=True)' # noqa: E501 def test_integral_equation(ctx_factory, case, visualize=False): logging.basicConfig(level=logging.INFO) @@ -920,6 +942,8 @@ def test_integral_equation(ctx_factory, case, visualize=False): tgt_order = case.qbx_order elif case.bc_type == 
"neumann": tgt_order = case.qbx_order-1 + elif case.bc_type == "clamped_plate": + tgt_order = case.qbx_order else: assert False -- GitLab From 8761a9003592c69ffb990d99f471043b67df14dc Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 25 Nov 2019 01:01:50 -0600 Subject: [PATCH 03/16] Clean up --- test/test_biharmonic_int_eq.py | 572 --------------------------------- test/test_scalar_int_eq.py | 7 +- 2 files changed, 3 insertions(+), 576 deletions(-) delete mode 100644 test/test_biharmonic_int_eq.py diff --git a/test/test_biharmonic_int_eq.py b/test/test_biharmonic_int_eq.py deleted file mode 100644 index 29edb07e..00000000 --- a/test/test_biharmonic_int_eq.py +++ /dev/null @@ -1,572 +0,0 @@ -from __future__ import division, absolute_import, print_function - -__copyright__ = "Copyright (C) 2014 Andreas Kloeckner" - -__license__ = """ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
-""" -import numpy as np -import numpy.linalg as la -import pyopencl as cl -import pyopencl.clmath # noqa -import pytest -from pytools import Record -from pyopencl.tools import ( # noqa - pytest_generate_tests_for_pyopencl as pytest_generate_tests) - -from functools import partial -from meshmode.mesh.generation import ( # noqa - ellipse, cloverleaf, starfish, drop, n_gon, qbx_peanut, WobblyCircle, - make_curve_mesh) -from meshmode.discretization.visualization import make_visualizer -from sumpy.symbolic import USE_SYMENGINE -from sumpy.tools import build_matrix -from sumpy.visualization import FieldPlotter -from pytential import bind, sym -from pytential.qbx import QBXTargetAssociationFailedException - -import logging -logger = logging.getLogger(__name__) - -circle = partial(ellipse, 1) - -try: - import matplotlib.pyplot as pt -except ImportError: - pass - - -def make_circular_point_group(ambient_dim, npoints, radius, - center=np.array([0., 0.]), func=lambda x: x): - t = func(np.linspace(0, 1, npoints, endpoint=False)) * (2 * np.pi) - center = np.asarray(center) - result = np.zeros((ambient_dim, npoints)) - result[:2, :] = center[:, np.newaxis] + radius*np.vstack((np.cos(t), np.sin(t))) - return result - - -# {{{ test cases - -class IntEqTestCase: - - @property - def default_helmholtz_k(self): - raise NotImplementedError - - @property - def name(self): - raise NotImplementedError - - @property - def qbx_order(self): - raise NotImplementedError - - @property - def target_order(self): - raise NotImplementedError - - def __init__(self, helmholtz_k, bc_type, prob_side): - """ - :arg prob_side: may be -1, +1, or ``'scat'`` for a scattering problem - """ - - if helmholtz_k is None: - helmholtz_k = self.default_helmholtz_k - - self.helmholtz_k = helmholtz_k - self.bc_type = bc_type - self.prob_side = prob_side - - @property - def k(self): - return self.helmholtz_k - - def __str__(self): - return ("name: %s, bc_type: %s, prob_side: %s, " - "helmholtz_k: %s, qbx_order: %d, target_order: %d" - % (self.name, self.bc_type, self.prob_side, self.helmholtz_k, - self.qbx_order, self.target_order)) - - fmm_backend = "sumpy" - gmres_tol = 1e-12 - - -class CurveIntEqTestCase(IntEqTestCase): - resolutions = [40, 50, 60] - - def curve_func(self, *args, **kwargs): - raise NotImplementedError - - def get_mesh(self, resolution, target_order): - return make_curve_mesh( - self.curve_func, - np.linspace(0, 1, resolution+1), - target_order) - - use_refinement = True - - inner_radius = 0.1 - outer_radius = 2 - - qbx_order = 5 - target_order = 5 - fmm_order = 15 - fmm_backend = "sumpy" - - - -class EllipseIntEqTestCase(CurveIntEqTestCase): - name = "3-to-1 ellipse" - - def __init__(self, ratio=3, *args, **kwargs): - super(EllipseIntEqTestCase, self).__init__(*args, **kwargs) - self.ratio = ratio - - def curve_func(self, x): - return ellipse(self.ratio, x) - -# }}} - - -# {{{ test backend -# }}} - - -ctx_factory = cl._csc -case = EllipseIntEqTestCase(helmholtz_k=0, bc_type="clamped_plate", - prob_side=-1, ratio=3) -visualize=False - - -logging.basicConfig(level=logging.INFO) - -cl_ctx = ctx_factory() -queue = cl.CommandQueue(cl_ctx) -resolution = 30 -max_err = [] -x = [] -for resolution in [10]: - x.append(resolution) - - if USE_SYMENGINE and case.fmm_backend is None: - pytest.skip("https://gitlab.tiker.net/inducer/sumpy/issues/25") - - # prevent cache 'splosion - from sympy.core.cache import clear_cache - clear_cache() - - - mesh = case.get_mesh(resolution, case.target_order) - print("%d elements" % mesh.nelements) - - 
from pytential.qbx import QBXLayerPotentialSource - from meshmode.discretization import Discretization - from meshmode.discretization.poly_element import \ - InterpolatoryQuadratureSimplexGroupFactory - pre_density_discr = Discretization( - cl_ctx, mesh, - InterpolatoryQuadratureSimplexGroupFactory(case.target_order)) - - source_order = 4*case.target_order - - refiner_extra_kwargs = {} - - qbx_lpot_kwargs = {} - if case.fmm_backend is None: - qbx_lpot_kwargs["fmm_order"] = False - else: - if hasattr(case, "fmm_tol"): - from sumpy.expansion.flevel_to_order import SimpleExpansionOrderFinder - qbx_lpot_kwargs["fmm_level_to_order"] = SimpleExpansionOrderFinder( - case.fmm_tol) - - elif hasattr(case, "fmm_order"): - qbx_lpot_kwargs["fmm_order"] = case.fmm_order - else: - qbx_lpot_kwargs["fmm_order"] = case.qbx_order + 5 - - case.qbx_order = 5 - - qbx = QBXLayerPotentialSource( - pre_density_discr, - fine_order=source_order, - qbx_order=case.qbx_order, - target_association_tolerance=0.05, - _box_extent_norm=getattr(case, "box_extent_norm", None), - _from_sep_smaller_crit=getattr(case, "from_sep_smaller_crit", None), - _from_sep_smaller_min_nsources_cumul=30, - fmm_backend=case.fmm_backend, **qbx_lpot_kwargs) - - if case.use_refinement: - if case.k != 0 and getattr(case, "refine_on_helmholtz_k", True): - refiner_extra_kwargs["kernel_length_scale"] = 5/case.k - - if hasattr(case, "scaled_max_curvature_threshold"): - refiner_extra_kwargs["_scaled_max_curvature_threshold"] = \ - case.scaled_max_curvature_threshold - - if hasattr(case, "expansion_disturbance_tolerance"): - refiner_extra_kwargs["_expansion_disturbance_tolerance"] = \ - case.expansion_disturbance_tolerance - - if hasattr(case, "refinement_maxiter"): - refiner_extra_kwargs["maxiter"] = case.refinement_maxiter - - #refiner_extra_kwargs["visualize"] = True - - print("%d elements before refinement" % pre_density_discr.mesh.nelements) - qbx, _ = qbx.with_refinement(**refiner_extra_kwargs) - print("%d stage-1 elements after refinement" - % qbx.density_discr.mesh.nelements) - print("%d stage-2 elements after refinement" - % qbx.stage2_density_discr.mesh.nelements) - print("quad stage-2 elements have %d nodes" - % qbx.quad_stage2_density_discr.groups[0].nunit_nodes) - - density_discr = qbx.density_discr - - if hasattr(case, "visualize_geometry") and case.visualize_geometry: - bdry_normals = bind( - density_discr, sym.normal(mesh.ambient_dim) - )(queue).as_vector(dtype=object) - - bdry_vis = make_visualizer(queue, density_discr, case.target_order) - bdry_vis.write_vtk_file("geometry.vtu", [ - ("normals", bdry_normals) - ]) - - # {{{ plot geometry - - if 0: - if mesh.ambient_dim == 2: - # show geometry, centers, normals - nodes_h = density_discr.nodes().get(queue=queue) - pt.plot(nodes_h[0], nodes_h[1], "x-") - normal = bind(density_discr, sym.normal(2))(queue).as_vector(np.object) - pt.quiver(nodes_h[0], nodes_h[1], - normal[0].get(queue), normal[1].get(queue)) - pt.gca().set_aspect("equal") - pt.show() - - elif mesh.ambient_dim == 3: - bdry_vis = make_visualizer(queue, density_discr, case.target_order+3) - - bdry_normals = bind(density_discr, sym.normal(3))(queue)\ - .as_vector(dtype=object) - - bdry_vis.write_vtk_file("pre-solve-source-%s.vtu" % resolution, [ - ("bdry_normals", bdry_normals), - ]) - - else: - raise ValueError("invalid mesh dim") - - # }}} - - # {{{ set up operator - - from pytential.symbolic.pde.scalar import ( - DirichletOperator, - NeumannOperator, - BiharmonicClampedPlateOperator, - ) - - from sumpy.kernel import 
LaplaceKernel, HelmholtzKernel, BiharmonicKernel - - knl = BiharmonicKernel(mesh.ambient_dim) - knl_kwargs = {} - concrete_knl_kwargs = {} - - sigma_sym = sym.make_sym_vector("sigma", 2) - - if knl.is_complex_valued: - dtype = np.complex128 - else: - dtype = np.float64 - - loc_sign = +1 if case.prob_side in [+1, "scat"] else -1 - - if case.bc_type == "dirichlet": - op = DirichletOperator(knl, loc_sign, use_l2_weighting=True, - kernel_arguments=knl_kwargs) - elif case.bc_type == "neumann": - op = NeumannOperator(knl, loc_sign, use_l2_weighting=True, - use_improved_operator=False, kernel_arguments=knl_kwargs) - elif case.bc_type == "clamped_plate": - op = BiharmonicClampedPlateOperator(knl) - else: - assert False - - # }}} - - # {{{ set up test data - - if case.prob_side == -1: - test_src_geo_radius = case.outer_radius - test_tgt_geo_radius = case.inner_radius - elif case.prob_side == +1: - test_src_geo_radius = case.inner_radius - test_tgt_geo_radius = case.outer_radius - elif case.prob_side == "scat": - test_src_geo_radius = case.outer_radius - test_tgt_geo_radius = case.outer_radius - else: - raise ValueError("unknown problem_side") - - point_sources = make_circular_point_group( - mesh.ambient_dim, 10, test_src_geo_radius, - func=lambda x: x**1.5) - test_targets = make_circular_point_group( - mesh.ambient_dim, 20, test_tgt_geo_radius) - - np.random.seed(22) - source_charges = np.random.randn(point_sources.shape[1]) - source_charges[-1] = -np.sum(source_charges[:-1]) - source_charges = source_charges.astype(dtype) - assert np.sum(source_charges) < 1e-15 - - source_charges_dev = cl.array.to_device(queue, source_charges) - - # }}} - - # {{{ establish BCs - - from pytential.source import PointPotentialSource - from pytential.target import PointsTarget - - point_source = PointPotentialSource(cl_ctx, point_sources) - - pot_src = sym.IntG( - # FIXME: qbx_forced_limit--really? 
- knl, sym.var("charges"), qbx_forced_limit=None, **knl_kwargs) - - if case.bc_type != "clamped_plate": - raise RuntimeError("Not Implemented") - bc1 = bind((point_source, density_discr), pot_src)( - queue, charges=source_charges_dev, **concrete_knl_kwargs) - bc2 = bind( - (point_source, density_discr), - sym.normal_derivative( - qbx.ambient_dim, pot_src, where=sym.DEFAULT_TARGET) - )(queue, charges=source_charges_dev, **concrete_knl_kwargs) - - - dv = sym.normal(2).as_vector() - dt = [-dv[1], dv[0]] - - from sumpy.kernel import DirectionalSourceDerivative, DirectionalTargetDerivative, AxisTargetDerivative - def dv(knl): - return DirectionalSourceDerivative(knl, "normal_dir") - def dt(knl): - return DirectionalSourceDerivative(knl, "tangent_dir") - - normal_dir = sym.normal(2).as_vector() - tangent_dir = np.array([-normal_dir[1], normal_dir[0]]) - - - if 1: - - k1 = sym.S(dv(dv(dv(knl))), sigma_sym[0], kernel_arguments={"normal_dir": normal_dir}, qbx_forced_limit='avg') + \ - 3*sym.S(dv(dt(dt(knl))), sigma_sym[0], kernel_arguments={"normal_dir": normal_dir, "tangent_dir": tangent_dir}, qbx_forced_limit='avg') - - k2 = -sym.S(dv(dv(knl)), sigma_sym[1], kernel_arguments={"normal_dir": normal_dir}, qbx_forced_limit='avg') + \ - sym.S(dt(dt(knl)), sigma_sym[1], kernel_arguments={"tangent_dir": tangent_dir}, qbx_forced_limit='avg') - - k3 = sym.normal_derivative(2, k1) - k4 = sym.normal_derivative(2, k2) - - int1 = sigma_sym[0]/2 + k1 + k2 - int2 = -sym.mean_curvature(qbx.ambient_dim)*sigma_sym[0] + sigma_sym[1]/2 + k3 + k4 - - bound_op = bind(qbx, np.array([int1, int2])) - - - def dv2(knl): - return DirectionalSourceDerivative(knl, "normal_dir_a") - def dt2(knl): - return DirectionalSourceDerivative(knl, "tangent_dir_a") - - - sigma_sym2 = sym.make_sym_vector("sigma2", 2) - _k1 = sym.S(dv2(dv2(dv2(knl))), sigma_sym2[0], kernel_arguments={"normal_dir_a": normal_dir}, qbx_forced_limit=None) + \ - 3*sym.S(dv2(dt2(dt2(knl))), sigma_sym2[0], kernel_arguments={"normal_dir_a": normal_dir, "tangent_dir_a": tangent_dir}, qbx_forced_limit=None) - - _k2 = -sym.S(dv2(dv2(knl)), sigma_sym2[1], kernel_arguments={"normal_dir_a": normal_dir}, qbx_forced_limit=None) + \ - sym.S(dt2(dt2(knl)), sigma_sym2[1], kernel_arguments={"tangent_dir_a": tangent_dir}, qbx_forced_limit=None) - - _k3 = sym.S(AxisTargetDerivative(0, dv2(dv2(dv2(knl)))), sigma_sym2[0], kernel_arguments={"normal_dir_a": normal_dir}, qbx_forced_limit=None) + \ - 3*sym.S(AxisTargetDerivative(0, dv2(dt2(dt2(knl)))), sigma_sym2[0], kernel_arguments={"normal_dir_a": normal_dir, "tangent_dir_a": tangent_dir}, qbx_forced_limit=None) - - else: - # Only k11 - sigma = sym.var("sigma") - k1 = 0 - k1 = k1 + sym.S(dv(dv(dv(knl))), sigma, kernel_arguments={"normal_dir": normal_dir}, qbx_forced_limit='avg') - k1 = k1 + 3*sym.S(dv(dt(dt(knl))), sigma, kernel_arguments={"normal_dir": normal_dir, "tangent_dir": tangent_dir}, qbx_forced_limit='avg') - - k2 = -sym.S(dv(dv(knl)), sigma, kernel_arguments={"normal_dir": normal_dir}, qbx_forced_limit='avg') + \ - sym.S(dt(dt(knl)), sigma, kernel_arguments={"tangent_dir": tangent_dir}, qbx_forced_limit='avg') - - #k1 = sym.normal_derivative(2, sym.S(LaplaceKernel(2), sigma))#, kernel_arguments={"normal_dir": normal_dir}) - bound_op = bind(qbx, sym.normal_derivative(2, k1)) - - #A = build_matrix(bound_op.scipy_op(queue, "sigma", dtype)) - - #eigs = np.linalg.eig(A)[0] - #pt.scatter(eigs.real, eigs.imag) - - op_u = op.operator(sym.var("sigma")) - bound_op = bind(qbx, op_u) - bvp_rhs = bind(density_discr, 
op.prepare_rhs(sym.var("bc")))(queue, bc=[bc1,bc2]) - - #bvp_rhs = bind(qbx, sym.make_sym_vector("bc",qbx.ambient_dim))(queue, bc=[bc1,bc2]) - - """ - scipy_op = bound_op.scipy_op(queue, "sigma", dtype) - #rand_charges = cl.array.to_device(queue, 1.5 + np.sin(np.linspace(0, 2*np.pi, scipy_op.shape[0]))) - one_charges = cl.array.to_device(queue, np.ones(scipy_op.shape[0])) - bdry = scipy_op.matvec(one_charges) - h = 1/1000.0 - - bdry_limit_targets = ellipse(1, np.linspace(0, 1, 40))* (1-h) - bdry_limit_op = bind((qbx, PointsTarget(bdry_limit_targets)), (_k1 + _k2)) - bdry_limit = bdry_limit_op(queue, sigma2=one_charges.reshape(2, scipy_op.shape[0]//2)) - - test_normal = sym.make_sym_vector("nt",qbx.ambient_dim) - representation_normal = sym.dd_axis(0, qbx.ambient_dim, _k1 + _k2)*test_normal[0] + sym.dd_axis(1, qbx.ambient_dim, _k1+_k2)*test_normal[1] - representation_normal2 = sym.dd_axis(0, qbx.ambient_dim, _k1 + _k2) - - bdry_limit_op = bind((qbx, PointsTarget(bdry_limit_targets)), representation_normal) - bdry_limit = bdry_limit_op(queue, sigma2=one_charges.reshape(2, scipy_op.shape[0]//2), nt=cl.array.to_device(queue, bdry_limit_targets/np.linalg.norm(bdry_limit_targets, 2, axis=0))) - - N = 200 - fplot = FieldPlotter(np.zeros(2), extent=2.0, npoints=N) - fld_in_vol = bind((qbx, PointsTarget(fplot.points)), _k1+_k2)(queue, sigma2=one_charges.reshape(2, scipy_op.shape[0]//2)) - - points = fplot.points - #points = np.array([[0]*N, list(np.linspace(-1, 1, N))]) - fld_in_vol = bind((qbx, PointsTarget(points)), representation_normal)(queue, sigma2=one_charges.reshape(2, scipy_op.shape[0]//2), nt=cl.array.to_device(queue, points/np.linalg.norm(points, 2, axis=0))) - - fplot.show_scalar_in_matplotlib(fld_in_vol.get()) - import matplotlib.pyplot as pt - pt.colorbar() - pt.show() - pt.title("normal derivative of u(x)") - - - fig = plt.figure() - ax = fig.add_subplot(111, projection='3d') - ax.scatter(xs=bdry_limit_targets[0,:], ys=bdry_limit_targets[1,:], zs=bdry_limit.get()) - - ax.set_xlabel('X Label') - ax.set_ylabel('Y Label') - ax.set_zlabel('Z Label') - - A = build_matrix(bound_op.scipy_op(queue, "sigma", dtype)) - - eigs = np.linalg.eig(A)[0] - pt.scatter(eigs.real, eigs.imag) - - # }}} - - # {{{ solve - - b = np.concatenate((bvp_rhs[0].get(), bvp_rhs[1].get())) - - u = np.linalg.solve(A, b) - - u0 = cl.array.to_device(queue, u[:len(u)//2]) - u1 = cl.array.to_device(queue, u[len(u)//2:]) - u_d = [u0, u1] - - - - point_source = PointPotentialSource(cl_ctx, [[0],[0]]) - log_points = list(np.logspace(-20, 0)) - test_targets = np.array([log_points, [0]*len(log_points)]) - pot_src = sym.IntG( - # FIXME: qbx_forced_limit--really? 
- knl, sym.var("sigma"), qbx_forced_limit=None, **knl_kwargs) - """ - - try: - from pytential.solve import gmres - gmres_result = gmres( - bound_op.scipy_op(queue, "sigma", dtype, **concrete_knl_kwargs), - bvp_rhs, - tol=case.gmres_tol, - progress=True, - hard_failure=True, - stall_iterations=50, no_progress_factor=1.05, require_monotonicity=True) - except QBXTargetAssociationFailedException as e: - bdry_vis = make_visualizer(queue, density_discr, case.target_order+3) - - bdry_vis.write_vtk_file("failed-targets-%s.vtu" % resolution, [ - ("failed_targets", e.failed_target_flags), - ]) - raise - - print("gmres state:", gmres_result.state) - weighted_u = gmres_result.solution - - - #test_targets = ellipse(1, np.linspace(0, 1, 100))* (1-h) - - fplot = FieldPlotter(np.zeros(2), extent=2.0, npoints=1000) - test_targets = fplot.points - - test_direct = bind((point_source, PointsTarget(test_targets)), pot_src)( - queue, charges=source_charges_dev, **concrete_knl_kwargs) - - points_target = PointsTarget(test_targets) - - bound_tgt_op = bind((qbx, points_target), _k1 + _k2) - - test_via_bdry = bound_tgt_op(queue, sigma2=weighted_u) - - err = test_via_bdry - test_direct - - err = np.abs(err.get()) - - print(err/test_direct.get()) - - points = [] - for i, point in enumerate(fplot.points.T): - if point[0]**2+3*point[1]**2>=1: - err[i] = 1e-12 - max_err.append(np.max(np.abs(err))) - print("====================================================================") - print(max_err) - -pt.imshow(np.abs(err).reshape(int(np.sqrt(len(err))), int(np.sqrt(len(err)))), norm=LogNorm()) -pt.colorbar() - -test_targets = np.array(points).T.copy() - -fld_in_vol = bind((qbx, PointsTarget(fplot.points)), _k1+_k2)(queue, sigma2=one_charges.reshape(2, scipy_op.shape[0]//2)) - - -fplot.show_scalar_in_matplotlib(fld_in_vol.get()) - - - -A = build_matrix(bound_op.scipy_op(queue, "sigma", dtype)) - -eigs = np.linalg.eig(A)[0] -pt.scatter(eigs.real, eigs.imag) diff --git a/test/test_scalar_int_eq.py b/test/test_scalar_int_eq.py index e1fa46a0..c743306c 100644 --- a/test/test_scalar_int_eq.py +++ b/test/test_scalar_int_eq.py @@ -132,9 +132,8 @@ class CurveIntEqTestCase(IntEqTestCase): outer_radius = 2 qbx_order = 5 - target_order = 5 - fmm_order = 15 - fmm_backend = "sumpy" + target_order = qbx_order + fmm_backend = None check_tangential_deriv = True check_gradient = False @@ -898,7 +897,7 @@ cases = [ cases += [ EllipseIntEqTestCase(BiharmonicKernel, bc_type="clamped_plate", - prob_side=prob_side) + prob_side=prob_side, fmm_order = 15, fmm_backend = "sympy") for prob_side in [-1, +1] ] -- GitLab From 1931afb2a0fd504dfc1eb07eb18ccfeb56b1695f Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 25 Nov 2019 02:06:05 -0600 Subject: [PATCH 04/16] Check both fmm_backend=None and fmm_backend=sumpy --- test/test_scalar_int_eq.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/test_scalar_int_eq.py b/test/test_scalar_int_eq.py index c743306c..8790a7ac 100644 --- a/test/test_scalar_int_eq.py +++ b/test/test_scalar_int_eq.py @@ -897,8 +897,9 @@ cases = [ cases += [ EllipseIntEqTestCase(BiharmonicKernel, bc_type="clamped_plate", - prob_side=prob_side, fmm_order = 15, fmm_backend = "sympy") - for prob_side in [-1, +1] + prob_side=-1, fmm_backend = None), + EllipseIntEqTestCase(BiharmonicKernel, bc_type="clamped_plate", + prob_side=-1, fmm_backend = "sumpy", fmm_order=15), ] # {{{ test frontend -- GitLab From 9d2dd19dffc396c111d142890ba3404b077397ed Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 25 Nov 
2019 12:48:20 -0600 Subject: [PATCH 05/16] Ask for sym var or array --- pytential/symbolic/pde/scalar.py | 69 +++++++++++++++++++++----------- test/test_scalar_int_eq.py | 18 ++++----- 2 files changed, 54 insertions(+), 33 deletions(-) diff --git a/pytential/symbolic/pde/scalar.py b/pytential/symbolic/pde/scalar.py index 865a7f5b..0616d7ff 100644 --- a/pytential/symbolic/pde/scalar.py +++ b/pytential/symbolic/pde/scalar.py @@ -64,6 +64,13 @@ class L2WeightedPDEOperator(object): def prepare_rhs(self, b): return self.get_sqrt_weight()*b + def get_density_var(self, name): + """ + Returns a symbolic variable/array corresponding to the density with the + given name. + """ + return sym.var(name) + # }}} @@ -125,8 +132,7 @@ class DirichletOperator(L2WeightedPDEOperator): inv_sqrt_w_u = cse(u/sqrt_w) if map_potentials is None: - def map_potentials(x): # pylint:disable=function-redefined - return x + map_potentials = lambda x: x def S(density): # noqa return sym.S(self.kernel, density, @@ -237,8 +243,7 @@ class NeumannOperator(L2WeightedPDEOperator): inv_sqrt_w_u = cse(u/sqrt_w) if map_potentials is None: - def map_potentials(x): # pylint:disable=function-redefined - return x + map_potentials = lambda x: x kwargs["qbx_forced_limit"] = qbx_forced_limit kwargs["kernel_arguments"] = self.kernel_arguments @@ -307,19 +312,37 @@ class NeumannOperator(L2WeightedPDEOperator): class BiharmonicClampedPlateOperator: + """IE operator and field representation for solving clamped plate Biharmonic + equation. Supports only interior problem. + + Ref: Farkas, Peter. Mathematical foundations for fast algorithms for the + biharmonic equation. Diss. University of Chicago, Dept. of Mathematics, 1989. + + .. automethod:: representation + .. automethod:: operator + """ - def __init__(self, knl): + def __init__(self, knl, loc_sign): self.knl = knl + self.loc_sign = loc_sign + if loc_sign != -1: + raise ValueError("loc_sign!=-1 is not supported.") def prepare_rhs(self, b): - return sym.make_sym_vector("bc", 2) + return b + + def get_density_var(self, name): + """ + Returns a symbolic variable/array corresponding to the density with the + given name. 
+ """ + return sym.make_sym_vector(name, 2) def representation(self, - u, map_potentials=None, qbx_forced_limit=None): + sigma, map_potentials=None, qbx_forced_limit=None): if map_potentials is None: - def map_potentials(x): # pylint:disable=function-redefined - return x + map_potentials = lambda x: x def dv(knl): return DirectionalSourceDerivative(knl, "normal_dir") @@ -327,34 +350,32 @@ class BiharmonicClampedPlateOperator: def dt(knl): return DirectionalSourceDerivative(knl, "tangent_dir") - sigma_sym = sym.make_sym_vector(str(u), 2) normal_dir = sym.normal(2).as_vector() tangent_dir = np.array([-normal_dir[1], normal_dir[0]]) - k1 = sym.S(dv(dv(dv(self.knl))), sigma_sym[0], + k1 = map_potentials(sym.S(dv(dv(dv(self.knl))), sigma[0], kernel_arguments={"normal_dir": normal_dir}, - qbx_forced_limit=qbx_forced_limit) + \ - 3*sym.S(dv(dt(dt(self.knl))), sigma_sym[0], + qbx_forced_limit=qbx_forced_limit)) + \ + 3*map_potentials(sym.S(dv(dt(dt(self.knl))), sigma[0], kernel_arguments={"normal_dir": normal_dir, "tangent_dir": tangent_dir}, - qbx_forced_limit=qbx_forced_limit) + qbx_forced_limit=qbx_forced_limit)) - k2 = -sym.S(dv(dv(self.knl)), sigma_sym[1], + k2 = -map_potentials(sym.S(dv(dv(self.knl)), sigma[1], kernel_arguments={"normal_dir": normal_dir}, - qbx_forced_limit=qbx_forced_limit) + \ - sym.S(dt(dt(self.knl)), sigma_sym[1], + qbx_forced_limit=qbx_forced_limit)) + \ + map_potentials(sym.S(dt(dt(self.knl)), sigma[1], kernel_arguments={"tangent_dir": tangent_dir}, - qbx_forced_limit=qbx_forced_limit) + qbx_forced_limit=qbx_forced_limit)) - return map_potentials(k1 + k2) + return k1 + k2 - def operator(self, u): - sigma_sym = sym.make_sym_vector(str(u), 2) - rep = self.representation(u, qbx_forced_limit='avg') + def operator(self, sigma): + rep = self.representation(sigma, qbx_forced_limit='avg') rep_diff = sym.normal_derivative(2, rep) - int_eq1 = sigma_sym[0]/2 + rep - int_eq2 = -sym.mean_curvature(2)*sigma_sym[0] + sigma_sym[1]/2 + rep_diff + int_eq1 = sigma[0]/2 + rep + int_eq2 = -sym.mean_curvature(2)*sigma[0] + sigma[1]/2 + rep_diff return np.array([int_eq1, int_eq2]) # }}} diff --git a/test/test_scalar_int_eq.py b/test/test_scalar_int_eq.py index 8790a7ac..2c5cbe28 100644 --- a/test/test_scalar_int_eq.py +++ b/test/test_scalar_int_eq.py @@ -566,11 +566,11 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): op = NeumannOperator(knl, loc_sign, use_l2_weighting=True, use_improved_operator=False, kernel_arguments=knl_kwargs_syms) elif case.bc_type == "clamped_plate": - op = BiharmonicClampedPlateOperator(knl) + op = BiharmonicClampedPlateOperator(knl, loc_sign) else: assert False - op_u = op.operator(sym.var("u")) + op_u = op.operator(op.get_density_var("u")) # }}} @@ -645,7 +645,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): # {{{ solve bound_op = bind(qbx, op_u) - rhs = bind(density_discr, op.prepare_rhs(sym.var("bc")))(queue, bc=bc) + rhs = bind(density_discr, op.prepare_rhs(op.get_var("bc")))(queue, bc=bc) try: from pytential.solve import gmres @@ -693,7 +693,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): points_target = PointsTarget(test_targets) bound_tgt_op = bind((qbx, points_target), - op.representation(sym.var("u"))) + op.representation(op.get_density_var("u"))) test_via_bdry = bound_tgt_op(queue, u=weighted_u, **concrete_knl_kwargs) @@ -730,7 +730,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): if case.check_gradient and case.prob_side != "scat": bound_grad_op = bind((qbx, points_target), 
op.representation( - sym.var("u"), + op.get_density_var("u"), map_potentials=lambda pot: sym.grad(mesh.ambient_dim, pot), qbx_forced_limit=None)) @@ -760,7 +760,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): if case.check_tangential_deriv and case.prob_side != "scat": bound_t_deriv_op = bind(qbx, op.representation( - sym.var("u"), + op.get_density_var("u"), map_potentials=lambda pot: sym.tangential_derivative(2, pot), qbx_forced_limit=loc_sign)) @@ -800,7 +800,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): .as_vector(dtype=object) sym_sqrt_j = sym.sqrt_jac_q_weight(density_discr.ambient_dim) - u = bind(density_discr, sym.var("u")/sym_sqrt_j)(queue, u=weighted_u) + u = bind(density_discr, op.get_density_var("u")/sym_sqrt_j)(queue, u=weighted_u) bdry_vis.write_vtk_file("source-%s.vtu" % resolution, [ ("u", u), @@ -829,7 +829,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): try: solved_pot = bind( (qbx_tgt_tol, PointsTarget(fplot.points)), - op.representation(sym.var("u")) + op.representation(op.get_density_var("u")) )(queue, u=weighted_u, **concrete_knl_kwargs) except QBXTargetAssociationFailedException as e: fplot.write_vtk_file( @@ -844,7 +844,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): indicator = bind( (qbx_tgt_tol, PointsTarget(fplot.points)), -sym.D(LaplaceKernel(density_discr.ambient_dim), - sym.var("sigma"), + op.get_density_var("sigma"), qbx_forced_limit=None))( queue, sigma=ones_density).get() -- GitLab From 54578a213340143418486a2c92ebe350679ad2b4 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 25 Nov 2019 12:54:35 -0600 Subject: [PATCH 06/16] Fix formatting --- pytential/symbolic/pde/scalar.py | 12 +++++++----- test/test_scalar_int_eq.py | 16 ++++++++++------ 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/pytential/symbolic/pde/scalar.py b/pytential/symbolic/pde/scalar.py index 0616d7ff..c53309a4 100644 --- a/pytential/symbolic/pde/scalar.py +++ b/pytential/symbolic/pde/scalar.py @@ -33,7 +33,7 @@ from pytential import sym from pytential.symbolic.primitives import ( cse, sqrt_jac_q_weight, QWeight, area_element) -from sumpy.kernel import DirectionalSourceDerivative, DirectionalTargetDerivative +from sumpy.kernel import DirectionalSourceDerivative import numpy as np # noqa @@ -132,7 +132,8 @@ class DirichletOperator(L2WeightedPDEOperator): inv_sqrt_w_u = cse(u/sqrt_w) if map_potentials is None: - map_potentials = lambda x: x + def map_potentials(x): # pylint:disable=function-redefined + return x def S(density): # noqa return sym.S(self.kernel, density, @@ -243,7 +244,8 @@ class NeumannOperator(L2WeightedPDEOperator): inv_sqrt_w_u = cse(u/sqrt_w) if map_potentials is None: - map_potentials = lambda x: x + def map_potentials(x): # pylint:disable=function-redefined + return x kwargs["qbx_forced_limit"] = qbx_forced_limit kwargs["kernel_arguments"] = self.kernel_arguments @@ -342,7 +344,8 @@ class BiharmonicClampedPlateOperator: sigma, map_potentials=None, qbx_forced_limit=None): if map_potentials is None: - map_potentials = lambda x: x + def map_potentials(x): + return x def dv(knl): return DirectionalSourceDerivative(knl, "normal_dir") @@ -370,7 +373,6 @@ class BiharmonicClampedPlateOperator: return k1 + k2 - def operator(self, sigma): rep = self.representation(sigma, qbx_forced_limit='avg') rep_diff = sym.normal_derivative(2, rep) diff --git a/test/test_scalar_int_eq.py b/test/test_scalar_int_eq.py index 2c5cbe28..2f95614e 100644 --- a/test/test_scalar_int_eq.py 
+++ b/test/test_scalar_int_eq.py @@ -475,7 +475,8 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): fmm_backend=case.fmm_backend, **qbx_lpot_kwargs) if case.use_refinement: - if case.knl_class == HelmholtzKernel and getattr(case, "refine_on_helmholtz_k", True): + if case.knl_class == HelmholtzKernel and \ + getattr(case, "refine_on_helmholtz_k", True): refiner_extra_kwargs["kernel_length_scale"] = 5/case.k if hasattr(case, "scaled_max_curvature_threshold"): @@ -639,7 +640,6 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): )(queue, charges=source_charges_dev, **concrete_knl_kwargs) bc = [bc_u, bc_du] - # }}} # {{{ solve @@ -705,7 +705,8 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): # {{{ remove effect of net source charge - if case.knl_class == LaplaceKernel and case.bc_type == "neumann" and loc_sign == -1: + if case.knl_class == LaplaceKernel and case.bc_type == "neumann" \ + and loc_sign == -1: # remove constant offset in interior Laplace Neumann error tgt_ones = np.ones_like(test_direct) @@ -800,7 +801,8 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): .as_vector(dtype=object) sym_sqrt_j = sym.sqrt_jac_q_weight(density_discr.ambient_dim) - u = bind(density_discr, op.get_density_var("u")/sym_sqrt_j)(queue, u=weighted_u) + u = bind(density_discr, op.get_density_var("u")/sym_sqrt_j)(queue, + u=weighted_u) bdry_vis.write_vtk_file("source-%s.vtu" % resolution, [ ("u", u), @@ -887,6 +889,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): # }}} + cases = [ EllipseIntEqTestCase(helmholtz_k, bc_type=bc_type, prob_side=prob_side) @@ -897,13 +900,14 @@ cases = [ cases += [ EllipseIntEqTestCase(BiharmonicKernel, bc_type="clamped_plate", - prob_side=-1, fmm_backend = None), + prob_side=-1, fmm_backend=None), EllipseIntEqTestCase(BiharmonicKernel, bc_type="clamped_plate", - prob_side=-1, fmm_backend = "sumpy", fmm_order=15), + prob_side=-1, fmm_backend="sumpy", fmm_order=15), ] # {{{ test frontend + @pytest.mark.parametrize("case", cases) # Sample test run: # 'test_integral_equation(cl._csc, EllipseIntEqTestCase(LaplaceKernel, "dirichlet", +1), visualize=True)' # noqa: E501 -- GitLab From 44c2f0b9c3694d57e35b45c145ca07eb61f9713f Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 25 Nov 2019 12:57:46 -0600 Subject: [PATCH 07/16] Use pytools master --- .test-conda-env-py3-requirements.txt | 2 +- requirements.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.test-conda-env-py3-requirements.txt b/.test-conda-env-py3-requirements.txt index fa6c0426..fd2ed4d9 100644 --- a/.test-conda-env-py3-requirements.txt +++ b/.test-conda-env-py3-requirements.txt @@ -1,4 +1,4 @@ -git+https://gitlab.tiker.net/inducer/boxtree +git+https://gitlab.tiker.net/inducer/pytools git+https://github.com/inducer/pymbolic git+https://github.com/inducer/loopy git+https://gitlab.tiker.net/inducer/sumpy diff --git a/requirements.txt b/requirements.txt index dd15a69e..625deb28 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ numpy +git+git://github.com/inducer/pytools git+git://github.com/inducer/pymbolic sympy==1.1.1 git+https://github.com/inducer/modepy -- GitLab From 46e50c3bd60ea943e1e86ae7f63eb03fac8bfec9 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 25 Nov 2019 13:01:13 -0600 Subject: [PATCH 08/16] Use pytools master --- .test-conda-env-py3-macos.yml | 1 + .test-conda-env-py3.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.test-conda-env-py3-macos.yml 
b/.test-conda-env-py3-macos.yml index cbf0efad..b4057a83 100644 --- a/.test-conda-env-py3-macos.yml +++ b/.test-conda-env-py3-macos.yml @@ -22,6 +22,7 @@ dependencies: - pip - pip: + - git+https://github.com/inducer/pytools - git+https://gitlab.tiker.net/inducer/boxtree - git+https://github.com/inducer/pymbolic - git+https://github.com/inducer/loopy diff --git a/.test-conda-env-py3.yml b/.test-conda-env-py3.yml index 750b0072..81849ec0 100644 --- a/.test-conda-env-py3.yml +++ b/.test-conda-env-py3.yml @@ -18,6 +18,7 @@ dependencies: - pip - pip: + - git+https://github.com/inducer/pytools - git+https://gitlab.tiker.net/inducer/boxtree - git+https://github.com/inducer/pymbolic - git+https://github.com/inducer/loopy -- GitLab From 12a83aa31d8704ad49789b75cefcfc37a58908ed Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 25 Nov 2019 13:08:39 -0600 Subject: [PATCH 09/16] Fix typo. get_var->get_density_var --- test/test_scalar_int_eq.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_scalar_int_eq.py b/test/test_scalar_int_eq.py index e1a49f71..f61d4164 100644 --- a/test/test_scalar_int_eq.py +++ b/test/test_scalar_int_eq.py @@ -645,7 +645,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): # {{{ solve bound_op = bind(qbx, op_u) - rhs = bind(density_discr, op.prepare_rhs(op.get_var("bc")))(queue, bc=bc) + rhs = bind(density_discr, op.prepare_rhs(op.get_density_var("bc")))(queue, bc=bc) try: from pytential.solve import gmres -- GitLab From 89f695304d68ceda675a3cba12faadcd02249e61 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 25 Nov 2019 13:21:19 -0600 Subject: [PATCH 10/16] Fix for renaming in master --- test/test_scalar_int_eq.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_scalar_int_eq.py b/test/test_scalar_int_eq.py index f61d4164..82faf4cc 100644 --- a/test/test_scalar_int_eq.py +++ b/test/test_scalar_int_eq.py @@ -636,7 +636,7 @@ def run_int_eq_test(cl_ctx, queue, case, resolution, visualize): bc_du = bind( (point_source, density_discr), sym.normal_derivative( - qbx.ambient_dim, pot_src, where=sym.DEFAULT_TARGET) + qbx.ambient_dim, pot_src, dofdesc=sym.DEFAULT_TARGET) )(queue, charges=source_charges_dev, **concrete_knl_kwargs) bc = [bc_u, bc_du] -- GitLab From 3f03476906ed689e6922cd2adde48473051a4997 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 25 Nov 2019 14:38:35 -0600 Subject: [PATCH 11/16] pylint fix --- pytential/symbolic/pde/scalar.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytential/symbolic/pde/scalar.py b/pytential/symbolic/pde/scalar.py index 44ac9017..2343bd43 100644 --- a/pytential/symbolic/pde/scalar.py +++ b/pytential/symbolic/pde/scalar.py @@ -345,7 +345,7 @@ class BiharmonicClampedPlateOperator: sigma, map_potentials=None, qbx_forced_limit=None): if map_potentials is None: - def map_potentials(x): + def map_potentials(x): # pylint:disable=function-redefined return x def dv(knl): -- GitLab From 14330850452ecd1febce5e8de5a1ad4f644823e0 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 25 Nov 2019 23:53:55 -0600 Subject: [PATCH 12/16] Fix reference --- pytential/symbolic/pde/scalar.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pytential/symbolic/pde/scalar.py b/pytential/symbolic/pde/scalar.py index 2343bd43..73c9e4e6 100644 --- a/pytential/symbolic/pde/scalar.py +++ b/pytential/symbolic/pde/scalar.py @@ -319,7 +319,8 @@ class BiharmonicClampedPlateOperator: equation. Supports only interior problem. 
Ref: Farkas, Peter. Mathematical foundations for fast algorithms for the - biharmonic equation. Diss. University of Chicago, Dept. of Mathematics, 1989. + biharmonic equation. Technical Report 765, Department of Computer Science, + Yale University, 1990. .. automethod:: representation .. automethod:: operator -- GitLab From ad3d2cefeb55f402b6a7b6f4d56d031ad6b704b2 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 26 Nov 2019 02:24:56 -0600 Subject: [PATCH 13/16] Install new version of pytools --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6a1c1be7..ebf12132 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -64,7 +64,7 @@ Python 3 POCL Examples: - test -n "$SKIP_EXAMPLES" && exit - export PY_EXE=python3 - export PYOPENCL_TEST=portable:pthread - - export EXTRA_INSTALL="Cython pybind11 numpy mako pyvisfile matplotlib" + - export EXTRA_INSTALL="Cython pybind11 numpy mako git+git://github.com/inducer/pytools pyvisfile matplotlib" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-py-project-and-run-examples.sh - ". ./build-py-project-and-run-examples.sh" tags: -- GitLab From 9da151a60db7f38b9bb2e7edbe21e0584a947ad4 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 26 Nov 2019 13:17:25 -0600 Subject: [PATCH 14/16] Reference BiharmonicClampedPlateOperator --- pytential/symbolic/pde/scalar.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pytential/symbolic/pde/scalar.py b/pytential/symbolic/pde/scalar.py index 73c9e4e6..46d21394 100644 --- a/pytential/symbolic/pde/scalar.py +++ b/pytential/symbolic/pde/scalar.py @@ -26,6 +26,7 @@ __doc__ = """ .. autoclass:: L2WeightedPDEOperator .. autoclass:: DirichletOperator .. autoclass:: NeumannOperator +.. autoclass:: BiharmonicClampedPlateOperator """ -- GitLab From 1903cda5b31d70f74646a99b39b235355958bafe Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Sun, 1 Dec 2019 22:12:46 -0600 Subject: [PATCH 15/16] Add PDE and boundary conditions --- pytential/symbolic/pde/scalar.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/pytential/symbolic/pde/scalar.py b/pytential/symbolic/pde/scalar.py index 46d21394..04e795bb 100644 --- a/pytential/symbolic/pde/scalar.py +++ b/pytential/symbolic/pde/scalar.py @@ -316,8 +316,17 @@ class NeumannOperator(L2WeightedPDEOperator): class BiharmonicClampedPlateOperator: - """IE operator and field representation for solving clamped plate Biharmonic - equation. Supports only interior problem. + r"""IE operator and field representation for solving clamped plate Biharmonic + equation where, + + .. math:: + \begin{align*} + \Delta^2 u &= 0 \text{ on } D \\ + u &= g_1 \text{ on } \delta D \\ + \frac{\partial u}{\partial \nu} &= g_2 \text{ on } \delta D. + \end{align*} + + Supports only interior problem. Ref: Farkas, Peter. Mathematical foundations for fast algorithms for the biharmonic equation. Technical Report 765, Department of Computer Science, @@ -338,8 +347,8 @@ class BiharmonicClampedPlateOperator: def get_density_var(self, name): """ - Returns a symbolic variable/array corresponding to the density with the - given name. + Returns a symbolic variable of length 2 corresponding to the density with + the given name. """ return sym.make_sym_vector(name, 2) @@ -377,6 +386,9 @@ class BiharmonicClampedPlateOperator: return k1 + k2 def operator(self, sigma): + """ + Returns the two second kind integral equations. 
+ """ rep = self.representation(sigma, qbx_forced_limit='avg') rep_diff = sym.normal_derivative(2, rep) int_eq1 = sigma[0]/2 + rep -- GitLab From 4ad7c5abcf6214052f7fa3654e36a58e40a8fadf Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Sun, 1 Dec 2019 22:20:55 -0600 Subject: [PATCH 16/16] Clarify boundary conditions --- pytential/symbolic/pde/scalar.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pytential/symbolic/pde/scalar.py b/pytential/symbolic/pde/scalar.py index 04e795bb..763a403f 100644 --- a/pytential/symbolic/pde/scalar.py +++ b/pytential/symbolic/pde/scalar.py @@ -326,7 +326,10 @@ class BiharmonicClampedPlateOperator: \frac{\partial u}{\partial \nu} &= g_2 \text{ on } \delta D. \end{align*} - Supports only interior problem. + This operator assumes that the boundary data :math:`g_1, g_2` are + represented as column vectors and vertically stacked. + + .. note :: This operator supports only interior problem. Ref: Farkas, Peter. Mathematical foundations for fast algorithms for the biharmonic equation. Technical Report 765, Department of Computer Science, -- GitLab