From a53a67dee88a2e02e0bc7e9a79a2a0eafc9701f3 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 11 Dec 2016 18:01:13 -0600 Subject: [PATCH 1/9] Get rid of automatic 'dimensionalization' --- pytential/__init__.py | 7 +- pytential/qbx/__init__.py | 21 +- pytential/qbx/geometry.py | 9 +- pytential/symbolic/compiler.py | 2 +- pytential/symbolic/execution.py | 10 +- pytential/symbolic/mappers.py | 319 ++----------------------------- pytential/symbolic/pde/scalar.py | 98 ++++++---- pytential/symbolic/primitives.py | 304 +++++++++++++++++++---------- test/test_layer_pot.py | 71 ++++--- 9 files changed, 356 insertions(+), 485 deletions(-) diff --git a/pytential/__init__.py b/pytential/__init__.py index 64eb8989..9be87e78 100644 --- a/pytential/__init__.py +++ b/pytential/__init__.py @@ -34,7 +34,9 @@ from pytools import memoize_on_first_arg @memoize_on_first_arg def _integral_op(discr): from pytential import sym, bind - return bind(discr, sym.integral(sym.var("integrand"))) + return bind(discr, + sym.integral( + discr.ambient_dim, discr.dim, sym.var("integrand"))) def integral(discr, queue, x): @@ -51,7 +53,8 @@ def _norm_op(discr, num_components): else: integrand = sym.abs(sym.var("integrand"))**2 - return bind(discr, sym.integral(integrand)) + return bind(discr, + sym.integral(discr.ambient_dim, discr.dim, integrand)) def norm(discr, queue, x): diff --git a/pytential/qbx/__init__.py b/pytential/qbx/__init__.py index a2af8377..a800a2be 100644 --- a/pytential/qbx/__init__.py +++ b/pytential/qbx/__init__.py @@ -173,6 +173,10 @@ class QBXLayerPotentialSource(LayerPotentialSource): def ambient_dim(self): return self.density_discr.ambient_dim + @property + def dim(self): + return self.density_discr.dim + @property def cl_context(self): return self.density_discr.cl_context @@ -189,9 +193,13 @@ class QBXLayerPotentialSource(LayerPotentialSource): def centers(self, target_discr, sign): from pytential import sym, bind with cl.CommandQueue(self.cl_context) as queue: + amb_dim = self.ambient_dim + dim = self.dim + return bind(target_discr, - sym.Nodes() + 2*sign*sym.area_element()*sym.normal())(queue) \ - .as_vector(np.object) + sym.nodes(amb_dim) + + 2*sign*sym.area_element(amb_dim, dim) + * sym.normal(amb_dim, dim))(queue).as_vector(np.object) @memoize_method def weights_and_area_elements(self): @@ -203,8 +211,10 @@ class QBXLayerPotentialSource(LayerPotentialSource): # area element instead, then upsample that. 
area_element = self.resampler(queue, - bind(self.density_discr, - p.area_element())(queue)) + bind( + self.density_discr, + p.area_element(self.ambient_dim, self.dim) + )(queue)) qweight = bind(self.fine_density_discr, p.QWeight())(queue) @@ -221,9 +231,6 @@ class QBXLayerPotentialSource(LayerPotentialSource): return QBXPreprocessor(name, discretizations)(expr) def op_group_features(self, expr): - from pytential.symbolic.primitives import IntGdSource - assert not isinstance(expr, IntGdSource) - from sumpy.kernel import AxisTargetDerivativeRemover result = ( expr.source, expr.density, diff --git a/pytential/qbx/geometry.py b/pytential/qbx/geometry.py index 08a363ae..6c8bf551 100644 --- a/pytential/qbx/geometry.py +++ b/pytential/qbx/geometry.py @@ -765,15 +765,18 @@ class QBXFMMGeometryData(object): # two: one for positive side, one for negative side ncenters += 2 * len(kept_indices) * el_group.nelements + adim = self_discr.ambient_dim + dim = self_discr.dim + from pytential import sym, bind from pytools.obj_array import make_obj_array with cl.CommandQueue(self.cl_context) as queue: - radii_sym = sym.cse(2*sym.area_element(), "radii") + radii_sym = sym.cse(2*sym.area_element(adim, dim), "radii") all_radii, all_pos_centers, all_neg_centers = bind(self_discr, make_obj_array([ radii_sym, - sym.Nodes() + radii_sym*sym.normal(), - sym.Nodes() - radii_sym*sym.normal() + sym.nodes(adim) + radii_sym*sym.normal(adim), + sym.nodes(adim) - radii_sym*sym.normal(adim) ]))(queue) # The centers are returned from the above as multivectors. diff --git a/pytential/symbolic/compiler.py b/pytential/symbolic/compiler.py index d24b0a09..09bf814e 100644 --- a/pytential/symbolic/compiler.py +++ b/pytential/symbolic/compiler.py @@ -198,7 +198,7 @@ class LayerPotentialInstruction(Instruction): limit_str = "[+] " elif o.qbx_forced_limit == -1: limit_str = "[-] " - if o.qbx_forced_limit == 2: + elif o.qbx_forced_limit == 2: limit_str = "[(+)] " elif o.qbx_forced_limit == -2: limit_str = "[(-)] " diff --git a/pytential/symbolic/execution.py b/pytential/symbolic/execution.py index 4b42d727..e2847243 100644 --- a/pytential/symbolic/execution.py +++ b/pytential/symbolic/execution.py @@ -349,7 +349,6 @@ def prepare_expr(places, expr, auto_where=None): from pytential.symbolic.mappers import ( ToTargetTagger, - Dimensionalizer, DerivativeBinder, ) @@ -362,20 +361,13 @@ def prepare_expr(places, expr, auto_where=None): if auto_where: expr = ToTargetTagger(*auto_where)(expr) - # Dimensionalize so that preprocessing only has to deal with - # dimension-specific layer potentials. - - expr = Dimensionalizer(places)(expr) - expr = DerivativeBinder()(expr) for name, place in six.iteritems(places): if isinstance(place, LayerPotentialSource): expr = place.preprocess_optemplate(name, places, expr) - # Dimensionalize again, in case the preprocessor spit out - # dimension-independent stuff. - return Dimensionalizer(places)(expr) + return expr # }}} diff --git a/pytential/symbolic/mappers.py b/pytential/symbolic/mappers.py index 0295505c..0c30cb4b 100644 --- a/pytential/symbolic/mappers.py +++ b/pytential/symbolic/mappers.py @@ -23,10 +23,8 @@ THE SOFTWARE. 
""" import six -from six.moves import range from functools import reduce -import numpy as np from pymbolic.mapper.stringifier import ( CSESplittingStringifyMapperMixin, PREC_NONE, PREC_PRODUCT) @@ -36,9 +34,7 @@ from pymbolic.mapper import ( ) from pymbolic.mapper.dependency import ( DependencyMapper as DependencyMapperBase) -from pymbolic.mapper.coefficient import ( - CoefficientCollector as CoefficientCollectorBase) -from pymbolic.geometric_algebra import MultiVector, componentwise +from pymbolic.geometric_algebra import componentwise from pymbolic.geometric_algebra.mapper import ( CombineMapper as CombineMapperBase, IdentityMapper as IdentityMapperBase, @@ -46,8 +42,6 @@ from pymbolic.geometric_algebra.mapper import ( DerivativeBinder as DerivativeBinderBase, EvaluationMapper as EvaluationMapperBase, - Dimensionalizer as DimensionalizerBase, - StringifyMapper as BaseStringifyMapper, DerivativeSourceAndNablaComponentCollector @@ -63,9 +57,6 @@ import pytential.symbolic.primitives as prim class IdentityMapper(IdentityMapperBase): - def map_dimensionalized_expression(self, expr): - return type(expr)(self.rec(expr.child)) - def map_node_sum(self, expr): return type(expr)(self.rec(expr.operand)) @@ -75,14 +66,13 @@ class IdentityMapper(IdentityMapperBase): # {{{ childless -- no need to rebuild - def map_vector_variable(self, expr): + def map_ones(self, expr): return expr - map_ones = map_vector_variable - map_q_weight = map_vector_variable - map_node_coordinate_component = map_vector_variable - map_parametrization_gradient = map_vector_variable - map_parametrization_derivative = map_vector_variable + map_q_weight = map_ones + map_node_coordinate_component = map_ones + map_parametrization_gradient = map_ones + map_parametrization_derivative = map_ones # }}} @@ -105,15 +95,6 @@ class IdentityMapper(IdentityMapperBase): for name, arg_expr in expr.kernel_arguments.items() )) - def map_int_g_ds(self, expr): - return expr.copy( - density=self.rec(expr.density), - dsource=self.rec(expr.dsource), - kernel_arguments=dict( - (name, self.rec(arg_expr)) - for name, arg_expr in expr.kernel_arguments.items() - )) - class CombineMapper(CombineMapperBase): def map_node_sum(self, expr): @@ -122,39 +103,32 @@ class CombineMapper(CombineMapperBase): map_num_reference_derivative = map_node_sum def map_int_g(self, expr): - result = self.rec(expr.density) - for arg_expr in expr.kernel_arguments.values(): - result.update(self.rec(arg_expr)) - return result - - def map_int_g_ds(self, expr): - return self.rec(expr.dsource) | self.map_int_g(expr) + return self.combine( + [self.rec(expr.density)] + + [self.rec(arg_expr) + for arg_expr in expr.kernel_arguments.values()]) def map_inverse(self, expr): - from operator import or_ - return self.rec(expr.rhs) | reduce(or_, + return self.combine([ + self.rec(expr.rhs)] + [ (self.rec(name_expr) - for name_expr in six.itervalues(expr.extra_vars)), - set()) + for name_expr in six.itervalues(expr.extra_vars)) + ]) class Collector(CollectorBase, CombineMapper): - def map_vector_variable(self, expr): + def map_ones(self, expr): return set() - map_ones = map_vector_variable - map_node_coordinate_component = map_vector_variable - map_parametrization_derivative = map_vector_variable - map_q_weight = map_vector_variable + map_node_coordinate_component = map_ones + map_parametrization_derivative = map_ones + map_q_weight = map_ones class OperatorCollector(Collector): def map_int_g(self, expr): return set([expr]) | Collector.map_int_g(self, expr) - def map_int_g_ds(self, expr): - return 
set([expr]) | Collector.map_int_g(self, expr) - class DependencyMapper(DependencyMapperBase, Collector): pass @@ -171,9 +145,6 @@ class EvaluationMapper(EvaluationMapperBase): def map_variable(self, expr): return expr - def map_vector_variable(self, expr): - return expr - def map_subscript(self, expr): return self.rec(expr.aggregate)[self.rec(expr.index)] @@ -204,19 +175,6 @@ class EvaluationMapper(EvaluationMapperBase): )), expr.density) - def map_int_g_ds(self, expr): - return componentwise( - lambda subexpr: type(expr)( - self.rec(expr.dsource), - expr.kernel, - self.rec(subexpr), - expr.qbx_forced_limit, expr.source, expr.target, - kernel_arguments=dict( - (name, self.rec(arg_expr)) - for name, arg_expr in expr.kernel_arguments.items() - )), - expr.density) - def map_common_subexpression(self, expr): return prim.cse( self.rec(expr.child), @@ -252,9 +210,6 @@ class LocationTagger(CSECachingMapperMixin, IdentityMapper): else: return expr - map_parametrization_derivative = map_ones - map_nodes = map_ones - def map_node_coordinate_component(self, expr): if expr.where is None: return type(expr)( @@ -283,26 +238,7 @@ class LocationTagger(CSECachingMapperMixin, IdentityMapper): self.operand_rec(expr.density), expr.qbx_forced_limit, source, target, kernel_arguments=dict( - (name, self.rec(arg_expr)) - for name, arg_expr in expr.kernel_arguments.items() - )) - - def map_int_g_ds(self, expr): - source = expr.source - target = expr.target - - if source is None: - source = self.default_source - if target is None: - target = self.default_where - - return type(expr)( - self.operand_rec(expr.dsource), - expr.kernel, - self.operand_rec(expr.density), - expr.qbx_forced_limit, source, target, - kernel_arguments=dict( - (name, self.rec(arg_expr)) + (name, self.operand_rec(arg_expr)) for name, arg_expr in expr.kernel_arguments.items() )) @@ -340,157 +276,6 @@ class ToTargetTagger(LocationTagger): # }}} -# {{{ dimensionalizer - -class _DSourceCoefficientFinder(CoefficientCollectorBase): - def map_nabla_component(self, expr): - return {expr: 1} - - def map_variable(self, expr): - return {1: expr} - - def map_common_subexpression(self, expr): - return {1: expr} - - -_DIR_VEC_NAME = "dsource_vec" - - -def _insert_source_derivative_into_kernel(kernel): - # Inserts the source derivative at the innermost - # kernel wrapping level. - from sumpy.kernel import DirectionalSourceDerivative - - if kernel.get_base_kernel() is kernel: - return DirectionalSourceDerivative( - kernel, dir_vec_name=_DIR_VEC_NAME) - else: - return kernel.replace_inner_kernel( - _insert_source_derivative_into_kernel(kernel.kernel)) - - -def _get_dir_vec(dsource, ambient_dim): - coeffs = _DSourceCoefficientFinder()(dsource) - - dir_vec = np.zeros(ambient_dim, np.object) - for i in range(ambient_dim): - dir_vec[i] = coeffs.pop(prim.NablaComponent(i, None), 0) - - if coeffs: - raise RuntimeError("source derivative expression contained constant term") - - return dir_vec - - -class Dimensionalizer(DimensionalizerBase, EvaluationMapper): - """Once the discretization is known, the dimension count is, too. - This mapper plugs in dimension-specific quantities for their - non-dimensional symbolic counterparts. 
- """ - - def __init__(self, discr_dict): - self.discr_dict = discr_dict - super(Dimensionalizer, self).__init__() - - @property - def ambient_dim(self): - from pytools import single_valued - return single_valued( - discr.ambient_dim - for discr in six.itervalues(self.discr_dict)) - - def map_vector_variable(self, expr): - from pymbolic import make_sym_vector - num_components = expr.num_components - - if num_components is None: - num_components = self.ambient_dim - - return MultiVector(make_sym_vector(expr.name, num_components)) - - def map_dimensionalized_expression(self, expr): - return expr.child - - def map_parametrization_derivative(self, expr): - discr = self.discr_dict[expr.where] - - from pytential.qbx import LayerPotentialSource - if isinstance(discr, LayerPotentialSource): - discr = discr.fine_density_discr - - from meshmode.discretization import Discretization - if not isinstance(discr, Discretization): - raise RuntimeError("Cannot compute the parametrization derivative " - "of something that is not a discretization (a target perhaps?). " - "For example, you will receive this error if you try to " - "evaluate S' in the volume.") - - par_grad = np.zeros((discr.ambient_dim, discr.dim), np.object) - for i in range(discr.ambient_dim): - for j in range(discr.dim): - par_grad[i, j] = prim.NumReferenceDerivative( - frozenset([j]), - prim.NodeCoordinateComponent(i, expr.where), - expr.where) - - from pytools import product - return product(MultiVector(vec) for vec in par_grad.T) - - def map_nodes(self, expr): - discr = self.discr_dict[expr.where] - from pytools.obj_array import make_obj_array - return MultiVector( - make_obj_array([ - prim.NodeCoordinateComponent(i, expr.where) - for i in range(discr.ambient_dim)])) - - def map_int_g(self, expr): - from sumpy.kernel import KernelDimensionSetter - return componentwise( - lambda subexpr: type(expr)( - KernelDimensionSetter(self.ambient_dim)(expr.kernel), - self.rec(subexpr), - expr.qbx_forced_limit, expr.source, expr.target, - kernel_arguments=dict( - (name, self.rec(arg_expr)) - for name, arg_expr in expr.kernel_arguments.items() - )), - expr.density) - - def map_int_g_ds(self, expr): - dsource = self.rec(expr.dsource) - - ambient_dim = self.ambient_dim - - from sumpy.kernel import KernelDimensionSetter - kernel = _insert_source_derivative_into_kernel( - KernelDimensionSetter(ambient_dim)(expr.kernel)) - - from pytools.obj_array import make_obj_array - nabla = MultiVector(make_obj_array( - [prim.NablaComponent(axis, None) - for axis in range(ambient_dim)])) - - kernel_arguments = dict( - (name, self.rec(arg_expr)) - for name, arg_expr in expr.kernel_arguments.items() - ) - - def add_dir_vec_to_kernel_args(coeff): - result = kernel_arguments.copy() - result[_DIR_VEC_NAME] = _get_dir_vec(coeff, ambient_dim) - return result - - rec_operand = prim.cse(self.rec(expr.density)) - return (dsource*nabla).map( - lambda coeff: prim.IntG( - kernel, - rec_operand, expr.qbx_forced_limit, expr.source, expr.target, - kernel_arguments=add_dir_vec_to_kernel_args(coeff))) - -# }}} - - # {{{ derivative binder class DerivativeTaker(Mapper): @@ -589,10 +374,6 @@ class QBXPreprocessor(IdentityMapper): else: return expr - def map_int_g_ds(self, expr): - raise RuntimeError("user-facing source derivative operators are expected " - "to have been eliminated by the time QBXPreprocessor is called.") - # }}} @@ -611,15 +392,6 @@ def stringify_where(where): class StringifyMapper(BaseStringifyMapper): - def map_nodes(self, expr, enclosing_prec): - return "x" - - def 
map_vector_variable(self, expr, enclosing_prec): - return " %s> " % expr.name - - def map_dimensionalized_expression(self, expr, enclosing_prec): - return self.rec(expr.child, enclosing_prec) - def map_ones(self, expr, enclosing_prec): return "Ones.%s" % stringify_where(expr.where) @@ -680,27 +452,6 @@ class StringifyMapper(BaseStringifyMapper): expr.kernel, self.rec(expr.density, PREC_PRODUCT)) - def map_int_g_ds(self, expr, enclosing_prec): - if isinstance(expr.dsource, MultiVector): - deriv_term = r"(%s*\/)" % self.rec(expr.dsource, PREC_PRODUCT) - else: - deriv_term = self.rec(expr.dsource, PREC_PRODUCT) - - result = u"Int[%s->%s]@(%s)%s %s G_%s %s" % ( - stringify_where(expr.source), - stringify_where(expr.target), - expr.qbx_forced_limit, - self._stringify_kernel_args( - expr.kernel_arguments), - deriv_term, - expr.kernel, - self.rec(expr.density, PREC_NONE)) - - if enclosing_prec >= PREC_PRODUCT: - return "(%s)" % result - else: - return result - # }}} @@ -724,19 +475,6 @@ class GraphvizMapper(GraphvizMapperBase): if self.visit(expr, node_printed=True): self.post_visit(expr) - map_nodes = map_pytential_leaf - map_vector_variable = map_pytential_leaf - - def map_dimensionalized_expression(self, expr): - self.lines.append( - "%s [label=\"%s\",shape=circle];" % ( - self.get_id(expr), type(expr).__name__)) - if not self.visit(expr, node_printed=True): - return - - self.rec(expr.child) - self.post_visit(expr) - map_ones = map_pytential_leaf def map_map_node_sum(self, expr): @@ -774,25 +512,6 @@ class GraphvizMapper(GraphvizMapperBase): self.post_visit(expr) - def map_int_g_ds(self, expr): - descr = u"Int[%s->%s]@(%d) (%s)" % ( - stringify_where(expr.source), - stringify_where(expr.target), - expr.qbx_forced_limit, - expr.kernel, - ) - self.lines.append( - "%s [label=\"%s\",shape=box];" % ( - self.get_id(expr), descr)) - if not self.visit(expr, node_printed=True): - return - - self.rec(expr.density) - for arg_expr in expr.kernel_arguments.values(): - self.rec(arg_expr) - self.rec(expr.dsource) - self.post_visit(expr) - # }}} diff --git a/pytential/symbolic/pde/scalar.py b/pytential/symbolic/pde/scalar.py index 8cb2f825..92295c5d 100644 --- a/pytential/symbolic/pde/scalar.py +++ b/pytential/symbolic/pde/scalar.py @@ -39,7 +39,6 @@ __doc__ = """ from pytential import sym from pytential.symbolic.primitives import ( cse, - S, D, Sp, Dp, Ones, mean, sqrt_jac_q_weight, QWeight, area_element) import numpy as np @@ -50,7 +49,8 @@ from six.moves import range # {{{ L^2 weighting class L2WeightedPDEOperator(object): - def __init__(self, use_l2_weighting): + def __init__(self, kernel, use_l2_weighting): + self.kernel = kernel self.use_l2_weighting = use_l2_weighting if not use_l2_weighting: @@ -60,13 +60,13 @@ class L2WeightedPDEOperator(object): def get_weight(self, where=None): if self.use_l2_weighting: - return cse(area_element(where)*QWeight(where)) + return cse(area_element(self.kernel.dim, where=where)*QWeight(where)) else: return 1 def get_sqrt_weight(self, where=None): if self.use_l2_weighting: - return sqrt_jac_q_weight(where) + return sqrt_jac_q_weight(self.kernel.dim, where=where) else: return 1 @@ -84,21 +84,25 @@ class DirichletOperator(L2WeightedPDEOperator): charge. (This is true at least in 2D.) 
""" - def __init__(self, kernel, loc_sign, alpha=None, use_l2_weighting=False): + def __init__(self, kernel, loc_sign, alpha=None, use_l2_weighting=False, + kernel_arguments=None): """ :arg loc_sign: +1 for exterior, -1 for interior :arg alpha: the coefficient for the combined-field representation Set to 0 for Laplace. """ - L2WeightedPDEOperator.__init__(self, use_l2_weighting) - assert loc_sign in [-1, 1] - from sumpy.kernel import to_kernel_and_args, LaplaceKernel - self.kernel_and_args = to_kernel_and_args(kernel) + from sumpy.kernel import Kernel, LaplaceKernel + assert isinstance(kernel, Kernel) + + if kernel_arguments is None: + kernel_arguments = {} + self.kernel_arguments = kernel_arguments + self.loc_sign = loc_sign - self.kernel, _ = self.kernel_and_args + L2WeightedPDEOperator.__init__(self, kernel, use_l2_weighting) if alpha is None: if isinstance(self.kernel, LaplaceKernel): @@ -114,7 +118,7 @@ class DirichletOperator(L2WeightedPDEOperator): return isinstance(self.kernel, LaplaceKernel) and self.loc_sign > 0 def representation(self, - u, map_potentials=None, qbx_forced_limit=None, **kwargs): + u, map_potentials=None, qbx_forced_limit=None): sqrt_w = self.get_sqrt_weight() inv_sqrt_w_u = cse(u/sqrt_w) @@ -122,12 +126,19 @@ class DirichletOperator(L2WeightedPDEOperator): def map_potentials(x): return x - kwargs["qbx_forced_limit"] = qbx_forced_limit + def S(density): # noqa + return sym.S(self.kernel, density, + kernel_arguments=self.kernel_arguments, + qbx_forced_limit=qbx_forced_limit) + + def D(density): # noqa + return sym.D(self.kernel, density, + kernel_arguments=self.kernel_arguments, + qbx_forced_limit=qbx_forced_limit) return ( - self.alpha*map_potentials( - S(self.kernel_and_args, inv_sqrt_w_u, **kwargs)) - - map_potentials(D(self.kernel_and_args, inv_sqrt_w_u, **kwargs))) + self.alpha*map_potentials(S(inv_sqrt_w_u)) + - map_potentials(D(inv_sqrt_w_u))) def operator(self, u): sqrt_w = self.get_sqrt_weight() @@ -148,10 +159,11 @@ class DirichletOperator(L2WeightedPDEOperator): return (-self.loc_sign*0.5*u + sqrt_w*( - self.alpha*S(self.kernel_and_args, inv_sqrt_w_u, - qbx_forced_limit=+1) - - D(self.kernel_and_args, inv_sqrt_w_u, - qbx_forced_limit="avg") + self.alpha*sym.S(self.kernel, inv_sqrt_w_u, + qbx_forced_limit=+1, kernel_arguments=self.kernel_arguments) + - sym.D(self.kernel, inv_sqrt_w_u, + qbx_forced_limit="avg", + kernel_arguments=self.kernel_arguments) + ones_contribution)) # }}} @@ -162,7 +174,8 @@ class DirichletOperator(L2WeightedPDEOperator): class NeumannOperator(L2WeightedPDEOperator): def __init__(self, kernel, loc_sign, alpha=None, use_improved_operator=True, - laplace_kernel=0, use_l2_weighting=False): + laplace_kernel=0, use_l2_weighting=False, + kernel_arguments=None): """ :arg loc_sign: +1 for exterior, -1 for interior :arg alpha: the coefficient for the combined-field representation @@ -170,17 +183,20 @@ class NeumannOperator(L2WeightedPDEOperator): :arg use_improved_operator: Whether to use the least singular operator available """ - L2WeightedPDEOperator.__init__(self, use_l2_weighting) assert loc_sign in [-1, 1] - from sumpy.kernel import to_kernel_and_args, LaplaceKernel + from sumpy.kernel import Kernel, LaplaceKernel + assert isinstance(kernel, Kernel) + + if kernel_arguments is None: + kernel_arguments = {} + self.kernel_arguments = kernel_arguments - self.kernel_and_args = to_kernel_and_args(kernel) self.loc_sign = loc_sign - self.laplace_kernel_and_args = to_kernel_and_args(laplace_kernel) + self.laplace_kernel = 
LaplaceKernel(kernel.dim) - self.kernel, _ = self.kernel_and_args + L2WeightedPDEOperator.__init__(self, kernel, use_l2_weighting) if alpha is None: if isinstance(self.kernel, LaplaceKernel): @@ -205,14 +221,15 @@ class NeumannOperator(L2WeightedPDEOperator): return x kwargs["qbx_forced_limit"] = qbx_forced_limit + kwargs["kernel_arguments"] = self.kernel_arguments return ( map_potentials( - S(self.kernel_and_args, inv_sqrt_w_u, **kwargs)) + sym.S(self.kernel, inv_sqrt_w_u, **kwargs)) - self.alpha * map_potentials( - D(self.kernel_and_args, - S(self.laplace_kernel_and_args, inv_sqrt_w_u), + sym.D(self.kernel, + sym.S(self.laplace_kernel, inv_sqrt_w_u), **kwargs))) def operator(self, u): @@ -221,24 +238,29 @@ class NeumannOperator(L2WeightedPDEOperator): sqrt_w = self.get_sqrt_weight() inv_sqrt_w_u = cse(u/sqrt_w) - lknl = self.laplace_kernel_and_args + knl = self.kernel + lknl = self.laplace_kernel + + knl_kwargs = {} + knl_kwargs["kernel_arguments"] = self.kernel_arguments - DpS0u = Dp(self.kernel_and_args, # noqa - cse(S(lknl, inv_sqrt_w_u))) + DpS0u = sym.Dp(knl, # noqa + cse(sym.S(lknl, inv_sqrt_w_u)), + **knl_kwargs) if self.use_improved_operator: - Dp0S0u = -0.25*u + Sp( # noqa + Dp0S0u = -0.25*u + sym.Sp( # noqa lknl, # noqa - Sp(lknl, inv_sqrt_w_u, qbx_forced_limit="avg"), + sym.Sp(lknl, inv_sqrt_w_u, qbx_forced_limit="avg"), qbx_forced_limit="avg") if isinstance(self.kernel, HelmholtzKernel): DpS0u = ( # noqa - Dp(self.kernel_and_args - lknl, # noqa - cse(S(lknl, inv_sqrt_w_u, qbx_forced_limit=+1)), - qbx_forced_limit=+1) + sym.Dp(knl - lknl, # noqa + cse(sym.S(lknl, inv_sqrt_w_u, qbx_forced_limit=+1)), + qbx_forced_limit=+1, **knl_kwargs) + Dp0S0u) - elif isinstance(self.kernel_and_args, LaplaceKernel): + elif isinstance(knl, LaplaceKernel): DpS0u = Dp0S0u # noqa else: raise ValueError("no improved operator for %s known" @@ -256,7 +278,7 @@ class NeumannOperator(L2WeightedPDEOperator): return (-self.loc_sign*0.5*u + sqrt_w*( - Sp(self.kernel_and_args, inv_sqrt_w_u, qbx_forced_limit="avg") + sym.Sp(knl, inv_sqrt_w_u, qbx_forced_limit="avg", **knl_kwargs) - self.alpha*DpS0u + ones_contribution )) diff --git a/pytential/symbolic/primitives.py b/pytential/symbolic/primitives.py index 99d1e7f8..1eb79475 100644 --- a/pytential/symbolic/primitives.py +++ b/pytential/symbolic/primitives.py @@ -33,7 +33,7 @@ from pymbolic.primitives import ( # noqa make_common_subexpression as cse) from pymbolic.geometric_algebra import MultiVector, componentwise from pymbolic.geometric_algebra.primitives import ( # noqa - Nabla, NablaComponent, DerivativeSource, Derivative) + NablaComponent, DerivativeSource, Derivative as DerivativeBase) from pymbolic.primitives import make_sym_vector # noqa @@ -42,7 +42,8 @@ __doc__ = """ :class:`pytential.discretization.Discretization` .. autoclass:: Variable -.. autoclass:: VectorVariable +.. autoclass:: make_sym_vector +.. autoclass:: make_sym_mv Functions ^^^^^^^^^ @@ -57,12 +58,12 @@ Discretization properties ^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: QWeight -.. autoclass:: Nodes -.. autoclass:: ParametrizationDerivative -.. autoclass:: pseudoscalar -.. autoclass:: area_element -.. autoclass:: sqrt_jac_q_weight -.. autoclass:: normal +.. autofunction:: nodes +.. autofunction:: parametrization_derivative +.. autofunction:: pseudoscalar +.. autofunction:: area_element +.. autofunction:: sqrt_jac_q_weight +.. autofunction:: normal Elementary numerics ^^^^^^^^^^^^^^^^^^^ @@ -75,16 +76,15 @@ Elementary numerics .. autofunction:: area .. 
autoclass:: IterativeInverse +Calculus +^^^^^^^^ +.. autoclass:: Derivative + Layer potentials ^^^^^^^^^^^^^^^^ .. autoclass:: IntG -.. autoclass:: IntGdSource - -Internal helpers -^^^^^^^^^^^^^^^^ - -.. autoclass:: DimensionalizedExpression +.. autofunction:: int_g_dsource """ @@ -121,33 +121,8 @@ class Expression(ExpressionBase): return StringifyMapper -class VectorVariable(ExpressionBase): - """:class:`pytential.symbolic.mappers.Dimensionalizer` - turns this into a :class:`pymbolic.geometric_algebra.MultiVector` - of scalar variables. - """ - - def __init__(self, name, num_components=None): - """ - :arg num_components: if None, defaults to the dimension of the - ambient space - """ - self.name = name - self.num_components = num_components - - mapper_method = "map_vector_variable" - - -class DimensionalizedExpression(Expression): - """Preserves an already-dimensionalized expression until it hits - the :class:`pytential.symbolic.mappers.Dimensionalizer`, which - will unpack it and discard this wrapper. - """ - - def __init__(self, child): - self.child = child - - mapper_method = "map_dimensionalized_expression" +def make_sym_mv(name, num_components): + return MultiVector(make_sym_vector(name, num_components)) class Function(var): @@ -211,17 +186,16 @@ class NodeCoordinateComponent(DiscretizationProperty): mapper_method = intern("map_node_coordinate_component") -class Nodes(DiscretizationProperty): - """Node location of the discretization. +def nodes(ambient_dim, where=None): + """Return a :class:`pymbolic.geometric_algebra.MultiVector` of node + locations. """ - def __init__(self, where=None): - DiscretizationProperty.__init__(self, where) - - def __getinitargs__(self): - return (self.where,) - - mapper_method = intern("map_nodes") + from pytools.obj_array import make_obj_array + return MultiVector( + make_obj_array([ + NodeCoordinateComponent(i, where) + for i in range(ambient_dim)])) class NumReferenceDerivative(DiscretizationProperty): @@ -251,39 +225,57 @@ class NumReferenceDerivative(DiscretizationProperty): mapper_method = intern("map_num_reference_derivative") -class ParametrizationDerivative(DiscretizationProperty): - """A :class:`pymbolic.geometric_algebra.MultiVector` representing +def parametrization_derivative(ambient_dim, dim, where=None): + """Return a :class:`pymbolic.geometric_algebra.MultiVector` representing the derivative of the reference-to-global parametrization. 
""" - mapper_method = "map_parametrization_derivative" + par_grad = np.zeros((ambient_dim, dim), np.object) + for i in range(ambient_dim): + for j in range(dim): + par_grad[i, j] = NumReferenceDerivative( + frozenset([j]), + NodeCoordinateComponent(i, where), + where) + + from pytools import product + return product(MultiVector(vec) for vec in par_grad.T) -def pseudoscalar(where=None): +def pseudoscalar(ambient_dim, dim=None, where=None): + if dim is None: + dim = ambient_dim - 1 + return cse( - ParametrizationDerivative(where).a.project_max_grade(), + parametrization_derivative(ambient_dim, dim, where) + .project_max_grade(), "pseudoscalar", cse_scope.DISCRETIZATION) -def area_element(where=None): +def area_element(ambient_dim, dim=None, where=None): return cse( - sqrt(pseudoscalar(where).a.norm_squared()), + sqrt(pseudoscalar(ambient_dim, dim, where).norm_squared()), "area_element", cse_scope.DISCRETIZATION) -def sqrt_jac_q_weight(where=None): - return cse(sqrt(area_element(where) * QWeight(where)), +def sqrt_jac_q_weight(ambient_dim, dim=None, where=None): + return cse( + sqrt( + area_element(ambient_dim, dim, where) + * QWeight(where)), "sqrt_jac_q_weight", cse_scope.DISCRETIZATION) -def normal(where=None): +def normal(ambient_dim, dim=None, where=None): """Exterior unit normals.""" # Don't be tempted to add a sign here. As it is, it produces # exterior normals for positively oriented curves. - pder = pseudoscalar(where) / area_element(where) - return cse(pder.a.I | pder, "normal", + pder = ( + pseudoscalar(ambient_dim, dim, where) + / area_element(ambient_dim, dim, where)) + return cse(pder.I | pder, "normal", cse_scope.DISCRETIZATION) @@ -319,14 +311,17 @@ class NodeSum(Expression): mapper_method = "map_node_sum" -def integral(operand, where=None): +def integral(ambient_dim, dim, operand, where=None): """A volume integral of *operand*.""" - return NodeSum(area_element(where) * QWeight(where) * operand) + return NodeSum( + area_element(ambient_dim, dim, where) + * QWeight(where) + * operand) class Ones(Expression): - """A vector that is constant *one* on the whole + """A DOF-vector that is constant *one* on the whole discretization. """ @@ -341,9 +336,8 @@ class Ones(Expression): def ones_vec(dim, where=None): from pytools.obj_array import make_obj_array - return DimensionalizedExpression( - MultiVector( - make_obj_array(dim*[Ones(where)]))) + return MultiVector( + make_obj_array(dim*[Ones(where)])) def area(where=None): @@ -376,6 +370,12 @@ class IterativeInverse(Expression): mapper_method = intern("map_inverse") +class Derivative(DerivativeBase): + def resolve(self, expr): + from pytential.symbolic.mappers import DerivativeBinder + return DerivativeBinder(self.my_id)(expr) + + # {{{ potentials def hashable_kernel_args(kernel_arguments): @@ -529,10 +529,51 @@ class IntG(Expression): mapper_method = intern("map_int_g") -class IntGdSource(IntG): - # FIXME: Unclear if this class is still fully needed - # now that the kernel_arguments mechanism exists. +_DIR_VEC_NAME = "dsource_vec" + + +def _insert_source_derivative_into_kernel(kernel): + # Inserts the source derivative at the innermost + # kernel wrapping level. 
+ from sumpy.kernel import DirectionalSourceDerivative + + if kernel.get_base_kernel() is kernel: + return DirectionalSourceDerivative( + kernel, dir_vec_name=_DIR_VEC_NAME) + else: + return kernel.replace_inner_kernel( + _insert_source_derivative_into_kernel(kernel.kernel)) + + +def _get_dir_vec(dsource, ambient_dim): + from pymbolic.mapper.coefficient import ( + CoefficientCollector as CoefficientCollectorBase) + + class _DSourceCoefficientFinder(CoefficientCollectorBase): + def map_nabla_component(self, expr): + return {expr: 1} + + def map_variable(self, expr): + return {1: expr} + + def map_common_subexpression(self, expr): + return {1: expr} + + coeffs = _DSourceCoefficientFinder()(dsource) + + dir_vec = np.zeros(ambient_dim, np.object) + for i in range(ambient_dim): + dir_vec[i] = coeffs.pop(NablaComponent(i, None), 0) + + if coeffs: + raise RuntimeError("source derivative expression contained constant term") + return dir_vec + + +def int_g_dsource(ambient_dim, dsource, kernel, density, + qbx_forced_limit, source=None, target=None, + kernel_arguments=None, **kwargs): r""" .. math:: @@ -549,20 +590,31 @@ class IntGdSource(IntG): A :class:`pymbolic.geometric_algebra.MultiVector`. """ - def __init__(self, dsource, kernel, density, - qbx_forced_limit=0, source=None, target=None, - kernel_arguments=None, - **kwargs): - IntG.__init__(self, kernel, density, - qbx_forced_limit, source, target, - kernel_arguments=kernel_arguments, - **kwargs) - self.dsource = dsource + if kernel_arguments is None: + kernel_arguments = {} - def __getinitargs__(self): - return (self.dsource,) + IntG.__getinitargs__(self) + if isinstance(kernel_arguments, tuple): + kernel_arguments = dict(kernel_arguments) - mapper_method = intern("map_int_g_ds") + kernel = _insert_source_derivative_into_kernel(kernel) + + from pytools.obj_array import make_obj_array + nabla = MultiVector(make_obj_array( + [NablaComponent(axis, None) + for axis in range(ambient_dim)])) + + def add_dir_vec_to_kernel_args(coeff): + result = kernel_arguments.copy() + result[_DIR_VEC_NAME] = _get_dir_vec(coeff, ambient_dim) + return result + + density = cse(density) + return (dsource*nabla).map( + lambda coeff: IntG( + kernel, + density, qbx_forced_limit, source, target, + kernel_arguments=add_dir_vec_to_kernel_args(coeff), + **kwargs)) # }}} @@ -592,50 +644,104 @@ def S(kernel, density, # noqa kernel_arguments, **kwargs) -def tangential_derivative(operand, where=None): - pder = pseudoscalar(where) / area_element(where) +def tangential_derivative(ambient_dim, operand, dim=None, where=None): + pder = ( + pseudoscalar(ambient_dim, dim, where) + / area_element(ambient_dim, dim, where)) # FIXME: Should be formula (3.25) in Dorst et al. 
d = Derivative() - return (d.nabla * d(operand)) >> pder + return d.resolve( + (d.dnabla(ambient_dim) * d(operand)) >> pder) -def normal_derivative(operand, where=None): +def normal_derivative(ambient_dim, operand, dim=None, where=None): d = Derivative() - return (normal(where).a.scalar_product(d.nabla)) * d(operand) + return d.resolve( + (normal(ambient_dim, dim, where).scalar_product(d.dnabla(ambient_dim))) + * d(operand)) -def Sp(*args, **kwargs): # noqa +def Sp(kernel, *args, **kwargs): # noqa where = kwargs.get("target") if "qbx_forced_limit" not in kwargs: warn("not specifying qbx_forced_limit on call to 'Sp' is deprecated, " "defaulting to 'avg'", DeprecationWarning, stacklevel=2) kwargs["qbx_forced_limit"] = "avg" - return normal_derivative(S(*args, **kwargs), where) - + ambient_dim = kwargs.get("ambient_dim") + from sumpy.kernel import Kernel + if ambient_dim is None and isinstance(kernel, Kernel): + ambient_dim = kernel.dim + if ambient_dim is None: + raise ValueError("ambient_dim must be specified, either through " + "the kernel, or directly") + dim = kwargs.pop("dim", None) + + return normal_derivative( + ambient_dim, + S(kernel, *args, **kwargs), + dim=dim, where=where) + + +def Spp(kernel, *args, **kwargs): # noqa + ambient_dim = kwargs.get("ambient_dim") + from sumpy.kernel import Kernel + if ambient_dim is None and isinstance(kernel, Kernel): + ambient_dim = kernel.dim + if ambient_dim is None: + raise ValueError("ambient_dim must be specified, either through " + "the kernel, or directly") + dim = kwargs.pop("dim", None) -def Spp(*args, **kwargs): # noqa where = kwargs.get("target") - return normal_derivative(Sp(*args, **kwargs), where) + return normal_derivative( + ambient_dim, + Sp(kernel, *args, **kwargs), + dim=dim, where=where) + + +def D(kernel, *args, **kwargs): # noqa + ambient_dim = kwargs.get("ambient_dim") + from sumpy.kernel import Kernel + if ambient_dim is None and isinstance(kernel, Kernel): + ambient_dim = kernel.dim + if ambient_dim is None: + raise ValueError("ambient_dim must be specified, either through " + "the kernel, or directly") + dim = kwargs.pop("dim", None) - -def D(*args, **kwargs): # noqa where = kwargs.get("source") + if "qbx_forced_limit" not in kwargs: warn("not specifying qbx_forced_limit on call to 'D' is deprecated, " "defaulting to 'avg'", DeprecationWarning, stacklevel=2) kwargs["qbx_forced_limit"] = "avg" - return IntGdSource(normal(where), *args, **kwargs).a.xproject(0) - -def Dp(*args, **kwargs): # noqa + return int_g_dsource( + ambient_dim, + normal(ambient_dim, dim, where), + kernel, *args, **kwargs).xproject(0) + + +def Dp(kernel, *args, **kwargs): # noqa + ambient_dim = kwargs.get("ambient_dim") + from sumpy.kernel import Kernel + if ambient_dim is None and isinstance(kernel, Kernel): + ambient_dim = kernel.dim + if ambient_dim is None: + raise ValueError("ambient_dim must be specified, either through " + "the kernel, or directly") + dim = kwargs.pop("dim", None) target = kwargs.get("target") if "qbx_forced_limit" not in kwargs: warn("not specifying qbx_forced_limit on call to 'Dp' is deprecated, " "defaulting to +1", DeprecationWarning, stacklevel=2) kwargs["qbx_forced_limit"] = +1 - return normal_derivative(D(*args, **kwargs), target) + return normal_derivative( + ambient_dim, + D(kernel, *args, **kwargs), + dim=dim, where=target) # }}} diff --git a/test/test_layer_pot.py b/test/test_layer_pot.py index 2503835c..513e2fa1 100644 --- a/test/test_layer_pot.py +++ b/test/test_layer_pot.py @@ -186,11 +186,14 @@ def 
test_ellipse_eigenvalues(ctx_getter, ellipse_aspect, mode_nr, qbx_order): cl.clmath.sin(angle)**2 + (1/ellipse_aspect)**2 * cl.clmath.cos(angle)**2) + from sumpy.kernel import LaplaceKernel + lap_knl = LaplaceKernel(2) + # {{{ single layer sigma = cl.clmath.cos(mode_nr*angle)/J - s_sigma_op = bind(qbx, sym.S(0, sym.var("sigma"))) + s_sigma_op = bind(qbx, sym.S(lap_knl, sym.var("sigma"))) s_sigma = s_sigma_op(queue=queue, sigma=sigma) # SIGN BINGO! :) @@ -218,7 +221,8 @@ def test_ellipse_eigenvalues(ctx_getter, ellipse_aspect, mode_nr, qbx_order): sigma = cl.clmath.cos(mode_nr*angle) - d_sigma_op = bind(qbx, sym.D(0, sym.var("sigma"))) + d_sigma_op = bind(qbx, + sym.D(lap_knl, sym.var("sigma"), qbx_forced_limit="avg")) d_sigma = d_sigma_op(queue=queue, sigma=sigma) # SIGN BINGO! :) @@ -250,7 +254,8 @@ def test_ellipse_eigenvalues(ctx_getter, ellipse_aspect, mode_nr, qbx_order): sigma = cl.clmath.cos(mode_nr*angle) - sp_sigma_op = bind(qbx, sym.Sp(0, sym.var("sigma"))) + sp_sigma_op = bind(qbx, + sym.Sp(lap_knl, sym.var("sigma"), qbx_forced_limit="avg")) sp_sigma = sp_sigma_op(queue=queue, sigma=sigma) sp_eigval = 0 @@ -324,10 +329,12 @@ def run_int_eq_test( from sumpy.kernel import LaplaceKernel, HelmholtzKernel, AxisTargetDerivative if k: knl = HelmholtzKernel(2) - knl_kwargs = {"k": k} + knl_kwargs = {"k": sym.var("k")} + concrete_knl_kwargs = {"k": k} else: knl = LaplaceKernel(2) knl_kwargs = {} + concrete_knl_kwargs = {} if knl.is_complex_valued: dtype = np.complex128 @@ -335,10 +342,11 @@ def run_int_eq_test( dtype = np.float64 if bc_type == "dirichlet": - op = DirichletOperator((knl, knl_kwargs), loc_sign, use_l2_weighting=True) + op = DirichletOperator(knl, loc_sign, use_l2_weighting=True, + kernel_arguments=knl_kwargs) elif bc_type == "neumann": - op = NeumannOperator((knl, knl_kwargs), loc_sign, use_l2_weighting=True, - use_improved_operator=False) + op = NeumannOperator(knl, loc_sign, use_l2_weighting=True, + use_improved_operator=False, kernel_arguments=knl_kwargs) else: assert False @@ -374,7 +382,7 @@ def run_int_eq_test( # show geometry, centers, normals nodes_h = density_discr.nodes().get(queue=queue) pt.plot(nodes_h[0], nodes_h[1], "x-") - normal = bind(density_discr, sym.normal())(queue).as_vector(np.object) + normal = bind(density_discr, sym.normal(2))(queue).as_vector(np.object) pt.quiver(nodes_h[0], nodes_h[1], normal[0].get(queue), normal[1].get(queue)) pt.gca().set_aspect("equal") pt.show() @@ -387,25 +395,25 @@ def run_int_eq_test( evt, (test_direct,) = pot_p2p( queue, test_targets, point_sources, [source_charges], - out_host=False, **knl_kwargs) + out_host=False, **concrete_knl_kwargs) nodes = density_discr.nodes() evt, (src_pot,) = pot_p2p( queue, nodes, point_sources, [source_charges], - **knl_kwargs) + **concrete_knl_kwargs) grad_p2p = P2P(cl_ctx, [AxisTargetDerivative(0, knl), AxisTargetDerivative(1, knl)], exclude_self=False, value_dtypes=dtype) evt, (src_grad0, src_grad1) = grad_p2p( queue, nodes, point_sources, [source_charges], - **knl_kwargs) + **concrete_knl_kwargs) if bc_type == "dirichlet": bc = src_pot elif bc_type == "neumann": - normal = bind(density_discr, sym.normal())(queue).as_vector(np.object) + normal = bind(density_discr, sym.normal(2))(queue).as_vector(np.object) bc = (src_grad0*normal[0] + src_grad1*normal[1]) # }}} @@ -418,7 +426,7 @@ def run_int_eq_test( from pytential.solve import gmres gmres_result = gmres( - bound_op.scipy_op(queue, "u", dtype, k=k), + bound_op.scipy_op(queue, "u", dtype, **concrete_knl_kwargs), rhs, tol=1e-14, 
progress=True, hard_failure=False) @@ -480,16 +488,19 @@ def run_int_eq_test( bound_t_deriv_op = bind(qbx, op.representation( - sym.var("u"), map_potentials=sym.tangential_derivative, + sym.var("u"), + map_potentials=lambda pot: sym.tangential_derivative(2, pot), qbx_forced_limit=loc_sign)) #print(bound_t_deriv_op.code) - tang_deriv_from_src = bound_t_deriv_op(queue, u=u).as_scalar().get() + tang_deriv_from_src = bound_t_deriv_op( + queue, u=u, **concrete_knl_kwargs).as_scalar().get() tangent = bind( density_discr, - sym.pseudoscalar()/sym.area_element())(queue).as_vector(np.object) + sym.pseudoscalar(2)/sym.area_element(2))( + queue, **concrete_knl_kwargs).as_vector(np.object) tang_deriv_ref = (src_grad0 * tangent[0] + src_grad1 * tangent[1]).get() @@ -711,27 +722,35 @@ def test_identities(ctx_getter, zero_op_name, curve_name, curve_f, qbx_order, k) target_order = 7 u_sym = sym.var("u") - grad_u_sym = sym.VectorVariable("grad_u") + grad_u_sym = sym.make_sym_mv("grad_u", 2) dn_u_sym = sym.var("dn_u") + from sumpy.kernel import LaplaceKernel, HelmholtzKernel + lap_k_sym = LaplaceKernel(2) if k == 0: - k_sym = 0 + k_sym = lap_k_sym + knl_kwargs = {} else: - k_sym = "k" + k_sym = HelmholtzKernel(2) + knl_kwargs = {"k": sym.var("k")} zero_op_table = { "green": - sym.S(k_sym, dn_u_sym) - sym.D(k_sym, u_sym) - 0.5*u_sym, + sym.S(k_sym, dn_u_sym, qbx_forced_limit=-1, **knl_kwargs) + - sym.D(k_sym, u_sym, qbx_forced_limit="avg", **knl_kwargs) + - 0.5*u_sym, "green_grad": - d1.nabla * d1(sym.S(k_sym, dn_u_sym, qbx_forced_limit="avg")) - - d2.nabla * d2(sym.D(k_sym, u_sym, qbx_forced_limit="avg")) + d1.nabla * d1(sym.S(k_sym, dn_u_sym, + qbx_forced_limit="avg", **knl_kwargs)) + - d2.nabla * d2(sym.D(k_sym, u_sym, + qbx_forced_limit="avg", **knl_kwargs)) - 0.5*grad_u_sym, # only for k==0: "zero_calderon": - -sym.Dp(0, sym.S(0, u_sym)) - - 0.25*u_sym + sym.Sp(0, sym.Sp(0, u_sym)) + -sym.Dp(lap_k_sym, sym.S(lap_k_sym, u_sym)) + - 0.25*u_sym + sym.Sp(lap_k_sym, sym.Sp(lap_k_sym, u_sym)) } order_table = { "green": qbx_order, @@ -765,7 +784,7 @@ def test_identities(ctx_getter, zero_op_name, curve_name, curve_f, qbx_order, k) # {{{ compute values of a solution to the PDE nodes_host = density_discr.nodes().get(queue) - normal = bind(density_discr, sym.normal())(queue).as_vector(np.object) + normal = bind(density_discr, sym.normal(2))(queue).as_vector(np.object) normal_host = [normal[0].get(), normal[1].get()] if k != 0: @@ -846,7 +865,7 @@ def test_off_surface_eval(ctx_getter, use_fmm, do_plot=False): fmm_order=fmm_order) from sumpy.kernel import LaplaceKernel - op = sym.D(LaplaceKernel(), sym.var("sigma"), qbx_forced_limit=-2) + op = sym.D(LaplaceKernel(2), sym.var("sigma"), qbx_forced_limit=-2) sigma = density_discr.zeros(queue) + 1 -- GitLab From b322d6f9043bc51409941c512b4520205fc8e9d8 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 11 Dec 2016 18:35:57 -0600 Subject: [PATCH 2/9] Fix test_geometry --- test/test_layer_pot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_layer_pot.py b/test/test_layer_pot.py index 513e2fa1..9558af00 100644 --- a/test/test_layer_pot.py +++ b/test/test_layer_pot.py @@ -86,7 +86,7 @@ def test_geometry(ctx_getter): InterpolatoryQuadratureSimplexGroupFactory(order)) import pytential.symbolic.primitives as prim - area_sym = prim.integral(1) + area_sym = prim.integral(2, 1, 1) area = bind(discr, area_sym)(queue) -- GitLab From 55b8ee6e3a8dd05314638dd14c6b61d29c2ef762 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 11 Dec 2016 
18:36:23 -0600 Subject: [PATCH 3/9] Fix test_identities: grad(Green) --- test/test_layer_pot.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/test_layer_pot.py b/test/test_layer_pot.py index 9558af00..ff637700 100644 --- a/test/test_layer_pot.py +++ b/test/test_layer_pot.py @@ -741,10 +741,10 @@ def test_identities(ctx_getter, zero_op_name, curve_name, curve_f, qbx_order, k) - 0.5*u_sym, "green_grad": - d1.nabla * d1(sym.S(k_sym, dn_u_sym, - qbx_forced_limit="avg", **knl_kwargs)) - - d2.nabla * d2(sym.D(k_sym, u_sym, - qbx_forced_limit="avg", **knl_kwargs)) + d1.resolve(d1.nabla * d1(sym.S(k_sym, dn_u_sym, + qbx_forced_limit="avg", **knl_kwargs))) + - d2.resolve(d2.nabla * d2(sym.D(k_sym, u_sym, + qbx_forced_limit="avg", **knl_kwargs))) - 0.5*grad_u_sym, # only for k==0: -- GitLab From 47296bbfceac5ae489cec46125ca21358baed5b2 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 11 Dec 2016 18:37:47 -0600 Subject: [PATCH 4/9] Add make_sym_surface_mv --- pytential/symbolic/primitives.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/pytential/symbolic/primitives.py b/pytential/symbolic/primitives.py index 1eb79475..330a4dff 100644 --- a/pytential/symbolic/primitives.py +++ b/pytential/symbolic/primitives.py @@ -44,6 +44,7 @@ __doc__ = """ .. autoclass:: Variable .. autoclass:: make_sym_vector .. autoclass:: make_sym_mv +.. autoclass:: make_sym_surface_mv Functions ^^^^^^^^^ @@ -60,6 +61,7 @@ Discretization properties .. autoclass:: QWeight .. autofunction:: nodes .. autofunction:: parametrization_derivative +.. autofunction:: parametrization_derivative_matrix .. autofunction:: pseudoscalar .. autofunction:: area_element .. autofunction:: sqrt_jac_q_weight @@ -125,6 +127,16 @@ def make_sym_mv(name, num_components): return MultiVector(make_sym_vector(name, num_components)) +def make_sym_surface_mv(name, ambient_dim, dim, where=None): + par_grad = parametrization_derivative_matrix(ambient_dim, dim, where) + + return sum( + var("%s%d" % (name, i)) + * + cse(MultiVector(vec), "tangent%d" % i, cse_scope.DISCRETIZATION) + for i, vec in enumerate(par_grad.T)) + + class Function(var): def __call__(self, operand, *args, **kwargs): # If the call is handed an object array full of operands, @@ -225,7 +237,7 @@ class NumReferenceDerivative(DiscretizationProperty): mapper_method = intern("map_num_reference_derivative") -def parametrization_derivative(ambient_dim, dim, where=None): +def parametrization_derivative_matrix(ambient_dim, dim, where=None): """Return a :class:`pymbolic.geometric_algebra.MultiVector` representing the derivative of the reference-to-global parametrization. """ @@ -238,6 +250,16 @@ def parametrization_derivative(ambient_dim, dim, where=None): NodeCoordinateComponent(i, where), where) + return par_grad + + +def parametrization_derivative(ambient_dim, dim, where=None): + """Return a :class:`pymbolic.geometric_algebra.MultiVector` representing + the derivative of the reference-to-global parametrization. 
+ """ + + par_grad = parametrization_derivative_matrix(ambient_dim, dim, where) + from pytools import product return product(MultiVector(vec) for vec in par_grad.T) -- GitLab From b5ffe0d5b258b97a961973a66ae26c17bb035661 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Sun, 11 Dec 2016 18:38:04 -0600 Subject: [PATCH 5/9] Fix sym.{area,mean} --- pytential/symbolic/primitives.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/pytential/symbolic/primitives.py b/pytential/symbolic/primitives.py index 330a4dff..add36193 100644 --- a/pytential/symbolic/primitives.py +++ b/pytential/symbolic/primitives.py @@ -362,13 +362,16 @@ def ones_vec(dim, where=None): make_obj_array(dim*[Ones(where)])) -def area(where=None): - return cse(integral(Ones(where), where), "area", +def area(ambient_dim, dim, where=None): + return cse(integral(ambient_dim, dim, Ones(where), where), "area", cse_scope.DISCRETIZATION) -def mean(operand, where=None): - return integral(operand, where) / area(where) +def mean(ambient_dim, dim, operand, where=None): + return ( + integral(ambient_dim, dim, operand, where) + / + area(ambient_dim, dim, where)) class IterativeInverse(Expression): -- GitLab From 5cfe842a5fa1f4adfc0e3f1a9b4646a420cbc46b Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 12 Dec 2016 01:53:33 -0600 Subject: [PATCH 6/9] Fix sym.mean() invocations in scalar PDE --- pytential/symbolic/pde/scalar.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pytential/symbolic/pde/scalar.py b/pytential/symbolic/pde/scalar.py index 92295c5d..57231af2 100644 --- a/pytential/symbolic/pde/scalar.py +++ b/pytential/symbolic/pde/scalar.py @@ -39,7 +39,6 @@ __doc__ = """ from pytential import sym from pytential.symbolic.primitives import ( cse, - Ones, mean, sqrt_jac_q_weight, QWeight, area_element) import numpy as np from collections import namedtuple @@ -153,7 +152,8 @@ class DirichletOperator(L2WeightedPDEOperator): # See Hackbusch, http://books.google.com/books?id=Ssnf7SZB0ZMC # Theorem 8.2.18b - ones_contribution = Ones() * mean(inv_sqrt_w_u) + amb_dim = self.kernel.dim + ones_contribution = sym.Ones() * sym.mean(amb_dim, amb_dim-1, inv_sqrt_w_u) else: ones_contribution = 0 @@ -272,7 +272,8 @@ class NeumannOperator(L2WeightedPDEOperator): # to the desired solution separately. As is, this operator # returns a mean that is not well-specified. 
- ones_contribution = Ones() * mean(inv_sqrt_w_u) + amb_dim = self.kernel.dim + ones_contribution = sym.Ones() * sym.mean(amb_dim, amb_dim-1, inv_sqrt_w_u) else: ones_contribution = 0 -- GitLab From a6500b87164b52aab79474f74cf70c8f4425445c Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 12 Dec 2016 02:17:02 -0600 Subject: [PATCH 7/9] Further kill-dimensionalizer fixes in test_identities --- test/test_layer_pot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_layer_pot.py b/test/test_layer_pot.py index ff637700..64714160 100644 --- a/test/test_layer_pot.py +++ b/test/test_layer_pot.py @@ -741,9 +741,9 @@ def test_identities(ctx_getter, zero_op_name, curve_name, curve_f, qbx_order, k) - 0.5*u_sym, "green_grad": - d1.resolve(d1.nabla * d1(sym.S(k_sym, dn_u_sym, + d1.resolve(d1.dnabla(2) * d1(sym.S(k_sym, dn_u_sym, qbx_forced_limit="avg", **knl_kwargs))) - - d2.resolve(d2.nabla * d2(sym.D(k_sym, u_sym, + - d2.resolve(d2.dnabla(2) * d2(sym.D(k_sym, u_sym, qbx_forced_limit="avg", **knl_kwargs))) - 0.5*grad_u_sym, -- GitLab From 27d4d08abf61c510fddb44bb9e122e48c2dbeafc Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 12 Dec 2016 02:17:11 -0600 Subject: [PATCH 8/9] GA derivative resolve: Resolve all derivative by default --- pytential/symbolic/primitives.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pytential/symbolic/primitives.py b/pytential/symbolic/primitives.py index add36193..5e8ff991 100644 --- a/pytential/symbolic/primitives.py +++ b/pytential/symbolic/primitives.py @@ -396,9 +396,10 @@ class IterativeInverse(Expression): class Derivative(DerivativeBase): - def resolve(self, expr): + @staticmethod + def resolve(expr): from pytential.symbolic.mappers import DerivativeBinder - return DerivativeBinder(self.my_id)(expr) + return DerivativeBinder()(expr) # {{{ potentials -- GitLab From 5e2812ddde0e60b0aae8428127df7f74fff921c1 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 12 Dec 2016 09:14:00 -0600 Subject: [PATCH 9/9] Placate flake8 --- pytential/symbolic/pde/scalar.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pytential/symbolic/pde/scalar.py b/pytential/symbolic/pde/scalar.py index 57231af2..ff5a22e2 100644 --- a/pytential/symbolic/pde/scalar.py +++ b/pytential/symbolic/pde/scalar.py @@ -153,7 +153,8 @@ class DirichletOperator(L2WeightedPDEOperator): # Theorem 8.2.18b amb_dim = self.kernel.dim - ones_contribution = sym.Ones() * sym.mean(amb_dim, amb_dim-1, inv_sqrt_w_u) + ones_contribution = ( + sym.Ones() * sym.mean(amb_dim, amb_dim-1, inv_sqrt_w_u)) else: ones_contribution = 0 @@ -273,7 +274,8 @@ class NeumannOperator(L2WeightedPDEOperator): # returns a mean that is not well-specified. amb_dim = self.kernel.dim - ones_contribution = sym.Ones() * sym.mean(amb_dim, amb_dim-1, inv_sqrt_w_u) + ones_contribution = ( + sym.Ones() * sym.mean(amb_dim, amb_dim-1, inv_sqrt_w_u)) else: ones_contribution = 0 -- GitLab
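
A minimal usage sketch of the API after this series, based on the test changes
above (it is not taken verbatim from the patches): geometry helpers such as
normal(), area_element() and integral() now take explicit ambient/surface
dimensions, and the S/D/Sp/Dp shorthands expect a sumpy Kernel instance plus an
explicit qbx_forced_limit, replacing the removed Dimensionalizer pass. The
names qbx, density_discr, queue and sigma are assumed to be set up as in
test/test_layer_pot.py.

    import numpy as np
    from pytential import bind, sym
    from sumpy.kernel import LaplaceKernel

    # Assumed already available (as in test_layer_pot.py): a
    # QBXLayerPotentialSource `qbx` with density discretization
    # `density_discr`, a pyopencl CommandQueue `queue`, and a density
    # DOF vector `sigma` living on `density_discr`.

    lap_knl = LaplaceKernel(2)  # the kernel now carries the ambient dimension

    # Layer-potential shorthands take a Kernel instance (no more "0"
    # shorthand for Laplace) and an explicit qbx_forced_limit.
    s_sigma_op = bind(qbx,
            sym.S(lap_knl, sym.var("sigma"), qbx_forced_limit=+1))
    s_sigma = s_sigma_op(queue=queue, sigma=sigma)

    # Geometry expressions spell out the ambient (and surface) dimension.
    normal = bind(density_discr, sym.normal(2))(queue).as_vector(np.object)
    area = bind(density_discr, sym.integral(2, 1, 1))(queue)

Since expressions are now fully dimension-specified at construction time, no
separate dimensionalization pass runs during prepare_expr any more.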