diff --git a/doc/dpie_doc.pdf b/doc/dpie_doc.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e2f037d5d3cb117a25978aff14bc2f6dec9cb2c7 Binary files /dev/null and b/doc/dpie_doc.pdf differ diff --git a/doc/symbolic.rst b/doc/symbolic.rst index 2ad1f4ad6ebd0d9a0eab85a5d170c60fb6fe88f6..9ab76192675050fff5cf5c74cd65f80d67fbeca9 100644 --- a/doc/symbolic.rst +++ b/doc/symbolic.rst @@ -29,6 +29,7 @@ Maxwell's equations ^^^^^^^^^^^^^^^^^^^ .. automodule:: pytential.symbolic.pde.maxwell +.. automodule:: pytential.symbolic.pde.maxwell.dpie Stokes' equations ^^^^^^^^^^^^^^^^^ diff --git a/pytential/solve.py b/pytential/solve.py index 979a7550bef74b8569ad0d4dc3c63c5c74a9ddf0..7575ff892c54af542dfc4e1f1221c338f5a2e365 100644 --- a/pytential/solve.py +++ b/pytential/solve.py @@ -1,4 +1,7 @@ -__copyright__ = "Copyright (C) 2012-2013 Andreas Kloeckner" +__copyright__ = """ +Copyright (C) 2012-2018 Andreas Kloeckner +Copyright (C) 2018 Christian Howard +""" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy @@ -265,7 +268,8 @@ def gmres(op, rhs, restart=None, tol=None, x0=None, inner_product=structured_vdot, maxiter=None, hard_failure=None, no_progress_factor=None, stall_iterations=None, - callback=None, progress=False, require_monotonicity=True): + callback=None, progress=False, cl_queue=None, + require_monotonicity=True): """Solve a linear system Ax=b by means of GMRES with restarts. @@ -281,6 +285,8 @@ def gmres(op, rhs, restart=None, tol=None, x0=None, :arg stall_iterations: Number of iterations with residual decrease below *no_progress_factor* indicates stall. Set to 0 to disable stall detection. + :arg cl_queue: a :class:`pyopencl.CommandQueue` instance, to support + automatic vector splitting/assembly for systems of equations :return: a :class:`GMRESResult` """ diff --git a/pytential/symbolic/execution.py b/pytential/symbolic/execution.py index 77c80b4a1aeb7c4b950c5cf22742f1b0107ece89..1a01f55a6ce72410c961212f345cfb87888f5826 100644 --- a/pytential/symbolic/execution.py +++ b/pytential/symbolic/execution.py @@ -126,23 +126,38 @@ class EvaluationMapperBase(PymbolicEvaluationMapper): def map_node_sum(self, expr): # FIXME: make less CL specific queue = self.array_context.queue + expr_val = self.rec(expr.operand) + from numbers import Number + if isinstance(expr_val, Number) and expr_val == 0: + return expr_val + return sum( cl.array.sum(grp_ary, queue=queue).get()[()] - for grp_ary in self.rec(expr.operand)) + for grp_ary in expr_val) def map_node_max(self, expr): # FIXME: make less CL specific queue = self.array_context.queue + expr_val = self.rec(expr.operand) + from numbers import Number + if isinstance(expr_val, Number) and expr_val == 0: + return expr_val + return max( cl.array.max(grp_ary, queue=queue).get()[()] - for grp_ary in self.rec(expr.operand)) + for grp_ary in expr_val) def map_node_min(self, expr): # FIXME: make less CL specific queue = self.array_context.queue + expr_val = self.rec(expr.operand) + from numbers import Number + if isinstance(expr_val, Number) and expr_val == 0: + return expr_val + return min( cl.array.min(grp_ary, queue=queue).get()[()] - for grp_ary in self.rec(expr.operand)) + for grp_ary in expr_val) def _map_elementwise_reduction(self, reduction_name, expr): @memoize_in(self.places, "elementwise_node_"+reduction_name) @@ -460,7 +475,8 @@ class MatVecOp: def __init__(self, bound_expr, actx: PyOpenCLArrayContext, - arg_name, dtype, total_dofs, discrs, starts_and_ends, extra_args): + arg_name, dtype, 
total_dofs, discrs, starts_and_ends, extra_args, + is_scalar=False): self.bound_expr = bound_expr self.array_context = actx self.arg_name = arg_name @@ -469,6 +485,7 @@ class MatVecOp: self.discrs = discrs self.starts_and_ends = starts_and_ends self.extra_args = extra_args + self.is_scalar = is_scalar @property def shape(self): @@ -532,6 +549,11 @@ class MatVecOp: else: raise ValueError("unsupported input type") + # make any scalar terms actually scalars + for n in range(0, len(x)): + if x[n].shape == (1,): + x[n] = x[n].get(self.queue)[0] + args = self.extra_args.copy() args[self.arg_name] = self.unflatten(x) if flat else x result = self.bound_expr(self.array_context, **args) @@ -548,7 +570,7 @@ class MatVecOp: # {{{ expression prep -def _prepare_domains(nresults, places, domains, default_domain): +def _prepare_domains(nresults, places, domains, default_domain, preserve_none=False): """ :arg nresults: number of results. :arg places: a :class:`~pytential.GeometryCollection`. @@ -568,7 +590,10 @@ def _prepare_domains(nresults, places, domains, default_domain): dom_name = domains return nresults * [dom_name] - domains = [sym.as_dofdesc(d) for d in domains] + domains = [ + sym.as_dofdesc(d) if d is not None else None + for d in domains] + assert len(domains) == nresults return domains @@ -1012,10 +1037,12 @@ class BoundExpression: accepting and returning :class:`pyopencl.array.Array` arrays. """ + is_scalar = False if isinstance(self.code.result, np.ndarray): nresults = len(self.code.result) else: nresults = 1 + is_scalar = True domains = _prepare_domains(nresults, self.places, domains, self.places.auto_target) @@ -1042,7 +1069,8 @@ class BoundExpression: # for linear system solving, in which case the assumption # has to be true. return MatVecOp(self, actx, - arg_name, dtype, total_dofs, discrs, starts_and_ends, extra_args) + arg_name, dtype, total_dofs, discrs, starts_and_ends, extra_args, + is_scalar=is_scalar) def eval(self, context=None, timing_data=None, array_context: Optional[PyOpenCLArrayContext] = None): diff --git a/pytential/symbolic/pde/maxwell/__init__.py b/pytential/symbolic/pde/maxwell/__init__.py index f09dc21789a6313c9392c506a4bb4300997169c4..9bff4deebbd3bef7fe1cf5f06b3e381e50dfb60b 100644 --- a/pytential/symbolic/pde/maxwell/__init__.py +++ b/pytential/symbolic/pde/maxwell/__init__.py @@ -1,4 +1,7 @@ -__copyright__ = "Copyright (C) 2010-2013 Andreas Kloeckner" +__copyright__ = """ +Copyright (C) 2010-2018 Andreas Kloeckner +Copyright (C) 2018 Christian Howard +""" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy @@ -33,13 +36,16 @@ __doc__ = """ .. autofunction:: get_sym_maxwell_point_source .. autofunction:: get_sym_maxwell_plane_wave +.. autofunction:: get_sym_maxwell_point_source_potentials +.. autofunction:: get_sym_maxwell_planewave_gradphi +.. autofunction:: get_sym_maxwell_planewave_divA .. autoclass:: PECChargeCurrentMFIEOperator """ # {{{ point source -def get_sym_maxwell_point_source(kernel, jxyz, k): +def get_sym_maxwell_point_source_em(kernel, jxyz, k): r"""Return a symbolic expression that, when bound to a :class:`pytential.source.PointPotentialSource` will yield a field satisfying Maxwell's equations. 
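A usage sketch, not prescriptive: the renamed ``get_sym_maxwell_point_source_em`` keeps the old calling convention, and the new ``get_sym_maxwell_point_source_potentials`` introduced below is bound the same way. Here ``places``, ``source``, ``target``, ``src_j``, ``actx`` and the numeric wavenumber ``k`` are assumed to be set up as in ``test_pec_mfie_extinction``:

    from pytential import bind, sym
    from pytential.symbolic.pde.maxwell import (
            get_sym_maxwell_point_source_em,
            get_sym_maxwell_point_source_potentials)
    from sumpy.kernel import HelmholtzKernel

    kernel = HelmholtzKernel(3)
    j_sym = sym.make_sym_vector("j", 3)

    # E/H fields of the point source, as before (function just renamed)
    eh_inc = bind(places,
            get_sym_maxwell_point_source_em(kernel, j_sym, sym.var("k")),
            auto_where=(source, target))(actx, j=src_j, k=k)

    # four-vector (phi, A_x, A_y, A_z) of incident potentials for the DPIE solver
    pot_inc = bind(places,
            get_sym_maxwell_point_source_potentials(kernel, j_sym, sym.var("k")),
            auto_where=(source, target))(actx, j=src_j, k=k)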
@@ -111,6 +117,87 @@ def get_sym_maxwell_plane_wave(amplitude_vec, v, omega,
 
 # }}}
 
+
+# {{{ Maxwell sources for scalar/vector potentials
+
+def get_sym_maxwell_point_source_potentials(kernel, jxyz, k):
+    r"""Return a symbolic expression that, when bound to a
+    :class:`pytential.source.PointPotentialSource` will yield
+    potential fields satisfying Maxwell's equations.
+
+    Uses the sign convention :math:`\exp(-i \omega t)` for the time dependency.
+
+    This will return an object of four entries, the first being the
+    scalar potential and the last three being the components of the
+    vector potential.
+    """
+    field = get_sym_maxwell_point_source_em(kernel, jxyz, k)
+    return sym.join_fields(
+        0*1j, # scalar potential
+        field[:3]/(1j*k) # vector potential
+        )
+
+
+def get_sym_maxwell_planewave_gradphi(u, Ep, k, dofdesc=None):
+    r"""Return a symbolic expression that, when bound to the geometry
+    identified by *dofdesc*, yields the gradient of a scalar potential field
+    satisfying Maxwell's equations.
+
+    Represents the following:
+
+    .. math::
+
+        \nabla \phi(x) = - e^{i k x^T u}
+        \left( E_p + i k \left(x^T E_p\right) u \right)^T
+    """
+    x = sym.nodes(3, dofdesc).as_vector()
+    grad_phi = -sym.exp(1j*k*np.dot(x, u)) * (Ep.T + 1j*k*np.dot(Ep, x)*u.T)
+    return grad_phi
+
+
+def get_sym_maxwell_planewave_divA(u, Ep, k, epsilon=1, mu=1, dofdesc=None):
+    r"""Return a symbolic expression that, when bound to the geometry
+    identified by *dofdesc*, yields the divergence of a vector potential
+    field satisfying Maxwell's equations.
+
+    Represents the following:
+
+    .. math::
+
+        \nabla \cdot \boldsymbol{A} = -\sqrt{\mu \epsilon}
+        e^{i k x^T u} E_p^T \left( u + i k x\right)
+    """
+    x = sym.nodes(3, dofdesc).as_vector()
+    divA = sym.join_fields(
+        -sym.sqrt(epsilon*mu) * sym.exp(1j*k*np.dot(x, u))
+        * np.dot(Ep, u + 1j*k*x))
+
+    return divA
+
+
+def get_sym_maxwell_planewave_potentials(u, Ep, k, epsilon=1, mu=1, dofdesc=None):
+    r"""Return a 2-tuple of symbolic expressions that, when bound to the
+    geometry identified by *dofdesc*, yield the scalar and vector potential
+    fields satisfying Maxwell's equations that represent a plane wave.
+
+    Represents the following:
+
+    .. math::
+
+        \boldsymbol{A} = -u \left(x \cdot E_p \right)
+        \sqrt{\mu \epsilon} e^{i k x^T u}
+
+    .. math::
+
+        \phi = - \left(x \cdot E_p\right) e^{i k x^T u}
+    """
+    x = sym.nodes(3, dofdesc).as_vector()
+    A = -u * np.dot(x, Ep) * sym.sqrt(epsilon*mu) * sym.exp(1j*k*np.dot(x, u))
+    phi = sym.join_fields(-np.dot(x, Ep) * sym.exp(1j*k*np.dot(x, u)))
+    return phi, A
+
+# }}}
+
+
 # {{{ Charge-Current MFIE
 
 class PECChargeCurrentMFIEOperator:
@@ -227,8 +314,9 @@ class MuellerAugmentedMFIEOperator:
 
         S = partial(sym.S, self.kernel, qbx_forced_limit="avg")
 
-        def curl_S(dens, k):
-            return sym.curl(sym.S(self.kernel, dens, qbx_forced_limit="avg", k=k))
+        def curl_S(dens, **kwargs):
+            return sym.curl(sym.S(self.kernel, dens, qbx_forced_limit="avg",
+                **kwargs))
 
         grad = partial(sym.grad, 3)
 
@@ -284,5 +372,4 @@ class MuellerAugmentedMFIEOperator:
 
 # }}}
 
-
 # vim: foldmethod=marker
diff --git a/pytential/symbolic/pde/maxwell/dpie.py b/pytential/symbolic/pde/maxwell/dpie.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c35b142c7eec3bcec59e826196ac6b8f7b6f9a1
--- /dev/null
+++ b/pytential/symbolic/pde/maxwell/dpie.py
@@ -0,0 +1,894 @@
+from __future__ import division, absolute_import
+
+__copyright__ = "Copyright (C) 2018 Christian Howard"
+
+__license__ = """
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+"""
+
+import numpy as np # noqa
+from pytential import sym
+
+__doc__ = """
+.. autoclass:: DPIEOperator
+.. autoclass:: DPIEOperatorEvanescent
+"""
+
+cse = sym.cse
+
+
+# {{{ Decoupled Potential Integral Equation Operator - based on Arxiv paper
+
+class DPIEOperator(object):
+    r"""
+    Decoupled Potential Integral Equation operator with PEC boundary
+    conditions, defaults as scaled DPIE.
+
+    See `the arxiv paper `_ for derivation.
+
+    Uses :math:`E(x, t) = Re \lbrace E(x) \exp(-i \omega t) \rbrace` and
+    :math:`H(x, t) = Re \lbrace H(x) \exp(-i \omega t) \rbrace` and solves for
+    the :math:`E(x)`, :math:`H(x)` fields using vector and scalar potentials via
+    the Lorenz gauge. The DPIE formulates the problem purely in terms of the
+    vector and scalar potentials, :math:`\boldsymbol{A}` and :math:`\phi`,
+    and then backs out :math:`E(x)` and :math:`H(x)` via relationships to
+    the vector and scalar potentials.
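+
+    Once the potential densities are solved for, the scattered fields are
+    recovered (in the scaled units used here, cf.
+    :meth:`scattered_volume_field`) via
+
+    .. math::
+
+        E(x) = i k \boldsymbol{A}(x) - \nabla \phi(x),
+        \qquad
+        H(x) = \nabla \times \boldsymbol{A}(x).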
+ """ + + def __init__(self, geometry_list, k=sym.var("k")): + from sumpy.kernel import HelmholtzKernel + + # specify the frequency variable that will be tuned + self.k = k + self.stype = type(self.k) + + # specify the 3-D Helmholtz kernel + self.kernel = HelmholtzKernel(3) + + # specify a list of strings representing geometry objects + self.geometry_list = geometry_list + self.nobjs = len(geometry_list) + + def num_distinct_objects(self): + return self.nobjs + + def num_vector_potential_densities(self): + return 4*len(self.geometry_list) + + def num_vector_potential_densities2(self): + return 5*len(self.geometry_list) + + def num_scalar_potential_densities(self): + return 2*len(self.geometry_list) + + def get_vector_domain_list(self): + """Method to return domain list that will be used within the scipy_op + method to solve the system of discretized integral equations. What is + returned should just be a list with values that are strings or None. + """ + + # initialize domain list + domain_list = [None]*self.num_vector_potential_densities() + + # get strings for the actual densities + for n in range(0, self.nobjs): + + # grab nth location identifier + location = self.geometry_list[n] + "t" + + # assign domain for nth vector density + domain_list[2*n] = location + domain_list[2*n+1] = location + + # assign domain for nth scalar density + domain_list[2*self.nobjs + n] = location + + # return the domain list + return domain_list + + def get_scalar_domain_list(self): + """Method to return domain list that will be used within the scipy_op + method to solve the system of discretized integral equations. What is + returned should just be a list with values that are strings or None. + """ + + # initialize domain list + domain_list = [None]*self.num_scalar_potential_densities() + + # get strings for the actual densities + for n in range(0, self.nobjs): + + # grab nth location identifier + location = self.geometry_list[n] + "t" + + # assign domain for nth scalar density + domain_list[n] = location + + # return the domain list + return domain_list + + def get_subproblem_domain_list(self): + """Method to return domain list that will be used within the scipy_op + method to solve the system of discretized integral equations. What is + returned should just be a list with values that are strings or None. 
+ """ + + # initialize domain list + domain_list = [None]*self.nobjs + + # get strings for the actual densities + for n in range(0, self.nobjs): + + # grab nth location identifier + location = self.geometry_list[n] + "t" + + # assign domain for nth scalar density + domain_list[n] = location + + # return the domain list + return domain_list + + def _layerpot_op(self, layerpot_op, density_vec, target=None, qfl="avg", k=None, + kernel=None, use_laplace=False): + """Generic layer potential operator method that works across all + objects within the DPIE model + """ + if kernel is None: + kernel = self.kernel + + if k is None: + k = self.k + + kargs = dict() + if not use_laplace: + kargs['k'] = k + + # define a convenient integral operator that functions across the + # multiple objects + def int_op(idx): + return layerpot_op(kernel, density_vec[:, idx], qbx_forced_limit=qfl, + source=self.geometry_list[idx], target=target, **kargs) + + # get the shape of density_vec + (ndim, nobj) = density_vec.shape + + # init output symbolic quantity with zeros + output = np.zeros((ndim,), dtype=self.stype) + + # compute individual double layer potential evaluations at the given + # density across all the disjoint objects + for i in range(0, nobj): + output = output + int_op(i) + + # return the output summation + if ndim == 1: + return output[0] + else: + return output + + def D(self, density_vec, target=None, qfl="avg", k=None, kernel=None, + use_laplace=False): + """ + Double layer potential operator across multiple disjoint objects + """ + return self._layerpot_op(layerpot_op=sym.D, density_vec=density_vec, + target=target, qfl=qfl, k=k, kernel=kernel, + use_laplace=use_laplace) + + def S(self, density_vec, target=None, qfl="avg", k=None, kernel=None, + use_laplace=False): + """ + Single layer potential operator across multiple disjoint objects + """ + return self._layerpot_op(layerpot_op=sym.S, density_vec=density_vec, + target=target, qfl=qfl, k=k, kernel=kernel, + use_laplace=use_laplace) + + def Dp(self, density_vec, target=None, qfl="avg", k=None, kernel=None, + use_laplace=False): + """ + D' layer potential operator across multiple disjoint objects + """ + return self._layerpot_op(layerpot_op=sym.Dp, density_vec=density_vec, + target=target, qfl=qfl, k=k, kernel=kernel, + use_laplace=use_laplace) + + def Sp(self, density_vec, target=None, qfl="avg", k=None, kernel=None, + use_laplace=False): + """ + S' layer potential operator across multiple disjoint objects + """ + return self._layerpot_op(layerpot_op=sym.Sp, density_vec=density_vec, + target=target, qfl=qfl, k=k, kernel=kernel, + use_laplace=use_laplace) + + def n_cross(self, density_vec): + r"""This method is such that ``cross(n, a)`` can operate across vectors + a and n that are local to a set of disjoint source surfaces. + Essentially, imagine that :math:`\bar{a} = [a_1, a_2, \cdots, a_m]`, + where :math:`a_k` represents a vector density defined on the + :math:`k^{th}` disjoint object. Also imagine that :math:`bar{n} = [n_1, + n_2, \cdots, n_m]`, where :math:`n_k` represents a normal that exists + on the :math:`k^{th}` disjoint object. The goal, then, is to have an + operator that does element-wise cross products.. ie: + + .. 
math:: + + \bar{n} \times \bar{a}) = [ \left(n_1 \times a_1\right), \dots, + \left(n_m \times a_m \right)] + """ + + # specify the sources to be evaluated at + sources = self.geometry_list + + # get the shape of density_vec + (ndim, nobj) = density_vec.shape + + # assert that the ndim value is 3 + assert ndim == 3 + + # init output symbolic quantity with zeros + output = np.zeros(density_vec.shape, dtype=self.stype) + + # loop through the density and sources to construct the appropriate + # element-wise cross product operation + for k in range(0, nobj): + output[:, k] = sym.n_cross(density_vec[:, k], dofdesc=sources[k]) + + # return result from element-wise cross product + return output + + def n_times(self, density_vec): + r"""This method is such that an :math:`\boldsymbol{n} \rho`, for some + normal :math:`\boldsymbol{n}` and some scalar :math:`\rho` can be done + across normals and scalars that exist on multiple surfaces. + Essentially, imagine that :math:`\bar{\rho} = [\rho_1, \cdots, + \rho_m]`, where :math:`\rho_k` represents a scalar density defined on + the :math:`k^{th}` disjoint object. Also imagine that :math:`bar{n} = + [\boldsymbol{n}_1, \cdots, \boldsymbol{n}_m]`, where :math:`n_k` + represents a normal that exists on the :math:`k^{th}` disjoint object. + The goal, then, is to have an operator that does element-wise + products, i.e.: + + .. math:: + + \bar{n}\bar{\rho} = [ \left(\boldsymbol{n}_1 \rho_1\right), \dots, + \left(\boldsymbol{n}_m \rho_m \right)] + """ + + # specify the sources to be evaluated at + sources = self.geometry_list + + # get the shape of density_vec + (ndim, nobj) = density_vec.shape + + # assert that the ndim value is 1 + assert ndim == 1 + + # init output symbolic quantity with zeros + output = np.zeros((3, nobj), dtype=self.stype) + + # loop through the density and sources to construct the appropriate + # element-wise cross product operation + for k in range(0, nobj): + output[:, k] = \ + sym.normal(3, dofdesc=sources[k]).as_vector() * density_vec[0, k] + + # return result from element-wise cross product + return output + + def _extract_phi_densities(self, phi_densities): + return (phi_densities[:self.nobjs], + phi_densities[:self.nobjs].reshape((1, self.nobjs)), + phi_densities[self.nobjs:]) + + def _extract_tau_densities(self, tau_densities): + return (tau_densities, tau_densities.reshape((1, self.nobjs))) + + def _extract_a_densities(self, A_densities): + a0 = A_densities[:(2*self.nobjs)] + a = np.zeros((3, self.nobjs), dtype=self.stype) + rho0 = A_densities[(2*self.nobjs):(3*self.nobjs)] + rho = rho0.reshape((1, self.nobjs)) + v = A_densities[(3*self.nobjs):] + for n in range(0, self.nobjs): + a[:, n] = cse(sym.tangential_to_xyz(a0[2*n:2*(n+1)], + dofdesc=self.geometry_list[n]), "axyz_{0}".format(n)) + return (a0, a, rho0, rho, v) + + def _L(self, a, rho, dofdesc): + + # define some useful common sub expressions + # Sa = cse(self.S(a, dofdesc), "Sa_"+dofdesc) + # Srho = cse(self.S(rho, dofdesc), "Srho_"+dofdesc) + Sn_times_rho = cse( + self.S(self.n_times(rho), dofdesc), "Sn_times_rho_"+dofdesc) + # Sn_cross_a = cse(self.S(self.n_cross(a), dofdesc), "Sn_cross_a_"+dofdesc) + Drho = cse(self.D(rho, dofdesc), "Drho_"+dofdesc) + + return sym.join_fields( + sym.n_cross( + sym.curl(self.S(a, dofdesc)) - self.k * Sn_times_rho, + dofdesc=dofdesc), + Drho) + + def _R(self, a, rho, dofdesc): + # define some useful common sub expressions + # Sa = cse(self.S(a, dofdesc), "Sa_"+dofdesc) + Srho = cse(self.S(rho, dofdesc), "Srho_"+dofdesc) + # Sn_times_rho = 
cse( + # self.S(self.n_times(rho), dofdesc), "Sn_times_rho_"+dofdesc) + Sn_cross_a = cse(self.S(self.n_cross(a), dofdesc), "Sn_cross_a_"+dofdesc) + # Drho = cse(self.D(rho, dofdesc), "Drho_"+dofdesc) + + return sym.join_fields( + sym.n_cross(self.k * Sn_cross_a + sym.grad(ambient_dim=3, + operand=self.S(rho, dofdesc)), dofdesc=dofdesc), + sym.div(self.S(self.n_cross(a), dofdesc)) - self.k * Srho + ) + + def _scaledDPIEs_integral(self, sigma, sigma_n, dofdesc): + qfl = "avg" + + return sym.integral( + ambient_dim=3, + dim=2, + operand=(self.Dp(sigma, target=dofdesc, qfl=qfl)/self.k + + 1j*0.5*sigma_n - 1j*self.Sp(sigma, target=dofdesc, qfl=qfl)), + dofdesc=dofdesc) + + def _scaledDPIEv_integral(self, **kwargs): + qfl = "avg" + + # grab densities and domain to integrate over + a = kwargs['a'] + rho = kwargs['rho'] + rho_n = kwargs['rho_n'] + dofdesc = kwargs['dofdesc'] + + # define some useful common sub expressions + # Sa = cse(self.S(a, dofdesc), "Sa_"+dofdesc) + # Srho = cse(self.S(rho, dofdesc), "Srho_"+dofdesc) + Sn_times_rho = cse( + self.S(self.n_times(rho), dofdesc), "Sn_times_rho_"+dofdesc) + Sn_cross_a = cse(self.S(self.n_cross(a), dofdesc), "Sn_cross_a_"+dofdesc) + # Drho = cse(self.D(rho, dofdesc), "Drho_"+dofdesc) + + return sym.integral( + ambient_dim=3, + dim=2, + operand=( + sym.n_dot(sym.curl(self.S(a, dofdesc)), dofdesc=dofdesc) + - self.k*sym.n_dot(Sn_times_rho, dofdesc=dofdesc) + + 1j*(self.k*sym.n_dot(Sn_cross_a, dofdesc=dofdesc) - 0.5*rho_n + + self.Sp(rho, target=dofdesc, qfl=qfl)) + ), + dofdesc=dofdesc) + + def phi_operator(self, phi_densities): + """ + Integral Equation operator for obtaining scalar potential, `phi` + """ + + # extract the densities needed to solve the system of equations + (sigma0, sigma, V) = self._extract_phi_densities(phi_densities) + + # init output matvec vector for the phi density IE + output = np.zeros((2*self.nobjs,), dtype=self.stype) + + # produce integral equation system over each disjoint object + for n in range(0, self.nobjs): + + # get nth disjoint object + obj_n = self.geometry_list[n] + + # setup IE for evaluation over the nth disjoint object's surface + output[n] = ( + 0.5*sigma0[n] + + self.D(sigma, obj_n) + - 1j*self.k*self.S(sigma, obj_n) - V[n] + ) + + # set up equation that integrates some integral operators over the + # nth surface + output[self.nobjs + n] = self._scaledDPIEs_integral(sigma, sigma[0, + n], dofdesc=obj_n) + + # return the resulting system of IE + return output + + def phi_rhs(self, phi_inc, gradphi_inc): + """ + The Right-Hand-Side for the Integral Equation for `phi` + """ + + # get the scalar f expression for each object + f = np.zeros((self.nobjs,), dtype=self.stype) + for i in range(0, self.nobjs): + f[i] = -phi_inc[i] + + # get the Q_{j} terms inside RHS expression + Q = np.zeros((self.nobjs,), dtype=self.stype) + for i in range(0, self.nobjs): + Q[i] = -sym.integral(3, 2, sym.n_dot(gradphi_inc, + dofdesc=self.geometry_list[i]), dofdesc=self.geometry_list[i]) + + # return the resulting field + return sym.join_fields(f, Q/self.k) + + def a_operator(self, A_densities): + """ + Integral Equation operator for obtaining vector potential, `A` + """ + + # extract the densities needed to solve the system of equations + (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) + + # init output matvec vector for the phi density IE + output = np.zeros((4*self.nobjs,), dtype=self.stype) + + # produce integral equation system over each disjoint object + for n in range(0, self.nobjs): + + # get the nth target 
geometry to have IE solved across + obj_n = self.geometry_list[n] + + # Compute two IE Operators on a and rho densities + L = self._L(a, rho, obj_n) + R = self._R(a, rho, obj_n) + + # generate the set of equations for the vector densities, a, coupled + # across the various geometries involved + output[2*n:2*(n+1)] = sym.xyz_to_tangential( + 0.5*a[:, n] + L[:3] + 1j*R[:3], + dofdesc=obj_n) + + # generate the set of equations for the scalar densities, rho, coupled + # across the various geometries involved + output[(2*self.nobjs + n)] = 0.5*rho[0, n] + L[-1] + 1j*R[-1] - v[n] + + # add the equation that integrates everything out into some constant + output[3*self.nobjs + n] = self._scaledDPIEv_integral( + a=a, rho=rho, rho_n=rho[0, n], dofdesc=obj_n) + + # return output equations + return output + + def a_rhs(self, A_inc, divA_inc): + """ + The Right-Hand-Side for the Integral Equation for `A` + """ + + # get the q , h , and vec(f) associated with each object + q = np.zeros((self.nobjs,), dtype=self.stype) + h = np.zeros((self.nobjs,), dtype=self.stype) + f = np.zeros((2*self.nobjs,), dtype=self.stype) + for i in range(0, self.nobjs): + obj_n = self.geometry_list[i] + q[i] = -sym.integral(3, 2, sym.n_dot(A_inc[3*i:3*(i+1)], dofdesc=obj_n), + dofdesc=obj_n) + h[i] = -divA_inc[i] + f[2*i:2*(i+1)] = sym.xyz_to_tangential( + -sym.n_cross(A_inc[3*i:3*(i+1)], dofdesc=obj_n), dofdesc=obj_n) + + # define RHS for `A` integral equation system + return sym.join_fields(f, h/self.k, q) + + def subproblem_operator(self, tau_densities, alpha=1j): + """ + Integral Equation operator for obtaining sub problem solution + """ + + # extract the densities from the sub problem solution + (tau0, tau) = self._extract_tau_densities(tau_densities) + + # init output matvec vector for the phi density IE + output = np.zeros((self.nobjs,), dtype=self.stype) + + # produce integral equation system over each disjoint object + for n in range(0, self.nobjs): + + # get nth disjoint object + obj_n = self.geometry_list[n] + + # setup IE for evaluation over the nth disjoint object's surface + output[n] = 0.5*tau0[n] + self.D(tau, obj_n) - alpha*self.S(tau, obj_n) + + # return the resulting system of IE + return output + + def subproblem_rhs(self, A_densities): + """ + Integral Equation RHS for obtaining sub problem solution + """ + # extract the densities needed to solve the system of equations + (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) + + # init output matvec vector for the phi density IE + output = np.zeros((self.nobjs,), dtype=self.stype) + + # produce integral equation system over each disjoint object + for n in range(0, self.nobjs): + + # get nth disjoint object + obj_n = self.geometry_list[n] + + # setup IE for evaluation over the nth disjoint object's surface + output[n] = sym.div(self.S(a, target=obj_n, qfl="avg")) + + # return the resulting system of IE + return output + + def subproblem_rhs_func(self, function): + """ + Integral Equation RHS for obtaining sub problem solution + """ + + # init output matvec vector for the phi density IE + output = np.zeros((self.nobjs,), dtype=self.stype) + + # produce integral equation system over each disjoint object + for n in range(0, self.nobjs): + + # get nth disjoint object + obj_n = self.geometry_list[n] + + # setup IE for evaluation over the nth disjoint object's surface + output[n] = function(dofdesc=obj_n) + + # return the resulting system of IE + return output + + def scalar_potential_rep(self, phi_densities, target=None, qfl=None): + """ + This method 
is a representation of the scalar potential, phi, + based on the density `sigma`. + """ + + # extract the densities needed to solve the system of equations + (sigma0, sigma, V) = self._extract_phi_densities(phi_densities) + + # evaluate scalar potential representation + return ( + self.D(sigma, target, qfl=qfl) + - (1j*self.k)*self.S(sigma, target, qfl=qfl) + ) + + def scalar_potential_constants(self, phi_densities): + """ + This method is a representation of the scalar potential, phi, + based on the density `sigma`. + """ + + # extract the densities needed to solve the system of equations + (sigma0, sigma, V) = self._extract_phi_densities(phi_densities) + + # evaluate scalar potential representation + return V + + def grad_scalar_potential_rep(self, phi_densities, target=None, qfl=None): + """ + This method is a representation of the scalar potential, phi, + based on the density `sigma`. + """ + + # extract the densities needed to solve the system of equations + (sigma0, sigma, V) = self._extract_phi_densities(phi_densities) + + # evaluate scalar potential representation + return ( + sym.grad(3, self.D(sigma, target, qfl=qfl)) + - (1j*self.k)*sym.grad(3, self.S(sigma, target, qfl=qfl)) + ) + + def vector_potential_rep(self, A_densities, target=None, qfl=None): + """ + This method is a representation of the vector potential, phi, + based on the vector density `a` and scalar density `rho` + """ + + # extract the densities from the main IE solution + (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) + + # define the vector potential representation + return ( + (sym.curl(self.S(a, target, qfl=qfl)) + - self.k*self.S(self.n_times(rho), target, qfl=qfl)) + + 1j*(self.k*self.S(self.n_cross(a), target, qfl=qfl) + + sym.grad(3, self.S(rho, target, qfl=qfl))) + ) + + def div_vector_potential_rep(self, A_densities, target=None, qfl=None): + """ + This method is a representation of the vector potential, phi, + based on the vector density `a` and scalar density `rho` + """ + + # extract the densities from the main IE solution + (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) + + # define the vector potential representation + return self.k*(self.D(rho, target, qfl=qfl) + + 1j*(sym.div(self.S(self.n_cross(a), target, qfl=qfl)) + - self.k * self.S(rho, target, qfl=qfl))) + + def subproblem_rep(self, tau_densities, target=None, alpha=1j, qfl=None): + """ + This method is a representation of the scalar potential, phi, + based on the density `sigma`. + """ + + # extract the densities needed to solve the system of equations + (tau0, tau) = self._extract_tau_densities(tau_densities) + + # evaluate scalar potential representation + return self.D(tau, target, qfl=qfl) - alpha*self.S(tau, target, qfl=qfl) + + def scattered_volume_field(self, phi_densities, A_densities, tau_densities, + target=None, alpha=1j, qfl=None): + """ + This will return an object of six entries, the first three of which + represent the electric, and the second three of which represent the + magnetic field. + + This satisfies the time-domain Maxwell's equations + as verified by :func:`sumpy.point_calculus.frequency_domain_maxwell`. 
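+
+        A typical evaluation, following the usage in ``test_maxwell_dpie.py``
+        (``dpie`` is an instance of this class, ``geom_map`` a geometry
+        dictionary that also holds the evaluation target under the key
+        ``"tgt"``, and the density arguments are GMRES solutions of the
+        corresponding operators)::
+
+            sym_repr = dpie.scattered_volume_field(
+                phi_densities, A_densities, tau_densities, target="tgt")
+            eh = bind(geom_map, sym_repr)(
+                actx, phi_densities=phi_sol, A_densities=a_sol,
+                tau_densities=tau_sol, k=k)
+            e_scat, h_scat = eh[:3], eh[3:]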
+ """ + + # extract the densities needed + (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) + (sigma0, sigma, V) = self._extract_phi_densities(phi_densities) + (tau0, tau) = self._extract_tau_densities(tau_densities) + + # obtain expressions for scalar and vector potentials + A = self.vector_potential_rep(A_densities, target=target) + # phi = self.scalar_potential_rep(phi_densities, target=target) + + # evaluate the potential form for the electric and magnetic fields + E_scat = ( + 1j*self.k*A + - sym.grad(3, self.D(sigma, target, qfl=qfl)) + + 1j*self.k*sym.grad(3, self.S(sigma, target, qfl=qfl)) + ) + H_scat = ( + sym.grad(3, operand=( + self.D(tau, target, qfl=qfl) + - alpha*self.S(tau, target, qfl=qfl))) + + (self.k**2) * self.S(a, target, qfl=qfl) + - self.k * sym.curl(self.S(self.n_times(rho), target, qfl=qfl)) + + 1j*self.k*sym.curl(self.S(self.n_cross(a), target, qfl=qfl)) + ) + + # join the fields into a vector + return sym.join_fields(E_scat, H_scat) + +# }}} + + +# {{{ Decoupled Potential Integral Equation Operator - Based on Journal Paper + +class DPIEOperatorEvanescent(DPIEOperator): + r""" + Decoupled Potential Integral Equation operator with PEC boundary + conditions, defaults as scaled DPIE. + + See `the journal paper + `_ for details. + + Uses :math:`E(x, t) = Re \lbrace E(x) \exp(-i \omega t) \rbrace` and + :math:`H(x, t) = Re \lbrace H(x) \exp(-i \omega t) \rbrace` and solves for + the :math:`E(x)`, :math:`H(x)` fields using vector and scalar potentials via + the Lorenz Gauage. The DPIE formulates the problem purely in terms of the + vector and scalar potentials, :math:`\boldsymbol{A}` and :math:`\phi`, + and then backs out :math:`E(x)` and :math:`H(x)` via relationships to + the vector and scalar potentials. + """ + + def __init__(self, geometry_list, k=sym.var("k")): + from sumpy.kernel import HelmholtzKernel + from sumpy.kernel import LaplaceKernel + + # specify the frequency variable that will be tuned + self.k = k + self.ik = 1j*k + self.stype = type(self.k) + + # specify the 3-D Helmholtz kernel + self.kernel = HelmholtzKernel(3) + self.kernel_ik = HelmholtzKernel(3, allow_evanescent=True) + self.kernel_laplace = LaplaceKernel(3) + + # specify a list of strings representing geometry objects + self.geometry_list = geometry_list + self.nobjs = len(geometry_list) + + def _eval_all_objects(self, density_vec, int_op, qfl="avg", k=None, kernel=None): + """This private method is so some input integral operator and input + density can be used to evaluate the set of locations defined by the + geometry list + """ + output = np.zeros(density_vec.shape, dtype=self.stype) + (ndim, nobj) = density_vec.shape + for i in range(0, nobj): + output[:, i] = int_op(density_vec=density_vec, + target=self.geometry_list[i], qfl=qfl, k=k, kernel=kernel) + return output + + def _L(self, a, rho, dofdesc): + + # define some useful common sub expressions + Sn_times_rho = cse( + self.S(self.n_times(rho), dofdesc), "Sn_times_rho_"+dofdesc) + Drho = cse(self.D(rho, dofdesc), "Drho_"+dofdesc) + + return sym.join_fields( + sym.n_cross( + sym.curl(self.S(a, dofdesc)) - self.k * Sn_times_rho, + dofdesc=dofdesc), + Drho) + + def _R(self, a, rho, dofdesc): + # define some useful common sub expressions + Sa_ik_nest = cse(self._eval_all_objects(a, self.S, k=self.ik, + kernel=self.kernel_ik), "Sa_ik_nest") + Srho_ik_nest = cse(self._eval_all_objects(rho, self.S, k=self.ik, + kernel=self.kernel_ik), "Srho_ik_nest") + Srho = cse(self.S(Srho_ik_nest, dofdesc), "Srho_"+dofdesc) + Sn_cross_a = 
cse(self.S(self.n_cross(Sa_ik_nest), dofdesc), + "Sn_cross_a_"+dofdesc) + + return self.k*sym.join_fields( + sym.n_cross(self.k * Sn_cross_a + sym.grad(ambient_dim=3, + operand=self.S(Srho_ik_nest, dofdesc)), dofdesc=dofdesc), + sym.div(self.S(self.n_cross(Sa_ik_nest), dofdesc)) - self.k * Srho + ) + + def _scaledDPIEs_integral(self, sigma, sigma_n, dofdesc): + qfl = "avg" + + return sym.integral( + ambient_dim=3, + dim=2, + operand=( + (self.Dp(sigma, target=dofdesc, qfl=qfl) + - self.Dp(sigma, target=dofdesc, qfl=qfl, + kernel=self.kernel_laplace, use_laplace=True)) + / self.k + + 1j*0.5*sigma_n + - 1j*self.Sp(sigma, target=dofdesc, qfl=qfl)), + dofdesc=dofdesc) + + def _scaledDPIEv_integral(self, **kwargs): + qfl = "avg" + + # grab densities and domain to integrate over + a = kwargs['a'] + rho = kwargs['rho'] + # rho_n = kwargs['rho_n'] + dofdesc = kwargs['dofdesc'] + + # define some useful common sub expressions + Sa_ik_nest = cse(self._eval_all_objects(a, self.S, k=self.ik, + kernel=self.kernel_ik), "Sa_ik_nest") + Srho_ik = cse(self.S(rho, dofdesc, k=self.ik, kernel=self.kernel_ik), + "Srho_ik"+dofdesc) + Srho_ik_nest = cse(self._eval_all_objects(rho, self.S, k=self.ik, + kernel=self.kernel_ik), "Srho_ik_nest") + Sn_cross_a = cse(self.S(self.n_cross(Sa_ik_nest), dofdesc), + "Sn_cross_a_nest_"+dofdesc) + Sn_times_rho = cse( + self.S(self.n_times(rho), dofdesc), "Sn_times_rho_"+dofdesc) + + return sym.integral( + ambient_dim=3, + dim=2, + operand=( + -self.k*sym.n_dot(Sn_times_rho, dofdesc=dofdesc) + + 1j*self.k*( + self.k*sym.n_dot(Sn_cross_a, dofdesc=dofdesc) + - 0.5*Srho_ik + self.Sp(Srho_ik_nest, target=dofdesc, qfl=qfl)) + ), + dofdesc=dofdesc) + + def vector_potential_rep(self, A_densities, target=None, qfl=None): + """ + This method is a representation of the vector potential, phi, + based on the vector density `a` and scalar density `rho` + """ + + # extract the densities from the main IE solution + (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) + + # define some useful quantities + Sa_ik_nest = cse(self._eval_all_objects(a, self.S, k=self.ik, + kernel=self.kernel_ik), "Sa_ik_nest") + Srho_ik_nest = cse(self._eval_all_objects(rho, self.S, k=self.ik, + kernel=self.kernel_ik), "Srho_ik_nest") + + # define the vector potential representation + return ( + (sym.curl(self.S(a, target, qfl=qfl)) + - self.k*self.S(self.n_times(rho), target, qfl=qfl)) + + 1j*self.k*(self.k*self.S(self.n_cross(Sa_ik_nest), target, qfl=qfl) + + sym.grad(3, self.S(Srho_ik_nest, target, qfl=qfl))) + ) + + def div_vector_potential_rep(self, A_densities, target=None, qfl=None): + """ + This method is a representation of the vector potential, phi, + based on the vector density `a` and scalar density `rho` + """ + + # extract the densities from the main IE solution + (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) + + # define some useful quantities + Sa_ik_nest = cse(self._eval_all_objects(a, self.S, k=self.ik, + kernel=self.kernel_ik), "Sa_ik_nest") + Srho_ik_nest = cse(self._eval_all_objects(rho, self.S, k=self.ik, + kernel=self.kernel_ik), "Srho_ik_nest") + + # define the vector potential representation + return self.k*(self.D(rho, target, qfl=qfl) + + 1j*self.k*(sym.div(self.S(self.n_cross(Sa_ik_nest), target, + qfl=qfl)) - self.k * self.S(Srho_ik_nest, target, qfl=qfl))) + + def scattered_volume_field(self, phi_densities, A_densities, tau_densities, + target=None, alpha=1j, qfl=None): + """ + This will return an object of six entries, the first three of which + represent the 
electric, and the second three of which represent the + magnetic field. + + This satisfies the time-domain Maxwell's equations + as verified by :func:`sumpy.point_calculus.frequency_domain_maxwell`. + """ + + # extract the densities needed + (a0, a, rho0, rho, v) = self._extract_a_densities(A_densities) + (sigma0, sigma, V) = self._extract_phi_densities(phi_densities) + (tau0, tau) = self._extract_tau_densities(tau_densities) + + # obtain expressions for scalar and vector potentials + Sa_ik_nest = self._eval_all_objects(a, self.S, k=self.ik, + kernel=self.kernel_ik) + A = self.vector_potential_rep(A_densities, target=target) + # phi = self.scalar_potential_rep(phi_densities, target=target) + + # evaluate the potential form for the electric and magnetic fields + E_scat = ( + 1j*self.k*A + - sym.grad(3, self.D(sigma, target, qfl=qfl)) + + 1j*self.k*sym.grad(3, self.S(sigma, target, qfl=qfl))) + H_scat = ( + sym.grad(3, operand=( + self.D(tau, target, qfl=qfl) + - alpha*self.S(tau, target, qfl=qfl))) + + (self.k**2) * self.S(a, target, qfl=qfl) + - self.k * sym.curl(self.S(self.n_times(rho), target, qfl=qfl)) + + 1j*(self.k**2)*sym.curl(self.S(self.n_cross(Sa_ik_nest), + target, qfl=qfl)) + ) + + # join the fields into a vector + return sym.join_fields(E_scat, H_scat) + +# }}} + +# vim: foldmethod=marker diff --git a/pytential/symbolic/primitives.py b/pytential/symbolic/primitives.py index 6f325af13a2a073f0425db592eb465c267d48424..5661e1d53bc89b7470731975ee19f84a009fad31 100644 --- a/pytential/symbolic/primitives.py +++ b/pytential/symbolic/primitives.py @@ -1,4 +1,7 @@ -__copyright__ = "Copyright (C) 2010-2013 Andreas Kloeckner" +__copyright__ = """ +Copyright (C) 2010-2018 Andreas Kloeckner +Copyright (C) 2018 Christian Howard +""" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy @@ -1206,7 +1209,16 @@ class NodeMin(SingleScalarOperandExpression): @_deprecate_kwargs("where", "dofdesc") def integral(ambient_dim, dim, operand, dofdesc=None): - """A volume integral of *operand*.""" + """ + Performs boundary/volume integral on *operand*. + `ambient_dim` is the number of dimensions used to represent space while `dim` + is the dimensionality of the surface being integrated over. + + .. note:: + + If, for example, we wish to integrate over the 2-D surface of a sphere + that resides in in 3-dimensions, then *ambient_dim* = 3 and *dim* = 2. 
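+
+    For instance, with *places* a :class:`~pytential.GeometryCollection`
+    containing a discretization of that sphere and *actx* an array context,
+    its surface area could be computed as::
+
+        area = bind(places, sym.integral(3, 2, 1))(actx)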
+ """ return NodeSum( area_element(ambient_dim, dim, dofdesc) @@ -1739,10 +1751,14 @@ def tangential_derivative(ambient_dim, operand, dim=None, dofdesc=None): @_deprecate_kwargs("where", "dofdesc") def normal_derivative(ambient_dim, operand, dim=None, dofdesc=None): - d = Derivative() - return d.resolve( - (normal(ambient_dim, dim, dofdesc).scalar_product(d.dnabla(ambient_dim))) - * d(operand)) + def make_op(operand_i): + d = Derivative() + return d.resolve( + (normal(ambient_dim, dim, dofdesc=dofdesc) + .scalar_product(d.dnabla(ambient_dim))) + * d(operand_i)) + + return componentwise(make_op, operand) def normal_second_derivative(ambient_dim, operand, dim=None, dofdesc=None): @@ -1900,7 +1916,7 @@ def project_to_tangential(xyz_vec, dofdesc=None): @_deprecate_kwargs("where", "dofdesc") def n_dot(vec, dofdesc=None): - nrm = normal(len(vec), dofdesc).as_vector() + nrm = normal(len(vec), dofdesc=dofdesc).as_vector() return np.dot(nrm, vec) @@ -1919,7 +1935,7 @@ def cross(vec_a, vec_b): @_deprecate_kwargs("where", "dofdesc") def n_cross(vec, dofdesc=None): - return cross(normal(3, dofdesc).as_vector(), vec) + return cross(normal(3, dofdesc=dofdesc).as_vector(), vec) def div(vec): diff --git a/test/test_maxwell.py b/test/test_maxwell.py index 42148f2bcf402a27c3b4dd5f9dcb97bee45f44fe..c1a5871c28c159ae5e1b5c5e99d4dc7a1b9ff2d1 100644 --- a/test/test_maxwell.py +++ b/test/test_maxwell.py @@ -235,7 +235,7 @@ def test_pec_mfie_extinction(ctx_factory, case, from pytential.symbolic.pde.maxwell import ( PECChargeCurrentMFIEOperator, - get_sym_maxwell_point_source, + get_sym_maxwell_point_source_em, get_sym_maxwell_plane_wave) mfie = PECChargeCurrentMFIEOperator() @@ -265,7 +265,7 @@ def test_pec_mfie_extinction(ctx_factory, case, else: # point source return bind(places, - get_sym_maxwell_point_source(mfie.kernel, j_sym, mfie.k), + get_sym_maxwell_point_source_em(mfie.kernel, j_sym, mfie.k), auto_where=(source, target))(actx, j=src_j, k=case.k) # }}} @@ -420,7 +420,7 @@ def test_pec_mfie_extinction(ctx_factory, case, return norm(density_discr, f, p=np.inf) e_bc_residual = scat_norm(eh_bc_values[:3]) / scat_norm(inc_field_scat.e) - h_bc_residual = scat_norm(eh_bc_values[3]) / scat_norm(inc_field_scat.h) + h_bc_residual = scat_norm(eh_bc_values[3:]) / scat_norm(inc_field_scat.h) print("E/H PEC BC residuals:", h_max, e_bc_residual, h_bc_residual) @@ -445,7 +445,7 @@ def test_pec_mfie_extinction(ctx_factory, case, ("Hinc", inc_field_scat.h), ("bdry_normals", bdry_normals), ("e_bc_residual", eh_bc_values[:3]), - ("h_bc_residual", eh_bc_values[3]), + ("h_bc_residual", eh_bc_values[3:]), ]) from pytential.qbx import QBXTargetAssociationFailedException @@ -489,7 +489,7 @@ def test_pec_mfie_extinction(ctx_factory, case, rel_err_h = (obs_norm(inc_field_obs.h + obs_repr.h) / obs_norm(inc_field_obs.h)) - # }}} + # # }}} print("ERR", h_max, rel_err_h, rel_err_e) @@ -505,7 +505,8 @@ def test_pec_mfie_extinction(ctx_factory, case, ("maxwell", eoc_rec_repr_maxwell, 1.5), ("PEC BC", eoc_pec_bc, 1.5), ("H", eoc_rec_h, 1.5), - ("E", eoc_rec_e, 1.5)]: + ("E", eoc_rec_e, 1.5) + ]: print(which_eoc) print(eoc_rec.pretty_print()) diff --git a/test/test_maxwell_dpie.py b/test/test_maxwell_dpie.py new file mode 100644 index 0000000000000000000000000000000000000000..ee43b847f3e18c7cd61b5709c733f0754986ee1e --- /dev/null +++ b/test/test_maxwell_dpie.py @@ -0,0 +1,1165 @@ +from __future__ import division, absolute_import, print_function + +__copyright__ = """ +Copyright (C) 2017 - 2018 Andreas Kloeckner +Copyright (C) 2017 - 2018 
Christian Howard +""" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import numpy as np +import numpy.linalg as la # noqa +from arraycontext import PyOpenCLArrayContext +import pyopencl as cl +import pyopencl.clmath # noqa +import pyopencl.clrandom # noqa +import pytest + +from pytential import bind, sym, norm + +from sumpy.visualization import make_field_plotter_from_bbox # noqa +from sumpy.point_calculus import CalculusPatch, frequency_domain_maxwell +from sumpy.tools import vector_from_device +from pytential.target import PointsTarget +from meshmode.mesh.processing import find_bounding_box +from pytools.convergence import EOCRecorder +from pytential.solve import gmres + +import pytential.symbolic.pde.maxwell as mw +import pytential.symbolic.pde.maxwell.dpie as mw_dpie +from pytential.qbx import QBXLayerPotentialSource +from meshmode.discretization import Discretization +from meshmode.discretization.poly_element import \ + InterpolatoryQuadratureSimplexGroupFactory +from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder + + +import logging +logger = logging.getLogger(__name__) + +from pyopencl.tools import ( # noqa + pytest_generate_tests_for_pyopencl as pytest_generate_tests) + + +# {{{ test cases + +class MaxwellTestCase: + fmm_backend = "fmmlib" + #fmm_backend = "sumpy" + + def __init__(self, k, is_interior, resolutions, qbx_order, + fmm_tolerance): + self.k = k + self.is_interior = is_interior + self.resolutions = resolutions + self.qbx_order = qbx_order + self.fmm_tolerance = fmm_tolerance + + +class SphereTestCase(MaxwellTestCase): + target_order = 8 + gmres_tol = 1e-10 + + def get_mesh(self, resolution, target_order): + from meshmode.mesh.generation import generate_icosphere + from meshmode.mesh.refinement import refine_uniformly + return refine_uniformly( + generate_icosphere(2, target_order), + resolution) + + def get_observation_mesh(self, target_order): + from meshmode.mesh.generation import generate_icosphere + + if self.is_interior: + return generate_icosphere(5, target_order) + else: + return generate_icosphere(0.5, target_order) + + def get_source(self, actx): + if self.is_interior: + source_ctr = np.array([[0.35, 0.1, 0.15]]).T + else: + source_ctr = np.array([[5, 0.1, 0.15]]).T + + source_rad = 0.3 + + sources = source_ctr + source_rad*2*(np.random.rand(3, 10)-0.5) + from pytential.source import PointPotentialSource + return PointPotentialSource(actx.from_numpy(sources)) + + +class RoundedCubeTestCase(MaxwellTestCase): + target_order = 8 + gmres_tol = 
1e-10 + + def get_mesh(self, resolution, target_order): + from meshmode.mesh.io import generate_gmsh, FileSource + mesh = generate_gmsh( + FileSource("rounded-cube.step"), 2, order=3, + other_options=[ + "-string", + "Mesh.CharacteristicLengthMax = %g;" % resolution]) + + from meshmode.mesh.processing import perform_flips, affine_map + mesh = affine_map(mesh, b=np.array([-0.5, -0.5, -0.5])) + mesh = affine_map(mesh, A=np.eye(3)*2) + + # now centered at origin and extends to -1, 1 + + # Flip elements--gmsh generates inside-out geometry. + return perform_flips(mesh, np.ones(mesh.nelements)) + + def get_observation_mesh(self, target_order): + from meshmode.mesh.generation import generate_icosphere + + if self.is_interior: + return generate_icosphere(5, target_order) + else: + return generate_icosphere(0.5, target_order) + + def get_source(self, actx): + if self.is_interior: + source_ctr = np.array([[0.35, 0.1, 0.15]]).T + else: + source_ctr = np.array([[5, 0.1, 0.15]]).T + + source_rad = 0.3 + + sources = source_ctr + source_rad*2*(np.random.rand(3, 10)-0.5) + from pytential.source import PointPotentialSource + return PointPotentialSource(actx.from_numpy(sources)) + + +class ElliptiPlaneTestCase(MaxwellTestCase): + target_order = 4 + gmres_tol = 1e-7 + + def get_mesh(self, resolution, target_order): + from pytools import download_from_web_if_not_present + + download_from_web_if_not_present( + "https://raw.githubusercontent.com/inducer/geometries/master/" + "surface-3d/elliptiplane.brep") + + from meshmode.mesh.io import generate_gmsh, FileSource + mesh = generate_gmsh( + FileSource("elliptiplane.brep"), 2, order=2, + other_options=[ + "-string", + "Mesh.CharacteristicLengthMax = %g;" % resolution]) + + # now centered at origin and extends to -1, 1 + + # Flip elements--gmsh generates inside-out geometry. 
+ from meshmode.mesh.processing import perform_flips + return perform_flips(mesh, np.ones(mesh.nelements)) + + def get_observation_mesh(self, target_order): + from meshmode.mesh.generation import generate_icosphere + + if self.is_interior: + return generate_icosphere(12, target_order) + else: + return generate_icosphere(0.5, target_order) + + def get_source(self, actx): + if self.is_interior: + source_ctr = np.array([[0.35, 0.1, 0.15]]).T + else: + source_ctr = np.array([[3, 1, 10]]).T + + source_rad = 0.3 + + sources = source_ctr + source_rad*2*(np.random.rand(3, 10)-0.5) + from pytential.source import PointPotentialSource + return PointPotentialSource(actx.from_numpy(sources)) + +# }}} + + +tc_int = SphereTestCase(k=1.2, is_interior=True, resolutions=[0, 1], + qbx_order=3, fmm_tolerance=1e-4) + +tc_ext = SphereTestCase(k=1.2, is_interior=False, resolutions=[0, 1], + qbx_order=3, fmm_tolerance=1e-4) + +tc_rc_ext = RoundedCubeTestCase(k=6.4, is_interior=False, resolutions=[0.1], + qbx_order=3, fmm_tolerance=1e-4) + +tc_plane_ext = ElliptiPlaneTestCase(k=2, is_interior=False, resolutions=[0.15], + qbx_order=3, fmm_tolerance=1e-4) + + +class EHField(object): + def __init__(self, eh_field): + assert len(eh_field) == 6 + self.field = eh_field + + @property + def e(self): + return self.field[:3] + + @property + def h(self): + return self.field[3:] + + +# {{ test_dpie_auxiliary + +@pytest.mark.parametrize("case", [ + #tc_int, + tc_ext, + ]) +def test_dpie_auxiliary(ctx_factory, case): + logging.basicConfig(level=logging.INFO) + + cl_ctx = ctx_factory() + queue = cl.CommandQueue(cl_ctx) + actx = PyOpenCLArrayContext(queue) + + pytest.importorskip("pyfmmlib") + + np.random.seed(12) + + knl_kwargs = {"k": case.k, "ik": 1j*case.k} + + geom_list = ["obj0"] + + dpie = mw_dpie.DPIEOperatorEvanescent(geometry_list=geom_list) + tau_densities = sym.make_sym_vector("tau_densities", dpie.num_distinct_objects()) + + calc_patch = CalculusPatch(np.array([-3, 0, 0]), h=0.01) + + # {{{ test the auxiliary problem + + # test that the aux problem is capable of computing the desired derivatives + # of an appropriate input field + + # define method to get locations to evaluate representation + def epsilon_off_boundary(dofdesc=None, epsilon=1e-4): + x = sym.nodes(3, dofdesc).as_vector() + return x + sym.normal(3, 2, dofdesc).as_vector()*epsilon + + # loop through the case's resolutions and compute the scattered field + # solution + + eoc_rec = EOCRecorder() + + for resolution in case.resolutions: + + # get the scattered and observation mesh + scat_mesh = case.get_mesh(resolution, case.target_order) + # observation_mesh = case.get_observation_mesh(case.target_order) + + # define the pre-scattered discretization + pre_scat_discr = Discretization( + actx, scat_mesh, + InterpolatoryQuadratureSimplexGroupFactory(case.target_order)) + + # use OpenCL random number generator to create a set of random + # source locations for various variables being solved for + dpie0 = mw_dpie.DPIEOperator(geometry_list=geom_list) + qbx0 = QBXLayerPotentialSource( + pre_scat_discr, fine_order=4*case.target_order, + #fmm_order=False, + qbx_order=case.qbx_order, + fmm_level_to_order=SimpleExpansionOrderFinder( + case.fmm_tolerance), + fmm_backend=case.fmm_backend + ) + + # define the geometry dictionary + geom_map = { + "obj0": qbx0, + "obj0t": qbx0.density_discr, + "scat": qbx0.density_discr} + + # define points to evaluate the gradient at + tgt_n = PointsTarget(bind(geom_map, + epsilon_off_boundary(dofdesc='obj0', epsilon=1.0))(actx)) + 
geom_map['tgt'] = tgt_n + + # define the quantity that will have a derivative taken of it and + # its associated derivative + def get_test_function(dofdesc=None): + z = sym.nodes(3, dofdesc).as_vector() + z2 = sym.cse(np.dot(z, z), "z_mag_squared") + g = sym.exp(1j*dpie0.k*sym.sqrt(z2))/(4.0*np.pi*sym.sqrt(z2)) + return g + + def get_test_gradient(dofdesc=None): + z = sym.nodes(3, dofdesc).as_vector() + z2 = sym.cse(np.dot(z, z), "z_mag_squared") + grad_g = z*sym.exp(1j*dpie0.k*sym.sqrt(z2))*(1j*dpie.k + - 1.0/sym.sqrt(z2))/(4*np.pi*z2) + return grad_g + + # compute output gradient evaluated at the desired object + tgrad = bind(geom_map, get_test_gradient(dofdesc="tgt"))(actx, **knl_kwargs) + test_func_d = vector_from_device(queue, tgrad) + + # define the problem that will be solved + test_tau_op = bind(geom_map, + dpie0.subproblem_operator(tau_densities=tau_densities)) + test_tau_rhs = bind(geom_map, + dpie0.subproblem_rhs_func(function=get_test_function))(actx, + **knl_kwargs) + + # set GMRES settings for solving + gmres_settings = dict( + tol=case.gmres_tol, + progress=True, + hard_failure=True, + stall_iterations=50, no_progress_factor=1.05) + + subprob_result = gmres( + test_tau_op.scipy_op(actx, "tau_densities", np.complex128, + domains=dpie0.get_subproblem_domain_list(), + **knl_kwargs), + test_tau_rhs, **gmres_settings) + dummy_tau = subprob_result.solution + + # compute the error between the associated derivative quantities + tgrad = bind(geom_map, sym.grad(3, + dpie0.subproblem_rep(tau_densities=tau_densities, + target='tgt')))(actx, tau_densities=dummy_tau, + **knl_kwargs) + approx_d = vector_from_device(queue, tgrad) + err = ( + calc_patch.norm(test_func_d - approx_d, np.inf) + / calc_patch.norm(approx_d, np.inf)) + + # append error to the error list + eoc_rec.add_data_point(bind(qbx0, sym.h_max(qbx0.ambient_dim))(actx), err) + + print(eoc_rec) + + assert eoc_rec.order_estimate() >= case.qbx_order - 0.5 + + # }}} + +# }}} + + +@pytest.mark.parametrize("case", [ + #tc_int, + tc_ext, + ]) +def test_pec_dpie_extinction( + ctx_factory, case, + visualize=False, + test_representations=False, + test_operators=False, + ): + + """For (say) is_interior=False (the 'exterior' BVP), this test verifies + extinction of the combined (incoming + scattered) field on the interior + of the scatterer. 
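+
+    Concretely, on an observation geometry inside the scatterer this checks
+    relative residuals of the form (mirroring ``test_maxwell.py``)::
+
+        rel_err_e = (obs_norm(inc_field_obs.e + obs_repr.e)
+                / obs_norm(inc_field_obs.e))
+
+    which should tend to zero under mesh refinement.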
+ """ + + logging.basicConfig(level=logging.INFO) + + cl_ctx = ctx_factory() + queue = cl.CommandQueue(cl_ctx) + actx = PyOpenCLArrayContext(queue) + + pytest.importorskip("pyfmmlib") + + np.random.seed(12) + + knl_kwargs = {"k": case.k, "ik": 1j*case.k} + + geom_list = ["obj0"] + + # {{{ come up with a solution to Maxwell's equations + + # initialize the DPIE operator based on the geometry list + dpie = mw_dpie.DPIEOperatorEvanescent(geometry_list=geom_list) + + # specify some symbolic variables that will be used + # in the process to solve integral equations for the DPIE + phi_densities = sym.make_sym_vector("phi_densities", + dpie.num_scalar_potential_densities()) + A_densities = sym.make_sym_vector("A_densities", + dpie.num_vector_potential_densities()) + tau_densities = sym.make_sym_vector("tau_densities", dpie.num_distinct_objects()) + + # get test source locations from the passed in case's queue + test_source = case.get_source(actx) + + # create the calculus patch and get calculus patch for targets + calc_patch = CalculusPatch(np.array([-3, 0, 0]), h=0.01) + calc_patch_tgt = PointsTarget(cl.array.to_device(queue, calc_patch.points)) + + # define some parameters for the incident wave + # direction for the wave + u_dir = np.array([1, 0, 0], dtype=np.complex128) + + # polarization vector + Ep = np.array([0, 1, 1], dtype=np.complex128) + + # define symbolic vectors for use + uvar = sym.make_sym_vector("u", 3) + Evar = sym.make_sym_vector("Ep", 3) + + # define functions that can be used to generate incident fields for an + # input discretization + # define potentials based on incident plane wave + def get_incident_plane_wave_EHField(tgt): + return bind((test_source, tgt), + mw.get_sym_maxwell_plane_wave(amplitude_vec=Evar, v=uvar, + omega=dpie.k))(actx, u=u_dir, Ep=Ep, **knl_kwargs) + + # get the gradphi_inc field evaluated at some source locations + def get_incident_gradphi(objects, target=None): + return bind(objects, mw.get_sym_maxwell_planewave_gradphi(u=uvar, + Ep=Evar, k=dpie.k, dofdesc=target))(actx, u=u_dir, + Ep=Ep, **knl_kwargs) + + # get the incident plane wave div(A) + def get_incident_divA(objects, target=None): + return bind(objects, mw.get_sym_maxwell_planewave_divA(u=uvar, Ep=Evar, + k=dpie.k, dofdesc=target))(actx, u=u_dir, Ep=Ep, **knl_kwargs) + + # method to get vector potential and scalar potential for incident + # E-M fields + def get_incident_potentials(objects, target=None): + return bind(objects, mw.get_sym_maxwell_planewave_potentials(u=uvar, + Ep=Evar, k=dpie.k, dofdesc=target))(actx, u=u_dir, + Ep=Ep, **knl_kwargs) + + # define a smooth function to represent the density + def dummy_density(omega=1.0, dofdesc=None): + x = sym.nodes(3, dofdesc).as_vector() + return sym.sin(omega*sym.n_dot(x, dofdesc)) + + # get the Electromagnetic field evaluated at the target calculus patch + pde_test_inc = EHField(vector_from_device(queue, + get_incident_plane_wave_EHField(calc_patch_tgt))) + + # compute residuals of incident field at source points + source_maxwell_resids = [ + calc_patch.norm(x, np.inf) / calc_patch.norm(pde_test_inc.e, np.inf) + for x in frequency_domain_maxwell( + calc_patch, pde_test_inc.e, pde_test_inc.h, case.k)] + + # make sure Maxwell residuals are small so we know the incident field + # properly satisfies the maxwell equations + print("Source Maxwell residuals:", source_maxwell_resids) + assert max(source_maxwell_resids) < 1e-6 + + # }}} + + # {{{ test the representations + + if test_representations: + + # import a bunch of stuff that will be useful + 
+        from pytential.qbx import QBXLayerPotentialSource
+        from meshmode.discretization import Discretization
+        from meshmode.discretization.poly_element import \
+                InterpolatoryQuadratureSimplexGroupFactory
+        from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder
+
+        # loop through the case's resolutions and compute the scattered field
+        # solution
+        rep_error = []
+        for resolution in case.resolutions:
+
+            # get the scattered and observation mesh
+            scat_mesh = case.get_mesh(resolution, case.target_order)
+            # observation_mesh = case.get_observation_mesh(case.target_order)
+
+            # define the pre-scattered discretization
+            pre_scat_discr = Discretization(
+                    actx, scat_mesh,
+                    InterpolatoryQuadratureSimplexGroupFactory(case.target_order))
+
+            # build the DPIE operator and the QBX layer potential source
+            # for this resolution
+            dpie0 = mw_dpie.DPIEOperator(geometry_list=geom_list)
+            qbx0 = QBXLayerPotentialSource(
+                    pre_scat_discr, fine_order=4*case.target_order,
+                    #fmm_order=False,
+                    qbx_order=case.qbx_order,
+                    fmm_level_to_order=SimpleExpansionOrderFinder(
+                        case.fmm_tolerance),
+                    fmm_backend=case.fmm_backend
+                    )
+
+            # define the geometry dictionary
+            geom_map = {
+                    "obj0": qbx0,
+                    "obj0t": qbx0.density_discr,
+                    "scat": qbx0.density_discr
+                    }
+
+            dummy_phi = np.array([None]*dpie0.num_scalar_potential_densities(),
+                    dtype=dpie0.stype)
+            dummy_A = np.array([None]*dpie0.num_vector_potential_densities(),
+                    dtype=dpie0.stype)
+
+            # v = rng.normal(queue, (qbx0.density_discr.nnodes,), dtype=np.float64)
+            # s = 0*rng.normal(queue, (), dtype=np.float64)
+            n1 = len(dummy_phi)
+            n2 = len(dummy_A)
+            for i in range(0, n1):
+                dummy_phi[i] = bind(geom_map, dummy_density(dofdesc='obj0'))(actx)
+            for i in range(0, n2):
+                dummy_A[i] = bind(geom_map, dummy_density(dofdesc='obj0'))(actx)
+            test_tau_op = bind(geom_map,
+                    dpie0.subproblem_operator(tau_densities=tau_densities))
+            test_tau_rhs = bind(geom_map,
+                    dpie0.subproblem_rhs(A_densities=A_densities))(actx,
+                            A_densities=dummy_A, **knl_kwargs)
+
+            # set GMRES settings for solving
+            gmres_settings = dict(
+                    tol=case.gmres_tol,
+                    progress=True,
+                    hard_failure=True,
+                    stall_iterations=50, no_progress_factor=1.05)
+
+            # solve the subproblem for the tau densities corresponding to
+            # the dummy A densities
+            subprob_result = gmres(
+                    test_tau_op.scipy_op(actx, "tau_densities", np.complex128,
+                        domains=dpie0.get_subproblem_domain_list(),
+                        **knl_kwargs),
+                    test_tau_rhs, **gmres_settings)
+            dummy_tau = subprob_result.solution
+
+            # symbolic scattered-field representation evaluated at 'tgt'
+            sym_repr0 = dpie.scattered_volume_field(phi_densities, A_densities,
+                    tau_densities, target='tgt')
+
+            def eval_test_repr_at(tgt):
+                places = geom_map.copy()
+                places['tgt'] = tgt
+                return bind(places, sym_repr0)(actx, phi_densities=dummy_phi,
+                        A_densities=dummy_A, tau_densities=dummy_tau,
+                        **knl_kwargs)
+
+            pde_test_repr = EHField(vector_from_device(actx,
+                eval_test_repr_at(calc_patch_tgt)))
+
+            maxwell_residuals = [
+                    calc_patch.norm(x, np.inf)
+                    / calc_patch.norm(pde_test_repr.e, np.inf)
+                    for x in frequency_domain_maxwell(calc_patch,
+                        pde_test_repr.e, pde_test_repr.h, case.k)]
+            print("Maxwell residuals:", maxwell_residuals)
+            rep_error.append(maxwell_residuals)
+
+        print("Representation Error Results:")
+        for n in range(0, len(rep_error)):
+            print("Case {0}: {1}".format(n+1, rep_error[n]))
+
+    # }}}
+
+    # {{{ test the operators
+
+    if test_operators:
+
+        # define error array
+        op_error = []
+
+        # helper giving locations slightly off the boundary at which the
+        # representation will be evaluated
+        def epsilon_off_boundary(dofdesc=None, epsilon=1e-4):
+            x = sym.nodes(3, dofdesc).as_vector()
+            return x + sym.normal(3, 2,
+                dofdesc).as_vector()*epsilon
+
+        # import a bunch of stuff that will be useful
+        from pytential.qbx import QBXLayerPotentialSource
+        from meshmode.discretization import Discretization
+        from meshmode.discretization.poly_element import \
+                InterpolatoryQuadratureSimplexGroupFactory
+        from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder
+
+        # loop through the case's resolutions and compute the scattered field
+        # solution
+        for resolution in case.resolutions:
+
+            # get the scattered and observation mesh
+            scat_mesh = case.get_mesh(resolution, case.target_order)
+            # observation_mesh = case.get_observation_mesh(case.target_order)
+
+            # define the pre-scattered discretization
+            pre_scat_discr = Discretization(
+                    actx, scat_mesh,
+                    InterpolatoryQuadratureSimplexGroupFactory(case.target_order))
+
+            # build the DPIE operator and the QBX layer potential source
+            # for this resolution
+            dpie0 = mw_dpie.DPIEOperatorEvanescent(geometry_list=geom_list)
+            qbx0 = QBXLayerPotentialSource(
+                    pre_scat_discr, fine_order=4*case.target_order,
+                    #fmm_order=False,
+                    qbx_order=case.qbx_order,
+                    fmm_level_to_order=SimpleExpansionOrderFinder(
+                        case.fmm_tolerance),
+                    fmm_backend=case.fmm_backend
+                    )
+
+            # define the geometry dictionary
+            geom_map = {
+                    "obj0": qbx0,
+                    "obj0t": qbx0.density_discr,
+                    "scat": qbx0.density_discr
+                    }
+
+            # compute off-boundary locations at which the representation
+            # will need to be evaluated
+            tgt_n = PointsTarget(
+                    bind(geom_map,
+                        epsilon_off_boundary(dofdesc='obj0', epsilon=1e-4))(actx))
+            geom_map['tgt'] = tgt_n
+
+            # define a dummy density, specifically to be used for the vector
+            # potential A densities
+            x, y, z = qbx0.density_discr.nodes().with_queue(queue)
+            m = cl.clmath
+
+            density_sym = sym.make_sym_vector("density", 2)
+
+            # The tangential coordinate system is element-local, so we can't just
+            # conjure up some globally smooth functions, interpret their values
+            # in the tangential coordinate system, and be done. Instead, generate
+            # an XYZ function and project it.
+ density = bind( + qbx0, + sym.xyz_to_tangential(sym.make_sym_vector("jxyz", 3)))( + actx, + jxyz=sym.make_obj_array([ + m.cos(0.5*x) * m.cos(0.5*y) * m.cos(0.5*z), + m.sin(0.5*x) * m.cos(0.5*y) * m.sin(0.5*z), + m.sin(0.5*x) * m.cos(0.5*y) * m.cos(0.5*z), + ])) + + # redefine a_densities + #A_densities = sym.make_sym_vector("A_densities", dpie.num_vector_potential_densities2()) # noqa + + # init random dummy densities for the vector and scalar potentials + dummy_phi = np.array([None]*dpie0.num_scalar_potential_densities(), + dtype=dpie0.stype) + dummy_A = np.array([None]*dpie0.num_vector_potential_densities(), + dtype=dpie0.stype) + dummy_tau = np.array([None]*dpie0.num_distinct_objects(), + dtype=dpie0.stype) + + # compute zero scalar for use in extra constants that are usually + # solved for in operators + n1 = len(dummy_phi) + n2 = len(dummy_A) + n3 = len(dummy_tau) + + for i in range(0, n1): + if i < (n1-1): + dummy_phi[i] = bind( + geom_map, dummy_density(dofdesc='obj0'))(actx) + else: + dummy_phi[i] = 0.0 + for i in range(0, n2): + if i < 2: + dummy_A[i] = density[i] + elif i < (n2-1): + dummy_A[i] = bind(geom_map, dummy_density(dofdesc='obj0'))(actx) + else: + dummy_A[i] = 0.0 + for i in range(0, n3): + dummy_tau[i] = bind(geom_map, dummy_density(dofdesc='obj0'))(actx) + + # check that the scalar density operator and representation are similar + def vector_op_transform(vec_op_out): + a = sym.tangential_to_xyz(vec_op_out[:2], dofdesc='obj0') + return sym.join_fields(a, vec_op_out[2:]) + + scalar_op = dpie0.phi_operator(phi_densities=phi_densities) + #vector_op = vector_op_transform(dpie0.a_operator0(A_densities=A_densities)[:-1]) # noqa + vector_op = vector_op_transform( + dpie0.a_operator(A_densities=A_densities)) + #vector_op = dpie0.a_operator2(A_densities=A_densities)[:-1] + tau_op = dpie0.subproblem_operator(tau_densities=tau_densities) + + # evaluate operators at the dummy densities + scalar_op_eval = vector_from_device(queue, + bind(geom_map, scalar_op)( + actx, phi_densities=dummy_phi, **knl_kwargs)) + vector_op_eval = vector_from_device(queue, + bind(geom_map, vector_op)( + actx, A_densities=dummy_A, **knl_kwargs)) + tau_op_eval = vector_from_device(queue, + bind(geom_map, tau_op)( + actx, tau_densities=dummy_tau, **knl_kwargs)) + + # define the vector operator equivalent representations + #def vec_op_repr(A_densities, target): + # return sym.join_fields(sym.n_cross(dpie0.vector_potential_rep0(A_densities=A_densities, target=target), dofdesc='obj0'), # noqa: E501 + # dpie0.div_vector_potential_rep0(A_densities=A_densities, target=target)/dpie0.k) # noqa: E501 + def vec_op_repr(A_densities, target): + return sym.join_fields( + sym.n_cross( + dpie0.vector_potential_rep( + A_densities=A_densities, + target=target), + dofdesc='obj0'), + dpie0.div_vector_potential_rep( + A_densities=A_densities, + target=target)/dpie0.k) + + scalar_rep_eval = vector_from_device(queue, bind(geom_map, + dpie0.scalar_potential_rep(phi_densities=phi_densities, + target='tgt'))(actx, phi_densities=dummy_phi, + **knl_kwargs)) + vector_rep_eval = vector_from_device(queue, bind(geom_map, + vec_op_repr(A_densities=A_densities, target='tgt'))(actx, + A_densities=dummy_A, **knl_kwargs)) + tau_rep_eval = vector_from_device(queue, bind(geom_map, + dpie0.subproblem_rep(tau_densities=tau_densities, + target='tgt'))(actx, tau_densities=dummy_tau, + **knl_kwargs)) + + axyz = sym.tangential_to_xyz(density_sym, dofdesc='obj0') + + def nxcurlS0(qbx_forced_limit): + return 
sym.n_cross(sym.curl(dpie0.S(axyz.reshape(3, 1), + target='obj0t', qfl=qbx_forced_limit)), dofdesc='obj0') + + test_op_err = vector_from_device(queue, + bind( + geom_map, + 0.5*axyz + + nxcurlS0("avg") - nxcurlS0(+1) + )(actx, density=density, **knl_kwargs)) + + from sumpy.kernel import LaplaceKernel + knl = LaplaceKernel(3) + + from sumpy.kernel import HelmholtzKernel + knl = HelmholtzKernel(3) + + def nxcurlS(qbx_forced_limit): + + return sym.n_cross( + sym.curl(sym.S( + knl, + sym.cse( + sym.tangential_to_xyz(density_sym, dofdesc='obj0'), + "jxyz"), + k=dpie0.k, + qbx_forced_limit=qbx_forced_limit, + source='obj0', target='obj0t')), + dofdesc='obj0') + + jump_identity_sym = ( + nxcurlS(+1) + - (nxcurlS("avg") + + 0.5*sym.tangential_to_xyz(density_sym, dofdesc='obj0'))) + + bound_jump_identity = bind(geom_map, jump_identity_sym) + jump_identity = bound_jump_identity(actx, density=density, **knl_kwargs) + + err = (norm(qbx0, queue, jump_identity, np.inf)) + print("ERROR", bind(qbx0, sym.h_max(qbx0.ambient_dim))(actx), err) + + # compute the error between the operator values and the + # representation values + + def error_diff(u, v): + return np.linalg.norm(u-v, np.inf) + error_v = [error_diff(scalar_op_eval[0], scalar_rep_eval), + error_diff(vector_op_eval[0], vector_rep_eval[0]), + error_diff(vector_op_eval[1], vector_rep_eval[1]), + error_diff(vector_op_eval[2], vector_rep_eval[2]), + error_diff(vector_op_eval[3], vector_rep_eval[3]), + error_diff(tau_op_eval[0], tau_rep_eval), + np.linalg.norm(test_op_err[0], np.inf), + np.linalg.norm(test_op_err[1], np.inf), + np.linalg.norm(test_op_err[2], np.inf)] + op_error.append(error_v) + + # print the resulting error results + print("Operator Error Results:") + for n in range(0, len(op_error)): + print("Case {0}: {1}".format(n+1, op_error[n])) + + # }}} + + # {{{ solve for the scattered field + + solve_scattered_field = True + if solve_scattered_field: + loc_sign = -1 if case.is_interior else +1 + + # import a bunch of stuff that will be useful + from pytential.qbx import QBXLayerPotentialSource + from meshmode.discretization import Discretization + from meshmode.discretization.poly_element import \ + InterpolatoryQuadratureSimplexGroupFactory + from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder + + # setup an EOC Recorder + eoc_rec_repr_maxwell = EOCRecorder() + eoc_pec_bc = EOCRecorder() + + def frequency_domain_gauge_condition(cpatch, A, phi, k): + # define constants used for the computation + mu = 1 + epsilon = 1 + c = 1/np.sqrt(mu*epsilon) + omega = k*c + + # compute the gauge condition residual + # assumed time dependence exp(-1j*omega*t) + resid_gauge_cond = cpatch.div(A) - 1j*omega*mu*epsilon*phi + + # return the residual for the gauge condition + return resid_gauge_cond + + def gauge_check(divA, phi, k): + return divA - 1j*k*phi + + # loop through the case's resolutions and compute the scattered field + # solution + gauge_err = [] + maxwell_err = [] + for resolution in case.resolutions: + + # get the scattered and observation mesh + scat_mesh = case.get_mesh(resolution, case.target_order) + # observation_mesh = case.get_observation_mesh(case.target_order) + + # define the pre-scattered discretization + pre_scat_discr = Discretization( + actx, scat_mesh, + InterpolatoryQuadratureSimplexGroupFactory(case.target_order)) + + # obtain the QBX layer potential source object + qbx = QBXLayerPotentialSource( + pre_scat_discr, fine_order=4*case.target_order, + #fmm_order=False, + qbx_order=case.qbx_order, + 
fmm_level_to_order=SimpleExpansionOrderFinder( + case.fmm_tolerance), + fmm_backend=case.fmm_backend + ) + + # define the geometry dictionary + #geom_map = {"g0": qbx} + geom_map = { + "obj0": qbx, + "obj0t": qbx.density_discr, + "scat": qbx.density_discr + } + + # get the maximum mesh element edge length + h_max = bind(qbx, sym.h_max(qbx.ambient_dim))(actx) + + # define the scattered and observation discretization + scat_discr = qbx.density_discr + # obs_discr = Discretization( + # cl_ctx, observation_mesh, + # InterpolatoryQuadratureSimplexGroupFactory(case.target_order)) + + # get the incident field at the scatter and observation locations + #inc_EM_field_scat = EHField(eval_inc_field_at(scat_discr)) + #inc_EM_field_obs = EHField(eval_inc_field_at(obs_discr)) + #inc_vec_field_scat = get_inc_potentials(scat_discr) + #inc_vec_field_obs = get_inc_potentials(obs_discr) + + # {{{ solve the system of integral equations + inc_A = sym.make_sym_vector("inc_A", 3) + inc_phi = sym.make_sym_vector("inc_phi", 1) + inc_divA = sym.make_sym_vector("inc_divA", 1) + inc_gradPhi = sym.make_sym_vector("inc_gradPhi", 3) + + # get the incident fields used for boundary conditions + (phi_inc, A_inc) = get_incident_potentials(geom_map, 'scat') + inc_divA_scat = get_incident_divA(geom_map, 'scat') + inc_gradPhi_scat = get_incident_gradphi(geom_map, 'scat') + + # check that the boundary conditions satisfy gauge condition + # resid = bind(geom_map, gauge_check(inc_divA, inc_phi, + # dpie.k))(queue, inc_divA=inc_divA_scat, inc_phi=phi_inc, + # **knl_kwargs) + + # setup operators that will be solved + phi_op = bind(geom_map, dpie.phi_operator(phi_densities=phi_densities)) + A_op = bind(geom_map, dpie.a_operator(A_densities=A_densities)) + + # setup the RHS with provided data so we can solve for density + # values across the domain + + phi_rhs = bind(geom_map, dpie.phi_rhs(phi_inc=inc_phi, + gradphi_inc=inc_gradPhi))(actx, inc_phi=phi_inc, + inc_gradPhi=inc_gradPhi_scat, **knl_kwargs) + A_rhs = bind(geom_map, dpie.a_rhs(A_inc=inc_A, + divA_inc=inc_divA))( + actx, inc_A=A_inc, inc_divA=inc_divA_scat, + **knl_kwargs) + + # set GMRES settings for solving + gmres_settings = dict( + tol=case.gmres_tol, + progress=True, + hard_failure=True, + stall_iterations=50, no_progress_factor=1.05) + + # solve for the scalar potential densities + gmres_result = gmres( + phi_op.scipy_op(actx, "phi_densities", + np.complex128, + domains=dpie.get_scalar_domain_list(), + **knl_kwargs), + phi_rhs, **gmres_settings) + phi_dens = gmres_result.solution + + # solve for the vector potential densities + gmres_result = gmres( + A_op.scipy_op(actx, "A_densities", np.complex128, + domains=dpie.get_vector_domain_list(), **knl_kwargs), + A_rhs, **gmres_settings) + A_dens = gmres_result.solution + + # solve sub problem for sigma densities + tau_op = bind(geom_map, + dpie.subproblem_operator(tau_densities=tau_densities)) + tau_rhs = bind(geom_map, + dpie.subproblem_rhs(A_densities=A_densities))(actx, + A_densities=A_dens, **knl_kwargs) + gmres_result = gmres( + tau_op.scipy_op(actx, "tau_densities", np.complex128, + domains=dpie.get_subproblem_domain_list(), + **knl_kwargs), + tau_rhs, **gmres_settings) + tau_dens = gmres_result.solution + + # extract useful solutions + def eval_potentials(tgt): + tmap = geom_map + tmap['tgt'] = tgt + phi = vector_from_device(queue, bind(tmap, + dpie.scalar_potential_rep(phi_densities=phi_densities, + target='tgt'))(actx, phi_densities=phi_dens, + **knl_kwargs)) + Axyz = vector_from_device(queue, bind(tmap, + 
dpie.vector_potential_rep(A_densities=A_densities, + target='tgt'))(actx, A_densities=A_dens, + **knl_kwargs)) + return (phi, Axyz) + + (phi, A) = eval_potentials(calc_patch_tgt) + gauge_residual = frequency_domain_gauge_condition( + calc_patch, A, phi, case.k) + err = calc_patch.norm(gauge_residual, np.inf) + gauge_err.append(err) + + # }}} + + # {{{ volume eval + + sym_repr = dpie.scattered_volume_field( + phi_densities, A_densities, tau_densities, target='tgt') + + def eval_repr_at(tgt, source=None): + places = geom_map.copy() + places['tgt'] = tgt + if source is not None: + places['obj0'] = source + + return bind(places, sym_repr)(actx, phi_densities=phi_dens, + A_densities=A_dens, tau_densities=tau_dens, + **knl_kwargs) + + pde_test_repr = EHField(vector_from_device(actx, + eval_repr_at(calc_patch_tgt))) + + maxwell_residuals = [ + calc_patch.norm(x, np.inf) + / calc_patch.norm(pde_test_repr.e, np.inf) + for x in frequency_domain_maxwell( + calc_patch, pde_test_repr.e, + pde_test_repr.h, case.k)] + + print("Maxwell residuals:", maxwell_residuals) + + maxwell_err.append(maxwell_residuals) + eoc_rec_repr_maxwell.add_data_point(h_max, max(maxwell_residuals)) + + # }}} + + # {{{ check potential PEC BC on total field + + def scalar_pot_PEC_residual(phi, inc_phi, dofdesc=None): + V = dpie.scalar_potential_constants(phi_densities=phi) + return dpie.scalar_potential_rep( + phi_densities=phi, target=dofdesc, qfl=loc_sign + ) + inc_phi - V[0] + + def vector_pot_PEC_residual(a_densities, inc_a, dofdesc=None): + return sym.n_cross( + dpie.vector_potential_rep( + A_densities=a_densities, target=dofdesc, qfl=loc_sign) + + inc_a, dofdesc=dofdesc) + + phi_pec_bc_resid = scalar_pot_PEC_residual( + phi_densities, inc_phi, dofdesc="obj0") + A_pec_bc_resid = vector_pot_PEC_residual( + A_densities, inc_A, dofdesc="obj0") + + scalar_bc_values = bind(geom_map, phi_pec_bc_resid)( + actx, phi_densities=phi_dens, inc_phi=phi_inc, **knl_kwargs) + vector_bc_values = bind(geom_map, A_pec_bc_resid)( + actx, A_densities=A_dens, inc_A=A_inc, **knl_kwargs) + + def scat_norm(f): + return norm(qbx, queue, f, p=np.inf) + + scalar_bc_residual = scat_norm(scalar_bc_values) # / scat_norm(phi_inc) + vector_bc_residual = scat_norm(vector_bc_values) # / scat_norm(A_inc) + + print( + "Potential PEC BC residuals:", + h_max, scalar_bc_residual, vector_bc_residual) + + eoc_pec_bc.add_data_point( + h_max, max(scalar_bc_residual, vector_bc_residual)) + + # }}} + + # {{{ check if dpie helmholtz bcs are satisfied + + # }}} + + # {{{ visualization + + if visualize: + from meshmode.discretization.visualization import make_visualizer + bdry_vis = make_visualizer(queue, scat_discr, case.target_order+3) + + bdry_normals = bind(scat_discr, sym.normal(3))(actx)\ + .as_vector(dtype=object) + + bdry_vis.write_vtk_file("source-%s.vtu" % resolution, [ + ("phi", phi), + # ("Axyz", Axyz), + # ("Einc", inc_EM_field_scat.e), + # ("Hinc", inc_EM_field_scat.h), + ("bdry_normals", bdry_normals), + # ("e_bc_residual", eh_bc_values[:3]), + # ("h_bc_residual", eh_bc_values[3]), + ]) + + fplot = make_field_plotter_from_bbox( + find_bounding_box(scat_discr.mesh), h=(0.05, 0.05, 0.3), + extend_factor=0.3) + + from pytential.qbx import QBXTargetAssociationFailedException + + qbx_tgt_tol = qbx.copy(target_association_tolerance=0.2) + + fplot_tgt = PointsTarget(cl.array.to_device(queue, fplot.points)) + try: + fplot_repr = eval_repr_at(fplot_tgt, source=qbx_tgt_tol) + except QBXTargetAssociationFailedException as e: + fplot.write_vtk_file( + 
"failed-targets.vts", + [ + ("failed_targets", e.failed_target_flags.get(queue)) + ]) + raise + + fplot_repr = EHField(vector_from_device(queue, fplot_repr)) + + # fplot_inc = EHField( + # vector_from_device(queue, eval_inc_field_at(fplot_tgt))) + + fplot.write_vtk_file( + "potential-%s.vts" % resolution, + [ + ("E", fplot_repr.e), + ("H", fplot_repr.h), + # ("Einc", fplot_inc.e), + # ("Hinc", fplot_inc.h), + ] + ) + + # }}} + + # {{{ error in E, H + + # obs_repr = EHField(eval_repr_at(obs_discr)) + + # def obs_norm(f): + # return norm(obs_discr, queue, f, p=np.inf) + + # inc_field_scat = EHField(get_incident_plane_wave_EHField(scat_discr)) + # inc_field_obs = EHField(get_incident_plane_wave_EHField(obs_discr)) + + # rel_err_e = (obs_norm(inc_field_obs.e + obs_repr.e) + # / obs_norm(inc_field_obs.e)) + # rel_err_h = (obs_norm(inc_field_obs.h + obs_repr.h) + # / obs_norm(inc_field_obs.h)) + + # # }}} + + # print("ERR", h_max, rel_err_h, rel_err_e) + + # eoc_rec_h.add_data_point(h_max, rel_err_h) + # eoc_rec_e.add_data_point(h_max, rel_err_e) + + print("--------------------------------------------------------") + print("is_interior=%s" % case.is_interior) + print("--------------------------------------------------------") + + print("Gauge Error: {0}".format(gauge_err)) + print("Maxwell Residuals: {0}".format(maxwell_err)) + + good = True + for which_eoc, eoc_rec, order_tol in [ + ("maxwell", eoc_rec_repr_maxwell, 1.5), + ("PEC BC", eoc_pec_bc, 1.5) + #("H", eoc_rec_h, 1.5), + #("E", eoc_rec_e, 1.5) + ]: + print(which_eoc) + print(eoc_rec.pretty_print()) + + if len(eoc_rec.history) > 1: + if eoc_rec.order_estimate() < case.qbx_order - order_tol: + good = False + + assert good + + # }}} + +# }}} + + +# You can test individual routines by typing +# $ python test_maxwell.py 'test_routine()' + +if __name__ == "__main__": + import sys + if len(sys.argv) > 1: + exec(sys.argv[1]) + else: + from pytest import main + main([__file__]) + +# vim: fdm=marker