Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 2941 additions and 5511 deletions
"""Base classes for operators."""
from __future__ import division
from __future__ import absolute_import
__copyright__ = "Copyright (C) 2007 Andreas Kloeckner"
__copyright__ = """
Copyright (C) 2007 Andreas Kloeckner
Copyright (C) 2021 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -25,8 +25,10 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from abc import ABC, abstractmethod
class Operator(object):
class Operator(ABC): # noqa: B024
"""A base class for Discontinuous Galerkin operators.
You may derive your own operators from this class, but, at present
@@ -34,30 +36,39 @@ class Operator(object):
documentation, to group related classes together in an inheritance
tree.
"""
pass
class TimeDependentOperator(Operator):
"""A base class for time-dependent Discontinuous Galerkin operators.
class HyperbolicOperator(Operator):
"""A base class for hyperbolic Discontinuous Galerkin operators."""
You may derive your own operators from this class, but, at present
this class provides no functionality. Its function is merely as
documentation, to group related classes together in an inheritance
tree.
"""
pass
@abstractmethod
def max_characteristic_velocity(self, actx, **kwargs):
r"""Return a maximum characteristic wavespeed for the operator.
:arg actx: a :class:`arraycontext.ArrayContext`.
:arg \**kwargs: Optional keyword arguments for determining the
max characteristic velocity of the operator.
class HyperbolicOperator(Operator):
"""A base class for hyperbolic Discontinuous Galerkin operators."""
Return a :class:`~meshmode.dof_array.DOFArray` or scalar
representing the (local or global) maximal characteristic velocity of
the operator.
"""
def estimate_rk4_timestep(self, actx, dcoll, **kwargs):
r"""Estimate the largest stable timestep for an RK4 method.
def estimate_rk4_timestep(self, discr, t=None, fields=None):
u"""Estimate the largest stable timestep for an RK4 method.
:arg actx: a :class:`arraycontext.ArrayContext`.
:arg dcoll: a :class:`grudge.discretization.DiscretizationCollection`.
:arg \**kwargs: Optional keyword arguments for determining the
max characteristic velocity of the operator. These are passed
to :meth:`max_characteristic_velocity`.
"""
import grudge.op as op
from grudge.dt_utils import characteristic_lengthscales
wavespeeds = self.max_characteristic_velocity(actx, **kwargs)
local_timesteps = (
characteristic_lengthscales(actx, dcoll) / wavespeeds
)
from grudge.dt_finding import (
dt_non_geometric_factor,
dt_geometric_factor)
return 1 / self.max_eigenvalue(t, fields, discr) \
* (dt_non_geometric_factor(discr)
* dt_geometric_factor(discr))
return op.nodal_min(dcoll, "vol", local_timesteps)
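# A minimal standalone sketch (plain numpy, illustrative names, not the
# grudge API) of the timestep heuristic used above: the elementwise estimate
# is a characteristic length scale divided by the max wavespeed, and the
# global estimate is its minimum.
import numpy as np

def rk4_dt_sketch(char_lengthscales, max_wavespeed):
    # elementwise timestep estimate, then global minimum
    return np.min(char_lengthscales / max_wavespeed)

rk4_dt_sketch(np.array([0.1, 0.05, 0.2]), 2.0)  # -> 0.025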
# -*- coding: utf8 -*-
"""Operators modeling advective phenomena."""
from __future__ import division
from __future__ import absolute_import
__copyright__ = "Copyright (C) 2009 Andreas Kloeckner"
__copyright__ = """
Copyright (C) 2009-2017 Andreas Kloeckner, Bogdan Enache
Copyright (C) 2021 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -27,384 +26,355 @@ THE SOFTWARE.
"""
import types
import numpy as np
import numpy
import numpy.linalg as la
import grudge.data
import grudge.geometry as geo
import grudge.op as op
from grudge.models import HyperbolicOperator
from grudge.second_order import CentralSecondDerivative
# {{{ constant-coefficient advection ------------------------------------------
class AdvectionOperatorBase(HyperbolicOperator):
flux_types = [
"central",
"upwind",
"lf"
]
def __init__(self, v,
inflow_tag="inflow",
inflow_u=grudge.data.make_tdep_constant(0),
outflow_tag="outflow",
flux_type="central"
):
self.dimensions = len(v)
self.v = v
self.inflow_tag = inflow_tag
self.inflow_u = inflow_u
self.outflow_tag = outflow_tag
self.flux_type = flux_type
# {{{ fluxes
def weak_flux(self):
from grudge.flux import make_normal, FluxScalarPlaceholder
from pymbolic.primitives import IfPositive
u = FluxScalarPlaceholder(0)
normal = make_normal(self.dimensions)
if self.flux_type == "central":
return u.avg*numpy.dot(normal, self.v)
elif self.flux_type == "lf":
return u.avg*numpy.dot(normal, self.v) \
+ 0.5*la.norm(self.v)*(u.int - u.ext)
elif self.flux_type == "upwind":
return (numpy.dot(normal, self.v)*
IfPositive(numpy.dot(normal, self.v),
u.int, # outflow
u.ext, # inflow
))
else:
raise ValueError("invalid flux type")
def max_eigenvalue(self, t=None, fields=None, discr=None):
return la.norm(self.v)
def bind(self, discr):
compiled_sym_operator = discr.compile(self.op_template())
from grudge.mesh import check_bc_coverage
check_bc_coverage(discr.mesh, [self.inflow_tag, self.outflow_tag])
def advection_weak_flux(dcoll, flux_type, u_tpair, velocity):
r"""Compute the numerical flux for the advection operator
$(v \cdot \nabla)u$.
"""
actx = u_tpair.int.array_context
dd = u_tpair.dd
normal = geo.normal(actx, dcoll, dd)
v_dot_n = np.dot(velocity, normal)
flux_type = flux_type.lower()
if flux_type == "central":
return u_tpair.avg * v_dot_n
elif flux_type == "lf":
norm_v = np.sqrt(sum(velocity**2))
return u_tpair.avg * v_dot_n + 0.5 * norm_v * (u_tpair.int - u_tpair.ext)
elif flux_type == "upwind":
u_upwind = actx.np.where(v_dot_n > 0, u_tpair.int, u_tpair.ext)
return u_upwind * v_dot_n
else:
raise ValueError(f"flux '{flux_type}' is not implemented")
def rhs(t, u):
bc_in = self.inflow_u.boundary_interpolant(t, discr, self.inflow_tag)
return compiled_sym_operator(u=u, bc_in=bc_in)
# }}}
return rhs
def bind_interdomain(self,
my_discr, my_part_data,
nb_discr, nb_part_data):
from grudge.partition import compile_interdomain_flux
compiled_sym_operator, from_nb_indices = compile_interdomain_flux(
self.sym_operator(), "u", "nb_bdry_u",
my_discr, my_part_data, nb_discr, nb_part_data,
use_stupid_substitution=True)
# {{{ constant-coefficient advection
from grudge.tools import with_object_array_or_scalar, is_zero
class AdvectionOperatorBase(HyperbolicOperator):
flux_types = ("central", "upwind", "lf")
def nb_bdry_permute(fld):
if is_zero(fld):
return 0
else:
return fld[from_nb_indices]
def __init__(self, dcoll, v, inflow_u=None, flux_type="central"):
if flux_type not in self.flux_types:
raise ValueError(f"unknown flux type: '{flux_type}'")
def rhs(t, u, u_neighbor):
return compiled_sym_operator(u=u,
nb_bdry_u=with_object_array_or_scalar(nb_bdry_permute, u_neighbor))
if inflow_u is not None:
if not isinstance(inflow_u, types.LambdaType):
raise ValueError(
"A specified inflow_u must be a lambda function of time `t`"
)
return rhs
self.dcoll = dcoll
self.v = v
self.inflow_u = inflow_u
self.flux_type = flux_type
def weak_flux(self, u_tpair):
return advection_weak_flux(self.dcoll, self.flux_type, u_tpair, self.v)
def max_characteristic_velocity(self, actx, **kwargs):
return sum(v_i**2 for v_i in self.v)**0.5
class StrongAdvectionOperator(AdvectionOperatorBase):
def flux(self):
from grudge.flux import make_normal, FluxScalarPlaceholder
def flux(self, u_tpair):
actx = u_tpair.int.array_context
dd = u_tpair.dd
normal = geo.normal(actx, self.dcoll, dd)
v_dot_normal = np.dot(self.v, normal)
u = FluxScalarPlaceholder(0)
normal = make_normal(self.dimensions)
return u_tpair.int * v_dot_normal - self.weak_flux(u_tpair)
return u.int * numpy.dot(normal, self.v) - self.weak_flux()
def operator(self, t, u):
from meshmode.mesh import BTAG_ALL
def sym_operator(self):
from grudge.symbolic import Field, BoundaryPair, \
get_flux_operator, make_nabla, InverseMassOperator
dcoll = self.dcoll
u = Field("u")
bc_in = Field("bc_in")
def flux(tpair):
return op.project(dcoll, tpair.dd, "all_faces", self.flux(tpair))
nabla = make_nabla(self.dimensions)
m_inv = InverseMassOperator()
if self.inflow_u is not None:
from grudge.dof_desc import as_dofdesc
flux_op = get_flux_operator(self.flux())
inflow_flux = flux(op.bv_trace_pair(dcoll,
as_dofdesc(BTAG_ALL),
interior=u,
exterior=self.inflow_u(t)))
else:
inflow_flux = 0
return (
-numpy.dot(self.v, nabla*u)
+ m_inv(
flux_op(u)
+ flux_op(BoundaryPair(u, bc_in, self.inflow_tag))))
-self.v.dot(op.local_grad(dcoll, u))
+ op.inverse_mass(
dcoll,
op.face_mass(
dcoll,
sum(flux(tpair) for tpair in op.interior_trace_pairs(dcoll, u))
+ inflow_flux
# FIXME: Add support for inflow/outflow tags
# + flux(op.bv_trace_pair(dcoll,
# self.inflow_tag,
# interior=u,
# exterior=bc_in))
# + flux(op.bv_trace_pair(dcoll,
# self.outflow_tag,
# interior=u,
# exterior=bc_out))
)
)
)
class WeakAdvectionOperator(AdvectionOperatorBase):
def flux(self):
return self.weak_flux()
def sym_operator(self):
from grudge.symbolic import (
Field,
BoundaryPair,
get_flux_operator,
make_stiffness_t,
InverseMassOperator,
RestrictToBoundary,
QuadratureGridUpsampler,
QuadratureInteriorFacesGridUpsampler)
u = Field("u")
def flux(self, u_tpair):
return self.weak_flux(u_tpair)
to_quad = QuadratureGridUpsampler("quad")
to_int_face_quad = QuadratureInteriorFacesGridUpsampler("quad")
def operator(self, t, u):
from meshmode.mesh import BTAG_ALL
# boundary conditions -------------------------------------------------
bc_in = Field("bc_in")
bc_out = RestrictToBoundary(self.outflow_tag)*u
dcoll = self.dcoll
stiff_t = make_stiffness_t(self.dimensions)
m_inv = InverseMassOperator()
def flux(tpair):
return op.project(dcoll, tpair.dd, "all_faces", self.flux(tpair))
flux_op = get_flux_operator(self.flux())
if self.inflow_u is not None:
from grudge.dof_desc import as_dofdesc
inflow_flux = flux(op.bv_trace_pair(dcoll,
as_dofdesc(BTAG_ALL),
interior=u,
exterior=self.inflow_u(t)))
else:
inflow_flux = 0
return m_inv(numpy.dot(self.v, stiff_t*u) - (
flux_op(u)
+ flux_op(BoundaryPair(u, bc_in, self.inflow_tag))
+ flux_op(BoundaryPair(u, bc_out, self.outflow_tag))
))
return (
op.inverse_mass(
dcoll,
np.dot(self.v, op.weak_local_grad(dcoll, u))
- op.face_mass(
dcoll,
sum(flux(tpair) for tpair in op.interior_trace_pairs(dcoll, u))
+ inflow_flux
# FIXME: Add support for inflow/outflow tags
# + flux(op.bv_trace_pair(dcoll,
# self.inflow_tag,
# interior=u,
# exterior=bc_in))
# + flux(op.bv_trace_pair(dcoll,
# self.outflow_tag,
# interior=u,
# exterior=bc_out))
)
)
)
# }}}
def to_quad_int_tpairs(dcoll, u, quad_tag):
from grudge.dof_desc import DISCR_TAG_QUAD
from grudge.trace_pair import TracePair
if issubclass(quad_tag, DISCR_TAG_QUAD):
return [
TracePair(
tpair.dd.with_discr_tag(quad_tag),
interior=op.project(
dcoll, tpair.dd,
tpair.dd.with_discr_tag(quad_tag), tpair.int
),
exterior=op.project(
dcoll, tpair.dd,
tpair.dd.with_discr_tag(quad_tag), tpair.ext
)
) for tpair in op.interior_trace_pairs(dcoll, u)
]
else:
return op.interior_trace_pairs(dcoll, u)
# {{{ variable-coefficient advection ------------------------------------------
class VariableCoefficientAdvectionOperator(HyperbolicOperator):
"""A class for space- and time-dependent DG advection operators.
:param advec_v: Adheres to the :class:`grudge.data.ITimeDependentGivenFunction`
interface and is an n-dimensional vector representing the velocity.
:param bc_u_f: Adheres to the :class:`grudge.data.ITimeDependentGivenFunction`
interface and is a scalar representing the boundary condition at all
boundary faces.
# {{{ variable-coefficient advection
Optionally allows diffusion.
"""
class VariableCoefficientAdvectionOperator(AdvectionOperatorBase):
def __init__(self, dcoll, v, inflow_u, flux_type="central", quad_tag=None):
super().__init__(dcoll, v, inflow_u, flux_type=flux_type)
flux_types = [
"central",
"upwind",
"lf"
]
def __init__(self,
dimensions,
advec_v,
bc_u_f="None",
flux_type="central",
diffusion_coeff=None,
diffusion_scheme=CentralSecondDerivative()):
self.dimensions = dimensions
self.advec_v = advec_v
self.bc_u_f = bc_u_f
self.flux_type = flux_type
self.diffusion_coeff = diffusion_coeff
self.diffusion_scheme = diffusion_scheme
# {{{ flux ----------------------------------------------------------------
def flux(self):
from grudge.flux import (
make_normal,
FluxVectorPlaceholder,
flux_max)
from pymbolic.primitives import IfPositive
d = self.dimensions
w = FluxVectorPlaceholder((1+d)+1)
u = w[0]
v = w[1:d+1]
c = w[1+d]
normal = make_normal(self.dimensions)
if self.flux_type == "central":
return (u.int*numpy.dot(v.int, normal )
+ u.ext*numpy.dot(v.ext, normal)) * 0.5
elif self.flux_type == "lf":
n_vint = numpy.dot(normal, v.int)
n_vext = numpy.dot(normal, v.ext)
return 0.5 * (n_vint * u.int + n_vext * u.ext) \
- 0.5 * (u.ext - u.int) \
* flux_max(c.int, c.ext)
elif self.flux_type == "upwind":
return (
IfPositive(numpy.dot(normal, v.avg),
numpy.dot(normal, v.int) * u.int, # outflow
numpy.dot(normal, v.ext) * u.ext, # inflow
))
else:
raise ValueError("invalid flux type")
# }}}
if quad_tag is None:
from grudge.dof_desc import DISCR_TAG_BASE
quad_tag = DISCR_TAG_BASE
def bind_characteristic_velocity(self, discr):
from grudge.symbolic.operators import (
ElementwiseMaxOperator)
from grudge.symbolic import make_sym_vector
velocity_vec = make_sym_vector("v", self.dimensions)
velocity = ElementwiseMaxOperator()(
numpy.dot(velocity_vec, velocity_vec)**0.5)
self.quad_tag = quad_tag
compiled = discr.compile(velocity)
def flux(self, u_tpair):
from grudge.dof_desc import DD_VOLUME_ALL
def do(t, u):
return compiled(v=self.advec_v.volume_interpolant(t, discr))
surf_v = op.project(self.dcoll, DD_VOLUME_ALL, u_tpair.dd, self.v)
return advection_weak_flux(self.dcoll, self.flux_type, u_tpair, surf_v)
return do
def operator(self, t, u):
from meshmode.discretization.connection import FACE_RESTR_ALL
from meshmode.mesh import BTAG_ALL
def sym_operator(self, with_sensor=False):
# {{{ operator preliminaries ------------------------------------------
from grudge.symbolic import (Field, BoundaryPair, get_flux_operator,
make_stiffness_t, InverseMassOperator, make_sym_vector,
ElementwiseMaxOperator, RestrictToBoundary)
from grudge.dof_desc import DD_VOLUME_ALL, DTAG_VOLUME_ALL, as_dofdesc
from grudge.symbolic.primitives import make_common_subexpression as cse
face_dd = as_dofdesc(FACE_RESTR_ALL, self.quad_tag)
boundary_dd = as_dofdesc(BTAG_ALL, self.quad_tag)
quad_dd = as_dofdesc(DTAG_VOLUME_ALL, self.quad_tag)
from grudge.symbolic.operators import (
QuadratureGridUpsampler,
QuadratureInteriorFacesGridUpsampler)
dcoll = self.dcoll
to_quad = QuadratureGridUpsampler("quad")
to_if_quad = QuadratureInteriorFacesGridUpsampler("quad")
def flux(tpair):
return op.project(dcoll, tpair.dd, face_dd, self.flux(tpair))
from grudge.tools import join_fields, \
ptwise_dot
def to_quad(arg):
return op.project(dcoll, DD_VOLUME_ALL, quad_dd, arg)
u = Field("u")
v = make_sym_vector("v", self.dimensions)
c = ElementwiseMaxOperator()(ptwise_dot(1, 1, v, v))
if self.inflow_u is not None:
inflow_flux = flux(op.bv_trace_pair(dcoll,
boundary_dd,
interior=u,
exterior=self.inflow_u(t)))
else:
inflow_flux = 0
quad_u = cse(to_quad(u))
quad_v = cse(to_quad(v))
quad_v = to_quad(self.v)
quad_u = to_quad(u)
w = join_fields(u, v, c)
quad_face_w = to_if_quad(w)
# }}}
return (
op.inverse_mass(
dcoll,
sum(op.weak_local_d_dx(dcoll, quad_dd, d, quad_u * quad_v[d])
for d in range(dcoll.ambient_dim))
- op.face_mass(
dcoll,
face_dd,
sum(flux(quad_tpair)
for quad_tpair in to_quad_int_tpairs(dcoll, u,
self.quad_tag))
+ inflow_flux
# FIXME: Add support for inflow/outflow tags
# + flux(op.bv_trace_pair(dcoll,
# self.inflow_tag,
# interior=u,
# exterior=bc_in))
# + flux(op.bv_trace_pair(dcoll,
# self.outflow_tag,
# interior=u,
# exterior=bc_out))
)
)
)
# {{{ boundary conditions ---------------------------------------------
# }}}
from grudge.mesh import BTAG_ALL
bc_c = to_quad(RestrictToBoundary(BTAG_ALL)(c))
bc_u = to_quad(Field("bc_u"))
bc_v = to_quad(RestrictToBoundary(BTAG_ALL)(v))
if self.bc_u_f is "None":
bc_w = join_fields(0, bc_v, bc_c)
else:
bc_w = join_fields(bc_u, bc_v, bc_c)
# {{{ closed surface advection
minv_st = make_stiffness_t(self.dimensions)
m_inv = InverseMassOperator()
def v_dot_n_tpair(actx, dcoll, velocity, trace_dd):
from meshmode.discretization.connection import FACE_RESTR_INTERIOR
flux_op = get_flux_operator(self.flux())
# }}}
from grudge.dof_desc import BoundaryDomainTag
from grudge.trace_pair import TracePair
# {{{ diffusion -------------------------------------------------------
if with_sensor or (
self.diffusion_coeff is not None and self.diffusion_coeff != 0):
if self.diffusion_coeff is None:
diffusion_coeff = 0
else:
diffusion_coeff = self.diffusion_coeff
normal = geo.normal(actx, dcoll, trace_dd.with_discr_tag(None))
v_dot_n = velocity.dot(normal)
i = op.project(dcoll, trace_dd.with_discr_tag(None), trace_dd, v_dot_n)
if with_sensor:
diffusion_coeff += Field("sensor")
assert isinstance(trace_dd.domain_tag, BoundaryDomainTag)
if trace_dd.domain_tag.tag is FACE_RESTR_INTERIOR:
e = dcoll.opposite_face_connection(trace_dd.domain_tag)(i)
else:
raise ValueError(f"Unrecognized domain tag: {trace_dd.domain_tag}")
from grudge.second_order import SecondDerivativeTarget
return TracePair(trace_dd, interior=i, exterior=e)
# strong_form here allows IPDG to reuse the value of grad u.
grad_tgt = SecondDerivativeTarget(
self.dimensions, strong_form=True,
operand=u)
self.diffusion_scheme.grad(grad_tgt, bc_getter=None,
dirichlet_tags=[], neumann_tags=[])
def surface_advection_weak_flux(dcoll, flux_type, u_tpair, velocity):
actx = u_tpair.int.array_context
v_dot_n = v_dot_n_tpair(actx, dcoll, velocity, trace_dd=u_tpair.dd)
# NOTE: the normals in v_dot_n point to the exterior of their respective
# elements, so this is actually just an average
v_dot_n = 0.5 * (v_dot_n.int - v_dot_n.ext)
div_tgt = SecondDerivativeTarget(
self.dimensions, strong_form=False,
operand=diffusion_coeff*grad_tgt.minv_all)
flux_type = flux_type.lower()
if flux_type == "central":
return u_tpair.avg * v_dot_n
elif flux_type == "lf":
return (u_tpair.avg * v_dot_n
+ 0.5 * actx.np.fabs(v_dot_n) * (u_tpair.int - u_tpair.ext))
else:
raise ValueError(f"flux '{flux_type}' is not implemented")
self.diffusion_scheme.div(div_tgt,
bc_getter=None,
dirichlet_tags=[], neumann_tags=[])
diffusion_part = div_tgt.minv_all
else:
diffusion_part = 0
class SurfaceAdvectionOperator(AdvectionOperatorBase):
def __init__(self, dcoll, v, flux_type="central", quad_tag=None):
super().__init__(dcoll, v, inflow_u=None, flux_type=flux_type)
# }}}
if quad_tag is None:
from grudge.dof_desc import DISCR_TAG_BASE
quad_tag = DISCR_TAG_BASE
to_quad = QuadratureGridUpsampler("quad")
quad_u = cse(to_quad(u))
quad_v = cse(to_quad(v))
self.quad_tag = quad_tag
return m_inv(numpy.dot(minv_st, cse(quad_v*quad_u))
- (flux_op(quad_face_w)
+ flux_op(BoundaryPair(quad_face_w, bc_w, BTAG_ALL)))) \
+ diffusion_part
def flux(self, u_tpair):
from grudge.dof_desc import DD_VOLUME_ALL
def bind(self, discr, sensor=None):
compiled_sym_operator = discr.compile(
self.sym_operator(with_sensor=sensor is not None))
surf_v = op.project(self.dcoll, DD_VOLUME_ALL,
u_tpair.dd.with_discr_tag(None), self.v)
return surface_advection_weak_flux(self.dcoll,
self.flux_type,
u_tpair,
surf_v)
from grudge.mesh import check_bc_coverage, BTAG_ALL
check_bc_coverage(discr.mesh, [BTAG_ALL])
def operator(self, t, u):
from meshmode.discretization.connection import FACE_RESTR_ALL
def rhs(t, u):
kwargs = {}
if sensor is not None:
kwargs["sensor"] = sensor(t, u)
from grudge.dof_desc import DD_VOLUME_ALL, DTAG_VOLUME_ALL, as_dofdesc
if self.bc_u_f is not "None":
kwargs["bc_u"] = \
self.bc_u_f.boundary_interpolant(t, discr, tag=BTAG_ALL)
face_dd = as_dofdesc(FACE_RESTR_ALL, self.quad_tag)
quad_dd = as_dofdesc(DTAG_VOLUME_ALL, self.quad_tag)
return compiled_sym_operator(
u=u,
v=self.advec_v.volume_interpolant(t, discr),
**kwargs)
dcoll = self.dcoll
return rhs
def flux(tpair):
return op.project(dcoll, tpair.dd, face_dd, self.flux(tpair))
def max_eigenvalue(self, t, fields=None, discr=None):
# Gives the max eigenvalue of a vector of eigenvalues.
# As the velocity of each node is stored in the velocity vector field,
# a pointwise dot product of this vector has to be taken to get the
# magnitude of the velocity at each node. From this vector, the maximum
# value limits the timestep.
def to_quad(arg):
return op.project(dcoll, DD_VOLUME_ALL, quad_dd, arg)
from grudge.tools import ptwise_dot
v = self.advec_v.volume_interpolant(t, discr)
return discr.nodewise_max(ptwise_dot(1, 1, v, v)**0.5)
# }}}
quad_v = to_quad(self.v)
quad_u = to_quad(u)
return (
op.inverse_mass(
dcoll,
sum(op.weak_local_d_dx(dcoll, quad_dd, d, quad_u * quad_v[d])
for d in range(dcoll.ambient_dim))
- op.face_mass(
dcoll,
face_dd,
sum(flux(quad_tpair)
for quad_tpair in to_quad_int_tpairs(dcoll, u,
self.quad_tag))
)
)
)
# }}}
# vim: foldmethod=marker
# -*- coding: utf8 -*-
"""Burgers operator."""
from __future__ import division
from __future__ import absolute_import
__copyright__ = "Copyright (C) 2009 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from grudge.models import HyperbolicOperator
import numpy
from grudge.second_order import CentralSecondDerivative
class BurgersOperator(HyperbolicOperator):
def __init__(self, dimensions, viscosity=None,
viscosity_scheme=CentralSecondDerivative()):
# yes, you read that right--no BCs, 1D only.
# (well--you can run the operator on a 2D grid. If you must.)
self.dimensions = dimensions
self.viscosity = viscosity
self.viscosity_scheme = viscosity_scheme
def characteristic_velocity_optemplate(self, field):
from grudge.symbolic.operators import (
ElementwiseMaxOperator)
return ElementwiseMaxOperator()(field**2)**0.5
def bind_characteristic_velocity(self, discr):
from grudge.symbolic import Field
compiled = discr.compile(
self.characteristic_velocity_optemplate(
Field("u")))
def do(u):
return compiled(u=u)
return do
def sym_operator(self, with_sensor):
from grudge.symbolic import (
Field,
make_stiffness_t,
make_nabla,
InverseMassOperator,
ElementwiseMaxOperator,
get_flux_operator)
from grudge.symbolic.operators import (
QuadratureGridUpsampler,
QuadratureInteriorFacesGridUpsampler)
to_quad = QuadratureGridUpsampler("quad")
to_if_quad = QuadratureInteriorFacesGridUpsampler("quad")
u = Field("u")
u0 = Field("u0")
# boundary conditions -------------------------------------------------
minv_st = make_stiffness_t(self.dimensions)
nabla = make_nabla(self.dimensions)
m_inv = InverseMassOperator()
def flux(u):
return u**2/2
#return u0*u
emax_u = self.characteristic_velocity_optemplate(u)
from grudge.flux.tools import make_lax_friedrichs_flux
from pytools.obj_array import make_obj_array
num_flux = make_lax_friedrichs_flux(
#u0,
to_if_quad(emax_u),
make_obj_array([to_if_quad(u)]),
[make_obj_array([flux(to_if_quad(u))])],
[], strong=False)[0]
from grudge.second_order import SecondDerivativeTarget
if self.viscosity is not None or with_sensor:
viscosity_coeff = 0
if with_sensor:
viscosity_coeff += Field("sensor")
if isinstance(self.viscosity, float):
viscosity_coeff += self.viscosity
elif self.viscosity is None:
pass
else:
raise TypeError("unsupported type of viscosity coefficient")
# strong_form here allows IPDG to reuse the value of grad u.
grad_tgt = SecondDerivativeTarget(
self.dimensions, strong_form=True,
operand=u)
self.viscosity_scheme.grad(grad_tgt, bc_getter=None,
dirichlet_tags=[], neumann_tags=[])
div_tgt = SecondDerivativeTarget(
self.dimensions, strong_form=False,
operand=viscosity_coeff*grad_tgt.minv_all)
self.viscosity_scheme.div(div_tgt,
bc_getter=None,
dirichlet_tags=[], neumann_tags=[])
viscosity_bit = div_tgt.minv_all
else:
viscosity_bit = 0
return m_inv((minv_st[0](flux(to_quad(u)))) - num_flux) \
+ viscosity_bit
def bind(self, discr, u0=1, sensor=None):
compiled_sym_operator = discr.compile(
self.sym_operator(with_sensor=sensor is not None))
from grudge.mesh import check_bc_coverage
check_bc_coverage(discr.mesh, [])
def rhs(t, u):
kwargs = {}
if sensor is not None:
kwargs["sensor"] = sensor(u)
return compiled_sym_operator(u=u, u0=u0, **kwargs)
return rhs
def max_eigenvalue(self, t=None, fields=None, discr=None):
return discr.nodewise_max(fields)
# -*- coding: utf8 -*-
"""Operators modeling diffusive phenomena."""
from __future__ import division
from __future__ import absolute_import
__copyright__ = "Copyright (C) 2009 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy
import grudge.data
from grudge.models import TimeDependentOperator
from grudge.models.poisson import LaplacianOperatorBase
from grudge.second_order import CentralSecondDerivative
class DiffusionOperator(TimeDependentOperator, LaplacianOperatorBase):
def __init__(self, dimensions, diffusion_tensor=None,
dirichlet_bc=grudge.data.make_tdep_constant(0), dirichlet_tag="dirichlet",
neumann_bc=grudge.data.make_tdep_constant(0), neumann_tag="neumann",
scheme=CentralSecondDerivative()):
self.dimensions = dimensions
self.scheme = scheme
self.dirichlet_bc = dirichlet_bc
self.dirichlet_tag = dirichlet_tag
self.neumann_bc = neumann_bc
self.neumann_tag = neumann_tag
if diffusion_tensor is None:
diffusion_tensor = numpy.eye(dimensions)
self.diffusion_tensor = diffusion_tensor
def bind(self, discr):
"""Return a :class:`BoundPoissonOperator`."""
assert self.dimensions == discr.dimensions
from grudge.mesh import check_bc_coverage
check_bc_coverage(discr.mesh, [self.dirichlet_tag, self.neumann_tag])
return BoundDiffusionOperator(self, discr)
def estimate_timestep(self, discr,
stepper=None, stepper_class=None, stepper_args=None,
t=None, fields=None):
u"""Estimate the largest stable timestep, given a time stepper
`stepper_class`. If none is given, RK4 is assumed.
"""
rk4_dt = 0.2 \
* (discr.dt_non_geometric_factor()
* discr.dt_geometric_factor())**2
from grudge.timestep.stability import \
approximate_rk4_relative_imag_stability_region
return rk4_dt * approximate_rk4_relative_imag_stability_region(
stepper, stepper_class, stepper_args)
class BoundDiffusionOperator(grudge.iterative.OperatorBase):
"""Returned by :meth:`DiffusionOperator.bind`."""
def __init__(self, diffusion_op, discr):
grudge.iterative.OperatorBase.__init__(self)
self.discr = discr
dop = self.diffusion_op = diffusion_op
op = dop.sym_operator(apply_minv=True)
self.compiled_op = discr.compile(op)
# Check whether use of Poincaré mean-value method is required.
# (for pure Neumann or pure periodic)
from grudge.mesh import BTAG_ALL
self.poincare_mean_value_hack = (
len(self.discr.get_boundary(BTAG_ALL).nodes)
== len(self.discr.get_boundary(diffusion_op.neumann_tag).nodes))
def __call__(self, t, u):
dop = self.diffusion_op
context = {"u": u}
if not isinstance(self.diffusion_op.diffusion_tensor, numpy.ndarray):
self.diffusion = dop.diffusion_tensor.volume_interpolant(t, self.discr)
self.neu_diff = dop.diffusion_tensor.boundary_interpolant(
t, self.discr, dop.neumann_tag)
context["diffusion"] = self.diffusion
context["neumann_diffusion"] = self.neu_diff
if not self.discr.get_boundary(dop.dirichlet_tag).is_empty():
context["dir_bc"] = dop.dirichlet_bc.boundary_interpolant(
t, self.discr, dop.dirichlet_tag)
if not self.discr.get_boundary(dop.neumann_tag).is_empty():
context["neu_bc"] = dop.neumann_bc.boundary_interpolant(
t, self.discr, dop.neumann_tag)
return self.compiled_op(**context)
# -*- coding: utf8 -*-
"""grudge operators modelling electromagnetic phenomena."""
from __future__ import division
from __future__ import absolute_import
from six.moves import range
__copyright__ = "Copyright (C) 2007, 2010 Andreas Kloeckner, David Powell"
__copyright__ = """
Copyright (C) 2007-2017 Andreas Kloeckner
Copyright (C) 2010 David Powell
Copyright (C) 2017 Bogdan Enache
Copyright (C) 2021 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -27,18 +27,140 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pytools import memoize_method
from grudge.models import HyperbolicOperator
from grudge.symbolic.primitives import make_common_subexpression as cse
import numpy as np
from arraycontext import get_container_context_recursively
from meshmode.mesh import BTAG_ALL, BTAG_NONE
from pytools.obj_array import join_fields, make_obj_array
from pytools import levi_civita, memoize_method
from pytools.obj_array import flat_obj_array, make_obj_array
import grudge.geometry as geo
import grudge.op as op
from grudge.models import HyperbolicOperator
# {{{ helpers
# NOTE: Hack for getting the derivative operators to play nice
# with grudge.tools.SubsettableCrossProduct
class _Dx:
def __init__(self, dcoll, i):
self.dcoll = dcoll
self.i = i
def __mul__(self, other):
return op.local_d_dx(self.dcoll, self.i, other)
def is_zero(x):
# DO NOT try to replace this with an attempted "== 0" comparison.
# This will become an elementwise numpy operation and not do what
# you want.
if np.isscalar(x):
return x == 0
else:
return False
def count_subset(subset):
from pytools import len_iterable
return len_iterable(uc for uc in subset if uc)
def partial_to_all_subset_indices(subsets, base=0):
"""Takes a sequence of bools and generates it into an array of indices
to be used to insert the subset into the full set.
Example:
>>> list(partial_to_all_subset_indices([[False, True, True], [True,False,True]]))
[array([0 1]), array([2 3]
"""
idx = base
for subset in subsets:
result = []
for is_in in subset:
if is_in:
result.append(idx)
idx += 1
yield np.array(result, dtype=np.intp)
# }}}
# TODO: Check PML
# {{{ SubsettableCrossProduct
class SubsettableCrossProduct:
"""A cross product that can operate on an arbitrary subsets of its
two operands and return an arbitrary subset of its result.
"""
full_subset = (True, True, True)
def __init__(self, op1_subset=full_subset, op2_subset=full_subset,
result_subset=full_subset):
"""Construct a subset-able cross product.
:param op1_subset: The subset of indices of operand 1 to be taken into
account. Given as a 3-sequence of bools.
:param op2_subset: The subset of indices of operand 2 to be taken into
account. Given as a 3-sequence of bools.
:param result_subset: The subset of indices of the result that are
calculated. Given as a 3-sequence of bools.
"""
def subset_indices(subset):
return [i for i, use_component in enumerate(subset)
if use_component]
self.op1_subset = op1_subset
self.op2_subset = op2_subset
self.result_subset = result_subset
import pymbolic
op1 = pymbolic.var("x")
op2 = pymbolic.var("y")
self.functions = []
self.component_lcjk = []
for i, use_component in enumerate(result_subset):
if use_component:
this_expr = 0
this_component = []
for j, j_real in enumerate(subset_indices(op1_subset)):
for k, k_real in enumerate(subset_indices(op2_subset)):
lc = levi_civita((i, j_real, k_real))
if lc != 0:
this_expr += lc*op1[j]*op2[k]
this_component.append((lc, j, k))
self.functions.append(pymbolic.compile(this_expr,
variables=[op1, op2]))
self.component_lcjk.append(this_component)
def __call__(self, x, y, three_mult=None):
"""Compute the subsetted cross product on the indexables *x* and *y*.
:param three_mult: a function of three arguments *sign, xj, yk*
used in place of the product *sign*xj*yk*. Defaults to just this
product if not given.
"""
from pytools.obj_array import flat_obj_array
if three_mult is None:
return flat_obj_array(*[f(x, y) for f in self.functions])
else:
return flat_obj_array(
*[sum(three_mult(lc, x[j], y[k]) for lc, j, k in lcjk)
for lcjk in self.component_lcjk])
cross = SubsettableCrossProduct()
# }}}
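# A quick plain-numpy illustration of what the subsetting buys: the 2D TM/TE
# operators below only need the z component of a cross product of two
# in-plane vectors (values here are arbitrary):
import numpy as np
a = np.array([1.0, 2.0, 0.0])   # operand restricted to (x, y)
b = np.array([3.0, 4.0, 0.0])
z_only = np.cross(a, b)[2]      # full cross product, keep only z: 1*4 - 2*3 = -2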
# {{{ MaxwellOperator
class MaxwellOperator(HyperbolicOperator):
"""A 3D Maxwell operator which supports fixed or variable
"""A strong-form 3D Maxwell operator which supports fixed or variable
isotropic, non-dispersive, positive epsilon and mu.
Field order is [Ex Ey Ez Hx Hy Hz].
@@ -46,7 +168,7 @@ class MaxwellOperator(HyperbolicOperator):
_default_dimensions = 3
def __init__(self, epsilon, mu,
def __init__(self, dcoll, epsilon, mu,
flux_type,
bdry_flux_type=None,
pec_tag=BTAG_ALL,
@@ -69,6 +191,7 @@ class MaxwellOperator(HyperbolicOperator):
boundary condition
"""
self.dcoll = dcoll
self.dimensions = dimensions or self._default_dimensions
space_subset = [True]*self.dimensions + [False]*(3-self.dimensions)
@@ -76,7 +199,6 @@ class MaxwellOperator(HyperbolicOperator):
e_subset = self.get_eh_subset()[0:3]
h_subset = self.get_eh_subset()[3:6]
from grudge.tools import SubsettableCrossProduct
self.space_cross_e = SubsettableCrossProduct(
op1_subset=space_subset,
op2_subset=e_subset,
@@ -106,171 +228,135 @@ class MaxwellOperator(HyperbolicOperator):
self.current = current
self.incident_bc_data = incident_bc
@property
def c(self):
from warnings import warn
warn("MaxwellOperator.c is deprecated", DeprecationWarning)
if not self.fixed_material:
raise RuntimeError("Cannot compute speed of light "
"for non-constant material")
return 1/(self.mu*self.epsilon)**0.5
def flux(self, flux_type):
"""The template for the numerical flux for variable coefficients.
def flux(self, wtpair):
"""The numerical flux for variable coefficients.
:param flux_type: can be in [0,1] for anything between central and upwind,
or "lf" for Lax-Friedrichs.
As per Hesthaven and Warburton page 433.
"""
from grudge.flux import (make_normal, FluxVectorPlaceholder,
FluxConstantPlaceholder)
normal = make_normal(self.dimensions)
actx = get_container_context_recursively(wtpair)
normal = geo.normal(actx, self.dcoll, wtpair.dd)
if self.fixed_material:
from grudge.tools import count_subset
w = FluxVectorPlaceholder(count_subset(self.get_eh_subset()))
e, h = self.split_eh(wtpair)
epsilon = self.epsilon
mu = self.mu
else:
raise NotImplementedError("only fixed material supported for now")
e, h = self.split_eh(w)
epsilon = FluxConstantPlaceholder(self.epsilon)
mu = FluxConstantPlaceholder(self.mu)
Z_int = (mu/epsilon)**0.5 # noqa: N806
Y_int = 1/Z_int # noqa: N806
Z_ext = (mu/epsilon)**0.5 # noqa: N806
Y_ext = 1/Z_ext # noqa: N806
else:
from grudge.tools import count_subset
w = FluxVectorPlaceholder(count_subset(self.get_eh_subset())+2)
epsilon, mu, e, h = self.split_eps_mu_eh(w)
Z_int = (mu.int/epsilon.int)**0.5
Y_int = 1/Z_int
Z_ext = (mu.ext/epsilon.ext)**0.5
Y_ext = 1/Z_ext
if flux_type == "lf":
if self.fixed_material:
max_c = (self.epsilon*self.mu)**(-0.5)
else:
from grudge.flux import Max
c_int = (epsilon.int*mu.int)**(-0.5)
c_ext = (epsilon.ext*mu.ext)**(-0.5)
max_c = Max(c_int, c_ext) # noqa
return join_fields(
if self.flux_type == "lf":
# if self.fixed_material:
# max_c = (self.epsilon*self.mu)**(-0.5)
return flat_obj_array(
# flux e,
1/2*(
-self.space_cross_h(normal, h.int-h.ext)
-self.space_cross_h(normal, h.ext-h.int)
# multiplication by epsilon undoes material divisor below
#-max_c*(epsilon.int*e.int - epsilon.ext*e.ext)
# -max_c*(epsilon*e.int - epsilon*e.ext)
),
# flux h
1/2*(
self.space_cross_e(normal, e.int-e.ext)
self.space_cross_e(normal, e.ext-e.int)
# multiplication by mu undoes material divisor below
#-max_c*(mu.int*h.int - mu.ext*h.ext)
# -max_c*(mu*h.int - mu*h.ext)
))
elif isinstance(flux_type, (int, float)):
elif isinstance(self.flux_type, int | float):
# see doc/maxima/maxwell.mac
return join_fields(
return flat_obj_array(
# flux e,
(
-1/(Z_int+Z_ext)*self.space_cross_h(normal,
Z_ext*(h.int-h.ext)
- flux_type*self.space_cross_e(normal, e.int-e.ext))
Z_ext*(h.ext-h.int)
- self.flux_type*self.space_cross_e(normal, e.ext-e.int))
),
# flux h
(
1/(Y_int + Y_ext)*self.space_cross_e(normal,
Y_ext*(e.int-e.ext)
+ flux_type*self.space_cross_h(normal, h.int-h.ext))
Y_ext*(e.ext-e.int)
+ self.flux_type*self.space_cross_h(normal, h.ext-h.int))
),
)
else:
raise ValueError("maxwell: invalid flux_type (%s)"
% self.flux_type)
raise ValueError(f"maxwell: invalid flux_type ({self.flux_type})")
def local_derivatives(self, w=None):
def local_derivatives(self, w):
"""Template for the spatial derivatives of the relevant components of
:math:`E` and :math:`H`
"""
e, h = self.split_eh(self.field_placeholder(w))
e, h = self.split_eh(w)
# Object array of derivative operators
nabla = flat_obj_array(
[_Dx(self.dcoll, i) for i in range(self.dimensions)]
)
def e_curl(field):
return self.space_cross_e(nabla, field)
return self.space_cross_e(nabla, field,
three_mult=lambda lc, x, y: lc * (x * y))
def h_curl(field):
return self.space_cross_h(nabla, field)
from grudge.symbolic import make_nabla
nabla = make_nabla(self.dimensions)
return self.space_cross_h(nabla, field,
three_mult=lambda lc, x, y: lc * (x * y))
# in conservation form: u_t + A u_x = 0
return join_fields(
return flat_obj_array(
(self.current - h_curl(h)),
e_curl(e)
)
def field_placeholder(self, w=None):
"A placeholder for E and H."
from grudge.tools import count_subset
fld_cnt = count_subset(self.get_eh_subset())
if w is None:
from grudge.symbolic import make_sym_vector
w = make_sym_vector("w", fld_cnt)
return w
def pec_bc(self, w=None):
"Construct part of the flux operator template for PEC boundary conditions"
e, h = self.split_eh(self.field_placeholder(w))
def pec_bc(self, w):
"""Construct part of the flux operator template for PEC boundary conditions
"""
e, h = self.split_eh(w)
from grudge.symbolic import RestrictToBoundary
pec_e = RestrictToBoundary(self.pec_tag)(e)
pec_h = RestrictToBoundary(self.pec_tag)(h)
pec_e = op.project(self.dcoll, "vol", self.pec_tag, e)
pec_h = op.project(self.dcoll, "vol", self.pec_tag, h)
return join_fields(-pec_e, pec_h)
return flat_obj_array(-pec_e, pec_h)
def pmc_bc(self, w=None):
"Construct part of the flux operator template for PMC boundary conditions"
e, h = self.split_eh(self.field_placeholder(w))
def pmc_bc(self, w):
"""Construct part of the flux operator template for PMC boundary conditions
"""
e, h = self.split_eh(w)
from grudge.symbolic import RestrictToBoundary
pmc_e = RestrictToBoundary(self.pmc_tag)(e)
pmc_h = RestrictToBoundary(self.pmc_tag)(h)
pmc_e = op.project(self.dcoll, "vol", self.pmc_tag, e)
pmc_h = op.project(self.dcoll, "vol", self.pmc_tag, h)
return join_fields(pmc_e, -pmc_h)
return flat_obj_array(pmc_e, -pmc_h)
def absorbing_bc(self, w=None):
def absorbing_bc(self, w):
"""Construct part of the flux operator template for 1st order
absorbing boundary conditions.
"""
from grudge.symbolic import normal
absorb_normal = normal(self.absorb_tag, self.dimensions)
from grudge.symbolic import RestrictToBoundary, Field
actx = get_container_context_recursively(w)
absorb_normal = geo.normal(actx, self.dcoll, dd=self.absorb_tag)
e, h = self.split_eh(self.field_placeholder(w))
e, h = self.split_eh(w)
if self.fixed_material:
epsilon = self.epsilon
mu = self.mu
else:
epsilon = cse(
RestrictToBoundary(self.absorb_tag)(Field("epsilon")))
mu = cse(
RestrictToBoundary(self.absorb_tag)(Field("mu")))
raise NotImplementedError("only fixed material supported for now")
absorb_Z = (mu/epsilon)**0.5
absorb_Y = 1/absorb_Z
absorb_Z = (mu/epsilon)**0.5 # noqa: N806
absorb_Y = 1/absorb_Z # noqa: N806
absorb_e = RestrictToBoundary(self.absorb_tag)(e)
absorb_h = RestrictToBoundary(self.absorb_tag)(h)
absorb_e = op.project(self.dcoll, "vol", self.absorb_tag, e)
absorb_h = op.project(self.dcoll, "vol", self.absorb_tag, h)
bc = join_fields(
bc = flat_obj_array(
absorb_e + 1/2*(self.space_cross_h(absorb_normal, self.space_cross_e(
absorb_normal, absorb_e))
- absorb_Z*self.space_cross_h(absorb_normal, absorb_h)),
@@ -281,52 +367,27 @@ class MaxwellOperator(HyperbolicOperator):
return bc
def incident_bc(self, w=None):
"Flux terms for incident boundary conditions"
def incident_bc(self, w):
"""Flux terms for incident boundary conditions"""
# NOTE: Untested for inhomogeneous materials, but would usually be
# physically meaningless anyway (are there exceptions to this?)
e, h = self.split_eh(self.field_placeholder(w))
if not self.fixed_material:
from warnings import warn
if self.incident_tag != BTAG_NONE:
warn("Incident boundary conditions assume homogeneous"
" background material, results may be unphysical")
from grudge.tools import count_subset
e, h = self.split_eh(w)
fld_cnt = count_subset(self.get_eh_subset())
from grudge.tools import is_zero
incident_bc_data = self.incident_bc_data(self, e, h)
if is_zero(incident_bc_data):
return make_obj_array([0]*fld_cnt)
else:
return cse(-incident_bc_data)
return -incident_bc_data
def sym_operator(self, w=None):
def operator(self, t, w):
"""The full operator template - the high level description of
the Maxwell operator.
Combines the relevant operator templates for spatial
derivatives, flux, boundary conditions etc.
"""
w = self.field_placeholder(w)
if self.fixed_material:
flux_w = w
else:
epsilon = self.epsilon
mu = self.mu
flux_w = join_fields(epsilon, mu, w)
from grudge.symbolic import BoundaryPair, \
InverseMassOperator, get_flux_operator
flux_op = get_flux_operator(self.flux(self.flux_type))
bdry_flux_op = get_flux_operator(self.flux(self.bdry_flux_type))
from grudge.tools.indexing import count_subset
elec_components = count_subset(self.get_eh_subset()[0:3])
mag_components = count_subset(self.get_eh_subset()[3:6])
@@ -335,9 +396,7 @@ class MaxwellOperator(HyperbolicOperator):
material_divisor = (
[self.epsilon]*elec_components+[self.mu]*mag_components)
else:
material_divisor = join_fields(
[epsilon]*elec_components,
[mu]*mag_components)
raise NotImplementedError("only fixed material supported for now")
tags_and_bcs = [
(self.pec_tag, self.pec_bc(w)),
@@ -346,67 +405,25 @@ class MaxwellOperator(HyperbolicOperator):
(self.incident_tag, self.incident_bc(w)),
]
def make_flux_bc_vector(tag, bc):
if self.fixed_material:
return bc
else:
from grudge.symbolic import RestrictToBoundary
return join_fields(
cse(RestrictToBoundary(tag)(epsilon)),
cse(RestrictToBoundary(tag)(mu)),
bc)
return (
- self.local_derivatives(w)
+ InverseMassOperator()(
flux_op(flux_w)
+ sum(
bdry_flux_op(BoundaryPair(
flux_w, make_flux_bc_vector(tag, bc), tag))
for tag, bc in tags_and_bcs))
) / material_divisor
def bind(self, discr):
"Convert the abstract operator template into compiled code."
from grudge.mesh import check_bc_coverage
check_bc_coverage(discr.mesh, [
self.pec_tag, self.absorb_tag, self.incident_tag])
compiled_sym_operator = discr.compile(self.op_template())
def rhs(t, w, **extra_context):
kwargs = {}
kwargs.update(extra_context)
return compiled_sym_operator(w=w, t=t, **kwargs)
return rhs
def assemble_eh(self, e=None, h=None, discr=None):
"Combines separate E and H vectors into a single array."
if discr is None:
def zero():
return 0
else:
def zero():
return discr.volume_zeros()
from grudge.tools import count_subset
e_components = count_subset(self.get_eh_subset()[0:3])
h_components = count_subset(self.get_eh_subset()[3:6])
def default_fld(fld, comp):
if fld is None:
return [zero() for i in range(comp)]
else:
return fld
dcoll = self.dcoll
e = default_fld(e, e_components)
h = default_fld(h, h_components)
def flux(pair):
return op.project(dcoll, pair.dd, "all_faces", self.flux(pair))
return join_fields(e, h)
from grudge.dof_desc import as_dofdesc
assemble_fields = assemble_eh
return (
- self.local_derivatives(w)
- op.inverse_mass(
dcoll,
op.face_mass(
dcoll,
sum(flux(tpair) for tpair in op.interior_trace_pairs(dcoll, w))
+ sum(flux(op.bv_trace_pair(dcoll, as_dofdesc(tag), w, bc))
for tag, bc in tags_and_bcs)
)
)
) / material_divisor
@memoize_method
def partial_to_eh_subsets(self):
@@ -418,38 +435,14 @@ class MaxwellOperator(HyperbolicOperator):
e_subset = self.get_eh_subset()[0:3]
h_subset = self.get_eh_subset()[3:6]
from grudge.tools import partial_to_all_subset_indices
return tuple(partial_to_all_subset_indices(
[e_subset, h_subset]))
def split_eps_mu_eh(self, w):
"""Splits an array into epsilon, mu, E and H components.
Only used for fluxes.
"""
e_idx, h_idx = self.partial_to_eh_subsets()
epsilon, mu, e, h = w[[0]], w[[1]], w[e_idx+2], w[h_idx+2]
from grudge.flux import FluxVectorPlaceholder as FVP
if isinstance(w, FVP):
return (
FVP(scalars=epsilon),
FVP(scalars=mu),
FVP(scalars=e),
FVP(scalars=h))
else:
return epsilon, mu, make_obj_array(e), make_obj_array(h)
return tuple(partial_to_all_subset_indices([e_subset, h_subset]))
def split_eh(self, w):
"Splits an array into E and H components"
"""Splits an array into E and H components"""
e_idx, h_idx = self.partial_to_eh_subsets()
e, h = w[e_idx], w[h_idx]
from grudge.flux import FluxVectorPlaceholder as FVP
if isinstance(w, FVP):
return FVP(scalars=e), FVP(scalars=h)
else:
return make_obj_array(e), make_obj_array(h)
return e, h
def get_eh_subset(self):
"""Return a 6-tuple of :class:`bool` objects indicating whether field
@@ -458,24 +451,26 @@ class MaxwellOperator(HyperbolicOperator):
"""
return 6*(True,)
def max_eigenvalue_expr(self):
"""Return the largest eigenvalue of Maxwell's equations as a hyperbolic
system.
"""
from math import sqrt
def max_characteristic_velocity(self, actx, **kwargs):
if self.fixed_material:
return 1/sqrt(self.epsilon*self.mu) # a number
return 1/np.sqrt(self.epsilon*self.mu) # a number
else:
import grudge.symbolic as sym
return sym.NodalMax()(1/sym.CFunction("sqrt")(self.epsilon*self.mu))
return op.nodal_max(self.dcoll, "vol",
1 / actx.np.sqrt(self.epsilon * self.mu))
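# Sanity check of the fixed-material characteristic speed above with plain
# numpy: c = 1/sqrt(epsilon*mu); vacuum SI values recover the speed of light.
import numpy as np
eps0 = 8.8541878128e-12
mu0 = 4e-7*np.pi
c0 = 1/np.sqrt(eps0*mu0)        # ~2.998e8 m/s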
def check_bc_coverage(self, mesh):
from meshmode.mesh import check_bc_coverage
check_bc_coverage(mesh, [
self.pec_tag,
self.pmc_tag,
self.absorb_tag,
self.incident_tag,
])
# }}}
def max_eigenvalue(self, t, fields=None, discr=None, context={}):
if self.fixed_material:
return self.max_eigenvalue_expr()
else:
raise ValueError("max_eigenvalue is no longer supported for "
"variable-coefficient problems--use max_eigenvalue_expr")
# {{{ TMMaxwellOperator
class TMMaxwellOperator(MaxwellOperator):
"""A 2D TM Maxwell operator with PEC boundaries.
@@ -487,11 +482,13 @@ class TMMaxwellOperator(MaxwellOperator):
def get_eh_subset(self):
return (
(False, False, True) # only ez
+
(True, True, False) # hx and hy
(False, False, True, True, True, False) # ez, hx and hy
)
# }}}
# {{{ TEMaxwellOperator
class TEMaxwellOperator(MaxwellOperator):
"""A 2D TE Maxwell operator.
@@ -503,11 +500,13 @@ class TEMaxwellOperator(MaxwellOperator):
def get_eh_subset(self):
return (
(True, True, False) # ex and ey
+
(False, False, True) # only hz
(True, True, False, False, False, True) # ex and ey, only hz
)
# }}}
# {{{ TE1DMaxwellOperator
class TE1DMaxwellOperator(MaxwellOperator):
"""A 1D TE Maxwell operator.
@@ -519,11 +518,13 @@ class TE1DMaxwellOperator(MaxwellOperator):
def get_eh_subset(self):
return (
(True, True, False)
+
(False, False, True)
(True, True, False, False, False, True)
)
# }}}
# {{{ SourceFree1DMaxwellOperator
class SourceFree1DMaxwellOperator(MaxwellOperator):
"""A 1D TE Maxwell operator.
@@ -535,7 +536,73 @@ class SourceFree1DMaxwellOperator(MaxwellOperator):
def get_eh_subset(self):
return (
(False, True, False)
+
(False, False, True)
(False, True, False, False, False, True)
)
# }}}
# {{{ get_rectangular_cavity_mode
def get_rectangular_cavity_mode(actx, nodes, t, E_0, mode_indices): # noqa: N803
"""A rectangular TM cavity mode for a rectangle / cube
with one corner at the origin and the other at (1,1[,1])."""
dims = len(mode_indices)
if dims != 2 and dims != 3:
raise ValueError("Improper mode_indices dimensions")
factors = [n*np.pi for n in mode_indices]
kx, ky = factors[0:2]
if dims == 3:
kz = factors[2]
omega = np.sqrt(sum(f**2 for f in factors))
x = nodes[0]
y = nodes[1]
if dims == 3:
z = nodes[2]
zeros = 0*x
sx = actx.np.sin(kx*x)
cx = actx.np.cos(kx*x)
sy = actx.np.sin(ky*y)
cy = actx.np.cos(ky*y)
if dims == 2:
tfac = t * omega
result = flat_obj_array(
zeros,
zeros,
actx.np.sin(kx * x) * actx.np.sin(ky * y) * np.cos(tfac), # ez
(-ky * actx.np.sin(kx * x) * actx.np.cos(ky * y)
* np.sin(tfac) / omega), # hx
(kx * actx.np.cos(kx * x) * actx.np.sin(ky * y)
* np.sin(tfac) / omega), # hy
zeros,
)
elif dims == 3:
sz = actx.np.sin(kz*z)
cz = actx.np.cos(kz*z)
tdep = np.exp(-1j * omega * t)
gamma_squared = ky**2 + kx**2
result = flat_obj_array(
-kx * kz * E_0*cx*sy*sz*tdep / gamma_squared, # ex
-ky * kz * E_0*sx*cy*sz*tdep / gamma_squared, # ey
E_0 * sx*sy*cz*tdep, # ez
-1j * omega * ky*E_0*sx*cy*cz*tdep / gamma_squared, # hx
1j * omega * kx*E_0*cx*sy*cz*tdep / gamma_squared,
zeros,
)
else:
raise NotImplementedError("only 2D and 3D supported")
return result
# }}}
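# Point evaluation of the 2D TM (1, 1) cavity-mode formulas above with plain
# numpy (unit square, t = 0, sample point chosen arbitrarily):
import numpy as np
kx, ky = np.pi, np.pi
omega = np.sqrt(kx**2 + ky**2)
x, y, t = 0.25, 0.25, 0.0
ez = np.sin(kx*x)*np.sin(ky*y)*np.cos(omega*t)             # sin(pi/4)**2 = 0.5
hx = -ky*np.sin(kx*x)*np.cos(ky*y)*np.sin(omega*t)/omega   # 0 at t = 0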
# vim: foldmethod=marker
"""Grudge operators modeling compressible, inviscid flows (Euler)
Model definitions
-----------------
.. autoclass:: EulerOperator
Predefined initial conditions
-----------------------------
.. autofunction:: vortex_initial_condition
Helper routines and array containers
------------------------------------
.. autoclass:: ConservedEulerField
.. autofunction:: conservative_to_primitive_vars
.. autofunction:: compute_wavespeed
.. autofunction:: euler_volume_flux
.. autofunction:: euler_numerical_flux
"""
__copyright__ = """
Copyright (C) 2021 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
import numpy as np
from arraycontext import (
ArrayContext,
dataclass_array_container,
with_container_arithmetic,
)
from meshmode.dof_array import DOFArray
from pytools.obj_array import make_obj_array
import grudge.geometry as geo
import grudge.op as op
from grudge.discretization import DiscretizationCollection
from grudge.dof_desc import DISCR_TAG_BASE, DOFDesc, as_dofdesc
from grudge.models import HyperbolicOperator
from grudge.trace_pair import TracePair
# {{{ Array containers for the Euler model
@with_container_arithmetic(bcasts_across_obj_array=False,
container_types_bcast_across=(DOFArray, np.ndarray),
matmul=True,
rel_comparison=True,
)
@dataclass_array_container
@dataclass(frozen=True)
class ConservedEulerField:
# mass and energy become arrays when computing fluxes.
mass: DOFArray | np.ndarray
energy: DOFArray | np.ndarray
momentum: np.ndarray
# NOTE: disable numpy doing any array math
__array_ufunc__ = None
@property
def dim(self):
return len(self.momentum)
# }}}
# {{{ Predefined initial conditions for the Euler model
def vortex_initial_condition(
x_vec, t=0, center=5, mach_number=0.5, epsilon=1, gamma=1.4):
"""Initial condition adapted from Section 2 (equation 2) of:
K. Mattsson, M. Svärd, M. Carpenter, and J. Nordström (2006).
High-order accurate computations for unsteady aerodynamics.
`DOI <https://doi.org/10.1016/j.compfluid.2006.02.004>`__.
"""
x, y = x_vec
actx = x.array_context
fxyt = 1 - (((x - center) - t)**2 + y**2)
expterm = actx.np.exp(fxyt/2)
c = (epsilon**2 * (gamma - 1) * mach_number**2)/(8*np.pi**2)
u = 1 - (epsilon*y/(2*np.pi)) * expterm
v = ((epsilon*(x - center) - t)/(2*np.pi)) * expterm
velocity = make_obj_array([u, v])
rho = (1 - c * actx.np.exp(fxyt)) ** (1 / (gamma - 1))
p = (rho ** gamma)/(gamma * mach_number**2)
rhou = rho * velocity
rhoe = p * (1/(gamma - 1)) + 0.5 * sum(rhou * velocity)
return ConservedEulerField(mass=rho, energy=rhoe, momentum=rhou)
# }}}
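# Point evaluation of the vortex formulas above with plain numpy, at the
# vortex center (x = center, y = 0, t = 0), using the default parameters:
import numpy as np
gamma, mach, eps, center = 1.4, 0.5, 1.0, 5.0
x, y, t = 5.0, 0.0, 0.0
f = 1 - (((x - center) - t)**2 + y**2)            # = 1 at the center
c = (eps**2*(gamma - 1)*mach**2)/(8*np.pi**2)
rho = (1 - c*np.exp(f))**(1/(gamma - 1))
p = rho**gamma/(gamma*mach**2)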
# {{{ Variable transformation and helper routines
def conservative_to_primitive_vars(cv_state: ConservedEulerField, gamma=1.4):
"""Converts from conserved variables (density, momentum, total energy)
into primitive variables (density, velocity, pressure).
:arg cv_state: A :class:`ConservedEulerField` containing the conserved
variables.
:arg gamma: The isentropic expansion factor for a single-species gas
(default set to 1.4).
:returns: A :class:`Tuple` containing the primitive variables:
(density, velocity, pressure).
"""
rho = cv_state.mass
rho_e = cv_state.energy
rho_u = cv_state.momentum
u = rho_u / rho
p = (gamma - 1) * (rho_e - 0.5 * sum(rho_u * u))
return rho, u, p
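# Round-trip check of the conversion above with plain numpy: build conserved
# variables from known primitives (rho = p = 1, u = (2, 0)), then recover them.
import numpy as np
gamma = 1.4
rho, p = 1.0, 1.0
u = np.array([2.0, 0.0])
rho_u = rho*u
rho_e = p/(gamma - 1) + 0.5*np.dot(rho_u, u)
u_back = rho_u/rho
p_back = (gamma - 1)*(rho_e - 0.5*np.dot(rho_u, u_back))   # recovers p = 1.0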
def compute_wavespeed(actx: ArrayContext, cv_state: ConservedEulerField, gamma=1.4):
"""Computes the total translational wavespeed.
:arg cv_state: A :class:`ConservedEulerField` containing the conserved
variables.
:arg gamma: The isentropic expansion factor for a single-species gas
(default set to 1.4).
:returns: A :class:`~meshmode.dof_array.DOFArray` containing local wavespeeds.
"""
rho, u, p = conservative_to_primitive_vars(cv_state, gamma=gamma)
return actx.np.sqrt(np.dot(u, u)) + actx.np.sqrt(gamma * (p / rho))
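# The wavespeed above is |u| + c with sound speed c = sqrt(gamma*p/rho);
# a plain-numpy spot check with rho = p = 1 and u = (2, 0):
import numpy as np
gamma, rho, p = 1.4, 1.0, 1.0
u = np.array([2.0, 0.0])
wavespeed = np.sqrt(np.dot(u, u)) + np.sqrt(gamma*p/rho)   # ~ 3.183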
# }}}
# {{{ Boundary condition types
class InviscidBCObject(metaclass=ABCMeta):
def __init__(self, *, prescribed_state=None) -> None:
self.prescribed_state = prescribed_state
@abstractmethod
def boundary_tpair(
self,
actx: ArrayContext,
dcoll: DiscretizationCollection,
dd_bc: DOFDesc,
state: ConservedEulerField, t=0):
pass
class PrescribedBC(InviscidBCObject):
def boundary_tpair(
self,
actx: ArrayContext,
dcoll: DiscretizationCollection,
dd_bc: DOFDesc,
state: ConservedEulerField, t=0):
dd_base = as_dofdesc("vol", DISCR_TAG_BASE)
return TracePair(
dd_bc,
interior=op.project(dcoll, dd_base, dd_bc, state),
exterior=self.prescribed_state(actx.thaw(dcoll.nodes(dd_bc)), t=t)
)
class InviscidWallBC(InviscidBCObject):
def boundary_tpair(
self,
actx: ArrayContext,
dcoll: DiscretizationCollection,
dd_bc: DOFDesc,
state: ConservedEulerField, t=0):
dd_base = as_dofdesc("vol", DISCR_TAG_BASE)
nhat = geo.normal(actx, dcoll, dd_bc)
interior = op.project(dcoll, dd_base, dd_bc, state)
return TracePair(
dd_bc,
interior=interior,
exterior=ConservedEulerField(
mass=interior.mass,
energy=interior.energy,
momentum=(
interior.momentum - 2.0 * nhat * np.dot(interior.momentum, nhat)
)
)
)
# }}}
# {{{ Euler operator
def euler_volume_flux(
dcoll: DiscretizationCollection, cv_state: ConservedEulerField, gamma=1.4):
"""Computes the (non-linear) volume flux for the
Euler operator.
:arg cv_state: A :class:`ConservedEulerField` containing the conserved
variables.
:arg gamma: The isentropic expansion factor for a single-species gas
(default set to 1.4).
:returns: A :class:`ConservedEulerField` containing the volume fluxes.
"""
from arraycontext import outer
rho, u, p = conservative_to_primitive_vars(cv_state, gamma=gamma)
return ConservedEulerField(
mass=cv_state.momentum,
energy=u * (cv_state.energy + p),
momentum=rho * outer(u, u) + np.eye(dcoll.dim, dtype=object) * p
)
def euler_numerical_flux(
actx: ArrayContext,
dcoll: DiscretizationCollection, tpair: TracePair,
gamma=1.4, lf_stabilization=False):
"""Computes the interface numerical flux for the Euler operator.
:arg tpair: A :class:`grudge.trace_pair.TracePair` containing the conserved
variables on the interior and exterior sides of element facets.
:arg gamma: The isentropic expansion factor for a single-species gas
(default set to 1.4).
:arg lf_stabilization: A boolean denoting whether to apply Lax-Friedrichs
dissipation.
:returns: A :class:`ConservedEulerField` containing the interface fluxes.
"""
from grudge.dof_desc import FACE_RESTR_ALL, VTAG_ALL, BoundaryDomainTag
dd_intfaces = tpair.dd
dd_allfaces = dd_intfaces.with_domain_tag(
BoundaryDomainTag(FACE_RESTR_ALL, VTAG_ALL)
)
q_ll = tpair.int
q_rr = tpair.ext
flux_tpair = TracePair(
tpair.dd,
interior=euler_volume_flux(dcoll, q_ll, gamma=gamma),
exterior=euler_volume_flux(dcoll, q_rr, gamma=gamma)
)
num_flux = flux_tpair.avg
normal = geo.normal(actx, dcoll, dd_intfaces)
if lf_stabilization:
from arraycontext import outer
# Compute jump penalization parameter
lam = actx.np.maximum(compute_wavespeed(actx, q_ll, gamma=gamma),
compute_wavespeed(actx, q_rr, gamma=gamma))
num_flux -= lam*outer(tpair.diff, normal)/2
return op.project(dcoll, dd_intfaces, dd_allfaces, num_flux @ normal)
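# With Lax-Friedrichs stabilization, the interface flux assembled above is
#     f*(q_int, q_ext) . n = {{F(q)}} . n - (lam/2) (q_ext - q_int),
# where {{.}} denotes the arithmetic average of the interior/exterior volume
# fluxes and lam is the larger of the two local wavespeeds. Since tpair.diff is
# (exterior - interior) and |n| = 1, subtracting lam*outer(diff, normal)/2 before
# contracting with the normal reproduces exactly this jump penalization.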
class EulerOperator(HyperbolicOperator):
r"""This operator discretizes the Euler equations:
.. math::
\partial_t \mathbf{Q} + \nabla\cdot\mathbf{F} = 0,
where :math:`\mathbf{Q}` is the state vector containing density, momentum, and
total energy, and :math:`\mathbf{F}` is the vector of inviscid fluxes
(see :func:`euler_volume_flux`)
"""
def __init__(self, dcoll: DiscretizationCollection,
bdry_conditions=None,
flux_type="lf",
gamma=1.4,
quadrature_tag=None):
self.dcoll = dcoll
self.bdry_conditions = bdry_conditions
self.flux_type = flux_type
self.gamma = gamma
self.lf_stabilization = flux_type == "lf"
self.qtag = quadrature_tag
def max_characteristic_velocity(self, actx: ArrayContext, **kwargs):
state = kwargs["state"]
return compute_wavespeed(actx, state, gamma=self.gamma)
def operator(self, actx: ArrayContext, t, q):
dcoll = self.dcoll
gamma = self.gamma
qtag = self.qtag
dq = as_dofdesc("vol", qtag)
df = as_dofdesc("all_faces", qtag)
def interp_to_quad(u):
return op.project(dcoll, "vol", dq, u)
# Compute volume fluxes
volume_fluxes = op.weak_local_div(
dcoll, dq,
interp_to_quad(euler_volume_flux(dcoll, q, gamma=gamma))
)
# Compute interior interface fluxes
interface_fluxes = (
sum(
euler_numerical_flux(
actx, dcoll,
op.tracepair_with_discr_tag(dcoll, qtag, tpair),
gamma=gamma,
lf_stabilization=self.lf_stabilization
) for tpair in op.interior_trace_pairs(dcoll, q)
)
)
# Compute boundary fluxes
if self.bdry_conditions is not None:
bc_fluxes = sum(
euler_numerical_flux(
actx, dcoll,
self.bdry_conditions[btag].boundary_tpair(
actx, dcoll,
as_dofdesc(btag, qtag),
q,
t=t
),
gamma=gamma,
lf_stabilization=self.lf_stabilization
) for btag in self.bdry_conditions
)
interface_fluxes = interface_fluxes + bc_fluxes
return op.inverse_mass(
dcoll,
volume_fluxes - op.face_mass(dcoll, df, interface_fluxes) # type: ignore[operator]
)
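# A minimal driver sketch (not part of this module; it assumes an array context
# ``actx``, a DiscretizationCollection ``dcoll``, an initial state ``q`` such as
# the vortex above, and the ``rk4_step`` helper as used in the grudge examples):
#
#     from grudge.shortcuts import rk4_step
#
#     euler_operator = EulerOperator(dcoll, flux_type="lf", gamma=1.4)
#
#     def rhs(t, q):
#         return euler_operator.operator(actx, t, q)
#
#     dt = actx.to_numpy(
#         euler_operator.estimate_rk4_timestep(actx, dcoll, state=q))
#     t = 0.0
#     for _ in range(10):
#         q = rk4_step(q, t, dt, rhs)
#         t += dt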
# }}}
# vim: foldmethod=marker
"""Operator for compressible Navier-Stokes and Euler equations."""
from __future__ import division
from __future__ import absolute_import
import six
from six.moves import range
__copyright__ = "Copyright (C) 2007 Hendrik Riedmann, Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy
import grudge.tools
import grudge.mesh
import grudge.data
from grudge.models import TimeDependentOperator
from pytools import Record
from grudge.tools import is_zero
from grudge.second_order import (
StabilizedCentralSecondDerivative,
CentralSecondDerivative,
IPDGSecondDerivative)
from grudge.symbolic.primitives import make_common_subexpression as cse
from pytools import memoize_method
from grudge.symbolic.tools import make_sym_vector
from pytools.obj_array import make_obj_array, join_fields
AXES = ["x", "y", "z", "w"]
from grudge.symbolic.operators import (
QuadratureGridUpsampler,
QuadratureInteriorFacesGridUpsampler)
to_vol_quad = QuadratureGridUpsampler("gasdyn_vol")
# It is recommended (though not required) that these two
# remain the same so that they can be computed together
# by the CUDA backend
to_int_face_quad = QuadratureInteriorFacesGridUpsampler("gasdyn_face")
to_bdry_quad = QuadratureGridUpsampler("gasdyn_face")
# {{{ equations of state
class EquationOfState(object):
def q_to_p(self, op, q):
raise NotImplementedError
def p_to_e(self, p, rho, u):
raise NotImplementedError
class GammaLawEOS(EquationOfState):
# FIXME Shouldn't gamma only occur in the equation of state?
# I.e. shouldn't all uses of gamma go through the EOS?
def __init__(self, gamma):
self.gamma = gamma
def q_to_p(self, op, q):
return (self.gamma-1)*(op.e(q)-0.5*numpy.dot(op.rho_u(q), op.u(q)))
def p_to_e(self, p, rho, u):
return p / (self.gamma - 1) + rho / 2 * numpy.dot(u, u)
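    # The two methods above encode the ideal-gas (gamma-law) relations
    #     p = (gamma - 1) * (E - rho |u|^2 / 2),   E = p/(gamma - 1) + rho |u|^2 / 2,
    # where E = op.e(q) is the total energy per unit volume; they are exact
    # inverses of each other.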
class PolytropeEOS(GammaLawEOS):
# inverse is same as superclass
def q_to_p(self, op, q):
return op.rho(q)**self.gamma
# }}}
class GasDynamicsOperator(TimeDependentOperator):
"""An nD Navier-Stokes and Euler operator.
see JSH, TW: Nodal Discontinuous Galerkin Methods p.320 and p.206
dq/dt = d/dx * (-F + tau_:1) + d/dy * (-G + tau_:2)
where e.g. in 2D
q = (rho, rho_u_x, rho_u_y, E)
F = (rho_u_x, rho_u_x^2 + p, rho_u_x * rho_u_y / rho, u_x * (E + p))
G = (rho_u_y, rho_u_x * rho_u_y / rho, rho_u_y^2 + p, u_y * (E + p))
tau_11 = mu * (2 * du/dx - 2/3 * (du/dx + dv/dy))
tau_12 = mu * (du/dy + dv/dx)
tau_21 = tau_12
tau_22 = mu * (2 * dv/dy - 2/3 * (du/dx + dv/dy))
tau_31 = u * tau_11 + v * tau_12
tau_32 = u * tau_21 + v * tau_22
For the heat flux:
q = -k * nabla * T
k = c_p * mu / Pr
Field order is [rho E rho_u_x rho_u_y ...].
"""
# {{{ initialization ------------------------------------------------------
def __init__(self, dimensions,
gamma=None, mu=0,
bc_inflow=None,
bc_outflow=None,
bc_noslip=None,
bc_supersonic_inflow=None,
prandtl=None, spec_gas_const=1.0,
equation_of_state=None,
inflow_tag="inflow",
outflow_tag="outflow",
noslip_tag="noslip",
wall_tag="wall",
supersonic_inflow_tag="supersonic_inflow",
supersonic_outflow_tag="supersonic_outflow",
source=None,
second_order_scheme=CentralSecondDerivative(),
artificial_viscosity_mode=None,
):
"""
:param source: should implement
:class:`grudge.data.IFieldDependentGivenFunction`
or be None.
        :param artificial_viscosity_mode: one of *None*, ``"cns"``,
            ``"diffusion"``, or ``"blended"``.
"""
from grudge.data import (
TimeConstantGivenFunction,
ConstantGivenFunction)
if gamma is not None:
if equation_of_state is not None:
raise ValueError("can only specify one of gamma and equation_of_state")
from warnings import warn
warn("argument gamma is deprecated in favor of equation_of_state",
DeprecationWarning, stacklevel=2)
equation_of_state = GammaLawEOS(gamma)
dull_bc = TimeConstantGivenFunction(
ConstantGivenFunction(make_obj_array(
[1, 1] + [0]*dimensions)))
if bc_inflow is None:
bc_inflow = dull_bc
if bc_outflow is None:
bc_outflow = dull_bc
if bc_noslip is None:
bc_noslip = dull_bc
if bc_supersonic_inflow is None:
bc_supersonic_inflow = dull_bc
self.dimensions = dimensions
self.prandtl = prandtl
self.spec_gas_const = spec_gas_const
self.mu = mu
self.bc_inflow = bc_inflow
self.bc_outflow = bc_outflow
self.bc_noslip = bc_noslip
self.bc_supersonic_inflow = bc_supersonic_inflow
self.inflow_tag = inflow_tag
self.outflow_tag = outflow_tag
self.noslip_tag = noslip_tag
self.wall_tag = wall_tag
self.supersonic_inflow_tag = supersonic_inflow_tag
self.supersonic_outflow_tag = supersonic_outflow_tag
self.source = source
self.equation_of_state = equation_of_state
self.second_order_scheme = second_order_scheme
if artificial_viscosity_mode not in [
"cns", "diffusion", "blended", None]:
raise ValueError("artificial_viscosity_mode has an invalid value")
self.artificial_viscosity_mode = artificial_viscosity_mode
# }}}
# {{{ conversions ---------------------------------------------------------
def state(self):
return make_sym_vector("q", self.dimensions+2)
@memoize_method
def volq_state(self):
return cse(to_vol_quad(self.state()), "vol_quad_state")
@memoize_method
def faceq_state(self):
return cse(to_int_face_quad(self.state()), "face_quad_state")
@memoize_method
def sensor(self):
from grudge.symbolic.primitives import Field
        sensor = Field("sensor")
        return sensor
def rho(self, q):
return q[0]
def e(self, q):
return q[1]
def rho_u(self, q):
return q[2:2+self.dimensions]
def u(self, q):
return make_obj_array([
rho_u_i/self.rho(q)
for rho_u_i in self.rho_u(q)])
    def p(self, q):
return self.equation_of_state.q_to_p(self, q)
def cse_u(self, q):
return cse(self.u(q), "u")
def cse_rho(self, q):
return cse(self.rho(q), "rho")
def cse_rho_u(self, q):
return cse(self.rho_u(q), "rho_u")
def cse_p(self, q):
return cse(self.p(q), "p")
def temperature(self, q):
c_v = 1 / (self.equation_of_state.gamma - 1) * self.spec_gas_const
return (self.e(q)/self.rho(q) - 0.5 * numpy.dot(self.u(q), self.u(q))) / c_v
def cse_temperature(self, q):
return cse(self.temperature(q), "temperature")
def get_mu(self, q, to_quad_op):
"""
:param to_quad_op: If not *None*, represents an operator which transforms
nodal values onto a quadrature grid on which the returned :math:`\mu`
needs to be represented. In that case, *q* is assumed to already be on the
same quadrature grid.
"""
if to_quad_op is None:
def to_quad_op(x):
return x
if self.mu == "sutherland":
# Sutherland's law: !!!not tested!!!
t_s = 110.4
mu_inf = 1.735e-5
result = cse(
mu_inf * self.cse_temperature(q) ** 1.5 * (1 + t_s)
/ (self.cse_temperature(q) + t_s),
"sutherland_mu")
else:
result = self.mu
if self.artificial_viscosity_mode == "cns":
mapped_sensor = self.sensor()
else:
mapped_sensor = None
if mapped_sensor is not None:
result = result + cse(to_quad_op(mapped_sensor), "quad_sensor")
return cse(result, "mu")
def primitive_to_conservative(self, prims, use_cses=True):
        if use_cses:
from grudge.symbolic.primitives import make_common_subexpression as cse
else:
def cse(x, name): return x
rho = prims[0]
p = prims[1]
u = prims[2:]
e = self.equation_of_state.p_to_e(p, rho, u)
return join_fields(
rho,
cse(e, "e"),
cse(rho * u, "rho_u"))
def conservative_to_primitive(self, q, use_cses=True):
if use_cses:
from grudge.symbolic.primitives import make_common_subexpression as cse
else:
def cse(x, name): return x
return join_fields(
self.rho(q),
self.p(q),
self.u(q))
def characteristic_velocity_optemplate(self, state):
from grudge.symbolic.operators import ElementwiseMaxOperator
from grudge.symbolic.primitives import CFunction
sqrt = CFunction("sqrt")
sound_speed = cse(sqrt(
self.equation_of_state.gamma*self.cse_p(state)/self.cse_rho(state)),
"sound_speed")
u = self.cse_u(state)
speed = cse(sqrt(numpy.dot(u, u)), "norm_u") + sound_speed
return ElementwiseMaxOperator()(speed)
def bind_characteristic_velocity(self, discr):
state = make_sym_vector("q", self.dimensions+2)
compiled = discr.compile(
self.characteristic_velocity_optemplate(state))
def do(q):
return compiled(q=q)
return do
# }}}
# {{{ helpers for second-order part ---------------------------------------
# {{{ compute gradient of state ---------------------------------------
def grad_of(self, var, faceq_var):
from grudge.second_order import SecondDerivativeTarget
grad_tgt = SecondDerivativeTarget(
self.dimensions, strong_form=False,
operand=var,
int_flux_operand=faceq_var,
bdry_flux_int_operand=faceq_var)
self.second_order_scheme.grad(grad_tgt,
bc_getter=self.get_boundary_condition_for,
dirichlet_tags=self.get_boundary_tags(),
neumann_tags=[])
return grad_tgt.minv_all
def grad_of_state(self):
dimensions = self.dimensions
state = self.state()
dq = numpy.zeros((len(state), dimensions), dtype=object)
for i in range(len(state)):
dq[i,:] = self.grad_of(
state[i], self.faceq_state()[i])
return dq
def grad_of_state_func(self, func, of_what_descr):
return cse(self.grad_of(
func(self.volq_state()),
func(self.faceq_state())),
"grad_"+of_what_descr)
# }}}
# {{{ viscous stress tensor
def tau(self, to_quad_op, state, mu=None):
faceq_state = self.faceq_state()
dimensions = self.dimensions
# {{{ compute gradient of u ---------------------------------------
# Use the product rule to compute the gradient of
# u from the gradient of (rho u). This ensures we don't
# compute the derivatives twice.
from pytools.obj_array import with_object_array_or_scalar
dq = with_object_array_or_scalar(
to_quad_op, self.grad_of_state())
q = cse(to_quad_op(state))
du = numpy.zeros((dimensions, dimensions), dtype=object)
for i in range(dimensions):
for j in range(dimensions):
du[i,j] = cse(
(dq[i+2,j] - self.cse_u(q)[i] * dq[0,j]) / self.rho(q),
"du%d_d%s" % (i, AXES[j]))
# }}}
# {{{ put together viscous stress tau -----------------------------
from pytools import delta
if mu is None:
mu = self.get_mu(q, to_quad_op)
tau = numpy.zeros((dimensions, dimensions), dtype=object)
for i in range(dimensions):
for j in range(dimensions):
tau[i,j] = cse(mu * cse(du[i,j] + du[j,i] -
2/self.dimensions * delta(i,j) * numpy.trace(du)),
"tau_%d%d" % (i, j))
return tau
# }}}
# }}}
# }}}
# {{{ heat conduction
def heat_conduction_coefficient(self, to_quad_op):
mu = self.get_mu(self.state(), to_quad_op)
if self.prandtl is None or numpy.isinf(self.prandtl):
return 0
eos = self.equation_of_state
return (mu / self.prandtl) * (eos.gamma / (eos.gamma-1))
def heat_conduction_grad(self, to_quad_op):
grad_p_over_rho = self.grad_of_state_func(
lambda state: self.p(state)/self.rho(state),
"p_over_rho")
return (self.heat_conduction_coefficient(to_quad_op)
* to_quad_op(grad_p_over_rho))
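    # Note: for an ideal gas p/rho = R*T, so grad(p/rho) = R * grad(T); multiplying
    # by (mu/Pr) * gamma/(gamma - 1) = c_p * mu / (Pr * R) therefore reproduces the
    # Fourier heat-flux term k * grad(T) with k = c_p * mu / Pr from the class
    # docstring.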
# }}}
# {{{ flux
def flux(self, q):
from pytools import delta
return [ # one entry for each flux direction
cse(join_fields(
# flux rho
self.rho_u(q)[i],
# flux E
cse(self.e(q)+self.cse_p(q))*self.cse_u(q)[i],
# flux rho_u
make_obj_array([
self.rho_u(q)[i]*self.cse_u(q)[j]
+ delta(i,j) * self.cse_p(q)
for j in range(self.dimensions)
])
), "%s_flux" % AXES[i])
for i in range(self.dimensions)]
# }}}
# {{{ boundary conditions ---------------------------------------------
def make_bc_info(self, bc_name, tag, state, state0=None):
"""
:param state0: The boundary 'free-stream' state around which the
BC is linearized.
"""
if state0 is None:
state0 = make_sym_vector(bc_name, self.dimensions+2)
state0 = cse(to_bdry_quad(state0))
rho0 = self.rho(state0)
p0 = self.cse_p(state0)
u0 = self.cse_u(state0)
c0 = (self.equation_of_state.gamma * p0 / rho0)**0.5
from grudge.symbolic import RestrictToBoundary
bdrize_op = RestrictToBoundary(tag)
class SingleBCInfo(Record):
pass
return SingleBCInfo(
rho0=rho0, p0=p0, u0=u0, c0=c0,
# notation: suffix "m" for "minus", i.e. "interior"
drhom=cse(self.rho(cse(to_bdry_quad(bdrize_op(state))))
- rho0, "drhom"),
dumvec=cse(self.cse_u(cse(to_bdry_quad(bdrize_op(state))))
- u0, "dumvec"),
dpm=cse(self.cse_p(cse(to_bdry_quad(bdrize_op(state))))
- p0, "dpm"))
def outflow_state(self, state):
from grudge.symbolic import make_normal
normal = make_normal(self.outflow_tag, self.dimensions)
bc = self.make_bc_info("bc_q_out", self.outflow_tag, state)
# see grudge/doc/maxima/euler.mac
return join_fields(
# bc rho
cse(bc.rho0
+ bc.drhom + numpy.dot(normal, bc.dumvec)*bc.rho0/(2*bc.c0)
- bc.dpm/(2*bc.c0*bc.c0), "bc_rho_outflow"),
# bc p
cse(bc.p0
+ bc.c0*bc.rho0*numpy.dot(normal, bc.dumvec)/2 + bc.dpm/2, "bc_p_outflow"),
# bc u
cse(bc.u0
+ bc.dumvec - normal*numpy.dot(normal, bc.dumvec)/2
+ bc.dpm*normal/(2*bc.c0*bc.rho0), "bc_u_outflow"))
def inflow_state_inner(self, normal, bc, name):
# see grudge/doc/maxima/euler.mac
return join_fields(
# bc rho
cse(bc.rho0
+ numpy.dot(normal, bc.dumvec)*bc.rho0/(2*bc.c0) + bc.dpm/(2*bc.c0*bc.c0), "bc_rho_"+name),
# bc p
cse(bc.p0
+ bc.c0*bc.rho0*numpy.dot(normal, bc.dumvec)/2 + bc.dpm/2, "bc_p_"+name),
# bc u
cse(bc.u0
+ normal*numpy.dot(normal, bc.dumvec)/2 + bc.dpm*normal/(2*bc.c0*bc.rho0), "bc_u_"+name))
def inflow_state(self, state):
from grudge.symbolic import make_normal
normal = make_normal(self.inflow_tag, self.dimensions)
bc = self.make_bc_info("bc_q_in", self.inflow_tag, state)
return self.inflow_state_inner(normal, bc, "inflow")
def noslip_state(self, state):
from grudge.symbolic import make_normal
state0 = join_fields(
make_sym_vector("bc_q_noslip", 2),
[0]*self.dimensions)
normal = make_normal(self.noslip_tag, self.dimensions)
bc = self.make_bc_info("bc_q_noslip", self.noslip_tag, state, state0)
return self.inflow_state_inner(normal, bc, "noslip")
def wall_state(self, state):
from grudge.symbolic import RestrictToBoundary
bc = RestrictToBoundary(self.wall_tag)(state)
wall_rho = self.rho(bc)
wall_e = self.e(bc) # <3 eve
wall_rho_u = self.rho_u(bc)
from grudge.symbolic import make_normal
normal = make_normal(self.wall_tag, self.dimensions)
return join_fields(
wall_rho,
wall_e,
wall_rho_u - 2*numpy.dot(wall_rho_u, normal) * normal)
@memoize_method
def get_primitive_boundary_conditions(self):
state = self.state()
return {
self.outflow_tag: self.outflow_state(state),
self.inflow_tag: self.inflow_state(state),
self.noslip_tag: self.noslip_state(state)
}
@memoize_method
def get_conservative_boundary_conditions(self):
state = self.state()
from grudge.symbolic import RestrictToBoundary
return {
self.supersonic_inflow_tag:
make_sym_vector("bc_q_supersonic_in", self.dimensions+2),
self.supersonic_outflow_tag:
RestrictToBoundary(self.supersonic_outflow_tag)(
(state)),
self.wall_tag: self.wall_state(state),
}
@memoize_method
def get_boundary_tags(self):
return (set(self.get_primitive_boundary_conditions().keys())
| set(self.get_conservative_boundary_conditions().keys()))
@memoize_method
def _normalize_expr(self, expr):
"""Normalize expressions for use as hash keys."""
from grudge.symbolic.mappers import (
QuadratureUpsamplerRemover,
CSERemover)
return CSERemover()(
QuadratureUpsamplerRemover({}, do_warn=False)(expr))
@memoize_method
def _get_norm_primitive_exprs(self):
return [
self._normalize_expr(expr) for expr in
self.conservative_to_primitive(self.state())
]
@memoize_method
def get_boundary_condition_for(self, tag, expr):
prim_bcs = self.get_primitive_boundary_conditions()
cons_bcs = self.get_conservative_boundary_conditions()
if tag in prim_bcs:
# BC is given in primitive variables, avoid converting
# to conservative and back.
try:
norm_expr = self._normalize_expr(expr)
prim_idx = self._get_norm_primitive_exprs().index(norm_expr)
except ValueError:
cbstate = self.primitive_to_conservative(
prim_bcs[tag])
else:
return prim_bcs[tag][prim_idx]
else:
# BC is given in conservative variables, no potential
# for optimization.
cbstate = to_bdry_quad(cons_bcs[tag])
# 'cbstate' is the boundary state in conservative variables.
from grudge.symbolic.mappers import QuadratureUpsamplerRemover
expr = QuadratureUpsamplerRemover({}, do_warn=False)(expr)
def subst_func(expr):
from pymbolic.primitives import Subscript, Variable
if isinstance(expr, Subscript):
assert (isinstance(expr.aggregate, Variable)
and expr.aggregate.name == "q")
return cbstate[expr.index]
elif isinstance(expr, Variable) and expr.name =="sensor":
from grudge.symbolic import RestrictToBoundary
result = RestrictToBoundary(tag)(self.sensor())
return cse(to_bdry_quad(result), "bdry_sensor")
from grudge.symbolic import SubstitutionMapper
return SubstitutionMapper(subst_func)(expr)
# }}}
# {{{ second order part
def div(self, vol_operand, int_face_operand):
from grudge.second_order import SecondDerivativeTarget
div_tgt = SecondDerivativeTarget(
self.dimensions, strong_form=False,
operand=vol_operand,
int_flux_operand=int_face_operand)
self.second_order_scheme.div(div_tgt,
bc_getter=self.get_boundary_condition_for,
dirichlet_tags=list(self.get_boundary_tags()),
neumann_tags=[])
return div_tgt.minv_all
def make_second_order_part(self):
state = self.state()
faceq_state = self.faceq_state()
volq_state = self.volq_state()
volq_tau_mat = self.tau(to_vol_quad, state)
faceq_tau_mat = self.tau(to_int_face_quad, state)
return join_fields(
0,
self.div(
numpy.sum(volq_tau_mat*self.cse_u(volq_state), axis=1)
+ self.heat_conduction_grad(to_vol_quad)
,
numpy.sum(faceq_tau_mat*self.cse_u(faceq_state), axis=1)
+ self.heat_conduction_grad(to_int_face_quad)
,
),
[
self.div(volq_tau_mat[i], faceq_tau_mat[i])
for i in range(self.dimensions)]
)
# }}}
# {{{ operator template ---------------------------------------------------
def make_extra_terms(self):
return 0
def sym_operator(self, sensor_scaling=None, viscosity_only=False):
u = self.cse_u
rho = self.cse_rho
rho_u = self.rho_u
p = self.p
e = self.e
# {{{ artificial diffusion
def make_artificial_diffusion():
if self.artificial_viscosity_mode not in ["diffusion"]:
return 0
dq = self.grad_of_state()
return make_obj_array([
self.div(
to_vol_quad(self.sensor())*to_vol_quad(dq[i]),
to_int_face_quad(self.sensor())*to_int_face_quad(dq[i]))
for i in range(dq.shape[0])])
# }}}
# {{{ state setup
volq_flux = self.flux(self.volq_state())
faceq_flux = self.flux(self.faceq_state())
from grudge.symbolic.primitives import CFunction
sqrt = CFunction("sqrt")
speed = self.characteristic_velocity_optemplate(self.state())
has_viscosity = not is_zero(self.get_mu(self.state(), to_quad_op=None))
# }}}
# {{{ operator assembly -----------------------------------------------
from grudge.flux.tools import make_lax_friedrichs_flux
from grudge.symbolic.operators import InverseMassOperator
from grudge.symbolic.tools import make_stiffness_t
primitive_bcs_as_quad_conservative = dict(
(tag, self.primitive_to_conservative(to_bdry_quad(bc)))
for tag, bc in
six.iteritems(self.get_primitive_boundary_conditions()))
def get_bc_tuple(tag):
state = self.state()
bc = make_obj_array([
self.get_boundary_condition_for(tag, s_i) for s_i in state])
return tag, bc, self.flux(bc)
first_order_part = InverseMassOperator()(
numpy.dot(make_stiffness_t(self.dimensions), volq_flux)
- make_lax_friedrichs_flux(
wave_speed=cse(to_int_face_quad(speed), "emax_c"),
state=self.faceq_state(), fluxes=faceq_flux,
bdry_tags_states_and_fluxes=[
get_bc_tuple(tag) for tag in self.get_boundary_tags()],
strong=False))
if viscosity_only:
first_order_part = 0*first_order_part
result = join_fields(
first_order_part
+ self.make_second_order_part()
+ make_artificial_diffusion()
+ self.make_extra_terms(),
speed)
if self.source is not None:
result = result + join_fields(
make_sym_vector("source_vect", len(self.state())),
# extra field for speed
0)
return result
# }}}
# }}}
# {{{ operator binding ----------------------------------------------------
def bind(self, discr, sensor=None, sensor_scaling=None, viscosity_only=False):
if (sensor is None and
self.artificial_viscosity_mode is not None):
raise ValueError("must specify a sensor if using "
"artificial viscosity")
bound_op = discr.compile(self.sym_operator(
sensor_scaling=sensor_scaling,
            viscosity_only=viscosity_only))
from grudge.mesh import check_bc_coverage
check_bc_coverage(discr.mesh, [
self.inflow_tag,
self.outflow_tag,
self.noslip_tag,
self.wall_tag,
self.supersonic_inflow_tag,
self.supersonic_outflow_tag,
])
if self.mu == 0 and not discr.get_boundary(self.noslip_tag).is_empty():
raise RuntimeError("no-slip BCs only make sense for "
"viscous problems")
def rhs(t, q):
extra_kwargs = {}
if self.source is not None:
extra_kwargs["source_vect"] = self.source.volume_interpolant(
t, q, discr)
if sensor is not None:
extra_kwargs["sensor"] = sensor(q)
opt_result = bound_op(q=q,
bc_q_in=self.bc_inflow.boundary_interpolant(
t, discr, self.inflow_tag),
bc_q_out=self.bc_outflow.boundary_interpolant(
t, discr, self.outflow_tag),
bc_q_noslip=self.bc_noslip.boundary_interpolant(
t, discr, self.noslip_tag),
bc_q_supersonic_in=self.bc_supersonic_inflow
.boundary_interpolant(t, discr,
self.supersonic_inflow_tag),
**extra_kwargs
)
max_speed = opt_result[-1]
ode_rhs = opt_result[:-1]
return ode_rhs, discr.nodewise_max(max_speed)
return rhs
# }}}
# {{{ timestep estimation -------------------------------------------------
def estimate_timestep(self, discr,
stepper=None, stepper_class=None, stepper_args=None,
t=None, max_eigenvalue=None):
u"""Estimate the largest stable timestep, given a time stepper
`stepper_class`. If none is given, RK4 is assumed.
"""
dg_factor = (discr.dt_non_geometric_factor()
* discr.dt_geometric_factor())
# see JSH/TW, eq. (7.32)
rk4_dt = dg_factor / (max_eigenvalue + self.mu / dg_factor)
from grudge.timestep.stability import \
approximate_rk4_relative_imag_stability_region
return rk4_dt * approximate_rk4_relative_imag_stability_region(
stepper, stepper_class, stepper_args)
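    # Reading of the estimate above: dt_rk4 ~ h / (|lambda_max| + mu / h), where h
    # is the product of the non-geometric and geometric mesh factors -- the usual
    # advective CFL limit with an added viscous contribution -- scaled by the
    # imaginary-axis extent of the chosen stepper's stability region relative to RK4.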
# }}}
# {{{ limiter (unfinished, deprecated)
class SlopeLimiter1NEuler:
def __init__(self, discr, gamma, dimensions, op):
"""Construct a limiter from Jan's book page 225
"""
self.discr = discr
self.gamma=gamma
self.dimensions=dimensions
self.op=op
from grudge.symbolic.operators import AveragingOperator
self.get_average = AveragingOperator().bind(discr)
    def __call__(self, fields):
        from grudge.tools import join_fields

        # get conserved fields
        rho = self.op.rho(fields)
        e = self.op.e(fields)
        rho_velocity = self.op.rho_u(fields)

        # reset field values to cell average
        rho_lim = self.get_average(rho)
        e_lim = self.get_average(e)
        temp = join_fields([self.get_average(rho_vel)
            for rho_vel in rho_velocity])

        # should do the same for primitive fields, too
        return join_fields(rho_lim, e_lim, temp)
# }}}
# vim: foldmethod=marker
# -*- coding: utf8 -*-
"""Lattice-Boltzmann operator."""
from __future__ import division
from __future__ import absolute_import
from six.moves import range
from six.moves import zip
__copyright__ = "Copyright (C) 2011 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import numpy.linalg as la
from grudge.models import HyperbolicOperator
from pytools.obj_array import make_obj_array
class LBMMethodBase(object):
def __len__(self):
return len(self.direction_vectors)
def find_opposites(self):
self.opposites = np.zeros(len(self))
for alpha in range(len(self)):
if self.opposites[alpha]:
continue
found = False
for alpha_2 in range(alpha, len(self)):
if la.norm(
self.direction_vectors[alpha]
+ self.direction_vectors[alpha_2]) < 1e-12:
self.opposites[alpha] = alpha_2
self.opposites[alpha_2] = alpha
found = True
if not found:
raise RuntimeError(
"direction %s had no opposite"
% self.direction_vectors[alpha])
class D2Q9LBMMethod(LBMMethodBase):
def __init__(self):
self.dimensions = 2
alphas = np.arange(0, 9)
thetas = (alphas-1)*np.pi/2
thetas[5:9] += np.pi/4
direction_vectors = np.vstack([
np.cos(thetas), np.sin(thetas)]).T
direction_vectors[0] *= 0
direction_vectors[5:9] *= np.sqrt(2)
direction_vectors[np.abs(direction_vectors) < 1e-12] = 0
self.direction_vectors = direction_vectors
self.weights = np.array([4/9] + [1/9]*4 + [1/36]*4)
self.speed_of_sound = 1/np.sqrt(3)
self.find_opposites()
def f_equilibrium(self, rho, alpha, u):
e_alpha = self.direction_vectors[alpha]
c_s = self.speed_of_sound
return self.weights[alpha]*rho*(
1
+ np.dot(e_alpha, u)/c_s**2
+ 1/2*np.dot(e_alpha, u)**2/c_s**4
- 1/2*np.dot(u, u)/c_s**2)
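    # f_equilibrium above is the standard second-order (in Mach number) expansion
    # of the Maxwell-Boltzmann distribution,
    #     f_alpha^eq = w_alpha * rho * [1 + (e_alpha.u)/c_s^2
    #                                   + (e_alpha.u)^2/(2 c_s^4) - (u.u)/(2 c_s^2)],
    # with the D2Q9 weights w = (4/9, 1/9 x 4, 1/36 x 4) and c_s = 1/sqrt(3) set in
    # __init__.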
class LatticeBoltzmannOperator(HyperbolicOperator):
def __init__(self, method, lbm_delta_t, nu, flux_type="upwind"):
self.method = method
self.lbm_delta_t = lbm_delta_t
self.nu = nu
self.flux_type = flux_type
@property
def tau(self):
return (self.nu
/
(self.lbm_delta_t*self.method.speed_of_sound**2))
def get_advection_flux(self, velocity):
from grudge.flux import make_normal, FluxScalarPlaceholder
from pymbolic.primitives import IfPositive
u = FluxScalarPlaceholder(0)
normal = make_normal(self.method.dimensions)
if self.flux_type == "central":
return u.avg*np.dot(normal, velocity)
elif self.flux_type == "lf":
return u.avg*np.dot(normal, velocity) \
                + 0.5*la.norm(velocity)*(u.int - u.ext)
elif self.flux_type == "upwind":
return (np.dot(normal, velocity)*
IfPositive(np.dot(normal, velocity),
u.int, # outflow
u.ext, # inflow
))
else:
raise ValueError("invalid flux type")
def get_advection_op(self, q, velocity):
from grudge.symbolic import (
BoundaryPair,
get_flux_operator,
make_stiffness_t,
InverseMassOperator)
stiff_t = make_stiffness_t(self.method.dimensions)
flux_op = get_flux_operator(self.get_advection_flux(velocity))
return InverseMassOperator()(
np.dot(velocity, stiff_t*q) - flux_op(q))
def f_bar(self):
from grudge.symbolic import make_sym_vector
return make_sym_vector("f_bar", len(self.method))
def rho(self, f_bar):
return sum(f_bar)
def rho_u(self, f_bar):
return sum(
dv_i * field_i
for dv_i, field_i in
zip(self.method.direction_vectors, f_bar))
def stream_rhs(self, f_bar):
return make_obj_array([
self.get_advection_op(f_bar_alpha, e_alpha)
for e_alpha, f_bar_alpha in
zip(self.method.direction_vectors, f_bar)])
def collision_update(self, f_bar):
from grudge.symbolic.primitives import make_common_subexpression as cse
rho = cse(self.rho(f_bar), "rho")
rho_u = self.rho_u(f_bar)
u = cse(rho_u/rho, "u")
f_eq_func = self.method.f_equilibrium
f_eq = make_obj_array([
f_eq_func(rho, alpha, u) for alpha in range(len(self.method))])
return f_bar - 1/(self.tau+1/2)*(f_bar - f_eq)
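    # The update above is a BGK-type relaxation of f_bar towards the local
    # equilibrium f_eq. The relaxation rate 1/(tau + 1/2) (rather than 1/tau)
    # matches the half-step shift that arises from a second-order (trapezoidal)
    # treatment of the collision term in standard lattice-Boltzmann schemes.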
def bind_rhs(self, discr):
compiled_sym_operator = discr.compile(
self.stream_rhs(self.f_bar()))
#from grudge.mesh import check_bc_coverage, BTAG_ALL
#check_bc_coverage(discr.mesh, [BTAG_ALL])
def rhs(t, f_bar):
return compiled_sym_operator(f_bar=f_bar)
return rhs
def bind(self, discr, what):
f_bar_sym = self.f_bar()
from grudge.symbolic.mappers.type_inference import (
type_info, NodalRepresentation)
type_hints = dict(
(f_bar_i, type_info.VolumeVector(NodalRepresentation()))
for f_bar_i in f_bar_sym)
compiled_sym_operator = discr.compile(what(f_bar_sym), type_hints=type_hints)
def rhs(f_bar):
return compiled_sym_operator(f_bar=f_bar)
return rhs
def max_eigenvalue(self, t=None, fields=None, discr=None):
return max(
la.norm(v) for v in self.method.direction_vectors)
# -*- coding: utf8 -*-
"""Canned operators for multivariable calculus."""
from __future__ import division
from __future__ import absolute_import
__copyright__ = "Copyright (C) 2009 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from grudge.models import Operator
class GradientOperator(Operator):
def __init__(self, dimensions):
self.dimensions = dimensions
def flux(self):
from grudge.flux import make_normal, FluxScalarPlaceholder
u = FluxScalarPlaceholder()
normal = make_normal(self.dimensions)
return u.int*normal - u.avg*normal
def sym_operator(self):
from grudge.mesh import BTAG_ALL
from grudge.symbolic import Field, BoundaryPair, \
make_nabla, InverseMassOperator, get_flux_operator
u = Field("u")
bc = Field("bc")
nabla = make_nabla(self.dimensions)
flux_op = get_flux_operator(self.flux())
return nabla*u - InverseMassOperator()(
flux_op(u) +
flux_op(BoundaryPair(u, bc, BTAG_ALL)))
def bind(self, discr):
        compiled_sym_operator = discr.compile(self.sym_operator())
def op(u):
from grudge.mesh import BTAG_ALL
return compiled_sym_operator(u=u,
bc=discr.boundarize_volume_field(u, BTAG_ALL))
return op
class DivergenceOperator(Operator):
def __init__(self, dimensions, subset=None):
self.dimensions = dimensions
if subset is None:
self.subset = dimensions * [True,]
else:
# chop off any extra dimensions
self.subset = subset[:dimensions]
from grudge.tools import count_subset
self.arg_count = count_subset(self.subset)
def flux(self):
from grudge.flux import make_normal, FluxVectorPlaceholder
v = FluxVectorPlaceholder(self.arg_count)
normal = make_normal(self.dimensions)
flux = 0
idx = 0
for i, i_enabled in enumerate(self.subset):
if i_enabled and i < self.dimensions:
flux += (v.int-v.avg)[idx]*normal[i]
idx += 1
return flux
def sym_operator(self):
from grudge.mesh import BTAG_ALL
from grudge.symbolic import make_sym_vector, BoundaryPair, \
get_flux_operator, make_nabla, InverseMassOperator
nabla = make_nabla(self.dimensions)
m_inv = InverseMassOperator()
v = make_sym_vector("v", self.arg_count)
bc = make_sym_vector("bc", self.arg_count)
local_op_result = 0
idx = 0
for i, i_enabled in enumerate(self.subset):
if i_enabled and i < self.dimensions:
local_op_result += nabla[i]*v[idx]
idx += 1
flux_op = get_flux_operator(self.flux())
return local_op_result - m_inv(
flux_op(v) +
flux_op(BoundaryPair(v, bc, BTAG_ALL)))
def bind(self, discr):
        compiled_sym_operator = discr.compile(self.sym_operator())
def op(v):
from grudge.mesh import BTAG_ALL
return compiled_sym_operator(v=v,
bc=discr.boundarize_volume_field(v, BTAG_ALL))
return op
# -*- coding: utf8 -*-
"""Models describing absorbing boundary layers."""
from __future__ import division
from __future__ import absolute_import
from six.moves import range
from six.moves import zip
__copyright__ = "Copyright (C) 2007 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy
from pytools import memoize_method, Record
from grudge.models.em import \
MaxwellOperator, \
TMMaxwellOperator, \
TEMaxwellOperator
class AbarbanelGottliebPMLMaxwellOperator(MaxwellOperator):
"""Implements a PML as in
[1] S. Abarbanel and D. Gottlieb, "On the construction and analysis of absorbing
layers in CEM," Applied Numerical Mathematics, vol. 27, 1998, S. 331-340.
(eq 3.7-3.11)
[2] E. Turkel and A. Yefet, "Absorbing PML
boundary layers for wave-like equations,"
Applied Numerical Mathematics, vol. 27,
1998, S. 533-557.
(eq. 4.10)
[3] Abarbanel, D. Gottlieb, and J.S. Hesthaven, "Long Time Behavior of the
Perfectly Matched Layer Equations in Computational Electromagnetics,"
Journal of Scientific Computing, vol. 17, Dez. 2002, S. 405-422.
Generalized to 3D in doc/maxima/abarbanel-pml.mac.
"""
class PMLCoefficients(Record):
__slots__ = ["sigma", "sigma_prime", "tau"]
# (tau=mu in [3] , to avoid confusion with permeability)
def map(self, f):
return self.__class__(
**dict((name, f(getattr(self, name)))
for name in self.fields))
def __init__(self, *args, **kwargs):
self.add_decay = kwargs.pop("add_decay", True)
MaxwellOperator.__init__(self, *args, **kwargs)
def pml_local_op(self, w):
sub_e, sub_h, sub_p, sub_q = self.split_ehpq(w)
e_subset = self.get_eh_subset()[0:3]
h_subset = self.get_eh_subset()[3:6]
dim_subset = (True,) * self.dimensions + (False,) * (3-self.dimensions)
def pad_vec(v, subset):
result = numpy.zeros((3,), dtype=object)
result[numpy.array(subset, dtype=bool)] = v
return result
from grudge.symbolic import make_sym_vector
sig = pad_vec(
make_sym_vector("sigma", self.dimensions),
dim_subset)
sig_prime = pad_vec(
make_sym_vector("sigma_prime", self.dimensions),
dim_subset)
if self.add_decay:
tau = pad_vec(
make_sym_vector("tau", self.dimensions),
dim_subset)
else:
tau = numpy.zeros((3,))
e = pad_vec(sub_e, e_subset)
h = pad_vec(sub_h, h_subset)
p = pad_vec(sub_p, dim_subset)
q = pad_vec(sub_q, dim_subset)
rhs = numpy.zeros(12, dtype=object)
for mx in range(3):
my = (mx+1) % 3
mz = (mx+2) % 3
from grudge.tools.mathematics import levi_civita
assert levi_civita((mx,my,mz)) == 1
rhs[mx] += -sig[my]/self.epsilon*(2*e[mx]+p[mx]) - 2*tau[my]/self.epsilon*e[mx]
rhs[my] += -sig[mx]/self.epsilon*(2*e[my]+p[my]) - 2*tau[mx]/self.epsilon*e[my]
rhs[3+mz] += 1/(self.epsilon*self.mu) * (
sig_prime[mx] * q[mx] - sig_prime[my] * q[my])
rhs[6+mx] += sig[my]/self.epsilon*e[mx]
rhs[6+my] += sig[mx]/self.epsilon*e[my]
rhs[9+mx] += -sig[mx]/self.epsilon*q[mx] - (e[my] + e[mz])
from grudge.tools import full_to_subset_indices
sub_idx = full_to_subset_indices(e_subset+h_subset+dim_subset+dim_subset)
return rhs[sub_idx]
def sym_operator(self, w=None):
from grudge.tools import count_subset
fld_cnt = count_subset(self.get_eh_subset())
if w is None:
from grudge.symbolic import make_sym_vector
w = make_sym_vector("w", fld_cnt+2*self.dimensions)
from grudge.tools import join_fields
return join_fields(
MaxwellOperator.sym_operator(self, w[:fld_cnt]),
numpy.zeros((2*self.dimensions,), dtype=object)
) + self.pml_local_op(w)
def bind(self, discr, coefficients):
return MaxwellOperator.bind(self, discr,
sigma=coefficients.sigma,
sigma_prime=coefficients.sigma_prime,
tau=coefficients.tau)
def assemble_ehpq(self, e=None, h=None, p=None, q=None, discr=None):
if discr is None:
def zero():
return 0
else:
def zero():
return discr.volume_zeros()
from grudge.tools import count_subset
e_components = count_subset(self.get_eh_subset()[0:3])
h_components = count_subset(self.get_eh_subset()[3:6])
def default_fld(fld, comp):
if fld is None:
return [zero() for i in range(comp)]
else:
return fld
e = default_fld(e, e_components)
h = default_fld(h, h_components)
p = default_fld(p, self.dimensions)
q = default_fld(q, self.dimensions)
from grudge.tools import join_fields
return join_fields(e, h, p, q)
@memoize_method
def partial_to_ehpq_subsets(self):
e_subset = self.get_eh_subset()[0:3]
h_subset = self.get_eh_subset()[3:6]
dim_subset = [True] * self.dimensions + [False] * (3-self.dimensions)
from grudge.tools import partial_to_all_subset_indices
return tuple(partial_to_all_subset_indices(
[e_subset, h_subset, dim_subset, dim_subset]))
def split_ehpq(self, w):
e_idx, h_idx, p_idx, q_idx = self.partial_to_ehpq_subsets()
e, h, p, q = w[e_idx], w[h_idx], w[p_idx], w[q_idx]
from grudge.flux import FluxVectorPlaceholder as FVP
if isinstance(w, FVP):
            return FVP(scalars=e), FVP(scalars=h), FVP(scalars=p), FVP(scalars=q)
else:
from grudge.tools import make_obj_array as moa
return moa(e), moa(h), moa(p), moa(q)
# sigma business ----------------------------------------------------------
def _construct_scalar_coefficients(self, discr, node_coord,
i_min, i_max, o_min, o_max, exponent):
assert o_min < i_min <= i_max < o_max
if o_min != i_min:
l_dist = (i_min - node_coord) / (i_min-o_min)
l_dist_prime = discr.volume_zeros(kind="numpy", dtype=node_coord.dtype)
l_dist_prime[l_dist >= 0] = -1 / (i_min-o_min)
l_dist[l_dist < 0] = 0
else:
l_dist = l_dist_prime = numpy.zeros_like(node_coord)
if i_max != o_max:
r_dist = (node_coord - i_max) / (o_max-i_max)
r_dist_prime = discr.volume_zeros(kind="numpy", dtype=node_coord.dtype)
r_dist_prime[r_dist >= 0] = 1 / (o_max-i_max)
r_dist[r_dist < 0] = 0
else:
r_dist = r_dist_prime = numpy.zeros_like(node_coord)
l_plus_r = l_dist+r_dist
return l_plus_r**exponent, \
(l_dist_prime+r_dist_prime)*exponent*l_plus_r**(exponent-1), \
l_plus_r
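    # The helper above builds a one-dimensional absorption profile per coordinate:
    # the ramp l_plus_r vanishes inside [i_min, i_max] and grows linearly to 1 at
    # the outer box; sigma is that ramp raised to *exponent*, sigma_prime is its
    # spatial derivative, and the un-exponentiated ramp is returned for use in the
    # tau (decay) coefficient.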
def coefficients_from_boxes(self, discr,
inner_bbox, outer_bbox=None,
magnitude=None, tau_magnitude=None,
exponent=None, dtype=None):
if outer_bbox is None:
outer_bbox = discr.mesh.bounding_box()
if exponent is None:
exponent = 2
if magnitude is None:
magnitude = 20
if tau_magnitude is None:
tau_magnitude = 0.4
# scale by free space conductivity
from math import sqrt
magnitude = magnitude*sqrt(self.epsilon/self.mu)
tau_magnitude = tau_magnitude*sqrt(self.epsilon/self.mu)
i_min, i_max = inner_bbox
o_min, o_max = outer_bbox
from grudge.tools import make_obj_array
nodes = discr.nodes
if dtype is not None:
nodes = nodes.astype(dtype)
sigma, sigma_prime, tau = list(zip(*[self._construct_scalar_coefficients(
discr, nodes[:,i],
i_min[i], i_max[i], o_min[i], o_max[i],
exponent)
for i in range(discr.dimensions)]))
def conv(f):
return discr.convert_volume(f, kind=discr.compute_kind,
dtype=discr.default_scalar_type)
return self.PMLCoefficients(
sigma=conv(magnitude*make_obj_array(sigma)),
sigma_prime=conv(magnitude*make_obj_array(sigma_prime)),
tau=conv(tau_magnitude*make_obj_array(tau)))
def coefficients_from_width(self, discr, width,
magnitude=None, tau_magnitude=None, exponent=None,
dtype=None):
o_min, o_max = discr.mesh.bounding_box()
return self.coefficients_from_boxes(discr,
(o_min+width, o_max-width),
(o_min, o_max),
magnitude, tau_magnitude, exponent, dtype)
class AbarbanelGottliebPMLTEMaxwellOperator(
TEMaxwellOperator, AbarbanelGottliebPMLMaxwellOperator):
# not unimplemented--this IS the implementation.
pass
class AbarbanelGottliebPMLTMMaxwellOperator(
TMMaxwellOperator, AbarbanelGottliebPMLMaxwellOperator):
# not unimplemented--this IS the implementation.
pass
# -*- coding: utf8 -*-
"""Operators for Poisson problems."""
from __future__ import division
from __future__ import absolute_import
__copyright__ = "Copyright (C) 2007 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
from grudge.models import Operator
from grudge.second_order import LDGSecondDerivative
import grudge.data
import grudge.iterative
class LaplacianOperatorBase(object):
def sym_operator(self, apply_minv, u=None, dir_bc=None, neu_bc=None):
"""
:param apply_minv: :class:`bool` specifying whether to compute a complete
divergence operator. If False, the final application of the inverse
mass operator is skipped. This is used in :meth:`op` in order to
reduce the scheme :math:`M^{-1} S u = f` to :math:`S u = M f`, so
that the mass operator only needs to be applied once, when preparing
the right hand side in :meth:`prepare_rhs`.
:class:`grudge.models.diffusion.DiffusionOperator` needs this.
"""
from grudge.symbolic import Field, make_sym_vector
from grudge.second_order import SecondDerivativeTarget
if u is None:
u = Field("u")
if dir_bc is None:
dir_bc = Field("dir_bc")
if neu_bc is None:
neu_bc = Field("neu_bc")
# strong_form here allows IPDG to reuse the value of grad u.
grad_tgt = SecondDerivativeTarget(
self.dimensions, strong_form=True,
operand=u)
def grad_bc_getter(tag, expr):
assert tag == self.dirichlet_tag
return dir_bc
self.scheme.grad(grad_tgt,
bc_getter=grad_bc_getter,
dirichlet_tags=[self.dirichlet_tag],
neumann_tags=[self.neumann_tag])
def apply_diff_tensor(v):
if isinstance(self.diffusion_tensor, np.ndarray):
sym_diff_tensor = self.diffusion_tensor
else:
sym_diff_tensor = (make_sym_vector(
"diffusion", self.dimensions**2)
.reshape(self.dimensions, self.dimensions))
return np.dot(sym_diff_tensor, v)
div_tgt = SecondDerivativeTarget(
self.dimensions, strong_form=False,
operand=apply_diff_tensor(grad_tgt.minv_all))
def div_bc_getter(tag, expr):
if tag == self.dirichlet_tag:
return dir_bc
elif tag == self.neumann_tag:
return neu_bc
else:
assert False, "divergence bc getter " \
"asked for '%s' BC for '%s'" % (tag, expr)
self.scheme.div(div_tgt,
div_bc_getter,
dirichlet_tags=[self.dirichlet_tag],
neumann_tags=[self.neumann_tag])
if apply_minv:
return div_tgt.minv_all
else:
return div_tgt.all
class PoissonOperator(Operator, LaplacianOperatorBase):
"""Implements the Local Discontinuous Galerkin (LDG) Method for elliptic
operators.
See P. Castillo et al.,
    "Local discontinuous Galerkin methods for elliptic problems",
Communications in Numerical Methods in Engineering 18, no. 1 (2002): 69-75.
"""
def __init__(self, dimensions, diffusion_tensor=None,
dirichlet_bc=grudge.data.ConstantGivenFunction(),
dirichlet_tag="dirichlet",
neumann_bc=grudge.data.ConstantGivenFunction(),
neumann_tag="neumann",
scheme=LDGSecondDerivative()):
self.dimensions = dimensions
self.scheme = scheme
self.dirichlet_bc = dirichlet_bc
self.dirichlet_tag = dirichlet_tag
self.neumann_bc = neumann_bc
self.neumann_tag = neumann_tag
if diffusion_tensor is None:
diffusion_tensor = np.eye(dimensions)
self.diffusion_tensor = diffusion_tensor
# bound operator ----------------------------------------------------------
def bind(self, discr):
"""Return a :class:`BoundPoissonOperator`."""
assert self.dimensions == discr.dimensions
from grudge.mesh import check_bc_coverage
check_bc_coverage(discr.mesh, [self.dirichlet_tag, self.neumann_tag])
return BoundPoissonOperator(self, discr)
class BoundPoissonOperator(grudge.iterative.OperatorBase):
"""Returned by :meth:`PoissonOperator.bind`."""
def __init__(self, poisson_op, discr):
grudge.iterative.OperatorBase.__init__(self)
self.discr = discr
pop = self.poisson_op = poisson_op
op = pop.sym_operator(
apply_minv=False, dir_bc=0, neu_bc=0)
bc_op = pop.sym_operator(apply_minv=False)
self.compiled_op = discr.compile(op)
self.compiled_bc_op = discr.compile(bc_op)
if not isinstance(pop.diffusion_tensor, np.ndarray):
self.diffusion = pop.diffusion_tensor.volume_interpolant(discr)
# Check whether use of Poincaré mean-value method is required.
# (for pure Neumann or pure periodic)
from grudge.mesh import BTAG_ALL
self.poincare_mean_value_hack = (
len(self.discr.get_boundary(BTAG_ALL).nodes)
== len(self.discr.get_boundary(poisson_op.neumann_tag).nodes))
@property
def dtype(self):
return self.discr.default_scalar_type
@property
def shape(self):
nodes = len(self.discr)
return nodes, nodes
def op(self, u):
context = {"u": u}
if not isinstance(self.poisson_op.diffusion_tensor, np.ndarray):
context["diffusion"] = self.diffusion
result = self.compiled_op(**context)
if self.poincare_mean_value_hack:
state_int = self.discr.integral(u)
mean_state = state_int / self.discr.mesh_volume()
return result - mean_state * self.discr._mass_ones()
else:
return result
__call__ = op
def prepare_rhs(self, rhs):
"""Prepare the right-hand side for the linear system op(u)=rhs(f).
In matrix form, LDG looks like this:
.. math::
Mv = Cu + g
Mf = Av + Bu + h
where v is the auxiliary vector, u is the argument of the operator, f
        is the result of the grad operator, g and h are inhomogeneous boundary
        data, and
A,B,C are some operator+lifting matrices.
.. math::
M f = A M^{-1}(Cu + g) + Bu + h
so the linear system looks like
.. math::
M f = A M^{-1} Cu + A M^{-1} g + Bu + h
M f - A M^{-1} g - h = (A M^{-1} C + B)u (*)
So the right hand side we're putting together here is really
.. math::
M f - A M^{-1} g - h
.. note::
Resist the temptation to left-multiply by the inverse
mass matrix, as this will result in a non-symmetric
matrix, which (e.g.) a conjugate gradient Krylov
solver will not take well.
"""
pop = self.poisson_op
from grudge.symbolic import MassOperator
return (MassOperator().apply(self.discr, rhs)
- self.compiled_bc_op(
u=self.discr.volume_zeros(),
dir_bc=pop.dirichlet_bc.boundary_interpolant(
self.discr, pop.dirichlet_tag),
neu_bc=pop.neumann_bc.boundary_interpolant(
self.discr, pop.neumann_tag)))
class HelmholtzOperator(PoissonOperator):
def __init__(self, k, *args, **kwargs):
PoissonOperator.__init__(self, *args, **kwargs)
self.k = k
def sym_operator(self, apply_minv, u=None, dir_bc=None, neu_bc=None):
from grudge.symbolic import Field
if u is None:
u = Field("u")
result = PoissonOperator.sym_operator(self,
apply_minv, u, dir_bc, neu_bc)
if apply_minv:
return result + self.k**2 * u
else:
from grudge.symbolic import MassOperator
return result + self.k**2 * MassOperator()(u)
# -*- coding: utf8 -*-
"""Schemes for second-order derivatives."""
from __future__ import division
__copyright__ = "Copyright (C) 2009 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import grudge.symbolic.mappers as mappers
from grudge import sym
# {{{ stabilization term generator
class StabilizationTermGenerator(mappers.IdentityMapper):
def __init__(self, flux_args):
super(StabilizationTermGenerator, self).__init__()
self.flux_args = flux_args
self.flux_arg_lookup = dict(
(flux_arg, i) for i, flux_arg in enumerate(flux_args))
def get_flux_arg_idx(self, expr, quad_above):
from grudge.symbolic.mappers import QuadratureDetector
quad_below = QuadratureDetector()(expr)
if quad_above:
if quad_below is not None:
# Both the part of the expression above and below the
# differentiation operator had quadrature upsamplers in it.
# Since we're removing the differentiation operator, there are
# now two layers of quadrature operators. We need to change the
# inner layer to be the only layer.
from grudge.symbolic.mappers import QuadratureUpsamplerChanger
expr = QuadratureUpsamplerChanger(quad_above[0])(expr)
else:
# Only the part of the expression above the differentiation
# operator had quadrature. Insert quadrature here, be done.
expr = quad_above[0](expr)
else:
if quad_below is not None:
# Only the part of the expression below the differentiation
# operator had quadrature--the stuff above doesn't want it.
# Get rid of it.
from grudge.symbolic.mappers import QuadratureUpsamplerRemover
expr = QuadratureUpsamplerRemover({}, do_warn=False)(expr)
else:
# No quadrature, no headaches.
pass
try:
return self.flux_arg_lookup[expr]
except KeyError:
flux_arg_idx = len(self.flux_args)
self.flux_arg_lookup[expr] = flux_arg_idx
self.flux_args.append(expr)
return flux_arg_idx
def map_operator_binding(self, expr, quad_above=[]):
from grudge.symbolic.operators import (
DiffOperatorBase, FluxOperatorBase,
InverseMassOperator,
QuadratureInteriorFacesGridUpsampler)
if isinstance(expr.op, DiffOperatorBase):
flux_arg_idx = self.get_flux_arg_idx(expr.field, quad_above=quad_above)
from grudge.symbolic import \
WeakFormDiffOperatorBase, \
StrongFormDiffOperatorBase
if isinstance(expr.op, WeakFormDiffOperatorBase):
factor = -1
elif isinstance(expr.op, StrongFormDiffOperatorBase):
factor = 1
else:
raise RuntimeError("unknown type of differentiation "
"operator encountered by stab term generator")
from grudge.flux import Normal, FluxScalarPlaceholder
sph = FluxScalarPlaceholder(flux_arg_idx)
return (factor
* Normal(expr.op.xyz_axis)
* (sph.int - sph.ext))
elif isinstance(expr.op, FluxOperatorBase):
return 0
elif isinstance(expr.op, InverseMassOperator):
return self.rec(expr.field, quad_above)
elif isinstance(expr.op, QuadratureInteriorFacesGridUpsampler):
if quad_above:
raise RuntimeError("double quadrature upsampler found "
"when generating stabilization term")
return self.rec(expr.field, [expr.op])
else:
from grudge.symbolic.tools import pretty
raise ValueError("stabilization term generator doesn't know "
"what to do with '%s'" % pretty(expr))
def map_variable(self, expr, quad_above=[]):
from grudge.flux import FieldComponent
return FieldComponent(
self.get_flux_arg_idx(expr, quad_above),
is_interior=True)
def map_subscript(self, expr, quad_above=[]):
from pymbolic.primitives import Variable
assert isinstance(expr.aggregate, Variable)
from grudge.flux import FieldComponent
return FieldComponent(
self.get_flux_arg_idx(expr, quad_above),
is_interior=True)
# }}}
# {{{ neumann bc generator
class NeumannBCGenerator(mappers.IdentityMapper):
def __init__(self, tag, bc):
super(NeumannBCGenerator, self).__init__()
self.tag = tag
self.bc = bc
def map_operator_binding(self, expr):
if isinstance(expr.op, sym.DiffOperatorBase):
from grudge.symbolic import \
WeakFormDiffOperatorBase, \
StrongFormDiffOperatorBase
if isinstance(expr.op, WeakFormDiffOperatorBase):
factor = -1
elif isinstance(expr.op, StrongFormDiffOperatorBase):
factor = 1
else:
raise RuntimeError("unknown type of differentiation "
"operator encountered by stab term generator")
from grudge.symbolic import BoundaryNormalComponent
return (self.bc * factor *
BoundaryNormalComponent(self.tag, expr.op.xyz_axis))
elif isinstance(expr.op, sym.FluxOperatorBase):
return 0
elif isinstance(expr.op, sym.InverseMassOperator):
return self.rec(expr.field)
else:
raise ValueError("neumann normal direction generator doesn't know "
"what to do with '%s'" % expr)
# }}}
class IPDGDerivativeGenerator(mappers.IdentityMapper):
def map_operator_binding(self, expr):
if isinstance(expr.op, sym.DiffOperatorBase):
from grudge.symbolic import (
WeakFormDiffOperatorBase,
StrongFormDiffOperatorBase)
if isinstance(expr.op, WeakFormDiffOperatorBase):
factor = -1
elif isinstance(expr.op, StrongFormDiffOperatorBase):
factor = 1
else:
raise RuntimeError("unknown type of differentiation "
"operator encountered by stab term generator")
from grudge.symbolic import DifferentiationOperator
return factor*DifferentiationOperator(expr.op.xyz_axis)(expr.field)
elif isinstance(expr.op, sym.FluxOperatorBase):
return 0
elif isinstance(expr.op, sym.InverseMassOperator):
return self.rec(expr.field)
elif isinstance(expr.op,
sym.QuadratureInteriorFacesGridUpsampler):
return super(IPDGDerivativeGenerator, self).map_operator_binding(expr)
else:
from grudge.symbolic.tools import pretty
raise ValueError("IPDG derivative generator doesn't know "
"what to do with '%s'" % pretty(expr))
# {{{ second derivative target
class SecondDerivativeTarget(object):
def __init__(self, dimensions, strong_form, operand,
int_flux_operand=None,
bdry_flux_int_operand=None):
"""
:param int_flux_operand: if not None, is used as the interior
argument to the interior fluxes. This is useful e.g. if the interior
flux arguments live on a quadrature grid. If it is None, it defaults
to *operand*.
:param bdry_flux_int_operand: if not None, is used as the interior
argument to the boundary fluxes. This is useful e.g. if the boundary
values are on a quadrature grid--in this case, *bdry_flux_int_operand*
can be passed to also be on a boundary grid. If it is None, it defaults
to *int_flux_operand*.
"""
self.dimensions = dimensions
self.operand = operand
if int_flux_operand is None:
int_flux_operand = operand
if bdry_flux_int_operand is None:
bdry_flux_int_operand = int_flux_operand
self.int_flux_operand = int_flux_operand
self.bdry_flux_int_operand = bdry_flux_int_operand
self.strong_form = strong_form
if strong_form:
self.strong_neg = -1
else:
self.strong_neg = 1
self.local_derivatives = 0
self.inner_fluxes = 0
self.boundary_fluxes = 0
def add_local_derivatives(self, expr):
self.local_derivatives = self.local_derivatives \
+ (-self.strong_neg)*expr
def vec_times(self, vec, operand):
from pytools.obj_array import is_obj_array
if is_obj_array(operand):
if len(operand) != self.dimensions:
raise ValueError("operand of vec_times must have %d dimensions"
% self.dimensions)
return np.dot(vec, operand)
else:
return vec*operand
def normal_times_flux(self, flux):
from grudge.flux import make_normal
return self.vec_times(make_normal(self.dimensions), flux)
def apply_diff(self, nabla, operand):
from pytools.obj_array import make_obj_array, is_obj_array
if is_obj_array(operand):
if len(operand) != self.dimensions:
raise ValueError("operand of apply_diff must have %d dimensions"
% self.dimensions)
return sum(nabla[i](operand[i]) for i in range(self.dimensions))
else:
return make_obj_array(
[nabla[i](operand) for i in range(self.dimensions)])
def _local_nabla(self):
if self.strong_form:
from grudge.symbolic import make_stiffness
return make_stiffness(self.dimensions)
else:
from grudge.symbolic import make_stiffness_t
return make_stiffness_t(self.dimensions)
def add_derivative(self, operand=None):
if operand is None:
operand = self.operand
self.add_local_derivatives(
self.apply_diff(self._local_nabla(), operand))
def add_inner_fluxes(self, flux, expr):
from grudge.symbolic import get_flux_operator
self.inner_fluxes = self.inner_fluxes \
+ get_flux_operator(self.strong_neg*flux)(expr)
def add_boundary_flux(self, flux, volume_expr, bdry_expr, tag):
from grudge.symbolic import BoundaryPair, get_flux_operator
self.boundary_fluxes = self.boundary_fluxes + \
get_flux_operator(self.strong_neg*flux)(BoundaryPair(
volume_expr, bdry_expr, tag))
@property
def fluxes(self):
return self.inner_fluxes + self.boundary_fluxes
@property
def all(self):
return self.local_derivatives + self.fluxes
@property
def minv_all(self):
from grudge.symbolic.primitives import make_common_subexpression as cse
from grudge.symbolic.operators import InverseMassOperator
return (cse(InverseMassOperator()(self.local_derivatives), "grad_loc")
+ cse(InverseMassOperator()(self.fluxes), "grad_flux"))
# }}}
# {{{ second derivative schemes
class SecondDerivativeBase(object):
def grad(self, tgt, bc_getter, dirichlet_tags, neumann_tags):
"""
:param bc_getter: a function (tag, volume_expr) -> boundary expr.
*volume_expr* will be None to query the Neumann condition.
"""
n_times = tgt.normal_times_flux
if tgt.strong_form:
def adjust_flux(f):
return n_times(u.int) - f
else:
def adjust_flux(f):
return f
from grudge.flux import FluxScalarPlaceholder
u = FluxScalarPlaceholder()
tgt.add_derivative()
tgt.add_inner_fluxes(
adjust_flux(self.grad_interior_flux(tgt, u)),
tgt.int_flux_operand)
for tag in dirichlet_tags:
tgt.add_boundary_flux(
adjust_flux(n_times(u.ext)),
tgt.bdry_flux_int_operand, bc_getter(tag, tgt.operand), tag)
for tag in neumann_tags:
tgt.add_boundary_flux(
adjust_flux(n_times(u.int)),
tgt.bdry_flux_int_operand, 0, tag)
def add_div_bcs(self, tgt, bc_getter, dirichlet_tags, neumann_tags,
stab_term, adjust_flux, flux_v, flux_arg_int,
grad_flux_arg_count):
from pytools.obj_array import make_obj_array, join_fields
n_times = tgt.normal_times_flux
def unwrap_cse(expr):
from pymbolic.primitives import CommonSubexpression
if isinstance(expr, CommonSubexpression):
return expr.child
else:
return expr
for tag in dirichlet_tags:
dir_bc_w = join_fields(
[0]*grad_flux_arg_count,
[bc_getter(tag, unwrap_cse(vol_expr)) for vol_expr in
flux_arg_int[grad_flux_arg_count:]])
tgt.add_boundary_flux(
adjust_flux(n_times(flux_v.int-stab_term)),
flux_arg_int, dir_bc_w, tag)
loc_bc_vec = make_obj_array([0]*len(flux_arg_int))
for tag in neumann_tags:
neu_bc_w = join_fields(
NeumannBCGenerator(tag, bc_getter(tag, None))(tgt.operand),
[0]*len(flux_arg_int))
tgt.add_boundary_flux(
adjust_flux(n_times(flux_v.ext)),
loc_bc_vec, neu_bc_w, tag)
class LDGSecondDerivative(SecondDerivativeBase):
def __init__(self, beta_value=0.5, stab_coefficient=1):
self.beta_value = beta_value
self.stab_coefficient = stab_coefficient
def beta(self, tgt):
return np.array([self.beta_value]*tgt.dimensions, dtype=np.float64)
def grad_interior_flux(self, tgt, u):
from grudge.symbolic.primitives import make_common_subexpression as cse
n_times = tgt.normal_times_flux
v_times = tgt.vec_times
return n_times(
cse(u.avg, "u_avg")
- v_times(self.beta(tgt), n_times(u.int-u.ext)))
def div(self, tgt, bc_getter, dirichlet_tags, neumann_tags):
"""
:param bc_getter: a function (tag, volume_expr) -> boundary expr.
*volume_expr* will be None to query the Neumann condition.
"""
from grudge.symbolic.primitives import make_common_subexpression as cse
from grudge.flux import FluxVectorPlaceholder, PenaltyTerm
n_times = tgt.normal_times_flux
v_times = tgt.vec_times
if tgt.strong_form:
def adjust_flux(f):
return n_times(flux_v.int) - f
else:
def adjust_flux(f):
return f
flux_v = FluxVectorPlaceholder(tgt.dimensions)
stab_term_generator = StabilizationTermGenerator(
list(tgt.int_flux_operand))
stab_term = (self.stab_coefficient * PenaltyTerm()
* stab_term_generator(tgt.int_flux_operand))
flux = n_times(flux_v.avg
+ v_times(self.beta(tgt),
cse(n_times(flux_v.int - flux_v.ext), "jump_v"))
- stab_term)
from pytools.obj_array import make_obj_array
flux_arg_int = cse(make_obj_array(stab_term_generator.flux_args))
tgt.add_derivative(cse(tgt.operand))
tgt.add_inner_fluxes(adjust_flux(flux), flux_arg_int)
self.add_div_bcs(tgt, bc_getter, dirichlet_tags, neumann_tags,
stab_term, adjust_flux, flux_v, flux_arg_int, tgt.dimensions)
class StabilizedCentralSecondDerivative(LDGSecondDerivative):
def __init__(self, stab_coefficient=1):
LDGSecondDerivative.__init__(self, 0, stab_coefficient=stab_coefficient)
class CentralSecondDerivative(LDGSecondDerivative):
def __init__(self):
LDGSecondDerivative.__init__(self, 0, 0)
class IPDGSecondDerivative(SecondDerivativeBase):
def __init__(self, stab_coefficient=1):
self.stab_coefficient = stab_coefficient
def grad_interior_flux(self, tgt, u):
from grudge.symbolic.primitives import make_common_subexpression as cse
n_times = tgt.normal_times_flux
return n_times(cse(u.avg, "u_avg"))
def div(self, tgt, bc_getter, dirichlet_tags, neumann_tags):
"""
:param bc_getter: a function (tag, volume_expr) -> boundary expr.
*volume_expr* will be None to query the Neumann condition.
"""
from grudge.symbolic.primitives import make_common_subexpression as cse
from grudge.flux import FluxVectorPlaceholder, PenaltyTerm
n_times = tgt.normal_times_flux
if tgt.strong_form:
def adjust_flux(f):
return n_times(flux_v.int) - f
else:
def adjust_flux(f):
return f
dim = tgt.dimensions
flux_w = FluxVectorPlaceholder(2*tgt.dimensions)
flux_v = flux_w[:dim]
pure_diff_v = flux_w[dim:]
flux_args = (
list(tgt.int_flux_operand)
+ list(IPDGDerivativeGenerator()(tgt.int_flux_operand)))
stab_term_generator = StabilizationTermGenerator(flux_args)
stab_term = (self.stab_coefficient * PenaltyTerm()
* stab_term_generator(tgt.int_flux_operand))
flux = n_times(pure_diff_v.avg - stab_term)
from pytools.obj_array import make_obj_array
flux_arg_int = cse(make_obj_array(stab_term_generator.flux_args))
tgt.add_derivative(cse(tgt.operand))
tgt.add_inner_fluxes(adjust_flux(flux), flux_arg_int)
self.add_div_bcs(tgt, bc_getter, dirichlet_tags, neumann_tags,
stab_term, adjust_flux, flux_v, flux_arg_int, 2*tgt.dimensions)
# }}}
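# Example (illustrative sketch, not part of the original source): the LDG
# gradient flux that grad_interior_flux above assembles symbolically, written
# out in plain numpy for a single 2D interface. u_int/u_ext are the interior
# and exterior traces of u, `normal` is the interior-side unit normal, and
# `beta` is the LDG switch vector; beta = 0 recovers the central flux used by
# the (Stabilized)CentralSecondDerivative specializations.
import numpy as np

def ldg_grad_flux(u_int, u_ext, normal, beta):
    # n * (avg(u) - beta . (n * (u_int - u_ext)))
    u_avg = 0.5*(u_int + u_ext)
    jump = normal*(u_int - u_ext)
    return normal*(u_avg - np.dot(beta, jump))

normal = np.array([1.0, 0.0])
print(ldg_grad_flux(1.0, 0.5, normal, beta=np.array([0.5, 0.5])))
print(ldg_grad_flux(1.0, 0.5, normal, beta=np.zeros(2)))   # central flux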
# vim: fdm=marker
# -*- coding: utf8 -*-
"""Wave equation operators."""
from __future__ import division
from __future__ import absolute_import
__copyright__ = "Copyright (C) 2009 Andreas Kloeckner"
__copyright__ = """
Copyright (C) 2009 Andreas Kloeckner
Copyright (C) 2021 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
......@@ -28,18 +27,21 @@ THE SOFTWARE.
import numpy as np
from grudge.models import HyperbolicOperator
from grudge.models.second_order import CentralSecondDerivative
from meshmode.mesh import BTAG_ALL, BTAG_NONE
from grudge import sym
from pytools.obj_array import join_fields
from pytools.obj_array import flat_obj_array
import grudge.geometry as geo
import grudge.op as op
from grudge.dof_desc import DISCR_TAG_BASE, as_dofdesc
from grudge.models import HyperbolicOperator
# {{{ constant-velocity
class StrongWaveOperator(HyperbolicOperator):
"""This operator discretizes the wave equation
:math:`\\partial_t^2 u = c^2 \\Delta u`.
class WeakWaveOperator(HyperbolicOperator):
r"""This operator discretizes the wave equation
:math:`\partial_t^2 u = c^2 \Delta u`.
To be precise, we discretize the hyperbolic system
......@@ -55,16 +57,19 @@ class StrongWaveOperator(HyperbolicOperator):
:math:`c` is assumed to be constant across all space.
"""
def __init__(self, c, ambient_dim, source_f=0,
def __init__(self, dcoll, c, source_f=None,
flux_type="upwind",
dirichlet_tag=BTAG_ALL,
dirichlet_bc_f=0,
neumann_tag=BTAG_NONE,
radiation_tag=BTAG_NONE):
assert isinstance(ambient_dim, int)
radiation_tag=BTAG_NONE,
comm_tag=None):
if source_f is None:
source_f = lambda actx, dcoll, t: dcoll.zeros(actx) # noqa: E731
self.dcoll = dcoll
self.c = c
self.ambient_dim = ambient_dim
self.source_f = source_f
if self.c > 0:
......@@ -80,94 +85,93 @@ class StrongWaveOperator(HyperbolicOperator):
self.flux_type = flux_type
def flux(self, w):
u = w[0]
v = w[1:]
normal = sym.normal(w.dd, self.ambient_dim)
self.comm_tag = comm_tag
flux_weak = join_fields(
def flux(self, wtpair):
u = wtpair[0]
v = wtpair[1:]
actx = u.int.array_context
normal = geo.normal(actx, self.dcoll, wtpair.dd)
central_flux_weak = -self.c*flat_obj_array(
np.dot(v.avg, normal),
u.avg * normal)
if self.flux_type == "central":
pass
return central_flux_weak
elif self.flux_type == "upwind":
# see doc/notes/grudge-notes.tm
flux_weak -= self.sign*join_fields(
0.5*(u.int-u.ext),
0.5*(normal * np.dot(normal, v.int-v.ext)))
return central_flux_weak - self.c*self.sign*flat_obj_array(
0.5*(u.ext-u.int),
0.5*(normal * np.dot(normal, v.ext-v.int)))
else:
raise ValueError("invalid flux type '%s'" % self.flux_type)
flux_strong = join_fields(
np.dot(v.int, normal),
u.int * normal) - flux_weak
raise ValueError(f"invalid flux type '{self.flux_type}'")
return -self.c*flux_strong
def sym_operator(self):
d = self.ambient_dim
w = sym.make_sym_array("w", d+1)
def operator(self, t, w):
dcoll = self.dcoll
u = w[0]
v = w[1:]
actx = u.array_context
base_dd = as_dofdesc("vol", DISCR_TAG_BASE)
# boundary conditions -------------------------------------------------
# dirichlet BCs -------------------------------------------------------
dir_u = sym.cse(sym.interp("vol", self.dirichlet_tag)(u))
dir_v = sym.cse(sym.interp("vol", self.dirichlet_tag)(v))
dir_u = op.project(dcoll, "vol", self.dirichlet_tag, u)
dir_v = op.project(dcoll, "vol", self.dirichlet_tag, v)
if self.dirichlet_bc_f:
# FIXME
from warnings import warn
warn("Inhomogeneous Dirichlet conditions on the wave equation "
"are still having issues.")
"are still having issues.", stacklevel=1)
dir_g = sym.Field("dir_bc_u")
dir_bc = join_fields(2*dir_g - dir_u, dir_v)
dir_g = self.dirichlet_bc_f
dir_bc = flat_obj_array(2*dir_g - dir_u, dir_v)
else:
dir_bc = join_fields(-dir_u, dir_v)
dir_bc = sym.cse(dir_bc, "dir_bc")
dir_bc = flat_obj_array(-dir_u, dir_v)
# neumann BCs ---------------------------------------------------------
neu_u = sym.cse(sym.interp("vol", self.neumann_tag)(u))
neu_v = sym.cse(sym.interp("vol", self.neumann_tag)(v))
neu_bc = sym.cse(join_fields(neu_u, -neu_v), "neu_bc")
neu_u = op.project(dcoll, "vol", self.neumann_tag, u)
neu_v = op.project(dcoll, "vol", self.neumann_tag, v)
neu_bc = flat_obj_array(neu_u, -neu_v)
# radiation BCs -------------------------------------------------------
rad_normal = sym.normal(self.radiation_tag, d)
rad_normal = geo.normal(actx, dcoll, dd=self.radiation_tag)
rad_u = sym.cse(sym.interp("vol", self.radiation_tag)(u))
rad_v = sym.cse(sym.interp("vol", self.radiation_tag)(v))
rad_u = op.project(dcoll, "vol", self.radiation_tag, u)
rad_v = op.project(dcoll, "vol", self.radiation_tag, v)
rad_bc = sym.cse(join_fields(
0.5*(rad_u - self.sign*np.dot(rad_normal, rad_v)),
0.5*rad_normal*(np.dot(rad_normal, rad_v) - self.sign*rad_u)
), "rad_bc")
rad_bc = flat_obj_array(
0.5*(rad_u - self.sign*np.dot(rad_normal, rad_v)),
0.5*rad_normal*(np.dot(rad_normal, rad_v) - self.sign*rad_u)
)
# entire operator -----------------------------------------------------
nabla = sym.nabla(d)
def flux(pair):
return sym.interp(pair.dd, "all_faces")(
self.flux(pair))
def flux(tpair):
return op.project(dcoll, tpair.dd, "all_faces", self.flux(tpair))
result = (
- join_fields(
-self.c*np.dot(nabla, v),
-self.c*(nabla*u)
)
+
sym.InverseMassOperator()(
sym.FaceMassOperator()(
flux(sym.int_fpair(w))
+ flux(sym.bv_fpair(self.dirichlet_tag, w, dir_bc))
+ flux(sym.bv_fpair(self.neumann_tag, w, neu_bc))
+ flux(sym.bv_fpair(self.radiation_tag, w, rad_bc))
)))
result[0] += self.source_f
op.inverse_mass(
dcoll,
flat_obj_array(
-self.c*op.weak_local_div(dcoll, v),
-self.c*op.weak_local_grad(dcoll, u)
)
- op.face_mass(
dcoll,
sum(flux(tpair) for tpair in op.interior_trace_pairs(
dcoll, w, comm_tag=self.comm_tag))
+ flux(op.bv_trace_pair(
dcoll, base_dd.trace(self.dirichlet_tag), w, dir_bc))
+ flux(op.bv_trace_pair(
dcoll, base_dd.trace(self.neumann_tag), w, neu_bc))
+ flux(op.bv_trace_pair(
dcoll, base_dd.trace(self.radiation_tag), w, rad_bc))
)
)
)
result[0] = result[0] + self.source_f(actx, dcoll, t)
return result
......@@ -178,15 +182,19 @@ class StrongWaveOperator(HyperbolicOperator):
self.neumann_tag,
self.radiation_tag])
def max_eigenvalue(self, t, fields=None, discr=None):
def max_characteristic_velocity(self, actx, t=None, fields=None):
return abs(self.c)
def estimate_rk4_timestep(self, actx, dcoll, **kwargs):
# FIXME: Sketchy, empirically determined fudge factor
return 0.38 * super().estimate_rk4_timestep(actx, dcoll, **kwargs)
# }}}
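# Example (illustrative sketch, not part of the original source): the weak
# upwind flux for the first-order system (u, v) above, written in plain numpy
# for a single 2D interface, mirroring the flux() method of WeakWaveOperator.
# u_int/u_ext are scalar traces, v_int/v_ext vector traces, `c` the constant
# wave speed, `normal` the interior-facing unit normal; c > 0 is assumed, so
# the upwind sign is +1.
import numpy as np

def weak_wave_flux(c, normal, u_int, u_ext, v_int, v_ext, flux_type="upwind"):
    u_avg = 0.5*(u_int + u_ext)
    v_avg = 0.5*(v_int + v_ext)
    flux_u = -c*np.dot(v_avg, normal)   # flux for the u equation
    flux_v = -c*u_avg*normal            # flux for the v equations
    if flux_type == "upwind":
        flux_u -= c*0.5*(u_ext - u_int)
        flux_v -= c*0.5*normal*np.dot(normal, v_ext - v_int)
    return flux_u, flux_v

normal = np.array([0.0, 1.0])
print(weak_wave_flux(1.0, normal, 1.0, 0.8,
                     np.array([0.2, -0.1]), np.array([0.3, 0.0])))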
# {{{ variable-velocity
class VariableVelocityStrongWaveOperator(HyperbolicOperator):
class VariableCoefficientWeakWaveOperator(HyperbolicOperator):
r"""This operator discretizes the wave equation
:math:`\partial_t^2 u = c^2 \Delta u`.
......@@ -194,174 +202,146 @@ class VariableVelocityStrongWaveOperator(HyperbolicOperator):
.. math::
\partial_t u - c \nabla \cdot v = 0

\partial_t v - c \nabla u = 0
The sign of :math:`c` determines whether we discretize the forward or the
backward wave equation.
"""
def __init__(
self, c, ambient_dim, source=0,
def __init__(self, dcoll, c, source_f=None,
flux_type="upwind",
dirichlet_tag=BTAG_ALL,
dirichlet_bc_f=0,
neumann_tag=BTAG_NONE,
radiation_tag=BTAG_NONE,
time_sign=1,
diffusion_coeff=None,
diffusion_scheme=CentralSecondDerivative()
):
"""*c* and *source* are optemplate expressions.
radiation_tag=BTAG_NONE):
"""
:arg c: a frozen :class:`~meshmode.dof_array.DOFArray`
representing the propagation speed of the wave.
"""
assert isinstance(ambient_dim, int)
from arraycontext import get_container_context_recursively
assert get_container_context_recursively(c) is None
if source_f is None:
source_f = lambda actx, dcoll, t: dcoll.zeros(actx) # noqa: E731
actx = dcoll._setup_actx
self.dcoll = dcoll
self.c = c
self.time_sign = time_sign
self.ambient_dim = ambient_dim
self.source = source
self.source_f = source_f
ones = dcoll.zeros(actx) + 1
thawed_c = actx.thaw(self.c)
self.sign = actx.freeze(
actx.np.where(actx.np.greater(thawed_c, 0), ones, -ones))
self.dirichlet_tag = dirichlet_tag
self.neumann_tag = neumann_tag
self.radiation_tag = radiation_tag
self.flux_type = flux_type
self.diffusion_coeff = diffusion_coeff
self.diffusion_scheme = diffusion_scheme
self.dirichlet_bc_f = dirichlet_bc_f
# {{{ flux ----------------------------------------------------------------
def flux(self, w, c_vol):
u = w[0]
v = w[1:]
normal = sym.normal(w.tag, self.ambient_dim)
self.flux_type = flux_type
c = sym.RestrictToBoundary(w.tag)(c_vol)
def flux(self, wtpair):
c = wtpair[0]
u = wtpair[1]
v = wtpair[2:]
actx = u.int.array_context
normal = geo.normal(actx, self.dcoll, wtpair.dd)
flux = self.time_sign*1/2*join_fields(
c.ext * np.dot(v.ext, normal)
- c.int * np.dot(v.int, normal),
normal*(c.ext*u.ext - c.int*u.int))
flux_central_weak = -0.5 * flat_obj_array(
np.dot(v.int*c.int + v.ext*c.ext, normal),
(u.int * c.int + u.ext*c.ext) * normal)
if self.flux_type == "central":
pass
elif self.flux_type == "upwind":
flux += join_fields(
c.ext*u.ext - c.int*u.int,
c.ext*normal*np.dot(normal, v.ext)
- c.int*normal*np.dot(normal, v.int)
)
else:
raise ValueError("invalid flux type '%s'" % self.flux_type)
return flux
# }}}
def bind_characteristic_velocity(self, discr):
from grudge.symbolic.operators import ElementwiseMaxOperator
return flux_central_weak
compiled = discr.compile(ElementwiseMaxOperator()(self.c))
def do(t, w, **extra_context):
return compiled(t=t, w=w, **extra_context)
elif self.flux_type == "upwind":
return flux_central_weak - 0.5 * flat_obj_array(
c.ext*u.ext - c.int * u.int,
return do
normal * (np.dot(normal, c.ext * v.ext - c.int * v.int)))
def sym_operator(self, with_sensor=False):
d = self.ambient_dim
else:
raise ValueError(f"invalid flux type '{self.flux_type}'")
w = sym.make_sym_array("w", d+1)
def operator(self, t, w):
dcoll = self.dcoll
u = w[0]
v = w[1:]
actx = u.array_context
flux_w = join_fields(self.c, w)
# {{{ boundary conditions
# Dirichlet
dir_c = sym.RestrictToBoundary(self.dirichlet_tag) * self.c
dir_u = sym.RestrictToBoundary(self.dirichlet_tag) * u
dir_v = sym.RestrictToBoundary(self.dirichlet_tag) * v
dir_bc = join_fields(dir_c, -dir_u, dir_v)
# Neumann
neu_c = sym.RestrictToBoundary(self.neumann_tag) * self.c
neu_u = sym.RestrictToBoundary(self.neumann_tag) * u
neu_v = sym.RestrictToBoundary(self.neumann_tag) * v
neu_bc = join_fields(neu_c, neu_u, -neu_v)
# Radiation
rad_normal = sym.make_normal(self.radiation_tag, d)
c = actx.thaw(self.c)
rad_c = sym.RestrictToBoundary(self.radiation_tag) * self.c
rad_u = sym.RestrictToBoundary(self.radiation_tag) * u
rad_v = sym.RestrictToBoundary(self.radiation_tag) * v
flux_w = flat_obj_array(c, w)
rad_bc = join_fields(
rad_c,
0.5*(rad_u - self.time_sign*np.dot(rad_normal, rad_v)),
0.5*rad_normal*(np.dot(rad_normal, rad_v) - self.time_sign*rad_u)
)
# }}}
# {{{ diffusion -------------------------------------------------------
from pytools.obj_array import with_object_array_or_scalar
# boundary conditions -------------------------------------------------
def make_diffusion(arg):
if with_sensor or (
self.diffusion_coeff is not None and self.diffusion_coeff != 0):
if self.diffusion_coeff is None:
diffusion_coeff = 0
else:
diffusion_coeff = self.diffusion_coeff
# dirichlet BCs -------------------------------------------------------
dir_c = op.project(dcoll, "vol", self.dirichlet_tag, c)
dir_u = op.project(dcoll, "vol", self.dirichlet_tag, u)
dir_v = op.project(dcoll, "vol", self.dirichlet_tag, v)
if self.dirichlet_bc_f:
# FIXME
from warnings import warn
warn("Inhomogeneous Dirichlet conditions on the wave equation "
"are still having issues.", stacklevel=1)
if with_sensor:
diffusion_coeff += sym.Field("sensor")
dir_g = self.dirichlet_bc_f
dir_bc = flat_obj_array(dir_c, 2*dir_g - dir_u, dir_v)
else:
dir_bc = flat_obj_array(dir_c, -dir_u, dir_v)
from grudge.second_order import SecondDerivativeTarget
# neumann BCs ---------------------------------------------------------
neu_c = op.project(dcoll, "vol", self.neumann_tag, c)
neu_u = op.project(dcoll, "vol", self.neumann_tag, u)
neu_v = op.project(dcoll, "vol", self.neumann_tag, v)
neu_bc = flat_obj_array(neu_c, neu_u, -neu_v)
# strong_form here allows the reuse the value of grad u.
grad_tgt = SecondDerivativeTarget(
self.ambient_dim, strong_form=True,
operand=arg)
# radiation BCs -------------------------------------------------------
rad_normal = geo.normal(actx, dcoll, dd=self.radiation_tag)
self.diffusion_scheme.grad(grad_tgt, bc_getter=None,
dirichlet_tags=[], neumann_tags=[])
rad_c = op.project(dcoll, "vol", self.radiation_tag, c)
rad_u = op.project(dcoll, "vol", self.radiation_tag, u)
rad_v = op.project(dcoll, "vol", self.radiation_tag, v)
rad_sign = op.project(dcoll, "vol", self.radiation_tag, actx.thaw(self.sign))
div_tgt = SecondDerivativeTarget(
self.ambient_dim, strong_form=False,
operand=diffusion_coeff*grad_tgt.minv_all)
rad_bc = flat_obj_array(
rad_c,
0.5*(rad_u - rad_sign * np.dot(rad_normal, rad_v)),
0.5*rad_normal*(np.dot(rad_normal, rad_v) - rad_sign*rad_u)
)
self.diffusion_scheme.div(div_tgt,
bc_getter=None,
dirichlet_tags=[], neumann_tags=[])
# entire operator -----------------------------------------------------
def flux(tpair):
return op.project(dcoll, tpair.dd, "all_faces", self.flux(tpair))
return div_tgt.minv_all
else:
return 0
result = (
op.inverse_mass(
dcoll,
flat_obj_array(
-c*op.weak_local_div(dcoll, v),
-c*op.weak_local_grad(dcoll, u)
)
- op.face_mass(
dcoll,
sum(flux(tpair)
for tpair in op.interior_trace_pairs(dcoll, flux_w))
+ flux(op.bv_trace_pair(dcoll, self.dirichlet_tag,
flux_w, dir_bc))
+ flux(op.bv_trace_pair(dcoll, self.neumann_tag,
flux_w, neu_bc))
+ flux(op.bv_trace_pair(dcoll, self.radiation_tag,
flux_w, rad_bc))
)
)
)
# }}}
result[0] = result[0] + self.source_f(actx, dcoll, t)
# entire operator -----------------------------------------------------
nabla = sym.nabla(d)
flux_op = sym.get_flux_operator(self.flux())
return (
- join_fields(
- self.time_sign*self.c*np.dot(nabla, v) - make_diffusion(u)
+ self.source,
-self.time_sign*self.c*(nabla*u) - with_object_array_or_scalar(
make_diffusion, v)
)
+
sym.InverseMassOperator() * (
flux_op(flux_w)
+ flux_op(sym.BoundaryPair(flux_w, dir_bc, self.dirichlet_tag))
+ flux_op(sym.BoundaryPair(flux_w, neu_bc, self.neumann_tag))
+ flux_op(sym.BoundaryPair(flux_w, rad_bc, self.radiation_tag))
))
return result
def check_bc_coverage(self, mesh):
from meshmode.mesh import check_bc_coverage
......@@ -370,9 +350,8 @@ class VariableVelocityStrongWaveOperator(HyperbolicOperator):
self.neumann_tag,
self.radiation_tag])
def max_eigenvalue_expr(self):
import grudge.symbolic as sym
return sym.NodalMax()(sym.CFunction("fabs")(self.c))
def max_characteristic_velocity(self, actx, **kwargs):
return actx.np.abs(actx.thaw(self.c))
# }}}
......
"""
Core DG routines
^^^^^^^^^^^^^^^^
Elementwise differentiation
---------------------------
.. autofunction:: local_grad
.. autofunction:: local_d_dx
.. autofunction:: local_div
Weak derivative operators
-------------------------
.. autofunction:: weak_local_grad
.. autofunction:: weak_local_d_dx
.. autofunction:: weak_local_div
Mass, inverse mass, and face mass operators
-------------------------------------------
.. autofunction:: mass
.. autofunction:: inverse_mass
.. autofunction:: face_mass
Working around documentation tool awkwardness
---------------------------------------------
.. class:: TracePair
See :class:`grudge.trace_pair.TracePair`.
Links to canonical locations of external symbols
------------------------------------------------
(This section only exists because Sphinx does not appear able to resolve
these symbols correctly.)
.. class:: ArrayOrContainer
See :data:`arraycontext.ArrayOrContainer`.
"""
from __future__ import annotations
__copyright__ = """
Copyright (C) 2021 Andreas Kloeckner
Copyright (C) 2021 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from collections.abc import Hashable
from functools import partial
import numpy as np
import modepy as mp
from arraycontext import (
Array,
ArrayContext,
ArrayOrContainer,
map_array_container,
tag_axes,
)
from meshmode.discretization import (
InterpolatoryElementGroupBase,
NodalElementGroupBase,
)
from meshmode.dof_array import DOFArray
from meshmode.transform_metadata import (
DiscretizationDOFAxisTag,
DiscretizationElementAxisTag,
DiscretizationFaceAxisTag,
FirstAxisIsElementsTag,
)
from pytools import keyed_memoize_in
from pytools.obj_array import make_obj_array
import grudge.dof_desc as dof_desc
from grudge.discretization import DiscretizationCollection
from grudge.dof_desc import (
DD_VOLUME_ALL,
DISCR_TAG_BASE,
FACE_RESTR_ALL,
DOFDesc,
VolumeDomainTag,
as_dofdesc,
)
from grudge.interpolation import interp
from grudge.projection import project
from grudge.reductions import (
elementwise_integral,
elementwise_max,
elementwise_min,
elementwise_sum,
integral,
nodal_max,
nodal_max_loc,
nodal_min,
nodal_min_loc,
nodal_sum,
nodal_sum_loc,
norm,
)
from grudge.trace_pair import (
bdry_trace_pair,
bv_trace_pair,
connected_parts,
cross_rank_trace_pairs,
interior_trace_pair,
interior_trace_pairs,
local_interior_trace_pair,
project_tracepair,
tracepair_with_discr_tag,
)
__all__ = (
"bdry_trace_pair",
"bv_trace_pair",
"connected_parts",
"cross_rank_trace_pairs",
"elementwise_integral",
"elementwise_max",
"elementwise_min",
"elementwise_sum",
"face_mass",
"integral",
"interior_trace_pair",
"interior_trace_pairs",
"interp",
"inverse_mass",
"local_d_dx",
"local_div",
"local_grad",
"local_interior_trace_pair",
"mass",
"nodal_max",
"nodal_max_loc",
"nodal_min",
"nodal_min_loc",
"nodal_sum",
"nodal_sum_loc",
"norm",
"project",
"project_tracepair",
"tracepair_with_discr_tag",
"weak_local_d_dx",
"weak_local_div",
"weak_local_grad",
)
# {{{ common derivative "kernels"
def _single_axis_derivative_kernel(
actx, out_discr, in_discr, get_diff_mat, inv_jac_mat, xyz_axis, vec,
*, metric_in_matvec):
# This gets used from both the strong and the weak derivative. These differ
# in three ways:
# - which differentiation matrix gets used,
# - whether inv_jac_mat is pre-multiplied by a factor that includes the
# area element, and
# - whether the chain rule terms ("inv_jac_mat") sit outside (strong)
# or inside (weak) the matrix-vector product that carries out the
# derivative, cf. "metric_in_matvec".
return DOFArray(
actx,
data=tuple(
# r for rst axis
actx.einsum("rej,rij,ej->ei" if metric_in_matvec else "rei,rij,ej->ei",
ijm_i[xyz_axis],
get_diff_mat(
actx,
out_element_group=out_grp,
in_element_group=in_grp),
vec_i,
arg_names=("inv_jac_t", "ref_stiffT_mat", "vec", ),
tagged=(FirstAxisIsElementsTag(),))
for out_grp, in_grp, vec_i, ijm_i in zip(
out_discr.groups,
in_discr.groups,
vec,
inv_jac_mat, strict=True)))
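# Example (illustrative sketch, not part of the original source): the strong
# derivative path of the einsum above ("rei,rij,ej->ei"), spelled out with
# plain numpy for a 1D discretization with affinely-mapped elements. Index r
# runs over reference axes (just one here), e over elements, i/j over nodes;
# the chain-rule factor inv_jac = dr/dx stays outside the reference-space
# matrix-vector product, exactly as in the strong-form branch.
import numpy as np

order = 3
ref_nodes = np.linspace(-1.0, 1.0, order + 1)
vand = np.vander(ref_nodes, increasing=True)              # monomial Vandermonde
dvand = np.zeros_like(vand)                                # d/dr of the monomials
dvand[:, 1:] = vand[:, :-1]*np.arange(1, order + 1)
diff_mat = dvand @ np.linalg.inv(vand)                     # nodal differentiation matrix

h = np.array([0.5, 1.0, 2.0])                              # element sizes
x0 = np.array([0.0, 0.5, 1.5])                             # element left endpoints
x = x0[:, None] + 0.5*h[:, None]*(ref_nodes[None, :] + 1)  # (nelements, nnodes)
inv_jac = np.broadcast_to((2.0/h)[None, :, None], (1, 3, order + 1))

u = x**3
du = np.einsum("rei,rij,ej->ei", inv_jac, diff_mat[None], u)
print(np.max(np.abs(du - 3*x**2)))                          # ~ machine precision: exact for cubics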
def _gradient_kernel(actx, out_discr, in_discr, get_diff_mat, inv_jac_mat, vec,
*, metric_in_matvec):
# See _single_axis_derivative_kernel for comments on the usage scenarios
# (both strong and weak derivative) and their differences.
per_group_grads = [
# r for rst axis
# x for xyz axis
actx.einsum("xrej,rij,ej->xei" if metric_in_matvec else "xrei,rij,ej->xei",
ijm_i,
get_diff_mat(
actx,
out_element_group=out_grp,
in_element_group=in_grp
),
vec_i,
arg_names=("inv_jac_t", "ref_stiffT_mat", "vec"),
tagged=(FirstAxisIsElementsTag(),))
for out_grp, in_grp, vec_i, ijm_i in zip(
out_discr.groups, in_discr.groups, vec,
inv_jac_mat, strict=True)]
return make_obj_array([
DOFArray(actx, data=tuple([ # noqa: C409
pgg_i[xyz_axis] for pgg_i in per_group_grads
]))
for xyz_axis in range(out_discr.ambient_dim)])
def _divergence_kernel(actx, out_discr, in_discr, get_diff_mat, inv_jac_mat, vec,
*, metric_in_matvec):
# See _single_axis_derivative_kernel for comments on the usage scenarios
# (both strong and weak derivative) and their differences.
per_group_divs = [
# r for rst axis
# x for xyz axis
actx.einsum("xrej,rij,xej->ei" if metric_in_matvec else "xrei,rij,xej->ei",
ijm_i,
get_diff_mat(
actx,
out_element_group=out_grp,
in_element_group=in_grp
),
vec_i,
arg_names=("inv_jac_t", "ref_stiffT_mat", "vec"),
tagged=(FirstAxisIsElementsTag(),))
for out_grp, in_grp, vec_i, ijm_i in zip(
out_discr.groups,
in_discr.groups,
vec,
inv_jac_mat, strict=True)]
return DOFArray(actx, data=tuple(per_group_divs))
# }}}
# {{{ Derivative operators
def _reference_derivative_matrices(actx: ArrayContext,
out_element_group: NodalElementGroupBase,
in_element_group: InterpolatoryElementGroupBase) -> Array:
def memoize_key(
out_grp: NodalElementGroupBase, in_grp: InterpolatoryElementGroupBase
) -> Hashable:
return (
out_grp.discretization_key(),
in_grp.discretization_key())
@keyed_memoize_in(
actx, _reference_derivative_matrices,
memoize_key)
def get_ref_derivative_mats(
out_grp: NodalElementGroupBase,
in_grp: InterpolatoryElementGroupBase) -> Array:
return actx.freeze(
actx.tag_axis(
1, DiscretizationDOFAxisTag(),
actx.from_numpy(
np.asarray(
mp.diff_matrices(
in_grp.basis_obj(),
out_grp.unit_nodes,
from_nodes=in_grp.unit_nodes,
)))))
return get_ref_derivative_mats(out_element_group, in_element_group)
def _strong_scalar_grad(dcoll, dd_in, vec):
assert isinstance(dd_in.domain_tag, VolumeDomainTag)
from grudge.geometry import inverse_surface_metric_derivative_mat
discr = dcoll.discr_from_dd(dd_in)
actx = vec.array_context
inverse_jac_mat = inverse_surface_metric_derivative_mat(actx, dcoll, dd=dd_in,
_use_geoderiv_connection=actx.supports_nonscalar_broadcasting)
return _gradient_kernel(actx, discr, discr,
_reference_derivative_matrices, inverse_jac_mat, vec,
metric_in_matvec=False)
def _strong_scalar_div(dcoll, dd, vecs):
from arraycontext import get_container_context_recursively, serialize_container
from grudge.geometry import inverse_surface_metric_derivative_mat
assert isinstance(vecs, np.ndarray)
assert vecs.shape == (dcoll.ambient_dim,)
discr = dcoll.discr_from_dd(dd)
actx = get_container_context_recursively(vecs)
vec = actx.np.stack([v for k, v in serialize_container(vecs)])
inverse_jac_mat = inverse_surface_metric_derivative_mat(actx, dcoll, dd=dd,
_use_geoderiv_connection=actx.supports_nonscalar_broadcasting)
return _divergence_kernel(actx, discr, discr,
_reference_derivative_matrices, inverse_jac_mat, vec,
metric_in_matvec=False)
def local_grad(
dcoll: DiscretizationCollection, *args, nested=False) -> ArrayOrContainer:
r"""Return the element-local gradient of a function :math:`f` represented
by *vec*:
.. math::
\nabla|_E f = \left(
\partial_x|_E f, \partial_y|_E f, \partial_z|_E f \right)
May be called with ``(vec)`` or ``(dd_in, vec)``.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:arg dd_in: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:arg nested: return nested object arrays instead of a single multidimensional
array if *vec* is non-scalar.
:returns: an object array (possibly nested) of
:class:`~meshmode.dof_array.DOFArray`\ s or
:class:`~arraycontext.ArrayContainer` of object arrays.
"""
if len(args) == 1:
vec, = args
dd_in = DD_VOLUME_ALL
elif len(args) == 2:
dd_in, vec = args
else:
raise TypeError("invalid number of arguments")
from grudge.tools import rec_map_subarrays
return rec_map_subarrays(
partial(_strong_scalar_grad, dcoll, dd_in),
(), (dcoll.ambient_dim,),
vec, scalar_cls=DOFArray, return_nested=nested,)
def local_d_dx(
dcoll: DiscretizationCollection, xyz_axis, *args) -> ArrayOrContainer:
r"""Return the element-local derivative along axis *xyz_axis* of a
function :math:`f` represented by *vec*:
.. math::
\frac{\partial f}{\partial \lbrace x,y,z\rbrace}\Big|_E
May be called with ``(vec)`` or ``(dd, vec)``.
:arg xyz_axis: an integer indicating the axis along which the derivative
is taken.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
"""
if len(args) == 1:
vec, = args
dd = DD_VOLUME_ALL
elif len(args) == 2:
dd, vec = args
else:
raise TypeError("invalid number of arguments")
if not isinstance(vec, DOFArray):
return map_array_container(partial(local_d_dx, dcoll, xyz_axis, dd), vec)
discr = dcoll.discr_from_dd(dd)
actx = vec.array_context
from grudge.geometry import inverse_surface_metric_derivative_mat
inverse_jac_mat = inverse_surface_metric_derivative_mat(actx, dcoll, dd=dd,
_use_geoderiv_connection=actx.supports_nonscalar_broadcasting)
return _single_axis_derivative_kernel(
actx, discr, discr,
_reference_derivative_matrices, inverse_jac_mat, xyz_axis, vec,
metric_in_matvec=False)
def local_div(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
r"""Return the element-local divergence of the vector function
:math:`\mathbf{f}` represented by *vecs*:
.. math::
\nabla|_E \cdot \mathbf{f} = \sum_{i=1}^d \partial_{x_i}|_E \mathbf{f}_i
May be called with ``(vecs)`` or ``(dd, vecs)``.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:arg vecs: an object array of
:class:`~meshmode.dof_array.DOFArray`\s or an
:class:`~arraycontext.ArrayContainer` object
with object array entries. The last axis of the array
must have length matching the volume dimension.
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
"""
if len(args) == 1:
vecs, = args
dd = DD_VOLUME_ALL
elif len(args) == 2:
dd, vecs = args
else:
raise TypeError("invalid number of arguments")
from grudge.tools import rec_map_subarrays
return rec_map_subarrays(
lambda vec: _strong_scalar_div(dcoll, dd, vec),
(dcoll.ambient_dim,), (),
vecs, scalar_cls=DOFArray)
# }}}
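# Example (illustrative usage sketch, not part of the original source): the
# strong-form operators above compose directly. The helper name below is
# hypothetical; it assumes the caller supplies a DiscretizationCollection
# `dcoll` and a scalar DOFArray `u` on the base volume discretization.
import grudge.op as op

def strong_laplacian_estimate(dcoll, u):
    grad_u = op.local_grad(dcoll, u)       # object array of d DOFArrays
    return op.local_div(dcoll, grad_u)     # contract back to a scalar DOFArray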
# {{{ Weak derivative operators
def _reference_stiffness_transpose_matrices(
actx: ArrayContext, out_element_group, in_element_group):
@keyed_memoize_in(
actx, _reference_stiffness_transpose_matrices,
lambda out_grp, in_grp: (out_grp.discretization_key(),
in_grp.discretization_key()))
def get_ref_stiffness_transpose_mat(out_grp, in_grp):
if in_grp == out_grp:
mmat = mp.mass_matrix(out_grp.basis_obj(), out_grp.unit_nodes)
diff_matrices = mp.diff_matrices(out_grp.basis_obj(), out_grp.unit_nodes)
return actx.freeze(
actx.tag_axis(1, DiscretizationDOFAxisTag(),
actx.from_numpy(
np.asarray(
[dmat.T @ mmat.T for dmat in diff_matrices]))))
from modepy import multi_vandermonde, vandermonde
basis = out_grp.basis_obj()
vand = vandermonde(basis.functions, out_grp.unit_nodes)
grad_vand = multi_vandermonde(basis.gradients, in_grp.unit_nodes)
vand_inv_t = np.linalg.inv(vand).T
if not isinstance(grad_vand, tuple):
# NOTE: special case for 1d
grad_vand = (grad_vand,)
weights = in_grp.quadrature_rule().weights
return actx.freeze(
actx.from_numpy(
np.einsum(
"c,bz,acz->abc",
weights,
vand_inv_t,
grad_vand
).copy() # contigify the array
)
)
return get_ref_stiffness_transpose_mat(out_element_group,
in_element_group)
def _weak_scalar_grad(dcoll, dd_in, vec):
from grudge.geometry import inverse_surface_metric_derivative_mat
dd_in = as_dofdesc(dd_in)
in_discr = dcoll.discr_from_dd(dd_in)
out_discr = dcoll.discr_from_dd(dd_in.with_discr_tag(DISCR_TAG_BASE))
actx = vec.array_context
inverse_jac_mat = inverse_surface_metric_derivative_mat(actx, dcoll, dd=dd_in,
times_area_element=True,
_use_geoderiv_connection=actx.supports_nonscalar_broadcasting)
return _gradient_kernel(actx, out_discr, in_discr,
_reference_stiffness_transpose_matrices, inverse_jac_mat, vec,
metric_in_matvec=True)
def _weak_scalar_div(dcoll, dd_in, vecs):
from arraycontext import get_container_context_recursively, serialize_container
from grudge.geometry import inverse_surface_metric_derivative_mat
assert isinstance(vecs, np.ndarray)
assert vecs.shape == (dcoll.ambient_dim,)
in_discr = dcoll.discr_from_dd(dd_in)
out_discr = dcoll.discr_from_dd(dd_in.with_discr_tag(DISCR_TAG_BASE))
actx = get_container_context_recursively(vecs)
vec = actx.np.stack([v for k, v in serialize_container(vecs)])
inverse_jac_mat = inverse_surface_metric_derivative_mat(actx, dcoll, dd=dd_in,
times_area_element=True,
_use_geoderiv_connection=actx.supports_nonscalar_broadcasting)
return _divergence_kernel(actx, out_discr, in_discr,
_reference_stiffness_transpose_matrices, inverse_jac_mat, vec,
metric_in_matvec=True)
def weak_local_grad(
dcoll: DiscretizationCollection, *args, nested=False) -> ArrayOrContainer:
r"""Return the element-local weak gradient of the volume function
represented by *vec*.
May be called with ``(vec)`` or ``(dd_in, vec)``.
Specifically, the function returns an object array where the :math:`i`-th
component is the weak derivative with respect to the :math:`i`-th coordinate
of a scalar function :math:`f`. See :func:`weak_local_d_dx` for further
information. For non-scalar :math:`f`, the function will return a nested object
array containing the component-wise weak derivatives.
:arg dd_in: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:arg nested: return nested object arrays instead of a single multidimensional
array if *vec* is non-scalar
:returns: an object array (possibly nested) of
:class:`~meshmode.dof_array.DOFArray`\ s or
:class:`~arraycontext.ArrayContainer` of object arrays.
"""
if len(args) == 1:
vecs, = args
dd_in = DD_VOLUME_ALL
elif len(args) == 2:
dd_in, vecs = args
else:
raise TypeError("invalid number of arguments")
from grudge.tools import rec_map_subarrays
return rec_map_subarrays(
partial(_weak_scalar_grad, dcoll, dd_in),
(), (dcoll.ambient_dim,),
vecs, scalar_cls=DOFArray, return_nested=nested)
def weak_local_d_dx(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
r"""Return the element-local weak derivative along axis *xyz_axis* of the
volume function represented by *vec*.
May be called with ``(xyz_axis, vec)`` or ``(dd_in, xyz_axis, vec)``.
Specifically, this function computes the volume contribution of the
weak derivative in the :math:`i`-th component (specified by *xyz_axis*)
of a function :math:`f`, in each element :math:`E`, with respect to polynomial
test functions :math:`\phi`:
.. math::
\int_E \partial_i\phi\,f\,\mathrm{d}x \sim
\mathbf{D}_{E,i}^T \mathbf{M}_{E}^T\mathbf{f}|_E,
where :math:`\mathbf{D}_{E,i}` is the polynomial differentiation matrix on
an :math:`E` for the :math:`i`-th spatial coordinate, :math:`\mathbf{M}_E`
is the elemental mass matrix (see :func:`mass` for more information), and
:math:`\mathbf{f}|_E` is a vector of coefficients for :math:`f` on :math:`E`.
:arg dd_in: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:arg xyz_axis: an integer indicating the axis along which the derivative
is taken.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
"""
if len(args) == 2:
xyz_axis, vec = args
dd_in = dof_desc.DD_VOLUME_ALL
elif len(args) == 3:
dd_in, xyz_axis, vec = args
else:
raise TypeError("invalid number of arguments")
if not isinstance(vec, DOFArray):
return map_array_container(
partial(weak_local_d_dx, dcoll, dd_in, xyz_axis),
vec
)
from grudge.geometry import inverse_surface_metric_derivative_mat
dd_in = as_dofdesc(dd_in)
in_discr = dcoll.discr_from_dd(dd_in)
out_discr = dcoll.discr_from_dd(dd_in.with_discr_tag(DISCR_TAG_BASE))
actx = vec.array_context
inverse_jac_mat = inverse_surface_metric_derivative_mat(actx, dcoll, dd=dd_in,
times_area_element=True,
_use_geoderiv_connection=actx.supports_nonscalar_broadcasting)
return _single_axis_derivative_kernel(
actx, out_discr, in_discr, _reference_stiffness_transpose_matrices,
inverse_jac_mat, xyz_axis, vec,
metric_in_matvec=True)
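# Example (illustrative sketch, not part of the original source): a plain
# numpy check of the identity in the docstring above on the 1D reference
# element [-1, 1]: applying D^T M to nodal values of f reproduces the weak
# derivative integrals int_E l_i'(x) f(x) dx for the Lagrange basis l_i.
import numpy as np

order = 4
nodes = np.cos(np.pi*np.arange(order + 1)/order)           # Chebyshev nodes on [-1, 1]
coeffs = np.linalg.inv(np.vander(nodes, increasing=True))  # l_i(x) = sum_k coeffs[k, i] x^k
qx, qw = np.polynomial.legendre.leggauss(2*order + 2)       # quadrature, exact here

def lagrange(i, x):
    return np.polynomial.polynomial.polyval(x, coeffs[:, i])

def lagrange_deriv(i, x):
    return np.polynomial.polynomial.polyval(
        x, np.polynomial.polynomial.polyder(coeffs[:, i]))

nn = order + 1
mass = np.array([[np.sum(qw*lagrange(i, qx)*lagrange(j, qx)) for j in range(nn)]
                 for i in range(nn)])
diff = np.array([[lagrange_deriv(j, nodes[i]) for j in range(nn)]
                 for i in range(nn)])                        # D_ij = l_j'(x_i)

f = nodes**3 - nodes                                         # nodal values of a cubic
weak_ref = np.array([np.sum(qw*lagrange_deriv(i, qx)*(qx**3 - qx)) for i in range(nn)])
print(np.max(np.abs(diff.T @ mass @ f - weak_ref)))          # ~ machine precision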
def weak_local_div(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
r"""Return the element-local weak divergence of the vector volume function
represented by *vecs*.
May be called with ``(vecs)`` or ``(dd, vecs)``.
Specifically, this function computes the volume contribution of the
weak divergence of a vector function :math:`\mathbf{f}`, in each element
:math:`E`, with respect to polynomial test functions :math:`\phi`:
.. math::
\int_E \nabla \phi \cdot \mathbf{f}\,\mathrm{d}x \sim
\sum_{i=1}^d \mathbf{D}_{E,i}^T \mathbf{M}_{E}^T\mathbf{f}_i|_E,
where :math:`\mathbf{D}_{E,i}` is the polynomial differentiation matrix on
an :math:`E` for the :math:`i`-th spatial coordinate, and :math:`\mathbf{M}_E`
is the elemental mass matrix (see :func:`mass` for more information).
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:arg vecs: an object array of
:class:`~meshmode.dof_array.DOFArray`\s or an
:class:`~arraycontext.ArrayContainer` object
with object array entries. The last axis of the array
must have length matching the volume dimension.
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
"""
if len(args) == 1:
vecs, = args
dd_in = DD_VOLUME_ALL
elif len(args) == 2:
dd_in, vecs = args
else:
raise TypeError("invalid number of arguments")
from grudge.tools import rec_map_subarrays
return rec_map_subarrays(
lambda vec: _weak_scalar_div(dcoll, dd_in, vec),
(dcoll.ambient_dim,), (),
vecs, scalar_cls=DOFArray)
# }}}
# {{{ Mass operator
def reference_mass_matrix(actx: ArrayContext, out_element_group, in_element_group):
@keyed_memoize_in(
actx, reference_mass_matrix,
lambda out_grp, in_grp: (out_grp.discretization_key(),
in_grp.discretization_key()))
def get_ref_mass_mat(out_grp, in_grp):
if out_grp == in_grp:
return actx.freeze(
actx.from_numpy(
mp.mass_matrix(out_grp.basis_obj(), out_grp.unit_nodes)
)
)
from modepy import vandermonde
basis = out_grp.basis_obj()
vand = vandermonde(basis.functions, out_grp.unit_nodes)
o_vand = vandermonde(basis.functions, in_grp.unit_nodes)
vand_inv_t = np.linalg.inv(vand).T
weights = in_grp.quadrature_rule().weights
return actx.freeze(
actx.tag_axis(0, DiscretizationDOFAxisTag(),
actx.from_numpy(
np.asarray(
np.einsum("j,ik,jk->ij", weights, vand_inv_t, o_vand),
order="C"))))
return get_ref_mass_mat(out_element_group, in_element_group)
def _apply_mass_operator(
dcoll: DiscretizationCollection, dd_out, dd_in, vec):
if not isinstance(vec, DOFArray):
return map_array_container(
partial(_apply_mass_operator, dcoll, dd_out, dd_in), vec
)
from grudge.geometry import area_element
in_discr = dcoll.discr_from_dd(dd_in)
out_discr = dcoll.discr_from_dd(dd_out)
actx = vec.array_context
area_elements = area_element(actx, dcoll, dd=dd_in,
_use_geoderiv_connection=actx.supports_nonscalar_broadcasting)
return DOFArray(
actx,
data=tuple(
actx.einsum("ij,ej,ej->ei",
reference_mass_matrix(
actx,
out_element_group=out_grp,
in_element_group=in_grp
),
ae_i,
vec_i,
arg_names=("mass_mat", "jac", "vec"),
tagged=(FirstAxisIsElementsTag(),))
for in_grp, out_grp, ae_i, vec_i in zip(
in_discr.groups, out_discr.groups, area_elements, vec,
strict=True)
)
)
def mass(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
r"""Return the action of the DG mass matrix on a vector (or vectors)
of :class:`~meshmode.dof_array.DOFArray`\ s, *vec*. In the case of
*vec* being an :class:`~arraycontext.ArrayContainer`,
the mass operator is applied component-wise.
May be called with ``(vec)`` or ``(dd_in, vec)``.
Specifically, this function applies the mass matrix elementwise on a
vector of coefficients :math:`\mathbf{f}` via:
:math:`\mathbf{M}_{E}\mathbf{f}|_E`, where
.. math::
\left(\mathbf{M}_{E}\right)_{ij} = \int_E \phi_i \cdot \phi_j\,\mathrm{d}x,
where :math:`\phi_i` are local polynomial basis functions on :math:`E`.
:arg dd_in: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` like *vec*.
"""
if len(args) == 1:
vec, = args
dd_in = dof_desc.DD_VOLUME_ALL
elif len(args) == 2:
dd_in, vec = args
else:
raise TypeError("invalid number of arguments")
dd_out = dd_in.with_discr_tag(DISCR_TAG_BASE)
return _apply_mass_operator(dcoll, dd_out, dd_in, vec)
# }}}
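# Example (illustrative sketch, not part of the original source): the mass
# matrix entries M_ij = int_E l_i l_j dx computed by quadrature on the 1D
# reference element, plus the sanity check that, because the Lagrange basis
# sums to one, 1^T M f equals the integral of the interpolant of f.
import numpy as np

order = 2
nodes = np.linspace(-1.0, 1.0, order + 1)
coeffs = np.linalg.inv(np.vander(nodes, increasing=True))    # Lagrange coefficients
qx, qw = np.polynomial.legendre.leggauss(order + 1)           # exact to degree 2*order+1

basis_at_q = np.array([np.polynomial.polynomial.polyval(qx, coeffs[:, i])
                       for i in range(order + 1)])            # (nnodes, nquad)
mass = np.einsum("q,iq,jq->ij", qw, basis_at_q, basis_at_q)

f = nodes**2                                                   # nodal values of x^2
print(np.ones(order + 1) @ mass @ f)                           # ~ 2/3 = int_{-1}^{1} x^2 dx
print(np.allclose(mass, mass.T))                               # the mass matrix is symmetric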
# {{{ Mass inverse operator
def reference_inverse_mass_matrix(actx: ArrayContext, element_group):
@keyed_memoize_in(
actx, reference_inverse_mass_matrix,
lambda grp: grp.discretization_key())
def get_ref_inv_mass_mat(grp):
from modepy import inverse_mass_matrix
basis = grp.basis_obj()
return actx.freeze(
actx.tag_axis(0, DiscretizationDOFAxisTag(),
actx.from_numpy(
np.asarray(
inverse_mass_matrix(basis, grp.unit_nodes),
order="C"))))
return get_ref_inv_mass_mat(element_group)
def _apply_inverse_mass_operator(
dcoll: DiscretizationCollection, dd_out, dd_in, vec):
if not isinstance(vec, DOFArray):
return map_array_container(
partial(_apply_inverse_mass_operator, dcoll, dd_out, dd_in), vec
)
from grudge.geometry import area_element
if dd_out != dd_in:
raise ValueError(
"Cannot compute inverse of a mass matrix mapping "
"between different element groups; inverse is not "
"guaranteed to be well-defined"
)
actx = vec.array_context
discr = dcoll.discr_from_dd(dd_in)
inv_area_elements = 1./area_element(actx, dcoll, dd=dd_in,
_use_geoderiv_connection=actx.supports_nonscalar_broadcasting)
group_data = [
# Based on https://arxiv.org/pdf/1608.03836.pdf
# true_Minv ~ ref_Minv * ref_M * (1/jac_det) * ref_Minv
actx.einsum("ei,ij,ej->ei",
jac_inv,
reference_inverse_mass_matrix(actx, element_group=grp),
vec_i,
tagged=(FirstAxisIsElementsTag(),))
for grp, jac_inv, vec_i in zip(
discr.groups, inv_area_elements, vec, strict=True)]
return DOFArray(actx, data=tuple(group_data))
def inverse_mass(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
r"""Return the action of the DG mass matrix inverse on a vector
(or vectors) of :class:`~meshmode.dof_array.DOFArray`\ s, *vec*.
In the case of *vec* being an :class:`~arraycontext.ArrayContainer`,
the inverse mass operator is applied component-wise.
For affine elements :math:`E`, the element-wise mass inverse
is computed directly as the inverse of the (physical) mass matrix:
.. math::
\left(\mathbf{M}_{J^e}\right)_{ij} =
\int_{\widehat{E}} \widehat{\phi}_i\cdot\widehat{\phi}_j J^e
\mathrm{d}\widehat{x},
where :math:`\widehat{\phi}_i` are basis functions over the reference
element :math:`\widehat{E}`, and :math:`J^e` is the (constant) Jacobian
scaling factor (see :func:`grudge.geometry.area_element`).
For non-affine :math:`E`, :math:`J^e` is not constant. In this case, a
weight-adjusted approximation is used instead following [Chan_2016]_:
.. math::
\mathbf{M}_{J^e}^{-1} \approx
\widehat{\mathbf{M}}^{-1}\mathbf{M}_{1/J^e}\widehat{\mathbf{M}}^{-1},
where :math:`\widehat{\mathbf{M}}` is the reference mass matrix on
:math:`\widehat{E}`.
May be called with ``(vec)`` or ``(dd, vec)``.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` like *vec*.
"""
if len(args) == 1:
vec, = args
dd = DD_VOLUME_ALL
elif len(args) == 2:
dd, vec = args
else:
raise TypeError("invalid number of arguments")
return _apply_inverse_mass_operator(dcoll, dd, dd, vec)
# }}}
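# Example (illustrative sketch, not part of the original source): a numpy
# check of the weight-adjusted formula in the docstring above for the affine
# case. With a constant Jacobian J, M_{1/J} = (1/J) Mhat, so
# Mhat^{-1} M_{1/J} Mhat^{-1} = (J Mhat)^{-1} exactly; the formula only
# becomes approximate once J varies over the element.
import numpy as np

order = 3
nodes = np.linspace(-1.0, 1.0, order + 1)
coeffs = np.linalg.inv(np.vander(nodes, increasing=True))
qx, qw = np.polynomial.legendre.leggauss(order + 2)
basis_at_q = np.array([np.polynomial.polynomial.polyval(qx, coeffs[:, i])
                       for i in range(order + 1)])

mhat = np.einsum("q,iq,jq->ij", qw, basis_at_q, basis_at_q)    # reference mass matrix
jac = 0.25                                                      # constant area element
m_inv_jac = np.einsum("q,iq,jq->ij", qw/jac, basis_at_q, basis_at_q)

mhat_inv = np.linalg.inv(mhat)
weight_adjusted = mhat_inv @ m_inv_jac @ mhat_inv
exact = np.linalg.inv(jac*mhat)
print(np.max(np.abs(weight_adjusted - exact)))                  # ~ machine precision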
# {{{ Face mass operator
def reference_face_mass_matrix(
actx: ArrayContext, face_element_group, vol_element_group, dtype):
@keyed_memoize_in(
actx, reference_face_mass_matrix,
lambda face_grp, vol_grp: (face_grp.discretization_key(),
vol_grp.discretization_key()))
def get_ref_face_mass_mat(face_grp, vol_grp):
nfaces = vol_grp.mesh_el_group.nfaces
assert face_grp.nelements == nfaces * vol_grp.nelements
matrix = np.empty(
(vol_grp.nunit_dofs,
nfaces,
face_grp.nunit_dofs),
dtype=dtype
)
import modepy as mp
from meshmode.discretization import ElementGroupWithBasis
from meshmode.discretization.poly_element import QuadratureSimplexElementGroup
n = vol_grp.order
m = face_grp.order
vol_basis = vol_grp.basis_obj()
faces = mp.faces_for_shape(vol_grp.shape)
for iface, face in enumerate(faces):
# If the face group is defined on a higher-order
# quadrature grid, use the underlying quadrature rule
if isinstance(face_grp, QuadratureSimplexElementGroup):
face_quadrature = face_grp.quadrature_rule()
if face_quadrature.exact_to < m:
raise ValueError(
"The face quadrature rule is only exact for polynomials "
f"of total degree {face_quadrature.exact_to}. Please "
"ensure a quadrature rule is used that is at least "
f"exact for degree {m}."
)
else:
# NOTE: This handles the general case where
# volume and surface quadrature rules may have different
# integration orders
face_quadrature = mp.quadrature_for_space(
mp.space_for_shape(face, 2*max(n, m)),
face
)
# If the group has a nodal basis and is unisolvent,
# we use the basis on the face to compute the face mass matrix
if (isinstance(face_grp, ElementGroupWithBasis)
and face_grp.space.space_dim == face_grp.nunit_dofs):
face_basis = face_grp.basis_obj()
# Sanity check for face quadrature accuracy. Not integrating
# degree N + M polynomials here is asking for a bad time.
if face_quadrature.exact_to < m + n:
raise ValueError(
"The face quadrature rule is only exact for polynomials "
f"of total degree {face_quadrature.exact_to}. Please "
"ensure a quadrature rule is used that is at least "
f"exact for degree {n+m}."
)
matrix[:, iface, :] = mp.nodal_mass_matrix_for_face(
face, face_quadrature,
face_basis.functions, vol_basis.functions,
vol_grp.unit_nodes,
face_grp.unit_nodes,
)
else:
# Otherwise, we use a routine that is purely quadrature-based
# (no need for explicit face basis functions)
matrix[:, iface, :] = mp.nodal_quad_mass_matrix_for_face(
face,
face_quadrature,
vol_basis.functions,
vol_grp.unit_nodes,
)
return actx.freeze(
tag_axes(actx, {
0: DiscretizationDOFAxisTag(),
2: DiscretizationDOFAxisTag()
},
actx.from_numpy(matrix)))
return get_ref_face_mass_mat(face_element_group, vol_element_group)
def _apply_face_mass_operator(dcoll: DiscretizationCollection, dd_in, vec):
if not isinstance(vec, DOFArray):
return map_array_container(
partial(_apply_face_mass_operator, dcoll, dd_in), vec
)
from grudge.geometry import area_element
dd_out = DOFDesc(
VolumeDomainTag(dd_in.domain_tag.volume_tag),
DISCR_TAG_BASE)
volm_discr = dcoll.discr_from_dd(dd_out)
face_discr = dcoll.discr_from_dd(dd_in)
dtype = vec.entry_dtype
actx = vec.array_context
assert len(face_discr.groups) == len(volm_discr.groups)
surf_area_elements = area_element(actx, dcoll, dd=dd_in,
_use_geoderiv_connection=actx.supports_nonscalar_broadcasting)
return DOFArray(
actx,
data=tuple(
actx.einsum("ifj,fej,fej->ei",
reference_face_mass_matrix(
actx,
face_element_group=afgrp,
vol_element_group=vgrp,
dtype=dtype),
actx.tag_axis(1, DiscretizationElementAxisTag(),
surf_ae_i.reshape(
vgrp.mesh_el_group.nfaces,
vgrp.nelements,
surf_ae_i.shape[-1])),
actx.tag_axis(0, DiscretizationFaceAxisTag(),
vec_i.reshape(
vgrp.mesh_el_group.nfaces,
vgrp.nelements,
afgrp.nunit_dofs)),
arg_names=("ref_face_mass_mat", "jac_surf", "vec"),
tagged=(FirstAxisIsElementsTag(),))
for vgrp, afgrp, vec_i, surf_ae_i in zip(volm_discr.groups,
face_discr.groups,
vec,
surf_area_elements,
strict=True)))
def face_mass(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
r"""Return the action of the DG face mass matrix on a vector (or vectors)
of :class:`~meshmode.dof_array.DOFArray`\ s, *vec*. In the case of
*vec* being an arbitrary :class:`~arraycontext.ArrayContainer`,
the face mass operator is applied component-wise.
May be called with ``(vec)`` or ``(dd_in, vec)``.
Specifically, this function applies the face mass matrix elementwise on a
vector of coefficients :math:`\mathbf{f}` as the sum of contributions for
each face :math:`f \subset \partial E`:
.. math::
\sum_{f=1}^{N_{\text{faces}} } \mathbf{M}_{f, E}\mathbf{f}|_f,
where
.. math::
\left(\mathbf{M}_{f, E}\right)_{ij} =
\int_{f \subset \partial E} \phi_i(s)\psi_j(s)\,\mathrm{d}s,
where :math:`\phi_i` are (volume) polynomial basis functions on :math:`E`
evaluated on the face :math:`f`, and :math:`\psi_j` are basis functions for
a polynomial space defined on :math:`f`.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base ``"all_faces"`` discretization if not provided.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` like *vec*.
"""
if len(args) == 1:
vec, = args
dd_in = DD_VOLUME_ALL.trace(FACE_RESTR_ALL)
elif len(args) == 2:
dd_in, vec = args
else:
raise TypeError("invalid number of arguments")
return _apply_face_mass_operator(dcoll, dd_in, vec)
# }}}
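# Example (illustrative sketch, not part of the original source): the face
# mass matrix entries int_f phi_i psi_j ds computed with plain numpy for the
# bottom edge (y = 0) of the reference triangle with vertices (0,0), (1,0),
# (0,1), using the P1 volume basis {1-x-y, x, y} and the P1 face basis
# {1-s, s} in the edge parameter s.
import numpy as np

qs, qw = np.polynomial.legendre.leggauss(3)
s = 0.5*(qs + 1.0)          # map quadrature points to the edge parameter [0, 1]
w = 0.5*qw                  # include the Jacobian of that mapping

# volume basis restricted to the edge (x, y) = (s, 0)
vol_basis_on_face = np.array([1.0 - s, s, np.zeros_like(s)])
face_basis = np.array([1.0 - s, s])

face_mass = np.einsum("q,iq,jq->ij", w, vol_basis_on_face, face_basis)
print(face_mass)            # [[1/3, 1/6], [1/6, 1/3], [0, 0]]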
# vim: foldmethod=marker
"""Helpers for estimating a stable time step."""
"""
.. currentmodule:: grudge.op
Projections
-----------
.. autofunction:: project
"""
from __future__ import division, print_function
__copyright__ = "Copyright (C) 2015 Andreas Kloeckner"
from __future__ import annotations
__copyright__ = """
Copyright (C) 2021 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
......@@ -24,71 +34,52 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pytools import memoize_on_first_arg
from meshmode.discretization.poly_element import PolynomialWarpAndBlendElementGroup
import numpy.linalg as la
class WarpAndBlendTimestepInfo(object):
@staticmethod
def dt_non_geometric_factor(discr, grp):
if grp.dim == 1:
if grp.order == 0:
return 1
else:
unodes = grp.unit_nodes
return la.norm(unodes[0] - unodes[1]) * 0.85
else:
unodes = grp.unit_nodes
vertex_indices = grp.vertex_indices()
return 2 / 3 * \
min(min(min(
la.norm(unodes[face_node_index]-unodes[vertex_index])
for vertex_index in vertex_indices
if vertex_index != face_node_index)
for face_node_index in face_indices)
for face_indices in grp.face_indices())
@staticmethod
def dt_geometric_factor(discr, grp):
if grp.dim == 1:
return abs(el.map.jacobian())
elif grp.dim == 2:
area = abs(2 * el.map.jacobian())
semiperimeter = sum(la.norm(vertices[vi1] - vertices[vi2])
for vi1, vi2 in [(0, 1), (1, 2), (2, 0)])/2
return area / semiperimeter
elif grp.dim == 3:
result = abs(el.map.jacobian())/max(abs(fj) for fj in el.face_jacobians)
if grp.order in [1, 2]:
from warnings import warn
warn("cowardly halving timestep for order 1 and 2 tets "
"to avoid CFL issues")
result /= 2
return result
else:
raise NotImplementedError("cannot estimate timestep for "
"%d-dimensional elements" % grp.dim)
_GROUP_TYPE_TO_INFO = {
PolynomialWarpAndBlendElementGroup: WarpAndBlendTimestepInfo
}
@memoize_on_first_arg
def dt_non_geometric_factor(discr):
return min(
_GROUP_TYPE_TO_INFO[type(grp)].dt_non_geometric_factor(discr, grp)
for grp in discr.groups)
@memoize_on_first_arg
def dt_geometric_factor(discr):
return min(
_GROUP_TYPE_TO_INFO[type(grp)].dt_geometric_factor(discr, grp)
for grp in discr.groups)
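# A small self-contained sketch of the 2D "geometric factor" computed above:
# for a single triangle with explicit (made-up) vertex coordinates, the factor
# is the ratio of area to semiperimeter, i.e. the triangle's inradius.
def _example_triangle_geometric_factor():
    import numpy as np
    import numpy.linalg as la

    vertices = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])

    # area from the cross product of two edge vectors
    e1 = vertices[1] - vertices[0]
    e2 = vertices[2] - vertices[0]
    area = abs(e1[0]*e2[1] - e1[1]*e2[0]) / 2

    semiperimeter = sum(
        la.norm(vertices[vi1] - vertices[vi2])
        for vi1, vi2 in [(0, 1), (1, 2), (2, 0)]) / 2

    # approximately 0.2929 for this right triangle
    return area / semiperimeter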
from arraycontext import ArrayOrContainerOrScalarT
from grudge.discretization import DiscretizationCollection
from grudge.dof_desc import (
BoundaryDomainTag,
ConvertibleToDOFDesc,
VolumeDomainTag,
as_dofdesc,
)
def project(
dcoll: DiscretizationCollection,
src: ConvertibleToDOFDesc,
tgt: ConvertibleToDOFDesc, vec: ArrayOrContainerOrScalarT
) -> ArrayOrContainerOrScalarT:
"""Project from one discretization to another, e.g. from the
volume to the boundary, or from the base to an overintegrated
quadrature discretization.
:arg src: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
:arg tgt: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` like *vec*.
"""
# {{{ process dofdesc arguments
src_dofdesc = as_dofdesc(src)
contextual_volume_tag = None
if isinstance(src_dofdesc.domain_tag, VolumeDomainTag):
contextual_volume_tag = src_dofdesc.domain_tag.tag
elif isinstance(src_dofdesc.domain_tag, BoundaryDomainTag):
contextual_volume_tag = src_dofdesc.domain_tag.volume_tag
tgt_dofdesc = as_dofdesc(tgt, _contextual_volume_tag=contextual_volume_tag)
del src
del tgt
# }}}
if src_dofdesc == tgt_dofdesc:
return vec
return dcoll.connection_from_dds(src_dofdesc, tgt_dofdesc)(vec)
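# A minimal usage sketch, assuming *dcoll* and a volume DOFArray *u* exist:
# restrict the field to the exterior boundary. BTAG_ALL is convertible to a
# DOFDesc, so it can be passed directly as the target.
def _example_project_to_boundary(dcoll, u):
    """Restrict a volume field to the domain boundary (illustrative only)."""
    from meshmode.mesh import BTAG_ALL

    from grudge.dof_desc import DD_VOLUME_ALL

    return project(dcoll, DD_VOLUME_ALL, BTAG_ALL, u)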
"""
.. currentmodule:: grudge.op
Nodal Reductions
----------------
.. note::
In a distributed-memory setting, these reductions automatically
reduce over all ranks involved, and return the same value on
all ranks, in the manner of an MPI ``allreduce``.
.. autofunction:: norm
.. autofunction:: nodal_sum
.. autofunction:: nodal_min
.. autofunction:: nodal_max
.. autofunction:: integral
Rank-local reductions
----------------------
.. autofunction:: nodal_sum_loc
.. autofunction:: nodal_min_loc
.. autofunction:: nodal_max_loc
Elementwise reductions
----------------------
.. autofunction:: elementwise_sum
.. autofunction:: elementwise_max
.. autofunction:: elementwise_min
.. autofunction:: elementwise_integral
"""
from __future__ import annotations
__copyright__ = """
Copyright (C) 2021 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from functools import partial, reduce
import numpy as np
from arraycontext import (
ArrayOrContainer,
Scalar,
make_loopy_program,
map_array_container,
serialize_container,
tag_axes,
)
from meshmode.dof_array import DOFArray
from meshmode.transform_metadata import (
DiscretizationDOFAxisTag,
DiscretizationElementAxisTag,
)
from pymbolic import Number, RealNumber
from pytools import memoize_in
import grudge.dof_desc as dof_desc
from grudge.array_context import MPIBasedArrayContext
from grudge.discretization import DiscretizationCollection
# {{{ Nodal reductions
def norm(dcoll: DiscretizationCollection, vec, p, dd=None) -> RealNumber:
r"""Return the vector p-norm of a function represented
by its vector of degrees of freedom *vec*.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:arg p: an integer denoting the order of the integral norm. Currently,
only values of 2 or `numpy.inf` are supported.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:returns: a nonnegative scalar denoting the norm.
"""
if dd is None:
dd = dof_desc.DD_VOLUME_ALL
from arraycontext import get_container_context_recursively
actx = get_container_context_recursively(vec)
assert actx is not None
dd = dof_desc.as_dofdesc(dd)
if p == 2:
from grudge.op import _apply_mass_operator
return actx.np.sqrt(
actx.np.abs(
nodal_sum(
dcoll, dd,
actx.np.conjugate(vec)
* _apply_mass_operator(dcoll, dd, dd, vec))))
elif p == np.inf:
return nodal_max(dcoll, dd, actx.np.abs(vec), initial=0.)
else:
raise ValueError("unsupported norm order")
def nodal_sum(dcoll: DiscretizationCollection, dd, vec) -> Number:
r"""Return the nodal sum of a vector of degrees of freedom *vec*.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value
convertible to one.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer`.
:returns: a device scalar denoting the nodal sum.
"""
from arraycontext import get_container_context_recursively
actx = get_container_context_recursively(vec)
if not isinstance(actx, MPIBasedArrayContext):
return nodal_sum_loc(dcoll, dd, vec)
comm = actx.mpi_communicator
# NOTE: Do not move, we do not want to import mpi4py in single-rank computations
from mpi4py import MPI
return actx.from_numpy(
comm.allreduce(actx.to_numpy(nodal_sum_loc(dcoll, dd, vec)), op=MPI.SUM))
def nodal_sum_loc(dcoll: DiscretizationCollection, dd, vec) -> Number:
r"""Return the rank-local nodal sum of a vector of degrees of freedom *vec*.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value
convertible to one.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:returns: a scalar denoting the rank-local nodal sum.
"""
if not isinstance(vec, DOFArray):
return sum(
nodal_sum_loc(dcoll, dd, comp)
for _, comp in serialize_container(vec)
)
actx = vec.array_context
return sum(
actx.np.sum(grp_ary) if grp_ary.size else actx.from_numpy(np.array(0.))
for grp_ary in vec)
def nodal_min(dcoll: DiscretizationCollection, dd, vec, *, initial=None) -> RealNumber:
r"""Return the nodal minimum of a vector of degrees of freedom *vec*.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value
convertible to one.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:arg initial: an optional initial value. Defaults to `numpy.inf`.
:returns: a device scalar denoting the nodal minimum.
"""
from arraycontext import get_container_context_recursively
actx = get_container_context_recursively(vec)
if not isinstance(actx, MPIBasedArrayContext):
return nodal_min_loc(dcoll, dd, vec, initial=initial)
comm = actx.mpi_communicator
# NOTE: Do not move, we do not want to import mpi4py in single-rank computations
from mpi4py import MPI
return actx.from_numpy(
comm.allreduce(
actx.to_numpy(nodal_min_loc(dcoll, dd, vec, initial=initial)),
op=MPI.MIN))
def nodal_min_loc(
dcoll: DiscretizationCollection, dd, vec, *, initial=None) -> RealNumber:
r"""Return the rank-local nodal minimum of a vector of degrees
of freedom *vec*.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value
convertible to one.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:arg initial: an optional initial value. Defaults to `numpy.inf`.
:returns: a scalar denoting the rank-local nodal minimum.
"""
if not isinstance(vec, DOFArray):
return np.min([
nodal_min_loc(dcoll, dd, comp, initial=initial)
for _, comp in serialize_container(vec)
])
actx = vec.array_context
if initial is None:
initial = np.inf
if np.isscalar(initial):
initial = actx.from_numpy(np.array(initial))
return reduce(
lambda acc, grp_ary: actx.np.minimum(
acc,
actx.np.min(grp_ary) if grp_ary.size else initial),
vec, initial)
def nodal_max(dcoll: DiscretizationCollection, dd, vec, *, initial=None) -> RealNumber:
r"""Return the nodal maximum of a vector of degrees of freedom *vec*.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value
convertible to one.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:arg initial: an optional initial value. Defaults to `-numpy.inf`.
:returns: a device scalar denoting the nodal maximum.
"""
from arraycontext import get_container_context_recursively
actx = get_container_context_recursively(vec)
if not isinstance(actx, MPIBasedArrayContext):
return nodal_max_loc(dcoll, dd, vec, initial=initial)
comm = actx.mpi_communicator
# NOTE: Do not move, we do not want to import mpi4py in single-rank computations
from mpi4py import MPI
return actx.from_numpy(
comm.allreduce(
actx.to_numpy(nodal_max_loc(dcoll, dd, vec, initial=initial)),
op=MPI.MAX))
def nodal_max_loc(
dcoll: DiscretizationCollection, dd, vec, *, initial=None) -> RealNumber:
r"""Return the rank-local nodal maximum of a vector of degrees
of freedom *vec*.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value
convertible to one.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer`.
:arg initial: an optional initial value. Defaults to `-numpy.inf`.
:returns: a scalar denoting the rank-local nodal maximum.
"""
if not isinstance(vec, DOFArray):
return np.max([
nodal_max_loc(dcoll, dd, comp, initial=initial)
for _, comp in serialize_container(vec)
])
actx = vec.array_context
if initial is None:
initial = -np.inf
if np.isscalar(initial):
initial = actx.from_numpy(np.array(initial))
return reduce(
lambda acc, grp_ary: actx.np.maximum(
acc,
actx.np.max(grp_ary) if grp_ary.size > 0 else initial),
vec, initial)
def integral(dcoll: DiscretizationCollection, dd, vec) -> Scalar:
"""Numerically integrates a function represented by a
:class:`~meshmode.dof_array.DOFArray` of degrees of freedom.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:returns: a device scalar denoting the evaluated integral.
"""
from grudge.op import _apply_mass_operator
dd = dof_desc.as_dofdesc(dd)
ones = dcoll.discr_from_dd(dd).zeros(vec.array_context) + 1.0
return nodal_sum(
dcoll, dd, vec * _apply_mass_operator(dcoll, dd, dd, ones)
)
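# A minimal usage sketch, assuming *dcoll* and an array context *actx* exist:
# the measure (length/area/volume) of the computational domain is the integral
# of the constant function 1 over the base volume discretization.
def _example_domain_measure(actx, dcoll):
    """Return the measure of the domain (illustrative only)."""
    dd = dof_desc.DD_VOLUME_ALL
    ones = dcoll.discr_from_dd(dd).zeros(actx) + 1.0
    return integral(dcoll, dd, ones)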
# }}}
# {{{ Elementwise reductions
def _apply_elementwise_reduction(
op_name: str, dcoll: DiscretizationCollection,
*args) -> ArrayOrContainer:
r"""Returns an array container whose entries contain
the elementwise reductions in each cell.
May be called with ``(vec)`` or ``(dd, vec)``.
Note that for array contexts which support nonscalar broadcasting
(e.g. :class:`meshmode.array_context.PytatoPyOpenCLArrayContext`),
each component vector will have shape ``(nelements, 1)``.
Otherwise, the scalar value of the reduction will be repeated for each
degree of freedom.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer`.
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer`.
"""
if len(args) == 1:
vec, = args
dd = dof_desc.DD_VOLUME_ALL
elif len(args) == 2:
dd, vec = args
else:
raise TypeError("invalid number of arguments")
dd = dof_desc.as_dofdesc(dd)
if not isinstance(vec, DOFArray):
return map_array_container(
partial(_apply_elementwise_reduction, op_name, dcoll, dd), vec
)
actx = vec.array_context
if actx.supports_nonscalar_broadcasting:
return DOFArray(
actx,
data=tuple(
tag_axes(actx, {
0: DiscretizationElementAxisTag(),
1: DiscretizationDOFAxisTag()},
getattr(actx.np, op_name)(vec_i, axis=1).reshape(-1, 1))
for vec_i in vec
)
)
else:
@memoize_in(actx, (_apply_elementwise_reduction, dd,
f"elementwise_{op_name}_prg"))
def elementwise_prg():
# FIXME: This computes the reduction value redundantly for each
# output DOF.
t_unit = make_loopy_program(
[
"{[iel]: 0 <= iel < nelements}",
"{[idof, jdof]: 0 <= idof, jdof < ndofs}"
],
f"""
result[iel, idof] = {op_name}(jdof, operand[iel, jdof])
""",
name=f"grudge_elementwise_{op_name}_knl"
)
import loopy as lp
from meshmode.transform_metadata import (
ConcurrentDOFInameTag,
ConcurrentElementInameTag,
)
return lp.tag_inames(t_unit, {
"iel": ConcurrentElementInameTag(),
"idof": ConcurrentDOFInameTag()})
return actx.tag_axis(1, DiscretizationDOFAxisTag(),
DOFArray(
actx,
data=tuple(
actx.call_loopy(elementwise_prg(), operand=vec_i)["result"]
for vec_i in vec)))
def elementwise_sum(
dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
r"""Returns a vector of DOFs with all entries on each element set
to the sum of DOFs on that element.
May be called with ``(vec)`` or ``(dd, vec)``.
The input *vec* can either be a :class:`~meshmode.dof_array.DOFArray` or
an :class:`~arraycontext.ArrayContainer` with
:class:`~meshmode.dof_array.DOFArray` entries. If the underlying
array context (see :class:`arraycontext.ArrayContext`) for *vec*
supports nonscalar broadcasting, all :class:`~meshmode.dof_array.DOFArray`
entries will contain a single value for each element. Otherwise, the
entries will have the same number of degrees of freedom as *vec*, but
set to the same value.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` like *vec* whose entries
denote the element-wise sum of *vec*.
"""
return _apply_elementwise_reduction("sum", dcoll, *args)
def elementwise_max(
dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
r"""Returns a vector of DOFs with all entries on each element set
to the maximum over all DOFs on that element.
May be called with ``(vec)`` or ``(dd, vec)``.
The input *vec* can either be a :class:`~meshmode.dof_array.DOFArray` or
an :class:`~arraycontext.ArrayContainer` with
:class:`~meshmode.dof_array.DOFArray` entries. If the underlying
array context (see :class:`arraycontext.ArrayContext`) for *vec*
supports nonscalar broadcasting, all :class:`~meshmode.dof_array.DOFArray`
entries will contain a single value for each element. Otherwise, the
entries will have the same number of degrees of freedom as *vec*, but
set to the same value.
:arg dcoll: a :class:`grudge.discretization.DiscretizationCollection`.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer`.
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` like *vec* whose entries
denote the element-wise max of *vec*.
"""
return _apply_elementwise_reduction("max", dcoll, *args)
def elementwise_min(
dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
r"""Returns a vector of DOFs with all entries on each element set
to the minimum over all DOFs on that element.
May be called with ``(vec)`` or ``(dd, vec)``.
The input *vec* can either be a :class:`~meshmode.dof_array.DOFArray` or
an :class:`~arraycontext.ArrayContainer` with
:class:`~meshmode.dof_array.DOFArray` entries. If the underlying
array context (see :class:`arraycontext.ArrayContext`) for *vec*
supports nonscalar broadcasting, all :class:`~meshmode.dof_array.DOFArray`
entries will contain a single value for each element. Otherwise, the
entries will have the same number of degrees of freedom as *vec*, but
set to the same value.
:arg dcoll: a :class:`grudge.discretization.DiscretizationCollection`.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` like *vec* whose entries
denote the element-wise min of *vec*.
"""
return _apply_elementwise_reduction("min", dcoll, *args)
def elementwise_integral(
dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
"""Numerically integrates a function represented by a
:class:`~meshmode.dof_array.DOFArray` of degrees of freedom in
each element of a discretization, given by *dd*.
May be called with ``(vec)`` or ``(dd, vec)``.
The input *vec* can either be a :class:`~meshmode.dof_array.DOFArray` or
an :class:`~arraycontext.ArrayContainer` with
:class:`~meshmode.dof_array.DOFArray` entries. If the underlying
array context (see :class:`arraycontext.ArrayContext`) for *vec*
supports nonscalar broadcasting, all :class:`~meshmode.dof_array.DOFArray`
entries will contain a single value for each element. Otherwise, the
entries will have the same number of degrees of freedom as *vec*, but
set to the same value.
:arg dcoll: a :class:`grudge.discretization.DiscretizationCollection`.
:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
Defaults to the base volume discretization if not provided.
:arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` of them.
:returns: a :class:`~meshmode.dof_array.DOFArray` or an
:class:`~arraycontext.ArrayContainer` like *vec* containing the
elementwise integral of *vec*.
"""
if len(args) == 1:
vec, = args
dd = dof_desc.DD_VOLUME_ALL
elif len(args) == 2:
dd, vec = args
else:
raise TypeError("invalid number of arguments")
dd = dof_desc.as_dofdesc(dd)
from grudge.op import _apply_mass_operator
ones = dcoll.discr_from_dd(dd).zeros(vec.array_context) + 1.0
return elementwise_sum(
dcoll, dd, vec * _apply_mass_operator(dcoll, dd, dd, ones)
)
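# A minimal usage sketch, assuming *dcoll* and an array context *actx* exist:
# the measure of each individual element, obtained by integrating the constant
# function 1 elementwise.
def _example_cell_measures(actx, dcoll):
    """Return per-element areas/volumes (illustrative only)."""
    dd = dof_desc.DD_VOLUME_ALL
    ones = dcoll.discr_from_dd(dd).zeros(actx) + 1.0
    return elementwise_integral(dcoll, dd, ones)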
# }}}
# vim: foldmethod=marker
"""Minimal example of a grudge driver."""
from __future__ import division, print_function
__copyright__ = "Copyright (C) 2009 Andreas Kloeckner"
__license__ = """
@@ -24,15 +20,52 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pytools import memoize_in
from grudge.dof_desc import DD_VOLUME_ALL
def rk4_step(y, t, h, f):
k1 = f(t, y)
k2 = f(t+h/2, y + h/2*k1)
k3 = f(t+h/2, y + h/2*k2)
k4 = f(t+h, y + h*k3)
return y + h/6*(k1 + 2*k2 + 2*k3 + k4)
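# A small self-contained check of rk4_step on the scalar model problem
# dy/dt = -y with y(0) = 1, whose exact solution is exp(-t); the step size
# and final time below are arbitrary.
def _example_rk4_decay():
    import numpy as np

    y, t, h = 1.0, 0.0, 0.01
    while t < 1.0 - 1e-10:
        y = rk4_step(y, t, h, lambda t, y: -y)
        t += h

    # fourth-order accurate: the error here is far below the step size
    return abs(y - np.exp(-t))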
def _lsrk45_update(y, a, b, h, rhs_val, residual=0):
residual = a*residual + h*rhs_val
y = y + b * residual
from pytools.obj_array import make_obj_array
return make_obj_array([y, residual])
import pyopencl as cl
def compiled_lsrk45_step(actx, y, t, h, f):
from leap.rk import LSRK4MethodBuilder
@memoize_in(actx, (compiled_lsrk45_step, "update"))
def get_state_updater():
return actx.compile(_lsrk45_update)
def set_up_rk4(field_var_name, dt, fields, rhs, t_start=0):
from leap.rk import LSRK4Method
update = get_state_updater()
residual = None
for a, b, c in LSRK4MethodBuilder.coeffs: # pylint: disable=not-an-iterable
rhs_val = f(t + c*h, y)
if residual is None:
y, residual = update(y, a, b, h, rhs_val)
else:
y, residual = update(y, a, b, h, rhs_val, residual)
return y
def set_up_rk4(field_var_name, dt, fields, rhs, t_start=0.0):
from dagrt.codegen import PythonCodeGenerator
from leap.rk import LSRK4MethodBuilder
dt_method = LSRK4Method(component_id=field_var_name)
dt_method = LSRK4MethodBuilder(component_id=field_var_name)
dt_code = dt_method.generate()
dt_stepper_class = PythonCodeGenerator("TimeStep").get_class(dt_code)
dt_stepper = dt_stepper_class({"<func>"+dt_method.component_id: rhs})
@@ -44,13 +77,20 @@ def set_up_rk4(field_var_name, dt, fields, rhs, t_start=0):
return dt_stepper
def make_visualizer(discr, vis_order):
def make_visualizer(dcoll, vis_order=None, volume_dd=None, **kwargs):
from meshmode.discretization.visualization import make_visualizer
with cl.CommandQueue(discr.cl_context) as queue:
return make_visualizer(queue, discr.volume_discr, vis_order)
if volume_dd is None:
volume_dd = DD_VOLUME_ALL
return make_visualizer(
dcoll._setup_actx,
dcoll.discr_from_dd(volume_dd), vis_order, **kwargs)
def make_boundary_visualizer(discr, vis_order):
def make_boundary_visualizer(dcoll, vis_order=None, **kwargs):
from meshmode.discretization.visualization import make_visualizer
with cl.CommandQueue(discr.cl_context) as queue:
return make_visualizer(queue, discr.boundary_discr, vis_order)
from meshmode.mesh import BTAG_ALL
return make_visualizer(
dcoll._setup_actx, dcoll.discr_from_dd(BTAG_ALL),
vis_order, **kwargs)
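# A minimal usage sketch, assuming *dcoll*, a volume DOFArray *u*, and a
# visualization order *order* exist: write the field to a VTK file (the file
# name "u.vtu" is arbitrary) for inspection in ParaView or VisIt.
def _example_write_vtk(dcoll, u, order):
    vis = make_visualizer(dcoll, order)
    vis.write_vtk_file("u.vtu", [("u", u)], overwrite=True)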
"""Compiler to turn operator expression tree into (imperative) bytecode."""
from __future__ import division, absolute_import, print_function
__copyright__ = "Copyright (C) 2008-15 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six # noqa
from six.moves import zip, reduce
from pytools import Record, memoize_method, memoize
from grudge import sym
import grudge.symbolic.mappers as mappers
# {{{ instructions
class Instruction(Record):
__slots__ = []
priority = 0
def get_assignees(self):
raise NotImplementedError("no get_assignees in %s" % self.__class__)
def get_dependencies(self):
raise NotImplementedError("no get_dependencies in %s" % self.__class__)
def __str__(self):
raise NotImplementedError
def get_execution_method(self, exec_mapper):
raise NotImplementedError
def __hash__(self):
return id(self)
def __eq__(self, other):
return self is other
def __ne__(self, other):
return not self.__eq__(other)
@memoize
def _make_dep_mapper(include_subscripts):
return mappers.DependencyMapper(
include_operator_bindings=False,
include_subscripts=include_subscripts,
include_calls="descend_args")
class Assign(Instruction):
"""
.. attribute:: names
.. attribute:: exprs
.. attribute:: do_not_return
a list of bools indicating whether the corresponding entry in names and
exprs describes an expression that is not needed beyond this assignment
.. attribute:: priority
.. attribute:: is_scalar_valued
"""
comment = ""
def __init__(self, names, exprs, **kwargs):
Instruction.__init__(self, names=names, exprs=exprs, **kwargs)
if not hasattr(self, "do_not_return"):
self.do_not_return = [False] * len(names)
@memoize_method
def flop_count(self):
return sum(mappers.FlopCounter()(expr) for expr in self.exprs)
def get_assignees(self):
return set(self.names)
@memoize_method
def get_dependencies(self, each_vector=False):
dep_mapper = _make_dep_mapper(include_subscripts=False)
from operator import or_
deps = reduce(
or_, (dep_mapper(expr)
for expr in self.exprs))
from pymbolic.primitives import Variable
deps -= set(Variable(name) for name in self.names)
if not each_vector:
self._dependencies = deps
return deps
def __str__(self):
comment = self.comment
if len(self.names) == 1:
if comment:
comment = "/* %s */ " % comment
return "%s <- %s%s" % (self.names[0], comment, self.exprs[0])
else:
if comment:
comment = " /* %s */" % comment
lines = []
lines.append("{" + comment)
for n, e, dnr in zip(self.names, self.exprs, self.do_not_return):
if dnr:
dnr_indicator = "-#"
else:
dnr_indicator = ""
lines.append(" %s <%s- %s" % (n, dnr_indicator, e))
lines.append("}")
return "\n".join(lines)
def get_execution_method(self, exec_mapper):
return exec_mapper.exec_assign
class DiffBatchAssign(Instruction):
"""
:ivar names:
:ivar operators:
.. note ::
All operators here are guaranteed to satisfy
:meth:`grudge.symbolic.operators.DiffOperatorBase.
equal_except_for_axis`.
:ivar field:
"""
def get_assignees(self):
return set(self.names)
@memoize_method
def get_dependencies(self):
return _make_dep_mapper(include_subscripts=False)(self.field)
def __str__(self):
lines = []
if len(self.names) > 1:
lines.append("{")
for n, d in zip(self.names, self.operators):
lines.append(" %s <- %s(%s)" % (n, d, self.field))
lines.append("}")
else:
for n, d in zip(self.names, self.operators):
lines.append("%s <- %s(%s)" % (n, d, self.field))
return "\n".join(lines)
def get_execution_method(self, exec_mapper):
return exec_mapper.exec_diff_batch_assign
class FluxExchangeBatchAssign(Instruction):
"""
.. attribute:: names
.. attribute:: indices_and_ranks
.. attribute:: rank_to_index_and_name
.. attribute:: arg_fields
"""
priority = 1
def __init__(self, names, indices_and_ranks, arg_fields):
rank_to_index_and_name = {}
for name, (index, rank) in zip(
names, indices_and_ranks):
rank_to_index_and_name.setdefault(rank, []).append(
(index, name))
Instruction.__init__(self,
names=names,
indices_and_ranks=indices_and_ranks,
rank_to_index_and_name=rank_to_index_and_name,
arg_fields=arg_fields)
def get_assignees(self):
return set(self.names)
@memoize_method
def get_dependencies(self):
dep_mapper = _make_dep_mapper(include_subscripts=False)
result = set()
for fld in self.arg_fields:
result |= dep_mapper(fld)
return result
def __str__(self):
lines = []
lines.append("{")
for n, (index, rank) in zip(self.names, self.indices_and_ranks):
lines.append(" %s <- receive index %s from rank %d [%s]" % (
n, index, rank, self.arg_fields))
lines.append("}")
return "\n".join(lines)
def get_execution_method(self, exec_mapper):
return exec_mapper.exec_flux_exchange_batch_assign
class VectorExprAssign(Assign):
"""
.. attribute:: compiled
"""
def get_execution_method(self, exec_mapper):
return exec_mapper.exec_vector_expr_assign
comment = "compiled"
@memoize_method
def compiled(self, exec_mapper):
discr = exec_mapper.discr
from grudge.backends.vector_expr import \
VectorExpressionInfo, simple_result_dtype_getter
from grudge.backends.cuda.vector_expr import CompiledVectorExpression
return CompiledVectorExpression(
[VectorExpressionInfo(
name=name,
expr=expr,
do_not_return=dnr)
for name, expr, dnr in zip(
self.names, self.exprs, self.do_not_return)],
result_dtype_getter=simple_result_dtype_getter,
allocator=discr.pool.allocate)
# }}}
# {{{ graphviz/dot dataflow graph drawing
def dot_dataflow_graph(code, max_node_label_length=30,
label_wrap_width=50):
origins = {}
node_names = {}
result = [
"initial [label=\"initial\"]"
"result [label=\"result\"]"]
for num, insn in enumerate(code.instructions):
node_name = "node%d" % num
node_names[insn] = node_name
node_label = str(insn)
if max_node_label_length is not None:
node_label = node_label[:max_node_label_length]
if label_wrap_width is not None:
from pytools import word_wrap
node_label = word_wrap(node_label, label_wrap_width,
wrap_using="\n ")
node_label = node_label.replace("\n", "\\l") + "\\l"
result.append("%s [ label=\"p%d: %s\" shape=box ];" % (
node_name, insn.priority, node_label))
for assignee in insn.get_assignees():
origins[assignee] = node_name
def get_orig_node(expr):
from pymbolic.primitives import Variable
if isinstance(expr, Variable):
return origins.get(expr.name, "initial")
else:
return "initial"
def gen_expr_arrow(expr, target_node):
result.append("%s -> %s [label=\"%s\"];"
% (get_orig_node(expr), target_node, expr))
for insn in code.instructions:
for dep in insn.get_dependencies():
gen_expr_arrow(dep, node_names[insn])
from grudge.tools import is_obj_array
if is_obj_array(code.result):
for subexp in code.result:
gen_expr_arrow(subexp, "result")
else:
gen_expr_arrow(code.result, "result")
return "digraph dataflow {\n%s\n}\n" % "\n".join(result)
# }}}
# {{{ code representation
class Code(object):
def __init__(self, instructions, result):
self.instructions = instructions
self.result = result
self.last_schedule = None
self.static_schedule_attempts = 5
def dump_dataflow_graph(self):
from grudge.tools import open_unique_debug_file
open_unique_debug_file("dataflow", ".dot")\
.write(dot_dataflow_graph(self, max_node_label_length=None))
def __str__(self):
lines = []
for insn in self.instructions:
lines.extend(str(insn).split("\n"))
lines.append("RESULT: " + str(self.result))
return "\n".join(lines)
# {{{ dynamic scheduler (generates static schedules by self-observation)
class NoInstructionAvailable(Exception):
pass
@memoize_method
def get_next_step(self, available_names, done_insns):
from pytools import all, argmax2
available_insns = [
(insn, insn.priority) for insn in self.instructions
if insn not in done_insns
and all(dep.name in available_names
for dep in insn.get_dependencies())]
if not available_insns:
raise self.NoInstructionAvailable
from pytools import flatten
discardable_vars = set(available_names) - set(flatten(
[dep.name for dep in insn.get_dependencies()]
for insn in self.instructions
if insn not in done_insns))
# {{{ make sure results do not get discarded
from pytools.obj_array import with_object_array_or_scalar
dm = mappers.DependencyMapper(composite_leaves=False)
def remove_result_variable(result_expr):
# The extra dependency mapper run is necessary
# because, for instance, subscripts can make it
# into the result expression, which then does
# not consist of just variables.
for var in dm(result_expr):
from pymbolic.primitives import Variable
assert isinstance(var, Variable)
discardable_vars.discard(var.name)
with_object_array_or_scalar(remove_result_variable, self.result)
# }}}
return argmax2(available_insns), discardable_vars
def execute_dynamic(self, exec_mapper, pre_assign_check=None):
"""Execute the instruction stream, make all scheduling decisions
dynamically. Record the schedule in *self.last_schedule*.
"""
schedule = []
context = exec_mapper.context
next_future_id = 0
futures = []
done_insns = set()
force_future = False
while True:
insn = None
discardable_vars = []
# check futures for completion
i = 0
while i < len(futures):
future = futures[i]
if force_future or future.is_ready():
futures.pop(i)
insn = self.EvaluateFuture(future.id)
assignments, new_futures = future()
force_future = False
break
else:
i += 1
del future
# if no future got processed, pick the next insn
if insn is None:
try:
insn, discardable_vars = self.get_next_step(
frozenset(list(context.keys())),
frozenset(done_insns))
except self.NoInstructionAvailable:
if futures:
# no insn ready: we need a future to complete to continue
force_future = True
else:
# no futures, no available instructions: we're done
break
else:
for name in discardable_vars:
del context[name]
done_insns.add(insn)
assignments, new_futures = \
insn.get_execution_method(exec_mapper)(insn)
if insn is not None:
for target, value in assignments:
if pre_assign_check is not None:
pre_assign_check(target, value)
context[target] = value
futures.extend(new_futures)
schedule.append((discardable_vars, insn, len(new_futures)))
for future in new_futures:
future.id = next_future_id
next_future_id += 1
if len(done_insns) < len(self.instructions):
print("Unreachable instructions:")
for insn in set(self.instructions) - done_insns:
print(" ", insn)
raise RuntimeError("not all instructions are reachable"
"--did you forget to pass a value for a placeholder?")
if self.static_schedule_attempts:
self.last_schedule = schedule
from pytools.obj_array import with_object_array_or_scalar
return with_object_array_or_scalar(exec_mapper, self.result)
# }}}
# {{{ static schedule execution
class EvaluateFuture(object):
"""A fake 'instruction' that represents evaluation of a future."""
def __init__(self, future_id):
self.future_id = future_id
def execute(self, exec_mapper, pre_assign_check=None):
"""If we have a saved, static schedule for this instruction stream,
execute it. Otherwise, punt to the dynamic scheduler below.
"""
if self.last_schedule is None:
return self.execute_dynamic(exec_mapper, pre_assign_check)
context = exec_mapper.context
id_to_future = {}
next_future_id = 0
schedule_is_delay_free = True
for discardable_vars, insn, new_future_count in self.last_schedule:
for name in discardable_vars:
del context[name]
if isinstance(insn, self.EvaluateFuture):
future = id_to_future.pop(insn.future_id)
if not future.is_ready():
schedule_is_delay_free = False
assignments, new_futures = future()
del future
else:
assignments, new_futures = \
insn.get_execution_method(exec_mapper)(insn)
for target, value in assignments:
if pre_assign_check is not None:
pre_assign_check(target, value)
context[target] = value
if len(new_futures) != new_future_count:
raise RuntimeError("static schedule got an unexpected number "
"of futures")
for future in new_futures:
id_to_future[next_future_id] = future
next_future_id += 1
if not schedule_is_delay_free:
self.last_schedule = None
self.static_schedule_attempts -= 1
from pytools.obj_array import with_object_array_or_scalar
return with_object_array_or_scalar(exec_mapper, self.result)
# }}}
# }}}
# {{{ compiler
class OperatorCompiler(mappers.IdentityMapper):
def __init__(self, prefix="_expr", max_vectors_in_batch_expr=None):
super(OperatorCompiler, self).__init__()
self.prefix = prefix
self.max_vectors_in_batch_expr = max_vectors_in_batch_expr
self.code = []
self.expr_to_var = {}
self.assigned_names = set()
# {{{ collect various optemplate components
def collect_diff_ops(self, expr):
return mappers.BoundOperatorCollector(sym.RefDiffOperatorBase)(expr)
# }}}
# {{{ top-level driver
def __call__(self, expr):
# Put the result expressions into variables as well.
expr = sym.cse(expr, "_result")
# from grudge.symbolic.mappers.type_inference import TypeInferrer
# self.typedict = TypeInferrer()(expr)
# Used for diff batching
self.diff_ops = self.collect_diff_ops(expr)
# Finally, walk the expression and build the code.
result = super(OperatorCompiler, self).__call__(expr)
# FIXME: Reenable assignment aggregation
#return Code(self.aggregate_assignments(self.code, result), result)
return Code(self.code, result)
# }}}
# {{{ variables and names
def get_var_name(self, prefix=None):
def generate_suffixes():
yield ""
i = 2
while True:
yield "_%d" % i
i += 1
def generate_plain_names():
i = 0
while True:
yield self.prefix + str(i)
i += 1
if prefix is None:
for name in generate_plain_names():
if name not in self.assigned_names:
break
else:
for suffix in generate_suffixes():
name = prefix + suffix
if name not in self.assigned_names:
break
self.assigned_names.add(name)
return name
def assign_to_new_var(self, expr, priority=0, prefix=None):
from pymbolic.primitives import Variable, Subscript
# Observe that the only things that can be legally subscripted in
# grudge are variables. All other expressions are broken down into
# their scalar components.
if isinstance(expr, (Variable, Subscript)):
return expr
new_name = self.get_var_name(prefix)
self.code.append(self.make_assign(
new_name, expr, priority))
return Variable(new_name)
# }}}
# {{{ map_xxx routines
def map_common_subexpression(self, expr):
try:
return self.expr_to_var[expr.child]
except KeyError:
priority = getattr(expr, "priority", 0)
if isinstance(expr.child, sym.OperatorBinding):
# We need to catch operator bindings here and
# treat them specially. They get assigned to their
# own variable by default, which would mean the
# CSE prefix would be omitted, making the resulting
# code less readable.
rec_child = self.map_operator_binding(
expr.child, name_hint=expr.prefix)
else:
rec_child = self.rec(expr.child)
cse_var = self.assign_to_new_var(rec_child,
priority=priority, prefix=expr.prefix)
self.expr_to_var[expr.child] = cse_var
return cse_var
def map_operator_binding(self, expr, name_hint=None):
if isinstance(expr.op, sym.RefDiffOperatorBase):
return self.map_ref_diff_op_binding(expr)
else:
# make sure operator assignments stand alone and don't get muddled
# up in vector math
field_var = self.assign_to_new_var(
self.rec(expr.field))
result_var = self.assign_to_new_var(
expr.op(field_var),
prefix=name_hint)
return result_var
def map_ones(self, expr):
# make sure expression stands alone and doesn't get
# muddled up in vector math
return self.assign_to_new_var(expr, prefix="ones")
def map_node_coordinate_component(self, expr):
# make sure expression stands alone and doesn't get
# muddled up in vector math
return self.assign_to_new_var(expr, prefix="nodes%d" % expr.axis)
def map_normal_component(self, expr):
# make sure expression stands alone and doesn't get
# muddled up in vector math
return self.assign_to_new_var(expr, prefix="normal%d" % expr.axis)
def map_call(self, expr):
from grudge.symbolic.primitives import CFunction
if isinstance(expr.function, CFunction):
return super(OperatorCompiler, self).map_call(expr)
else:
# If it's not a C-level function, it shouldn't get muddled up into
# a vector math expression.
return self.assign_to_new_var(
type(expr)(
expr.function,
[self.assign_to_new_var(self.rec(par))
for par in expr.parameters]))
def map_ref_diff_op_binding(self, expr):
try:
return self.expr_to_var[expr]
except KeyError:
all_diffs = [diff
for diff in self.diff_ops
if diff.op.equal_except_for_axis(expr.op)
and diff.field == expr.field]
names = [self.get_var_name() for d in all_diffs]
from pytools import single_valued
op_class = single_valued(type(d.op) for d in all_diffs)
self.code.append(
DiffBatchAssign(
names=names,
op_class=op_class,
operators=[d.op for d in all_diffs],
field=self.rec(
single_valued(d.field for d in all_diffs))))
from pymbolic import var
for n, d in zip(names, all_diffs):
self.expr_to_var[d] = var(n)
return self.expr_to_var[expr]
# }}}
# {{{ instruction producers
def make_assign(self, name, expr, priority):
return Assign(names=[name], exprs=[expr],
priority=priority)
# }}}
# {{{ assignment aggregation pass
def aggregate_assignments(self, instructions, result):
from pymbolic.primitives import Variable
# {{{ aggregation helpers
def get_complete_origins_set(insn, skip_levels=0):
if skip_levels < 0:
skip_levels = 0
result = set()
for dep in insn.get_dependencies():
if isinstance(dep, Variable):
dep_origin = origins_map.get(dep.name, None)
if dep_origin is not None:
if skip_levels <= 0:
result.add(dep_origin)
result |= get_complete_origins_set(
dep_origin, skip_levels-1)
return result
var_assignees_cache = {}
def get_var_assignees(insn):
try:
return var_assignees_cache[insn]
except KeyError:
result = set(Variable(assignee)
for assignee in insn.get_assignees())
var_assignees_cache[insn] = result
return result
def aggregate_two_assignments(ass_1, ass_2):
names = ass_1.names + ass_2.names
from pymbolic.primitives import Variable
deps = (ass_1.get_dependencies() | ass_2.get_dependencies()) \
- set(Variable(name) for name in names)
return Assign(
names=names, exprs=ass_1.exprs + ass_2.exprs,
_dependencies=deps,
priority=max(ass_1.priority, ass_2.priority))
# }}}
# {{{ main aggregation pass
origins_map = dict(
(assignee, insn)
for insn in instructions
for assignee in insn.get_assignees())
from pytools import partition
unprocessed_assigns, other_insns = partition(
lambda insn: isinstance(insn, Assign) and not insn.is_scalar_valued,
instructions)
# filter out zero-flop-count assigns--no need to bother with those
processed_assigns, unprocessed_assigns = partition(
lambda ass: ass.flop_count() == 0,
unprocessed_assigns)
# filter out zero assignments
from pytools import any
from grudge.tools import is_zero
i = 0
while i < len(unprocessed_assigns):
my_assign = unprocessed_assigns[i]
if any(is_zero(expr) for expr in my_assign.exprs):
processed_assigns.append(unprocessed_assigns.pop(i))
else:
i += 1
# greedy aggregation
while unprocessed_assigns:
my_assign = unprocessed_assigns.pop()
my_deps = my_assign.get_dependencies()
my_assignees = get_var_assignees(my_assign)
agg_candidates = []
for i, other_assign in enumerate(unprocessed_assigns):
other_deps = other_assign.get_dependencies()
other_assignees = get_var_assignees(other_assign)
if ((my_deps & other_deps
or my_deps & other_assignees
or other_deps & my_assignees)
and my_assign.priority == other_assign.priority):
agg_candidates.append((i, other_assign))
did_work = False
if agg_candidates:
my_indirect_origins = get_complete_origins_set(
my_assign, skip_levels=1)
for other_assign_index, other_assign in agg_candidates:
if self.max_vectors_in_batch_expr is not None:
new_assignee_count = len(
set(my_assign.get_assignees())
| set(other_assign.get_assignees()))
new_dep_count = len(
my_assign.get_dependencies(
each_vector=True)
| other_assign.get_dependencies(
each_vector=True))
if (new_assignee_count + new_dep_count
> self.max_vectors_in_batch_expr):
continue
other_indirect_origins = get_complete_origins_set(
other_assign, skip_levels=1)
if (my_assign not in other_indirect_origins and
other_assign not in my_indirect_origins):
did_work = True
# aggregate the two assignments
new_assignment = aggregate_two_assignments(
my_assign, other_assign)
del unprocessed_assigns[other_assign_index]
unprocessed_assigns.append(new_assignment)
for assignee in new_assignment.get_assignees():
origins_map[assignee] = new_assignment
break
if not did_work:
processed_assigns.append(my_assign)
externally_used_names = set(
expr
for insn in processed_assigns + other_insns
for expr in insn.get_dependencies())
from pytools.obj_array import is_obj_array
if is_obj_array(result):
externally_used_names |= set(expr for expr in result)
else:
externally_used_names |= set([result])
def schedule_and_finalize_assignment(ass):
dep_mapper = _make_dep_mapper(include_subscripts=False)
names_exprs = list(zip(ass.names, ass.exprs))
my_assignees = set(name for name, expr in names_exprs)
names_exprs_deps = [
(name, expr,
set(dep.name for dep in dep_mapper(expr) if
isinstance(dep, Variable)) & my_assignees)
for name, expr in names_exprs]
ordered_names_exprs = []
available_names = set()
while names_exprs_deps:
schedulable = []
i = 0
while i < len(names_exprs_deps):
name, expr, deps = names_exprs_deps[i]
unsatisfied_deps = deps - available_names
if not unsatisfied_deps:
schedulable.append((str(expr), name, expr))
del names_exprs_deps[i]
else:
i += 1
# make sure these come out in a constant order
schedulable.sort()
if schedulable:
for key, name, expr in schedulable:
ordered_names_exprs.append((name, expr))
available_names.add(name)
else:
raise RuntimeError("aggregation resulted in an "
"impossible assignment")
return self.finalize_multi_assign(
names=[name for name, expr in ordered_names_exprs],
exprs=[expr for name, expr in ordered_names_exprs],
do_not_return=[Variable(name) not in externally_used_names
for name, expr in ordered_names_exprs],
priority=ass.priority)
return [schedule_and_finalize_assignment(ass)
for ass in processed_assigns] + other_insns
# }}}
# }}}
def finalize_multi_assign(self, names, exprs, do_not_return, priority):
return VectorExprAssign(names=names, exprs=exprs,
do_not_return=do_not_return,
priority=priority)
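# A minimal usage sketch, assuming *sym_expr* is a grudge symbolic expression
# (e.g. assembled via grudge.sym): compile it into an instruction stream and
# print the resulting code for inspection.
def _example_compile_expression(sym_expr):
    compiler = OperatorCompiler()
    code = compiler(sym_expr)
    print(code)
    return code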
# }}}
# vim: foldmethod=marker
"""Mappers to transform symbolic operators."""
from __future__ import division
__copyright__ = "Copyright (C) 2008 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six
import numpy as np
import pymbolic.primitives
import pymbolic.mapper.stringifier
import pymbolic.mapper.evaluator
import pymbolic.mapper.dependency
import pymbolic.mapper.substitutor
import pymbolic.mapper.constant_folder
import pymbolic.mapper.flop_counter
from pymbolic.mapper import CSECachingMapperMixin
from grudge import sym
import grudge.symbolic.operators as op
# {{{ mixins
class LocalOpReducerMixin(object):
"""Reduces calls to mapper methods for all local differentiation
operators to a single mapper method, and likewise for mass
operators.
"""
# {{{ global differentiation
def map_diff(self, expr, *args, **kwargs):
return self.map_diff_base(expr, *args, **kwargs)
def map_minv_st(self, expr, *args, **kwargs):
return self.map_diff_base(expr, *args, **kwargs)
def map_stiffness(self, expr, *args, **kwargs):
return self.map_diff_base(expr, *args, **kwargs)
def map_stiffness_t(self, expr, *args, **kwargs):
return self.map_diff_base(expr, *args, **kwargs)
def map_quad_stiffness_t(self, expr, *args, **kwargs):
return self.map_diff_base(expr, *args, **kwargs)
# }}}
# {{{ global mass
def map_mass_base(self, expr, *args, **kwargs):
return self.map_elementwise_linear(expr, *args, **kwargs)
def map_mass(self, expr, *args, **kwargs):
return self.map_mass_base(expr, *args, **kwargs)
def map_inverse_mass(self, expr, *args, **kwargs):
return self.map_mass_base(expr, *args, **kwargs)
def map_quad_mass(self, expr, *args, **kwargs):
return self.map_mass_base(expr, *args, **kwargs)
# }}}
# {{{ reference differentiation
def map_ref_diff(self, expr, *args, **kwargs):
return self.map_ref_diff_base(expr, *args, **kwargs)
def map_ref_stiffness_t(self, expr, *args, **kwargs):
return self.map_ref_diff_base(expr, *args, **kwargs)
def map_ref_quad_stiffness_t(self, expr, *args, **kwargs):
return self.map_ref_diff_base(expr, *args, **kwargs)
# }}}
# {{{ reference mass
def map_ref_mass_base(self, expr, *args, **kwargs):
return self.map_elementwise_linear(expr, *args, **kwargs)
def map_ref_mass(self, expr, *args, **kwargs):
return self.map_ref_mass_base(expr, *args, **kwargs)
def map_ref_inverse_mass(self, expr, *args, **kwargs):
return self.map_ref_mass_base(expr, *args, **kwargs)
def map_ref_quad_mass(self, expr, *args, **kwargs):
return self.map_ref_mass_base(expr, *args, **kwargs)
# }}}
class FluxOpReducerMixin(object):
"""Reduces calls to mapper methods for all flux
operators to a smaller number of mapper methods.
"""
def map_flux(self, expr, *args, **kwargs):
return self.map_flux_base(expr, *args, **kwargs)
def map_bdry_flux(self, expr, *args, **kwargs):
return self.map_flux_base(expr, *args, **kwargs)
def map_quad_flux(self, expr, *args, **kwargs):
return self.map_flux_base(expr, *args, **kwargs)
def map_quad_bdry_flux(self, expr, *args, **kwargs):
return self.map_flux_base(expr, *args, **kwargs)
class OperatorReducerMixin(LocalOpReducerMixin, FluxOpReducerMixin):
"""Reduces calls to *any* operator mapping function to just one."""
def _map_op_base(self, expr, *args, **kwargs):
return self.map_operator(expr, *args, **kwargs)
map_elementwise_linear = _map_op_base
map_interpolation = _map_op_base
map_nodal_sum = _map_op_base
map_nodal_min = _map_op_base
map_nodal_max = _map_op_base
map_stiffness = _map_op_base
map_diff = _map_op_base
map_stiffness_t = _map_op_base
map_ref_diff = _map_op_base
map_ref_stiffness_t = _map_op_base
map_mass = _map_op_base
map_inverse_mass = _map_op_base
map_ref_mass = _map_op_base
map_ref_inverse_mass = _map_op_base
map_opposite_interior_face_swap = _map_op_base
map_face_mass_operator = _map_op_base
map_ref_face_mass_operator = _map_op_base
class CombineMapperMixin(object):
def map_operator_binding(self, expr):
return self.combine([self.rec(expr.op), self.rec(expr.field)])
class IdentityMapperMixin(LocalOpReducerMixin, FluxOpReducerMixin):
def map_operator_binding(self, expr, *args, **kwargs):
assert not isinstance(self, BoundOpMapperMixin), \
"IdentityMapper instances cannot be combined with " \
"the BoundOpMapperMixin"
return type(expr)(
self.rec(expr.op, *args, **kwargs),
self.rec(expr.field, *args, **kwargs))
# {{{ operators
def map_elementwise_linear(self, expr, *args, **kwargs):
assert not isinstance(self, BoundOpMapperMixin), \
"IdentityMapper instances cannot be combined with " \
"the BoundOpMapperMixin"
# it's a leaf--no changing children
return expr
map_interpolation = map_elementwise_linear
map_nodal_sum = map_elementwise_linear
map_nodal_min = map_elementwise_linear
map_nodal_max = map_elementwise_linear
map_stiffness = map_elementwise_linear
map_diff = map_elementwise_linear
map_stiffness_t = map_elementwise_linear
map_ref_diff = map_elementwise_linear
map_ref_stiffness_t = map_elementwise_linear
map_mass = map_elementwise_linear
map_inverse_mass = map_elementwise_linear
map_ref_mass = map_elementwise_linear
map_ref_inverse_mass = map_elementwise_linear
map_opposite_interior_face_swap = map_elementwise_linear
map_face_mass_operator = map_elementwise_linear
map_ref_face_mass_operator = map_elementwise_linear
# }}}
# {{{ primitives
def map_grudge_variable(self, expr, *args, **kwargs):
# it's a leaf--no changing children
return expr
map_c_function = map_grudge_variable
map_ones = map_grudge_variable
map_node_coordinate_component = map_grudge_variable
# }}}
class BoundOpMapperMixin(object):
def map_operator_binding(self, expr, *args, **kwargs):
return getattr(self, expr.op.mapper_method)(
expr.op, expr.field, *args, **kwargs)
# }}}
# {{{ basic mappers
class CombineMapper(CombineMapperMixin, pymbolic.mapper.CombineMapper):
pass
class DependencyMapper(
CombineMapperMixin,
pymbolic.mapper.dependency.DependencyMapper,
OperatorReducerMixin):
def __init__(self,
include_operator_bindings=True,
composite_leaves=None,
**kwargs):
if composite_leaves is False:
include_operator_bindings = False
if composite_leaves is True:
include_operator_bindings = True
pymbolic.mapper.dependency.DependencyMapper.__init__(self,
composite_leaves=composite_leaves, **kwargs)
self.include_operator_bindings = include_operator_bindings
def map_operator_binding(self, expr):
if self.include_operator_bindings:
return set([expr])
else:
return CombineMapperMixin.map_operator_binding(self, expr)
def map_operator(self, expr):
return set()
def map_grudge_variable(self, expr):
return set([expr])
def _map_leaf(self, expr):
return set()
map_ones = _map_leaf
map_node_coordinate_component = _map_leaf
class FlopCounter(
CombineMapperMixin,
pymbolic.mapper.flop_counter.FlopCounter):
def map_operator_binding(self, expr):
return self.rec(expr.field)
def map_scalar_parameter(self, expr):
return 0
def map_c_function(self, expr):
return 1
def map_ones(self, expr):
return 0
def map_node_coordinate_component(self, expr):
return 0
class IdentityMapper(
IdentityMapperMixin,
pymbolic.mapper.IdentityMapper):
pass
class SubstitutionMapper(pymbolic.mapper.substitutor.SubstitutionMapper,
IdentityMapperMixin):
pass
class CSERemover(IdentityMapper):
def map_common_subexpression(self, expr):
return self.rec(expr.child)
# }}}
# {{{ operator binder
class OperatorBinder(CSECachingMapperMixin, IdentityMapper):
map_common_subexpression_uncached = \
IdentityMapper.map_common_subexpression
def map_product(self, expr):
if len(expr.children) == 0:
return expr
from pymbolic.primitives import flattened_product, Product
first = expr.children[0]
if isinstance(first, op.Operator):
prod = flattened_product(expr.children[1:])
if isinstance(prod, Product) and len(prod.children) > 1:
from warnings import warn
warn("Binding '%s' to more than one "
"operand in a product is ambiguous - "
"use the parenthesized form instead."
% first)
return sym.OperatorBinding(first, self.rec(prod))
else:
return first * self.rec(flattened_product(expr.children[1:]))
# }}}
# {{{ operator specializer
class OperatorSpecializer(CSECachingMapperMixin, IdentityMapper):
"""Guided by a typedict obtained through type inference (i.e. by
:class:`grudge.symbolic.mappers.type_inference.TypeInferrer`),
substitutes more specialized operators for generic ones.
For example, if type inference has determined that a differentiation
operator is applied to an argument on a quadrature grid, this
differentiation operator is then swapped out for a *quadrature*
differentiation operator.
"""
def __init__(self, typedict):
"""
:param typedict: generated by
:class:`grudge.symbolic.mappers.type_inference.TypeInferrer`.
"""
self.typedict = typedict
map_common_subexpression_uncached = \
IdentityMapper.map_common_subexpression
def map_operator_binding(self, expr):
from grudge.symbolic.primitives import BoundaryPair
from grudge.symbolic.mappers.type_inference import (
type_info, QuadratureRepresentation)
# {{{ figure out field type
try:
field_type = self.typedict[expr.field]
except TypeError:
# numpy arrays are not hashable
# has_quad_operand remains unset
assert isinstance(expr.field, np.ndarray)
else:
try:
field_repr_tag = field_type.repr_tag
except AttributeError:
# boundary pairs are not assigned types
assert isinstance(expr.field, BoundaryPair)
has_quad_operand = False
else:
has_quad_operand = isinstance(field_repr_tag,
QuadratureRepresentation)
# }}}
# Based on where this is run in the symbolic operator processing
# pipeline, we may encounter both reference and non-reference
# operators.
# {{{ elementwise operators
if isinstance(expr.op, op.MassOperator) and has_quad_operand:
return op.QuadratureMassOperator(
field_repr_tag.quadrature_tag)(self.rec(expr.field))
elif isinstance(expr.op, op.RefMassOperator) and has_quad_operand:
return op.RefQuadratureMassOperator(
field_repr_tag.quadrature_tag)(self.rec(expr.field))
elif (isinstance(expr.op, op.StiffnessTOperator) and has_quad_operand):
return op.QuadratureStiffnessTOperator(
expr.op.xyz_axis, field_repr_tag.quadrature_tag)(
self.rec(expr.field))
elif (isinstance(expr.op, op.RefStiffnessTOperator)
and has_quad_operand):
return op.RefQuadratureStiffnessTOperator(
expr.op.xyz_axis, field_repr_tag.quadrature_tag)(
self.rec(expr.field))
elif (isinstance(expr.op, op.QuadratureGridUpsampler)
and isinstance(field_type, type_info.BoundaryVectorBase)):
# potential shortcut:
#if (isinstance(expr.field, OperatorBinding)
#and isinstance(expr.field.op, RestrictToBoundary)):
#return QuadratureRestrictToBoundary(
#expr.field.op.tag, expr.op.quadrature_tag)(
#self.rec(expr.field.field))
return op.QuadratureBoundaryGridUpsampler(
expr.op.quadrature_tag, field_type.boundary_tag)(expr.field)
# }}}
elif isinstance(expr.op, op.RestrictToBoundary) and has_quad_operand:
raise TypeError("RestrictToBoundary cannot be applied to "
"quadrature-based operands--use QuadUpsample(Boundarize(...))")
# {{{ flux operator specialization
elif isinstance(expr.op, op.FluxOperatorBase):
from pytools.obj_array import with_object_array_or_scalar
repr_tag_cell = [None]
def process_flux_arg(flux_arg):
arg_repr_tag = self.typedict[flux_arg].repr_tag
if repr_tag_cell[0] is None:
repr_tag_cell[0] = arg_repr_tag
else:
# An error for this condition is generated by
# the type inference pass.
assert arg_repr_tag == repr_tag_cell[0]
is_boundary = isinstance(expr.field, BoundaryPair)
if is_boundary:
bpair = expr.field
with_object_array_or_scalar(process_flux_arg, bpair.field)
with_object_array_or_scalar(process_flux_arg, bpair.bfield)
else:
with_object_array_or_scalar(process_flux_arg, expr.field)
is_quad = isinstance(repr_tag_cell[0], QuadratureRepresentation)
if is_quad:
assert not expr.op.is_lift
quad_tag = repr_tag_cell[0].quadrature_tag
new_fld = self.rec(expr.field)
flux = expr.op.flux
if is_boundary:
if is_quad:
return op.QuadratureBoundaryFluxOperator(
flux, quad_tag, bpair.tag)(new_fld)
else:
return op.BoundaryFluxOperator(flux, bpair.tag)(new_fld)
else:
if is_quad:
return op.QuadratureFluxOperator(flux, quad_tag)(new_fld)
else:
return op.FluxOperator(flux, expr.op.is_lift)(new_fld)
# }}}
else:
return IdentityMapper.map_operator_binding(self, expr)
# }}}
# {{{ global-to-reference mapper
class GlobalToReferenceMapper(CSECachingMapperMixin, IdentityMapper):
"""Maps operators that apply on the global function space down to operators on
reference elements, together with explicit multiplication by geometric factors.
"""
def __init__(self, ambient_dim, dim=None):
CSECachingMapperMixin.__init__(self)
IdentityMapper.__init__(self)
if dim is None:
dim = ambient_dim
self.ambient_dim = ambient_dim
self.dim = dim
map_common_subexpression_uncached = \
IdentityMapper.map_common_subexpression
def map_operator_binding(self, expr):
# Global-to-reference is run after operator specialization, so
# if we encounter non-quadrature operators here, we know they
# must be nodal.
jac_in = sym.area_element(self.ambient_dim, self.dim, dd=expr.op.dd_in)
jac_noquad = sym.area_element(self.ambient_dim, self.dim,
dd=expr.op.dd_in.with_qtag(sym.QTAG_NONE))
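        # rewrite_derivative expresses a global x-derivative via the chain
        # rule, d/dx_i = sum_j (dr_j/dx_i) d/dr_j; for weak-form (stiffness-
        # transpose) operators the area element is folded into the operand.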
def rewrite_derivative(ref_class, field, dd_in, with_jacobian=True):
jac_tag = sym.area_element(self.ambient_dim, self.dim, dd=dd_in)
rec_field = self.rec(field)
if with_jacobian:
rec_field = jac_tag * rec_field
return sum(
sym.inverse_metric_derivative(
rst_axis, expr.op.xyz_axis,
ambient_dim=self.ambient_dim, dim=self.dim) *
ref_class(rst_axis, dd_in=dd_in)(rec_field)
for rst_axis in range(self.dim))
if isinstance(expr.op, op.MassOperator):
return op.RefMassOperator(expr.op.dd_in, expr.op.dd_out)(
jac_in * self.rec(expr.field))
elif isinstance(expr.op, op.InverseMassOperator):
return op.RefInverseMassOperator(expr.op.dd_in, expr.op.dd_out)(
1/jac_in * self.rec(expr.field))
elif isinstance(expr.op, op.FaceMassOperator):
jac_in_surf = sym.area_element(self.ambient_dim, self.dim - 1,
dd=expr.op.dd_in)
return op.RefFaceMassOperator(expr.op.dd_in, expr.op.dd_out)(
jac_in_surf * self.rec(expr.field))
elif isinstance(expr.op, op.StiffnessOperator):
return op.RefMassOperator()(jac_noquad *
self.rec(
op.DiffOperator(expr.op.xyz_axis)(expr.field)))
elif isinstance(expr.op, op.DiffOperator):
return rewrite_derivative(
op.RefDiffOperator,
expr.field, dd_in=expr.op.dd_in, with_jacobian=False)
elif isinstance(expr.op, op.StiffnessTOperator):
return rewrite_derivative(
op.RefStiffnessTOperator,
expr.field, dd_in=expr.op.dd_in)
elif isinstance(expr.op, op.MInvSTOperator):
return self.rec(
op.InverseMassOperator()(
op.StiffnessTOperator(expr.op.xyz_axis)(expr.field)))
else:
return IdentityMapper.map_operator_binding(self, expr)
# }}}
# {{{ stringification
class StringifyMapper(pymbolic.mapper.stringifier.StringifyMapper):
def _format_dd(self, dd):
def fmt(s):
if isinstance(s, type):
return s.__name__
else:
return repr(s)
from meshmode.discretization.connection import (
FRESTR_ALL_FACES, FRESTR_INTERIOR_FACES)
if dd.domain_tag is None:
result = "?"
elif dd.domain_tag is sym.DTAG_VOLUME_ALL:
result = "vol"
elif dd.domain_tag is sym.DTAG_SCALAR:
result = "scalar"
elif dd.domain_tag is FRESTR_ALL_FACES:
result = "all_faces"
elif dd.domain_tag is FRESTR_INTERIOR_FACES:
result = "int_faces"
else:
result = fmt(dd.domain_tag)
if dd.quadrature_tag is None:
pass
elif dd.quadrature_tag is sym.QTAG_NONE:
result += "q"
else:
result += "Q"+fmt(dd.quadrature_tag)
return result
def _format_op_dd(self, op):
return "[%s->%s]" % (self._format_dd(op.dd_in), self._format_dd(op.dd_out))
# {{{ nodal ops
def map_nodal_sum(self, expr, enclosing_prec):
return "NodalSum" + self._format_op_dd(expr)
def map_nodal_max(self, expr, enclosing_prec):
return "NodalMax" + self._format_op_dd(expr)
def map_nodal_min(self, expr, enclosing_prec):
return "NodalMin" + self._format_op_dd(expr)
# }}}
# {{{ global differentiation
def map_diff(self, expr, enclosing_prec):
return "Diffx%d%s" % (expr.xyz_axis, self._format_op_dd(expr))
def map_minv_st(self, expr, enclosing_prec):
return "MInvSTx%d%s" % (expr.xyz_axis, self._format_op_dd(expr))
def map_stiffness(self, expr, enclosing_prec):
return "Stiffx%d%s" % (expr.xyz_axis, self._format_op_dd(expr))
def map_stiffness_t(self, expr, enclosing_prec):
return "StiffTx%d%s" % (expr.xyz_axis, self._format_op_dd(expr))
# }}}
# {{{ global mass
def map_mass(self, expr, enclosing_prec):
return "M"
def map_inverse_mass(self, expr, enclosing_prec):
return "InvM"
# }}}
# {{{ reference differentiation
def map_ref_diff(self, expr, enclosing_prec):
return "Diffr%d%s" % (expr.rst_axis, self._format_op_dd(expr))
def map_ref_stiffness_t(self, expr, enclosing_prec):
return "StiffTr%d%s" % (expr.rst_axis, self._format_op_dd(expr))
# }}}
# {{{ reference mass
def map_ref_mass(self, expr, enclosing_prec):
return "RefM" + self._format_op_dd(expr)
def map_ref_inverse_mass(self, expr, enclosing_prec):
return "RefInvM" + self._format_op_dd(expr)
# }}}
def map_elementwise_linear(self, expr, enclosing_prec):
return "ElWLin:%s%s" % (
expr.__class__.__name__,
self._format_op_dd(expr))
# {{{ flux
def map_face_mass_operator(self, expr, enclosing_prec):
return "FaceM" + self._format_op_dd(expr)
def map_ref_face_mass_operator(self, expr, enclosing_prec):
return "RefFaceM" + self._format_op_dd(expr)
def map_opposite_interior_face_swap(self, expr, enclosing_prec):
return "OppSwap" + self._format_op_dd(expr)
# }}}
def map_ones(self, expr, enclosing_prec):
return "Ones" + self._format_op_props(expr)
# {{{ geometry data
def map_node_coordinate_component(self, expr, enclosing_prec):
return "x[%d]@%s" % (expr.axis, self._format_dd(expr.dd))
# }}}
def map_operator_binding(self, expr, enclosing_prec):
from pymbolic.mapper.stringifier import PREC_NONE
return "<%s>(%s)" % (
self.rec(expr.op, PREC_NONE),
self.rec(expr.field, PREC_NONE))
def map_c_function(self, expr, enclosing_prec):
return expr.name
def map_grudge_variable(self, expr, enclosing_prec):
return "%s:%s" % (expr.name, self._format_dd(expr.dd))
def map_interpolation(self, expr, enclosing_prec):
return "Interp" + self._format_op_dd(expr)
class PrettyStringifyMapper(
pymbolic.mapper.stringifier.CSESplittingStringifyMapperMixin,
StringifyMapper):
pass
class NoCSEStringifyMapper(StringifyMapper):
def map_common_subexpression(self, expr, enclosing_prec):
return self.rec(expr.child, enclosing_prec)
# }}}
# {{{ quadrature support
class QuadratureUpsamplerRemover(CSECachingMapperMixin, IdentityMapper):
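    """Removes quadrature upsampling operators whose quadrature tag has no
    entry (or a *None* entry) in *quad_min_degrees*, falling back to nodal
    evaluation for those tags.
    """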
def __init__(self, quad_min_degrees, do_warn=True):
IdentityMapper.__init__(self)
CSECachingMapperMixin.__init__(self)
self.quad_min_degrees = quad_min_degrees
self.do_warn = do_warn
map_common_subexpression_uncached = \
IdentityMapper.map_common_subexpression
def map_operator_binding(self, expr):
if isinstance(expr.op, (op.QuadratureGridUpsampler,
op.QuadratureInteriorFacesGridUpsampler,
op.QuadratureBoundaryGridUpsampler)):
try:
min_degree = self.quad_min_degrees[expr.op.quadrature_tag]
except KeyError:
if self.do_warn:
from warnings import warn
warn("No minimum degree for quadrature tag '%s' specified--"
"falling back to nodal evaluation"
% expr.op.quadrature_tag)
return self.rec(expr.field)
else:
if min_degree is None:
return self.rec(expr.field)
else:
return expr.op(self.rec(expr.field))
else:
return IdentityMapper.map_operator_binding(self, expr)
class QuadratureDetector(CSECachingMapperMixin, CombineMapper):
"""For a given expression, this mapper returns the upsampling
operator in effect at the root of the expression, or *None*
if there isn't one.
"""
class QuadStatusNotKnown:
pass
map_common_subexpression_uncached = \
CombineMapper.map_common_subexpression
def combine(self, values):
from pytools import single_valued
return single_valued([
v for v in values if v is not self.QuadStatusNotKnown])
def map_variable(self, expr):
return None
def map_constant(self, expr):
return self.QuadStatusNotKnown
def map_operator_binding(self, expr):
if isinstance(expr.op, (
op.DiffOperatorBase, op.FluxOperatorBase,
op.MassOperatorBase)):
return None
elif isinstance(expr.op, (op.QuadratureGridUpsampler,
op.QuadratureInteriorFacesGridUpsampler)):
return expr.op
else:
return CombineMapper.map_operator_binding(self, expr)
class QuadratureUpsamplerChanger(CSECachingMapperMixin, IdentityMapper):
"""This mapper descends the expression tree, down to each
quadrature-consuming operator (diff, mass) along each branch.
    It then changes any quadrature upsampling operators it encounters along
    the way into the desired quadrature upsampling operator.
"""
def __init__(self, desired_quad_op):
IdentityMapper.__init__(self)
CSECachingMapperMixin.__init__(self)
self.desired_quad_op = desired_quad_op
map_common_subexpression_uncached = \
IdentityMapper.map_common_subexpression
def map_operator_binding(self, expr):
if isinstance(expr.op, (
op.DiffOperatorBase, op.FluxOperatorBase,
op.MassOperatorBase)):
return expr
elif isinstance(expr.op, (op.QuadratureGridUpsampler,
op.QuadratureInteriorFacesGridUpsampler)):
return self.desired_quad_op(expr.field)
else:
return IdentityMapper.map_operator_binding(self, expr)
# }}}
# {{{ simplification / optimization
class CommutativeConstantFoldingMapper(
pymbolic.mapper.constant_folder.CommutativeConstantFoldingMapper,
IdentityMapperMixin):
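    """Folds constant subexpressions and, in addition, drops operator
    applications whose operand folds to zero.
    """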
def __init__(self):
pymbolic.mapper.constant_folder\
.CommutativeConstantFoldingMapper.__init__(self)
self.dep_mapper = DependencyMapper()
def is_constant(self, expr):
return not bool(self.dep_mapper(expr))
def map_operator_binding(self, expr):
field = self.rec(expr.field)
from grudge.tools import is_zero
if is_zero(field):
return 0
return expr.op(field)
class EmptyFluxKiller(CSECachingMapperMixin, IdentityMapper):
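    """Replaces interpolations onto boundary parts that are empty in the
    given mesh by zero.
    """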
def __init__(self, mesh):
IdentityMapper.__init__(self)
self.mesh = mesh
map_common_subexpression_uncached = \
IdentityMapper.map_common_subexpression
def map_operator_binding(self, expr):
from meshmode.mesh import is_boundary_tag_empty
if (isinstance(expr.op, sym.InterpolationOperator)
and expr.op.dd_out.is_boundary()
and expr.op.dd_out.domain_tag not in [
sym.FRESTR_ALL_FACES, sym.FRESTR_INTERIOR_FACES]
and is_boundary_tag_empty(self.mesh,
expr.op.dd_out.domain_tag)):
return 0
else:
return IdentityMapper.map_operator_binding(self, expr)
class _InnerDerivativeJoiner(pymbolic.mapper.RecursiveMapper):
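    """Helper for :class:`DerivativeJoiner`: collects differentiation
    operators occurring inside a sum into the *derivatives* dictionary
    (mapping each operator to a list of operands) and returns the remainder
    of the expression with those terms removed.
    """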
def map_operator_binding(self, expr, derivatives):
if isinstance(expr.op, op.DifferentiationOperator):
derivatives.setdefault(expr.op, []).append(expr.field)
return 0
else:
return DerivativeJoiner()(expr)
def map_common_subexpression(self, expr, derivatives):
# no use preserving these if we're moving derivatives around
return self.rec(expr.child, derivatives)
def map_sum(self, expr, derivatives):
from pymbolic.primitives import flattened_sum
return flattened_sum(tuple(
self.rec(child, derivatives) for child in expr.children))
def map_product(self, expr, derivatives):
from grudge.symbolic.tools import is_scalar
from pytools import partition
scalars, nonscalars = partition(is_scalar, expr.children)
if len(nonscalars) != 1:
return DerivativeJoiner()(expr)
else:
            from pymbolic.primitives import flattened_product
factor = flattened_product(scalars)
nonscalar, = nonscalars
sub_derivatives = {}
nonscalar = self.rec(nonscalar, sub_derivatives)
def do_map(expr):
if is_scalar(expr):
return expr
else:
return self.rec(expr, derivatives)
for operator, operands in six.iteritems(sub_derivatives):
for operand in operands:
derivatives.setdefault(operator, []).append(
factor*operand)
return factor*nonscalar
def map_constant(self, expr, *args):
return DerivativeJoiner()(expr)
def map_scalar_parameter(self, expr, *args):
return DerivativeJoiner()(expr)
def map_if_positive(self, expr, *args):
return DerivativeJoiner()(expr)
def map_power(self, expr, *args):
return DerivativeJoiner()(expr)
# these two are necessary because they're forwarding targets
def map_algebraic_leaf(self, expr, *args):
return DerivativeJoiner()(expr)
def map_quotient(self, expr, *args):
return DerivativeJoiner()(expr)
map_node_coordinate_component = map_algebraic_leaf
class DerivativeJoiner(CSECachingMapperMixin, IdentityMapper):
"""Joins derivatives:
.. math::
\frac{\partial A}{\partial x} + \frac{\partial B}{\partial x}
\rightarrow
\frac{\partial (A+B)}{\partial x}.
"""
map_common_subexpression_uncached = \
IdentityMapper.map_common_subexpression
def map_sum(self, expr):
idj = _InnerDerivativeJoiner()
def invoke_idj(expr):
sub_derivatives = {}
result = idj(expr, sub_derivatives)
if not sub_derivatives:
return expr
else:
for operator, operands in six.iteritems(sub_derivatives):
derivatives.setdefault(operator, []).extend(operands)
return result
derivatives = {}
new_children = [invoke_idj(child)
for child in expr.children]
for operator, operands in six.iteritems(derivatives):
new_children.insert(0, operator(
sum(self.rec(operand) for operand in operands)))
from pymbolic.primitives import flattened_sum
return flattened_sum(new_children)
class _InnerInverseMassContractor(pymbolic.mapper.RecursiveMapper):
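    """Helper for :class:`InverseMassContractor`: pushes an inverse mass
    operator into the expression it is applied to, merging it with mass,
    stiffness, and flux operators where possible, and counting (in
    *extra_operator_count*) how many extra inverse mass applications had to
    be introduced instead.
    """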
def __init__(self, outer_mass_contractor):
self.outer_mass_contractor = outer_mass_contractor
self.extra_operator_count = 0
def map_constant(self, expr):
from grudge.tools import is_zero
if is_zero(expr):
return 0
else:
return op.OperatorBinding(
op.InverseMassOperator(),
self.outer_mass_contractor(expr))
def map_algebraic_leaf(self, expr):
return op.OperatorBinding(
op.InverseMassOperator(),
self.outer_mass_contractor(expr))
def map_operator_binding(self, binding):
if isinstance(binding.op, op.MassOperator):
return binding.field
elif isinstance(binding.op, op.StiffnessOperator):
return op.DifferentiationOperator(binding.op.xyz_axis)(
self.outer_mass_contractor(binding.field))
elif isinstance(binding.op, op.StiffnessTOperator):
return op.MInvSTOperator(binding.op.xyz_axis)(
self.outer_mass_contractor(binding.field))
elif isinstance(binding.op, op.FluxOperator):
assert not binding.op.is_lift
return op.FluxOperator(binding.op.flux, is_lift=True)(
self.outer_mass_contractor(binding.field))
elif isinstance(binding.op, op.BoundaryFluxOperator):
assert not binding.op.is_lift
return op.BoundaryFluxOperator(binding.op.flux,
binding.op.boundary_tag, is_lift=True)(
self.outer_mass_contractor(binding.field))
else:
self.extra_operator_count += 1
return op.InverseMassOperator()(
self.outer_mass_contractor(binding))
def map_sum(self, expr):
return expr.__class__(tuple(self.rec(child) for child in expr.children))
def map_product(self, expr):
def is_scalar(expr):
return isinstance(expr, (int, float, complex, sym.ScalarParameter))
from pytools import len_iterable
nonscalar_count = len_iterable(ch
for ch in expr.children
if not is_scalar(ch))
if nonscalar_count > 1:
# too complicated, don't touch it
self.extra_operator_count += 1
return op.InverseMassOperator()(
self.outer_mass_contractor(expr))
else:
def do_map(expr):
if is_scalar(expr):
return expr
else:
return self.rec(expr)
return expr.__class__(tuple(
do_map(child) for child in expr.children))
class InverseMassContractor(CSECachingMapperMixin, IdentityMapper):
# assumes all operators to be bound
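    # Typical rewrites performed here (via _InnerInverseMassContractor):
    #     InverseMassOperator()(MassOperator()(u))        -> u
    #     InverseMassOperator()(StiffnessTOperator(i)(u)) -> MInvSTOperator(i)(u)
    #     InverseMassOperator()(FluxOperator(f)(u))       -> FluxOperator(f, is_lift=True)(u)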
map_common_subexpression_uncached = \
IdentityMapper.map_common_subexpression
def map_operator_binding(self, binding):
# we only care about bindings of inverse mass operators
if isinstance(binding.op, op.InverseMassOperator):
iimc = _InnerInverseMassContractor(self)
proposed_result = iimc(binding.field)
if iimc.extra_operator_count > 1:
# We're introducing more work than we're saving.
# Don't perform the simplification
return binding.op(self.rec(binding.field))
else:
return proposed_result
else:
return binding.op(self.rec(binding.field))
# }}}
# {{{ error checker
class ErrorChecker(CSECachingMapperMixin, IdentityMapper):
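    """Checks the operator template for common errors, such as references
    to axes that do not exist in the given mesh.
    """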
map_common_subexpression_uncached = \
IdentityMapper.map_common_subexpression
def __init__(self, mesh):
self.mesh = mesh
def map_operator_binding(self, expr):
if isinstance(expr.op, op.DiffOperatorBase):
if (self.mesh is not None
and expr.op.xyz_axis >= self.mesh.dim):
raise ValueError("optemplate tries to differentiate along a "
"non-existent axis (e.g. Z in 2D)")
# FIXME: Also check fluxes
return IdentityMapper.map_operator_binding(self, expr)
def map_normal(self, expr):
        if self.mesh is not None and expr.axis >= self.mesh.dim:
            raise ValueError("optemplate references a non-existent "
                    "component of the normal (e.g. Z in 2D)")
return expr
# }}}
# {{{ collectors for various symbolic operator components
class CollectorMixin(OperatorReducerMixin, LocalOpReducerMixin, FluxOpReducerMixin):
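    """Mixin for :class:`pymbolic.mapper.CombineMapper` subclasses that
    collect subexpressions of interest into a :class:`set`.
    """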
def combine(self, values):
from pytools import flatten
return set(flatten(values))
def map_constant(self, expr, *args, **kwargs):
return set()
map_grudge_variable = map_constant
map_c_function = map_grudge_variable
map_ones = map_grudge_variable
map_node_coordinate_component = map_grudge_variable
map_operator = map_grudge_variable
# I'm not sure this still works.
#class GeometricFactorCollector(CollectorMixin, CombineMapper):
# pass
class BoundOperatorCollector(CSECachingMapperMixin, CollectorMixin, CombineMapper):
def __init__(self, op_class):
self.op_class = op_class
map_common_subexpression_uncached = \
CombineMapper.map_common_subexpression
def map_operator_binding(self, expr):
if isinstance(expr.op, self.op_class):
            result = {expr}
else:
result = set()
return result | CombineMapper.map_operator_binding(self, expr)
class FluxExchangeCollector(CSECachingMapperMixin, CollectorMixin, CombineMapper):
map_common_subexpression_uncached = \
CombineMapper.map_common_subexpression
def map_flux_exchange(self, expr):
        return {expr}
# }}}
# {{{ evaluation
class Evaluator(pymbolic.mapper.evaluator.EvaluationMapper):
pass
# }}}
# vim: foldmethod=marker