From c9911808b2dd7f667413f5542bf4a72ac51eddb1 Mon Sep 17 00:00:00 2001 From: Cory Mikida Date: Mon, 6 Apr 2020 15:53:21 -0500 Subject: [PATCH 01/12] Weak advection operator with SBP flux. --- grudge/models/advection.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/grudge/models/advection.py b/grudge/models/advection.py index 091b9870..b6c9d105 100644 --- a/grudge/models/advection.py +++ b/grudge/models/advection.py @@ -126,6 +126,32 @@ class WeakAdvectionOperator(AdvectionOperatorBase): #+ flux(sym.bv_tpair(self.outflow_tag, u, bc_out)) )) + +class WeakAdvectionSBPOperator(AdvectionOperatorBase): + def flux(self, u): + return self.weak_flux(u) + + def sym_operator(self, sbp_tag=sym.BTAG_NONE): + u = sym.var("u") + + # boundary conditions ------------------------------------------------- + bc_in = self.inflow_u + + def flux(pair): + return sym.interp(pair.dd, "all_faces")( + self.flux(pair)) + + sbp_u = sym.cse(sym.interp("vol", sbp_tag)(sym.var("state_from_sbp"))) + return sym.InverseMassOperator()( + np.dot( + self.v, sym.stiffness_t(self.ambient_dim)*u) + - sym.FaceMassOperator()( + flux(sym.int_tpair(u)) + + flux(sym.bv_tpair(sym.BTAG_ALL, u, bc_in)) + + flux(sym.bv_tpair(sbp_tag, u, sbp_u)) + + )) + # }}} -- GitLab From 035f941def8bf97ecdc673cce4635ce7cdf9270a Mon Sep 17 00:00:00 2001 From: Cory Mikida Date: Wed, 6 May 2020 10:58:19 -0500 Subject: [PATCH 02/12] Working version of SBP-DG advection - needs some tweaking --- grudge/discretization.py | 12 ++++++++++-- grudge/models/advection.py | 6 +++--- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/grudge/discretization.py b/grudge/discretization.py index 20fc0505..d9c4b1d2 100644 --- a/grudge/discretization.py +++ b/grudge/discretization.py @@ -162,7 +162,11 @@ class DGDiscretizationWithBoundaries(DiscretizationBase): elif dd.domain_tag is sym.FACE_RESTR_INTERIOR: return self._interior_faces_connection().to_discr elif dd.is_boundary(): - return self._boundary_connection(dd.domain_tag).to_discr + # FIXME: this is broken for DTAG_BOUNDARY. + if isinstance(dd.domain_tag, sym.DTAG_BOUNDARY): + return self._boundary_connection(dd.domain_tag.tag).to_discr + else: + return self._boundary_connection(dd.domain_tag).to_discr else: raise ValueError("DOF desc tag not understood: " + str(dd)) @@ -241,7 +245,11 @@ class DGDiscretizationWithBoundaries(DiscretizationBase): return self._interior_faces_connection() elif to_dd.is_boundary(): assert from_dd.quadrature_tag is sym.QTAG_NONE - return self._boundary_connection(to_dd.domain_tag) + # FIXME: this may be a bug + if isinstance(to_dd.domain_tag, sym.DTAG_BOUNDARY): + return self._boundary_connection(to_dd.domain_tag.tag) + else: + return self._boundary_connection(to_dd.domain_tag) elif to_dd.is_volume(): from meshmode.discretization.connection import \ make_same_mesh_connection diff --git a/grudge/models/advection.py b/grudge/models/advection.py index b6c9d105..34ec6655 100644 --- a/grudge/models/advection.py +++ b/grudge/models/advection.py @@ -49,7 +49,7 @@ class AdvectionOperatorBase(HyperbolicOperator): self.flux_type = flux_type def weak_flux(self, u): - normal = sym.normal(u. 
dd, self.ambient_dim) + normal = sym.normal(u.dd, self.ambient_dim) v_dot_normal = sym.cse(self.v.dot(normal), "v_dot_normal") norm_v = sym.sqrt((self.v**2).sum()) @@ -141,14 +141,14 @@ class WeakAdvectionSBPOperator(AdvectionOperatorBase): return sym.interp(pair.dd, "all_faces")( self.flux(pair)) - sbp_u = sym.cse(sym.interp("vol", sbp_tag)(sym.var("state_from_sbp"))) return sym.InverseMassOperator()( np.dot( self.v, sym.stiffness_t(self.ambient_dim)*u) - sym.FaceMassOperator()( flux(sym.int_tpair(u)) + flux(sym.bv_tpair(sym.BTAG_ALL, u, bc_in)) - + flux(sym.bv_tpair(sbp_tag, u, sbp_u)) + + flux(sym.bv_tpair(sbp_tag, u, sym.var("state_from_sbp", + dd=sym.as_dofdesc(sbp_tag)))) )) -- GitLab From 284fce899b6a3d316454ed5666b0c915517db061 Mon Sep 17 00:00:00 2001 From: Cory Mikida Date: Fri, 8 May 2020 17:10:20 -0500 Subject: [PATCH 03/12] Updates requirements to point to proper meshmode branch --- grudge/discretization.py | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/grudge/discretization.py b/grudge/discretization.py index d9c4b1d2..c7ed34c3 100644 --- a/grudge/discretization.py +++ b/grudge/discretization.py @@ -245,7 +245,7 @@ class DGDiscretizationWithBoundaries(DiscretizationBase): return self._interior_faces_connection() elif to_dd.is_boundary(): assert from_dd.quadrature_tag is sym.QTAG_NONE - # FIXME: this may be a bug + # FIXME: this is broken for DTAG_BOUNDARY if isinstance(to_dd.domain_tag, sym.DTAG_BOUNDARY): return self._boundary_connection(to_dd.domain_tag.tag) else: diff --git a/requirements.txt b/requirements.txt index deb09394..e8a234e7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,4 +7,4 @@ git+https://gitlab.tiker.net/inducer/dagrt.git git+https://gitlab.tiker.net/inducer/leap.git git+https://github.com/inducer/meshpy.git git+https://github.com/inducer/modepy.git -git+https://github.com/inducer/meshmode.git +git+https://gitlab.tiker.net/cory/meshmode.git -- GitLab From 9772cc21085f9dab530964457b8cd46e6e958edf Mon Sep 17 00:00:00 2001 From: Cory Mikida Date: Fri, 8 May 2020 17:10:51 -0500 Subject: [PATCH 04/12] Adds test case for SBP-DG - one case (negative x-speed) fails due to issue with DG flux at SBP-facing boundary --- test/projection.py | 539 ++++++++++++++++++++++++++++++++++++++++++ test/q_2.txt | 6 + test/q_4.txt | 96 ++++++++ test/q_6.txt | 324 +++++++++++++++++++++++++ test/sbp_operators.py | 163 +++++++++++++ test/test_grudge.py | 386 ++++++++++++++++++++++++++++++ 6 files changed, 1514 insertions(+) create mode 100644 test/projection.py create mode 100644 test/q_2.txt create mode 100644 test/q_4.txt create mode 100644 test/q_6.txt create mode 100644 test/sbp_operators.py diff --git a/test/projection.py b/test/projection.py new file mode 100644 index 00000000..06b455d4 --- /dev/null +++ b/test/projection.py @@ -0,0 +1,539 @@ +import numpy as np +from sbp_operators import (sbp21, sbp42, sbp63) + + +def gaussian_quad(n): + + # Gaussian quadrature per the algorithm + # of Hesthaven and Warburton. 
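+ # This is the Golub-Welsch construction: the nodes are the eigenvalues
+ # of the symmetric tridiagonal Jacobi matrix whose off-diagonal entries
+ # are k/sqrt(4*k**2 - 1), and the weights are 2*(first eigenvector
+ # component)**2, since the total weight on [-1, 1] is 2.
+ # A quick illustrative check (shown here only as a comment): the
+ # 3-point rule (n = 2) integrates x**2 exactly, so
+ #     x, w = gaussian_quad(2)
+ #     assert abs(w.sum() - 2.0) < 1e-13
+ #     assert abs(w.dot(x**2) - 2.0/3.0) < 1e-13
+ # should both pass.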
+ + if n == 0: + x = 0 + w = 2 + else: + h1 = 2 * np.linspace(0, n, n+1, endpoint=True) + je = 2/(h1[0:n] + 2)*np.linspace(1, n, n, endpoint=True) * \ + np.linspace(1, n, n, endpoint=True) / \ + np.sqrt((h1[0:n]+1)*(h1[0:n]+3)) + a = np.diag(je, 1) + ap = np.diag(je, -1) + a = a + ap + [x, v] = np.linalg.eig(a) + idx = x.argsort()[::1] + x = x[idx] + v = v[:, idx] + w = 2 * (v[0, :]) ** 2 + + return x, w + + +def legendre_vandermonde(x, n): + + v = np.zeros((x.shape[0], n+1)) + p_n_0 = x + p_n_1 = np.ones(x.shape[0]) + if n >= 0: + v[:, 0] = p_n_1 * np.sqrt(1.0/2.0) + + if n >= 1: + v[:, 1] = p_n_0 * np.sqrt(3.0/2.0) + + for i in range(2, n+1): + a = (2*i - 1)/i + c = (i - 1)*(i - 1)*(2*i) / (i * i * (2*i - 2)) + p_n_2 = p_n_1 + p_n_1 = p_n_0 + p_n_0 = a*x*p_n_1 - c*p_n_2 + v[:, i] = p_n_0 * np.sqrt((2*i+1)/2) + + return v + + +def glue_pieces(n, vx, xg): + + # Remove leading dimensions of 1. + if vx.shape[0] == 1: + vx = np.squeeze(vx, axis=0) + k = vx.shape[0] - 1 + + [r, w_dump] = gaussian_quad(n) + # Python uses zero-based indices. + va = np.arange(0, k, 1) + vb = np.arange(1, k+1, 1) + + # x = np.ones(n+1)*vx[va] + 0.5*(r+1)*(vx[vb] - vx[va]) + x = (np.ones(n+1).reshape(1, -1)).transpose()*vx[va] + \ + 0.5*((r.reshape(1, -1)).transpose()+1)*(vx[vb] - vx[va]) + x = np.squeeze(x) + + m = np.diag(2/(2*np.arange(0, n+1, 1)+1)) + vr = legendre_vandermonde(r, n) + u = np.zeros((n+1, k*(n+1))) + + xmin = vx[0] + xmax = vx[-1] + + xbr = 2*np.divide((x-xmin), (xmax-xmin)) - 1 + vbr = (legendre_vandermonde((xbr.transpose()).flatten(), + n)).dot(np.sqrt(m)) + + invsqm_invvr = np.sqrt(np.diag( + np.divide(1, np.diag(m)))).dot(np.linalg.inv(vr)) + + for i_n in range(0, n+1): + pbr = np.reshape(vbr[:, i_n], x.shape) + tmp = invsqm_invvr.dot(pbr) + u[i_n, :] = (tmp.transpose()).flatten() + + xgbr = 2*np.divide((xg-xmin), (xmax-xmin)) - 1 + v = (legendre_vandermonde(xgbr, n)).dot(np.sqrt(m)) + + m = np.divide(m, 2) + + return u, v, m + + +def make_projection(n, order): + + p = order - 1 + + # Diagonal SBP operators + # Just orders 2 and 4 for now. + if order == 2: + # In K+W code, FD op size is n+1 + [p_sbp, q_sbp] = sbp21(n+1) + m = 2 + # pb = 1 + # from opt files + q = np.loadtxt('q_2.txt') + r = 1 + elif order == 4: + # In K+W code, FD op size is n+1 + [p_sbp, q_sbp] = sbp42(n+1) + m = 4 + # pb = 2 + # from opt files + q = np.loadtxt('q_4.txt') + r = 5 + elif order == 6: + # In K+W code, FD op size is n+1 + [p_sbp, q_sbp] = sbp63(n+1) + m = 6 + # pb = 2 + # from opt files + q = np.loadtxt('q_6.txt') + r = 8 + + h = p_sbp + + s = int(r - (m/2 - 1)) + + # Make glue and finite difference grids. + xf_b = np.linspace(0, s + m - 2, s + m - 1, endpoint=True) + xg_b = np.linspace(0, s + m - 2, s + m - 1, endpoint=True) + # Get the glue pieces. + [u_dump, v_dump, mr_b] = glue_pieces(p, xf_b, xg_b) + + i = np.kron(np.linspace(s+1, n-s+1, n-2*s+1, endpoint=True), + np.ones(m*(p+1))) + j = np.kron(np.ones(n+1-2*s), + np.linspace(1, m*(p+1), m*(p+1), endpoint=True)) + \ + (p+1)*(i-1-m/2) + # Interior solution. + qi = np.kron(np.ones(n+1-2*s), q[0:(m*(p+1))]) + pg2f = np.zeros((n+1, n*(p+1))) + for k in range(0, i.shape[0]): + # Zero-based indices. + pg2f[int(i[k])-1, int(j[k])-1] = qi[k] + + # Boundary solution. + qb = np.reshape(q[m*(p+1):], (r*(p+1), s,), order='F') + qb = np.transpose(qb) + + # Left block. 
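+ # The interior rows of pg2f (filled above) repeat the optimized interior
+ # stencil from q; the first s rows take the boundary block qb read from
+ # the q_*.txt file, and the mirrored block below covers the right
+ # boundary. pf2g is then recovered at the end of this routine from the
+ # H-compatibility relation pf2g = M^{-1} pg2f^T H, which makes the two
+ # projections adjoint with respect to the FD norm H and the glue-space
+ # mass matrix M.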
+ pg2f[0:s, 0:r*(p+1)] = qb + + qb = np.reshape((np.diag(2*np.mod(np.linspace(1, p+1, p+1, endpoint=True), + 2) - 1)).dot(np.flipud(np.reshape(np.rot90(qb, + 2).transpose(), (p+1, r*s,), order='F'))), + (r*(p+1), s,), order='F').transpose() + + # pg2f[np.arange(pg2f.shape[0]-s, pg2f.shape[0], 1), + # np.arange(pg2f.shape[1]+1-r*(p+1)-1, pg2f.shape[1], 1)] = qb + + # Right block. + for ind_i in range(pg2f.shape[0]-s, pg2f.shape[0]): + for ind_j in range(pg2f.shape[1]+1-r*(p+1)-1, pg2f.shape[1]): + pg2f[ind_i, ind_j] = qb[ind_i-(pg2f.shape[0]-s), + ind_j - (pg2f.shape[1]+1-r*(p+1)-1)] + + m = np.kron(np.eye(n), mr_b) + + # Pf2g comes directly from the compatibility equation. + pf2g = (np.kron(np.eye(n), + np.diag(1/np.diag(mr_b))).dot(np.transpose(pg2f))).dot(h) + + return pf2g, pg2f, m, h + + +def make_projection_g2g_hr(n, vxi_l, vxi_r): + + n_p = n + 1 + + nv_l = vxi_l.shape[0] + nv_r = vxi_r.shape[0] + + k_l = nv_l - 1 + k_r = nv_r - 1 + + tol = 100.0*np.finfo(float).eps + + # Check to ensure that first and last grid points align. + assert abs(vxi_l[-1] - vxi_r[-1]) <= \ + (tol*max(abs(vxi_l[-1]), abs(vxi_r[-1])) + np.finfo(float).eps) + assert abs(vxi_l[-1] - vxi_r[-1]) <= \ + (tol*max(abs(vxi_l[-1]), abs(vxi_r[-1])) + np.finfo(float).eps) + + vxi_g = np.sort(np.concatenate([vxi_l, vxi_r])) + vxi_g = np.unique(vxi_g) + + xi_g = np.vstack((vxi_g[0:-1], vxi_g[1:])) + k_g = xi_g.shape[1] + + np_l = k_l * n_p + np_r = k_r * n_p + np_g = k_g * n_p + + pg2l = np.zeros((np_l, np_g,)) + pl2g = np.zeros((np_g, np_l,)) + + for k in range(0, k_l): + xia_l = vxi_l[k] + xib_l = vxi_l[k+1] + + k_g = np.where(np.all( + np.vstack(([vxi_g < xib_l - tol], + [vxi_g >= xia_l - tol])), axis=0)) + + [pc2f, pf2c] = make_projection_g2g_h_gen(n, np.squeeze(xi_g[:, k_g])) + + idx_l = (k) * n_p + idx_g = (k_g[0][0]) * n_p + + # pg2l[np.arange(idx_l, idx_l+n_p, 1), + # np.arange(idx_g, idx_g+n_p*k_g[0].shape[0], 1)] = pf2c + # pl2g[np.arange(idx_g, idx_g+n_p*kg[0].shape[0], 1), + # np.arange(idx_l, idx_l+n_p, 1)] = pc2f + + for i in range(idx_l, idx_l+n_p): + for j in range(idx_g, idx_g+n_p*k_g[0].shape[0]): + pg2l[i, j] = pf2c[i-idx_l, j-idx_g] + pl2g[j, i] = pc2f[j-idx_g, i-idx_l] + + pg2r = np.zeros((np_r, np_g)) + pr2g = np.zeros((np_g, np_r)) + + for k in range(0, k_r): + xia_r = vxi_r[k] + xib_r = vxi_r[k+1] + + k_g = np.where(np.all( + np.vstack(([vxi_g < xib_r - tol], + [vxi_g >= xia_r - tol])), axis=0)) + + [pc2f, pf2c] = make_projection_g2g_h_gen(n, np.squeeze(xi_g[:, k_g])) + + idx_r = (k) * n_p + idx_g = (k_g[0][0]) * n_p + + # pg2r[np.arange(idx_r, idx_r+n_p-1, 1), + # np.arange(idx_g, idx_g+n_p*k_g[0].shape[0]-1, 1)] = pf2c + # pr2g[idx_g:idx_g+n_p*k_g[0].shape[0]-1, + # idx_r:idx_r+n_p-1] = pc2f + + for i in range(idx_r, idx_r+n_p): + for j in range(idx_g, idx_g+n_p*k_g[0].shape[0]): + pg2r[i, j] = pf2c[i-idx_r, j-idx_g] + pr2g[j, i] = pc2f[j-idx_g, i-idx_r] + + return vxi_g, pl2g, pg2l, pr2g, pg2r + + +def make_projection_g2g_h_gen(n, xi_c): + + [r, w_dump] = gaussian_quad(n) + + if len(xi_c.shape) == 1: + k = 1 + vx = xi_c + fa = xi_c[0] + fb = xi_c[-1] + else: + k = xi_c.shape[1] + vx = np.append(xi_c[0, :], xi_c[1, -1]) + fa = xi_c[0, 0] + fb = xi_c[1, -1] + + vx = 2/(fb - fa) * vx + 1 - 2*fb/(fb-fa) + + va = np.arange(0, k, 1) + vb = np.arange(1, k+1, 1) + + x = (np.ones(n+1).reshape(1, -1)).transpose()*vx[va] + \ + 0.5*((r.reshape(1, -1)).transpose()+1)*(vx[vb] - vx[va]) + + jc = (vx[vb] - vx[va])/2 + jf = 1 + + pc2f = np.zeros((x.size, r.shape[0])) + + m = (2.0/(2*np.linspace(0, n, n+1, 
endpoint=True) + 1)) + + vr = legendre_vandermonde(r, n) + + sq_invm_invvr = np.diag(np.sqrt(np.divide(1, m))).dot(np.linalg.inv(vr)) + + for k_i in range(0, k): + pc2f[np.arange(k_i*(n+1), (k_i+1)*(n+1), 1), :] = \ + sq_invm_invvr.dot(legendre_vandermonde(x[:, k_i], + n).dot(np.diag(np.sqrt(m)))) + + pf2c = (1.0 / jf) * (np.diag(1.0 / m).dot( + pc2f.transpose())).dot(np.kron(np.diag(jc), np.diag(m))) + + return pc2f, pf2c + + +def sbp_sbp_projection(na, qa, nb, qb): + + # Get the projection operators for each side. + [paf2g, pag2f, ma_dump, ha_dump] = make_projection(na, qa) + [pbf2g, pbg2f, mb_dump, hb_dump] = make_projection(nb, qb) + + # Modify the projection operators so they go to the + # same order polynomials. + + pg = max(qa, qb)-1 + + paf2g = np.kron(np.eye(na), np.eye(pg+1, qa)).dot(paf2g) + pag2f = pag2f.dot(np.kron(np.eye(na), np.eye(qa, pg+1))) + + pbf2g = np.kron(np.eye(nb), np.eye(pg+1, qb)).dot(pbf2g) + pbg2f = pbg2f.dot(np.kron(np.eye(nb), np.eye(qb, pg+1))) + + # Move to the glue space + xa = np.linspace(-1, 1, na + 1, endpoint=True) + xb = np.linspace(-1, 1, nb + 1, endpoint=True) + + # Glue-to-glue. + [vxi_g_dump, pa2g, pg2a, pb2g, pg2b] = make_projection_g2g_hr(pg, xa, xb) + + # Stack the operators. + pa2b = ((pbg2f.dot(pg2b)).dot(pa2g)).dot(paf2g) + pb2a = ((pag2f.dot(pg2a)).dot(pb2g)).dot(pbf2g) + + return pa2b, pb2a + + +def sbp_sbp_test(): + + na = 100 + nb = 230 + + xa = np.linspace(-1, 1, na+1, endpoint=True) + xb = np.linspace(-1, 1, nb+1, endpoint=True) + + eps = np.finfo(float).eps + + # FIXME: these test ranges are supposed to go to 10, but + # we don't have those diagonal SBP operators coded yet. + for qa in range(2, 6, 2): + for qb in range(2, 6, 2): + print('Creating projection for (qa, qb) ', qa, qb) + [pa2b, pb2a] = sbp_sbp_projection(na, qa, nb, qb) + + # np.savetxt('pa2b_test.txt', pa2b) + # np.savetxt('pb2a_test.txt', pb2a) + + print('Testing projection for (qa, qb) ', qa, qb) + # Check the full operator + for n in range(0, int(min(qa, qb)/2)-1): + np.testing.assert_array_less(np.abs(pa2b.dot((xa ** n)) + - xb ** n), 1000*eps) + np.testing.assert_array_less(np.abs(pb2a.dot((xb ** n)) + - xa ** n), 1000*eps) + print('Test passed for (qa, qb) ', qa, qb) + + # Check the interior operator + n_int = int(min(qa, qb))-1 + ta = pb2a.dot(xb ** n_int) - xa ** n_int + tb = pa2b.dot(xa ** n_int) - xb ** n_int + + mb = np.argwhere(np.abs(tb) > 1000*eps).size / 2 + ma = np.argwhere(np.abs(ta) > 1000*eps).size / 2 + + assert mb < nb / 2 + assert ma < na / 2 + + # Look at interior part only - locate boundary portions ad-hoc. + if mb > 0: + assert np.max(abs(tb[int(mb):int(-mb)])) < 1000*eps + if ma > 0: + assert np.max(abs(ta[int(ma):int(-ma)])) < 1000*eps + + return + + +def sbp_dg_projection(na, nb, qa, qb, gg, dg_nodes): + + # Get the projection operators for SBP-side. + [paf2g, pag2f, ma_dump, ha_dump] = make_projection(na, qa) + [pbf2g, pbg2f] = glue_to_dg(qb, gg, dg_nodes) + + # Modify the projection operator so that it goes to the + # same order polynomials as DG. + + pg = max(qa, qb)-1 + + paf2g = np.kron(np.eye(na), np.eye(pg+1, qa)).dot(paf2g) + pag2f = pag2f.dot(np.kron(np.eye(na), np.eye(qa, pg+1))) + pbf2g = np.kron(np.eye(nb), np.eye(pg+1, qb)).dot(pbf2g) + pbg2f = pbg2f.dot(np.kron(np.eye(nb), np.eye(qb, pg+1))) + + # Move to the glue space + xa = np.linspace(-1, 1, na + 1, endpoint=True) + xb = np.linspace(-1, 1, nb + 1, endpoint=True) + + # Glue-to-glue. 
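+ # Both sides now live in piecewise-Legendre glue spaces of the common
+ # order pg (the padding above); make_projection_g2g_hr merges the two
+ # glue grids into a common refinement and returns the transfer operators
+ # between them, so the full maps are just compositions, e.g.
+ #     pa2b = pbg2f . pg2b . pa2g . paf2g
+ # (side-A points -> glue A -> common glue -> glue B -> side-B nodes).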
+ [vxi_g_dump, pa2g, pg2a, pb2g, pg2b] = make_projection_g2g_hr(pg, xa, xb) + + # Stack the operators. + pa2b = ((pbg2f.dot(pg2b)).dot(pa2g)).dot(paf2g) + pb2a = ((pag2f.dot(pg2a)).dot(pb2g)).dot(pbf2g) + + return pa2b, pb2a + + +def glue_to_dg(order, gg, dg_nodes): + + from modepy.quadrature.jacobi_gauss import legendre_gauss_lobatto_nodes + # Each element has order + 1 nodes. + nelem = int(dg_nodes.shape[0] / (order + 1)) + + # Create projection operators. + pglue2dg = np.zeros((dg_nodes.shape[0], (gg.shape[0]-1)*order)) + pdg2glue = np.zeros(((gg.shape[0]-1)*order, dg_nodes.shape[0])) + + for i_elem in range(0, nelem): + + a_elem = dg_nodes[i_elem*(order+1)] + b_elem = dg_nodes[i_elem*(order+1)+order] + + # Find the Legendre coefficients for coincident GG interval, using + # appropriate-order LGL quadrature. + quad_nodes = legendre_gauss_lobatto_nodes(order) + # Before transforming these, use them to get the weights. + quad_weights = lgl_weights(order+1, quad_nodes) + # Transform nodes to the appropriate interval. + for i in range(0, order + 1): + quad_nodes[i] = (quad_nodes[i] + 1) * (b_elem - a_elem) \ + / 2.0 + a_elem + # Transform the weights as well. + for i in range(0, order + 1): + quad_weights[i] = quad_weights[i] * (b_elem - a_elem) / 2.0 + # Verify that nodal points on the interface are already LGL points. + for i in range(0, order+1): + assert abs(quad_nodes[i] - dg_nodes[i_elem*(order+1)+i]) < 1e-12 + + # Do the quadrature. + # Coefficient loop. + for i in range(0, order): + # Quadrature point loop. + for k in range(0, order+1): + # Get Legendre polynomial of order i in + # interval, evaluated at quad point k. + poly_at_k = legendre_interval(i, a_elem, + b_elem, quad_nodes[k]) + + # Get Legendre coefficients. + i_op = i_elem*order + i + j_op = k + i_elem*(order+1) + pdg2glue[i_op][j_op] = ((2*i+1)/(b_elem - a_elem))\ + * quad_weights[k] * poly_at_k + + # Now that we have the projection from DG to glue, the projection + # from glue to DG comes from the H-compatibility condition + + # In our case, H is a repeated diagonal of the quad weights. + h = np.zeros((nelem*(order+1), nelem*(order+1))) + for i_elem in range(0, nelem): + for i in range(0, order+1): + h[i_elem*(order+1)+i][i_elem*(order+1)+i] = quad_weights[i] + + # Will need to invert this. + hinv = np.linalg.inv(h) + + # Now we need to also construct the per-interval mass matrix, + # taking advantage of the fact that the Legendre polynomials + # are orthogonal. + + m = np.zeros(((gg.shape[0]-1)*order, (gg.shape[0]-1)*order)) + m_small = np.zeros(order) + # Only same-order Legendre polynomials in the same interval + # will be nonzero, so the matrix is diagonal. + for i in range(0, order): + m_small[i] = (gg[1] - gg[0]) / (2.0 * i + 1) + for i in range(0, (gg.shape[0]-1)*order): + i_rem = i % order + m[i][i] = m_small[i_rem] + + # Finally, we can use compatibility to get the reverse projection. + pglue2dg = (m.dot(pdg2glue)).dot(hinv) + pglue2dg = pglue2dg.transpose() + + return pdg2glue, pglue2dg + + +def legendre_interval(order, a_int, b_int, point_k): + + # First check if the query point + # is within the interval. If it isn't, + # return 0. 
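+ # Overall this evaluates the Legendre polynomial of the given order,
+ # shifted to the interval [a_int, b_int], at point_k: the point is
+ # mapped to x_trans in [-1, 1] via
+ #     x_trans = (2*point_k - a_int - b_int) / (b_int - a_int)
+ # and the polynomial is built up with Bonnet's recursion
+ #     (n + 1) P_{n+1}(x) = (2*n + 1) x P_n(x) - n P_{n-1}(x).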
+ if point_k >= a_int and point_k <= (b_int + 1e-12): + pass + else: + return 0 + + # With that out of the way, do some polynomial building + # using Bonnet's recursion formula, remembering to + # shift the Legendre polynomials to the interval + x_trans = (point_k - a_int) / (b_int - a_int) \ + - (b_int - point_k) / (b_int - a_int) + p_n_0 = 1 + p_n_1 = x_trans + + # If our input order is either of these, return them. + if order == 0: + return p_n_0 + elif order == 1: + return p_n_1 + + # Otherwise we enter recursion mode. + p_n_m1 = p_n_0 + p_n = p_n_1 + for i in range(1, order): + p_n_p1 = (1.0 / (i + 1)) * ((2*i + 1) * x_trans * p_n - i * p_n_m1) + p_n_m1 = p_n + p_n = p_n_p1 + + return p_n_p1 + + +def lgl_weights(n, nodes): + + weights = np.zeros(n) + for i in range(0, n): + weights[i] = (2.0 / (n*(n-1) * + (legendre_interval(n-1, -1, 1, nodes[i]))**2)) + + return weights diff --git a/test/q_2.txt b/test/q_2.txt new file mode 100644 index 00000000..e6ac0268 --- /dev/null +++ b/test/q_2.txt @@ -0,0 +1,6 @@ + 0.5000000000000000 + 0.1666666666666667 + 0.5000000000000000 + -0.1666666666666667 + 1.0000000000000000 + -0.3333333333333333 diff --git a/test/q_4.txt b/test/q_4.txt new file mode 100644 index 00000000..4b7cdf12 --- /dev/null +++ b/test/q_4.txt @@ -0,0 +1,96 @@ +-0.0416666666666667 +-0.0027777777777778 +0.0083333333333331 +0.0011904761904764 +0.5416666666666667 +0.1749999999999999 +-0.0083333333333331 +-0.0035714285714288 +0.5416666666666664 +-0.1749999999999997 +-0.0083333333333330 +0.0035714285714288 +-0.0416666666666666 +0.0027777777777777 +0.0083333333333332 +-0.0011904761904764 +1.2400202320584575 +0.0078507119289183 +0.0000000529778775 +-0.0000000352034607 +-0.0729337579017062 +0.0078478800177192 +-0.0000029202645105 +0.0000013131437493 +-0.0178405986993549 +0.0037727491934025 +0.0369615952094064 +0.0112488842016829 +0.0890096133619981 +0.2554389853632429 +0.0276743188599176 +-0.0165123473925149 +-0.2382554888193933 +0.3143058144807063 +-0.0201849049285142 +-0.0048899974275327 +0.3871547721536256 +-0.0850853038136043 +0.0000000347976171 +0.0000004882756223 +0.5704741035559282 +-0.0850856242077431 +0.0000013542349111 +-0.0000002055723806 +0.0683256772889949 +-0.0859279491160664 +-0.0050004014450767 +-0.0024166148237313 +-0.1013262031464566 +-0.0318424986928614 +-0.0075438526011718 +0.0031249334078735 +0.0753716501479071 +-0.1213255227420779 +-0.1027984715767137 +-0.0139918812156549 +0.2575512802532740 +0.0381320823634987 +-0.0000001583253617 +-0.0000012981662079 +0.1954343589159886 +0.0381363203630235 +-0.0000002527030111 +-0.0000009933207050 +0.3452893165945521 +0.0421803361278826 +-0.0208138136083886 +-0.0053811555335502 +0.1259767834841859 +-0.2124800017310330 +-0.0028189687190127 +0.0096801097150306 +0.0757482607519996 +-0.0398415082546706 +0.3060383673815450 +0.0441960895872428 +-0.1427996031213978 +0.0662630873587877 +0.0000000786593311 +0.0000005634968173 +0.1464943743090148 +0.0662607365784819 +-0.0000003956966040 +0.0000006636350413 +0.6413190735565835 +0.0678610437019278 +0.0032993990175575 +0.0025631851830881 +0.4703689973197340 +-0.0325261616434401 +0.0019559089340423 +-0.0041963299765795 +-0.1153828420639347 +0.0747250037529541 +-0.1296203385835838 +-0.0214066073755637 diff --git a/test/q_6.txt b/test/q_6.txt new file mode 100644 index 00000000..5ef09bd3 --- /dev/null +++ b/test/q_6.txt @@ -0,0 +1,324 @@ +0.0076388888888890 +0.0002976190476190 +-0.0015873015873015 +-0.0001322751322751 +0.0000330687830690 +0.0000030062530061 +-0.0645833333333338 
+-0.0042658730158725 +0.0130952380952380 +0.0018518518518519 +-0.0000992063492062 +-0.0000150312650316 +0.5569444444444445 +0.1779761904761896 +-0.0115079365079365 +-0.0048941798941798 +0.0000661375661373 +0.0000300625300628 +0.5569444444444448 +-0.1779761904761902 +-0.0115079365079367 +0.0048941798941798 +0.0000661375661374 +-0.0000300625300626 +-0.0645833333333338 +0.0042658730158727 +0.0130952380952383 +-0.0018518518518517 +-0.0000992063492060 +0.0000150312650315 +0.0076388888888888 +-0.0002976190476190 +-0.0015873015873016 +0.0001322751322751 +0.0000330687830688 +-0.0000030062530062 +1.4268563455122170 +0.0382682224832046 +0.0062730242497421 +-0.0000007965781015 +-0.0000021799552952 +0.0000031243708247 +-0.1850971670076224 +0.0622469797290170 +0.0062744190375794 +0.0000002829327749 +-0.0000034557680218 +0.0000022076848745 +-0.0648981557548598 +0.0862272866341048 +0.0062769233641760 +0.0000035338545074 +0.0000014627798546 +-0.0000002921816912 +-0.1581346143995408 +0.1095251374450681 +0.0145814029712218 +0.0012769461829696 +-0.0005373905643547 +-0.0000766303009154 +-0.2594155138207218 +0.1426955295429118 +-0.0474531350117842 +-0.0155963333944991 +0.0006274045660508 +0.0002516855883443 +0.1957195334491137 +-0.2312509699825552 +-0.0353392218318904 +0.0181934735669320 +0.0005316092509378 +-0.0002603302379771 +-0.0095926586883630 +-0.1030241153958490 +0.0285461026469565 +-0.0024152925720598 +-0.0006067429675004 +0.0000840873716417 +0.0545622307097761 +-0.0567239766258600 +0.0293759307346910 +0.0019277778383208 +0.0002123757825960 +0.0000361878454639 +0.3392129585195110 +-0.1507565990718361 +0.0034987545283517 +0.0000000552211631 +0.0000013909237075 +-0.0000017845166932 +0.5611964896479856 +-0.1161082144029679 +0.0034983690188400 +-0.0000001858413508 +0.0000017049198556 +-0.0000006888382763 +0.1417146396683978 +-0.0814598481212354 +0.0034971885203090 +-0.0000010473718728 +-0.0000007041460643 +0.0000006574649649 +-0.0533525569340640 +-0.0467665678596720 +0.0022661137186922 +-0.0002451535334260 +0.0001236463156133 +0.0000205249914517 +0.0600295356643070 +-0.0127942396309124 +0.0120613485586357 +0.0030500892829083 +-0.0001574385845970 +-0.0000696747062982 +-0.0457592973586723 +0.0482087389842354 +0.0061406967811889 +-0.0041461246843002 +-0.0001141261666472 +0.0000726824264687 +0.0204208168834889 +0.0214306912296826 +-0.0065609219424412 +0.0006995593026804 +0.0002176232370328 +-0.0000179572367564 +-0.0234625860909541 +0.0298929334512873 +0.0262264569996840 +0.0037531278946722 +-0.0006161601985534 +-0.0000814417976086 +0.3748942611599946 +0.1668117925907492 +-0.0181769376775942 +0.0000016505102586 +-0.0000030182771242 +0.0000029438211654 +0.0180139527050320 +0.0857028815667877 +-0.0181784679818216 +0.0000009910467807 +-0.0000013440918030 +-0.0000013064971565 +0.0746238386423028 +0.0045891148305835 +-0.0181778229909092 +-0.0000026458877929 +0.0000006240076727 +-0.0000030215871136 +0.6650168973148122 +-0.0756868842396258 +-0.0284338587698686 +-0.0014908662915176 +0.0005533686188464 +0.0000648984500317 +0.0825405356562529 +-0.1679579805970867 +0.0464787505087289 +0.0180197770250175 +-0.0005991907450912 +-0.0002030388131742 +-0.1271337770068975 +0.2395425466352896 +0.0439065706339785 +-0.0189271275362692 +-0.0005931549885790 +0.0002081702648572 +-0.0554008864222944 +0.1345998216307505 +-0.0305376118824286 +0.0012238676357885 +0.0003578248053824 +-0.0000866448053757 +-0.0325548220492018 +0.0193656942897140 +-0.1241249974282292 +-0.0143897631796122 +0.0016891849092679 +0.0002284663733918 
+-0.1024581261420299 +0.1257909677432488 +0.0015153781862069 +-0.0000008474008065 +0.0000007789434931 +-0.0000004244145945 +0.2304391611706006 +0.0891571891704643 +0.0015167407319010 +-0.0000009748827134 +-0.0000006240586523 +0.0000009923667029 +0.6440801174119887 +0.0525269565693532 +0.0015173223183447 +0.0000020585296105 +0.0000000627324804 +0.0000009084112755 +0.0820245802248009 +0.0158215065016135 +0.0054897695036996 +0.0007655707893505 +-0.0003292871572016 +-0.0000437729607812 +0.0981630408361174 +-0.0197771964741964 +-0.0240721736921666 +-0.0093092692657951 +0.0003701628210641 +0.0001405185367588 +0.0083452908862361 +-0.0989989738815951 +-0.0192029883069766 +0.0103760447110098 +0.0003403703897566 +-0.0001440922422889 +-0.0288188490304330 +-0.0859982980988029 +0.0177618414433881 +-0.0004103013388925 +-0.0004612200444481 +0.0000380919055893 +0.0682247846427184 +-0.0859464570522154 +-0.0628137261290580 +-0.0083111162477239 +0.0010391751207186 +0.0001087956757791 +-0.1011503113778986 +-0.0162476416019815 +0.0011983954402854 +-0.0000000588179514 +-0.0000005947179354 +0.0000005482563072 +0.0719904403159826 +-0.0019796478930281 +0.0011977824458625 +0.0000012499070772 +0.0000000613542417 +0.0000003740606944 +0.0832538157106969 +0.0122861500314558 +0.0011977298182762 +0.0000001277266936 +0.0000003015490923 +0.0000002630332989 +0.6566754683652896 +0.0262616468261796 +0.0029451944128836 +0.0001330300433277 +-0.0000334377442731 +-0.0000040227100399 +0.1400781774850962 +0.0446073035815920 +-0.0109409719384444 +-0.0017177157691836 +0.0000669932825490 +0.0000159076164692 +0.1257481536103612 +-0.1146025911016925 +-0.0037530097519190 +0.0028742337159273 +0.0000373772981286 +-0.0000201476048079 +0.0932564138521604 +0.0035389065504577 +0.0035428896959126 +-0.0013480068067448 +0.0002762792753176 +0.0000358035921204 +-0.0698521579616875 +0.1098332091938433 +0.2163649169891393 +0.0260966655396425 +-0.0029253556960296 +-0.0003337393398058 +0.0607557809217103 +-0.0477299636863383 +0.0015660752104962 +0.0000002406122343 +0.0000002217587330 +-0.0000003222125190 +-0.0834412984870233 +-0.0205232662007936 +0.0015660005245034 +-0.0000003778473675 +0.0000002791480044 +-0.0000004851896970 +-0.0969053718693941 +0.0066836149296925 +0.0015657755275141 +-0.0000006607220888 +-0.0000002243360089 +-0.0000002883209355 +-0.0012435327466196 +0.0335825687247331 +0.0021479970891172 +-0.0000648055397491 +0.0000557310904477 +0.0000097726914592 +0.7438158486829837 +0.0650351693707065 +-0.0025803264052022 +0.0007626503461785 +-0.0000566482134126 +-0.0000315812989750 +0.4503079338353286 +-0.0897576916820822 +0.0022852763449330 +-0.0004173965860673 +-0.0000920770941274 +0.0000292294123977 +-0.0954825204170535 +0.0253564541898826 +0.0054354864750859 +-0.0009460553470582 +-0.0000801889312214 +-0.0000148890576181 +0.0221931600800686 +-0.0292017286479252 +-0.0875580840377385 +-0.0100077065308661 +0.0011248933398002 +0.0001230164560477 diff --git a/test/sbp_operators.py b/test/sbp_operators.py new file mode 100644 index 00000000..8170805d --- /dev/null +++ b/test/sbp_operators.py @@ -0,0 +1,163 @@ +import numpy as np + + +# Standard FD method: second-order +def sbp21(n): + p = np.zeros((n, n, )) + q = np.zeros((n, n, )) + + # norm matrix + p[0, 0] = 0.5 + p[n-1, n-1] = 0.5 + for i in range(1, n-1): + p[i, i] = 1.0 + + # now the q matrix + q[0, 1] = 0.5 + q[n-1, n-2] = -0.5 + for i in range(1, n-1): + q[i, i-1] = -0.5 + q[i, i+1] = 0.5 + + return p, q + + +# Standard FD method: third-order +def sbp42(n): + p = np.zeros((n, n, )) + q = 
np.zeros((n, n, )) + + # norm matrix + # upper subblock + p[0, 0] = 17.0/48.0 + p[1, 1] = 59.0/48.0 + p[2, 2] = 43.0/48.0 + p[3, 3] = 49.0/48.0 + # lower subblock + p[n-1, n-1] = 17.0/48.0 + p[n-2, n-2] = 59.0/48.0 + p[n-3, n-3] = 43.0/48.0 + p[n-4, n-4] = 49.0/48.0 + for i in range(4, n-4): + p[i, i] = 1.0 + + # now the q matrix + # upper subblock + q[0, 1] = 59.0/96.0 + q[0, 2] = -1.0/12.0 + q[0, 3] = -1.0/32.0 + q[1, 2] = 59.0/96.0 + q[2, 3] = 59.0/96.0 + q[1, 0] = -q[0, 1] + q[2, 0] = -q[0, 2] + q[2, 1] = -q[1, 2] + q[2, 4] = -1.0/12.0 + q[3, 0] = -q[0, 3] + q[3, 2] = -q[2, 3] + q[3, 4] = 2.0/3.0 + q[3, 5] = -1.0/12.0 + # lower subblock + q[n-1, n-2] = -q[0, 1] + q[n-1, n-3] = -q[0, 2] + q[n-1, n-4] = -q[0, 3] + q[n-2, n-1] = -q[1, 0] + q[n-2, n-3] = -q[1, 2] + q[n-3, n-1] = -q[2, 0] + q[n-3, n-2] = -q[2, 1] + q[n-3, n-4] = -q[2, 3] + q[n-3, n-5] = 1.0/12.0 + q[n-4, n-1] = -q[3, 0] + q[n-4, n-3] = -q[3, 2] + q[n-4, n-5] = -2.0/3.0 + q[n-4, n-6] = 1.0/12.0 + for i in range(4, n-4): + q[i, i-2] = 1.0/12.0 + q[i, i-1] = -2.0/3.0 + q[i, i+1] = 2.0/3.0 + q[i, i+2] = -1.0/12.0 + + return p, q + +# Standard FD method: fourth-order +def sbp63(n): + p = np.zeros((n, n, )) + q = np.zeros((n, n, )) + + # norm matrix + # upper subblock + p[0, 0] = 13649.0/43200.0 + p[1, 1] = 12013.0/8640.0 + p[2, 2] = 2711.0/4320.0 + p[3, 3] = 5359.0/4320.0 + p[4, 4] = 7877.0/8640.0 + p[5, 5] = 43801.0/43200.0 + # lower subblock + p[n-1, n-1] = 13649.0/43200.0 + p[n-2, n-2] = 12013.0/8640.0 + p[n-3, n-3] = 2711.0/4320.0 + p[n-4, n-4] = 5359.0/4320.0 + p[n-5, n-5] = 7877.0/8640.0 + p[n-6, n-6] = 43801.0/43200.0 + for i in range(6, n-6): + p[i, i] = 1.0 + + # now the q matrix + # upper subblock + q[0,1] = 127/211; + q[0,2] = 35/298; + q[0,3] = -32/83; + q[0,4] = 89/456; + q[0,5] = -2/69; + + q[1,0] = -127/211; + q[1,2] = -1/167; + q[1,3] = 391/334; + q[1,4] = -50/71; + q[1,5] = 14/99; + + q[2,0] = -35/298; + q[2,1] = 1/167; + q[2,3] = -34/79; + q[2,4] = 90/113; + q[2,5] = -69/271; + + q[3,0] = 32/83; + q[3,1] = -391/334; + q[3,2] = 34/79; + q[3,4] = 6/25; + q[3,5] = 31/316; + q[3,6] = 1/60; + + q[4,0] = -89/456; + q[4,1] = 50/71; + q[4,2] = -90/113; + q[4,3] = -6/25; + q[4,5] = 37/56; + q[4,6] = -3/20; + q[4,7] = 1/60; + + q[5,0] = 2/69; + q[5,1] = -14/99; + q[5,2] = 69/271; + q[5,3] = -31/316; + q[5,4] = -37/56; + q[5,6] = 3/4; + q[5,7] = -3/20; + q[5,8] = 1/60; + + # lower subblock + for i in range(0, 6): + for j in range(0, 9): + q[n-1-i,n-1-j] = -q[i,j]; + + # interior + for i in range(6, n-6): + q[i, i-3] = -1.0/60.0 + q[i, i-2] = 3.0/20.0 + q[i, i-1] = -3.0/4.0 + q[i, i+1] = 3.0/4.0 + q[i, i+2] = -3.0/20.0 + q[i, i+3] = 1.0/60.0 + + return p, q + diff --git a/test/test_grudge.py b/test/test_grudge.py index e373e133..dcdb6cc3 100644 --- a/test/test_grudge.py +++ b/test/test_grudge.py @@ -333,6 +333,392 @@ def test_convergence_advec(ctx_factory, mesh_name, mesh_pars, op_type, flux_type assert eoc_rec.order_estimate() > order +@pytest.mark.parametrize("order", [4]) +@pytest.mark.parametrize("order_sbp", [6]) +@pytest.mark.parametrize("spacing_factor", [2]) +@pytest.mark.parametrize("c", [np.array([0.27, 0.31]), np.array([-0.27, 0.31])]) +def test_convergence_sbp_advec(order, order_sbp, spacing_factor, c, visualize=False): + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + + # Domain: x = [-1,1], y = [-1,1] + # SBP Subdomain: x = [-1,0], y = [0,1] (structured mesh) + # DG Subdomain: x = [0,1], y = [0,1] (unstructured mesh) + + # DG Half. 
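+ # The two halves are coupled through the shared face at x = 0 (the
+ # western boundary of the DG mesh): the DG half uses grudge's weak
+ # advection operator with an upwind flux, the SBP half is a
+ # finite-difference scheme with SAT penalty terms, and every
+ # right-hand-side evaluation exchanges interface data through the
+ # glue-space projections built by sbp_dg_projection (dg2sbp / sbp2dg).
+ # The DG half is set up first.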
+ dim = 2 + + from pytools.convergence import EOCRecorder + eoc_rec = EOCRecorder() + from meshmode.mesh.generation import generate_regular_rect_mesh + from grudge import sym + from sbp_operators import (sbp21, sbp42, sbp63) + from projection import sbp_dg_projection + + nelems = [20, 40, 60] + for nelem in nelems: + + mesh = generate_regular_rect_mesh(a=(0, -1), b=(1, 1), + n=(nelem, nelem), order=order, + boundary_tags=["btag_sbp", + "btag_domain"]) + + from meshmode.mesh import BTAG_ALL, BTAG_REALLY_ALL + + bra_bit = mesh.boundary_tag_bit(BTAG_REALLY_ALL) + ba_bit = mesh.boundary_tag_bit(BTAG_ALL) + bdb_bit = mesh.boundary_tag_bit("btag_domain") + bwb_bit = mesh.boundary_tag_bit("btag_sbp") + + # Tag the domain boundaries + bdry_group = mesh.facial_adjacency_groups[0][None] + bdry_group.neighbors.fill(-(bdb_bit | ba_bit | bra_bit)) + + # Let's focus specifically on the western faces now - + # these will be the interface points. + for idx, elem in enumerate(bdry_group.elements): + node_count = 0 + for j in range(0, mesh.groups[0].nodes.shape[2]): + if mesh.groups[0].nodes[0][elem][j] == 0: + node_count += 1 + if node_count > 1: + # Check that the face in question is facing in + # the left direction instead of down or up... + if bdry_group.element_faces[idx] == 1: + # Node on the leftmost face - tag this element + bdry_group.neighbors[idx] = -(bwb_bit) + + # Check to make sure this actually worked. + from meshmode.mesh import is_boundary_tag_empty + assert not is_boundary_tag_empty(mesh, "btag_sbp") + + # Check this new isolating discretization, as well as the inverse. + discr = DGDiscretizationWithBoundaries(cl_ctx, mesh, order=order) + sbp_bdry_discr = discr.discr_from_dd(sym.DTAG_BOUNDARY("btag_sbp")) + sbp_bdry_discr.cl_context = cl_ctx + + dt_factor = 4 + h = 1/nelem + + norm_c = la.norm(c) + + flux_type = "upwind" + + def f(x): + return sym.sin(10*x) + + def u_analytic(x): + return f(-c.dot(x)/norm_c+sym.var("t", sym.DD_SCALAR)*norm_c) + + from grudge.models.advection import WeakAdvectionSBPOperator + + op = WeakAdvectionSBPOperator(c, inflow_u=u_analytic( + sym.nodes(dim, sym.BTAG_ALL)), + flux_type=flux_type) + + # Count the number of DG nodes - will need this later + ngroups = len(mesh.groups) + nnodes_grp = np.ones(ngroups) + for i in range(0, dim): + for j in range(0, ngroups): + nnodes_grp[j] = nnodes_grp[j] * \ + mesh.groups[j].nodes.shape[i*-1 - 1] + + nnodes = int(sum(nnodes_grp)) + + bound_op = bind(discr, op.sym_operator(sym.DTAG_BOUNDARY("btag_sbp"))) + + u = bind(discr, u_analytic(sym.nodes(dim)))(queue, t=0) + + final_time = 0.2 + + dt = dt_factor * h/order**2 + nsteps = (final_time // dt) + 1 + dt = final_time/nsteps + 1e-15 + + # SBP Half. + + # First, need to set up the structured mesh. + n_sbp_x = nelem + n_sbp_y = nelem*spacing_factor + x_sbp = np.linspace(-1, 0, n_sbp_x, endpoint=True) + y_sbp = np.linspace(-1, 1, n_sbp_y, endpoint=True) + dx = x_sbp[1] - x_sbp[0] + dy = y_sbp[1] - y_sbp[0] + + # Set up solution vector: + # For now, timestep is the same as DG. 
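+ # The SBP solution is stored as a flat vector in row-major order with x
+ # varying fastest, so the value at grid point (i, j) sits at index
+ # i + j*n_sbp_x. The ODE state handed to the RK4 stepper later is the
+ # concatenation [u_sbp, u_dg]: the first n_sbp_x*n_sbp_y entries are the
+ # finite-difference unknowns, the remaining nnodes entries the DG nodal
+ # values.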
+ u_sbp = pyopencl.array.zeros(queue, int(n_sbp_x*n_sbp_y), np.float64) + + # Initial condition + for j in range(0, n_sbp_y): + for i in range(0, n_sbp_x): + u_sbp[i + j*(n_sbp_x)] = np.sin(10*(-c.dot( + [x_sbp[i], + y_sbp[j]])/norm_c)) + + # obtain P and Q + if order_sbp == 2: + [p_x, q_x] = sbp21(n_sbp_x) + [p_y, q_y] = sbp21(n_sbp_y) + elif order_sbp == 4: + [p_x, q_x] = sbp42(n_sbp_x) + [p_y, q_y] = sbp42(n_sbp_y) + elif order_sbp == 6: + [p_x, q_x] = sbp63(n_sbp_x) + [p_y, q_y] = sbp63(n_sbp_y) + + tau_l = 1 + tau_r = 1 + + # for the boundaries + el_x = np.zeros(n_sbp_x) + er_x = np.zeros(n_sbp_x) + el_x[0] = 1 + er_x[n_sbp_x-1] = 1 + e_l_matx = np.zeros((n_sbp_x, n_sbp_x,)) + e_r_matx = np.zeros((n_sbp_x, n_sbp_x,)) + + for i in range(0, n_sbp_x): + for j in range(0, n_sbp_x): + e_l_matx[i, j] = el_x[i]*el_x[j] + e_r_matx[i, j] = er_x[i]*er_x[j] + + el_y = np.zeros(n_sbp_y) + er_y = np.zeros(n_sbp_y) + el_y[0] = 1 + er_y[n_sbp_y-1] = 1 + e_l_maty = np.zeros((n_sbp_y, n_sbp_y,)) + e_r_maty = np.zeros((n_sbp_y, n_sbp_y,)) + + for i in range(0, n_sbp_y): + for j in range(0, n_sbp_y): + e_l_maty[i, j] = el_y[i]*el_y[j] + e_r_maty[i, j] = er_y[i]*er_y[j] + + # construct the spatial operators + d_x = np.linalg.inv(dx*p_x).dot(q_x - 0.5*e_l_matx + 0.5*e_r_matx) + d_y = np.linalg.inv(dy*p_y).dot(q_y - 0.5*e_l_maty + 0.5*e_r_maty) + + # for the boundaries + c_l_x = np.kron(tau_l, (np.linalg.inv(dx*p_x).dot(el_x))) + c_r_x = np.kron(tau_r, (np.linalg.inv(dx*p_x).dot(er_x))) + c_l_y = np.kron(tau_l, (np.linalg.inv(dy*p_y).dot(el_y))) + c_r_y = np.kron(tau_r, (np.linalg.inv(dy*p_y).dot(er_y))) + + # For speed... + dudx_mat = -np.kron(np.eye(n_sbp_y), d_x) + dudy_mat = -np.kron(d_y, np.eye(n_sbp_x)) + + # Number of nodes in our SBP-DG boundary discretization + sbp_nodes_y = (sbp_bdry_discr.nodes())[1] + # When projecting, we use nodes sorted in y, but we will have to unsort + # afterwards to make sure projected solution is injected into DG BC + # in the correct way. + nodesort = np.argsort(sbp_nodes_y.get()) + nodesortlist = nodesort.tolist() + rangex = np.array(range(sbp_nodes_y.get().shape[0])) + unsort_args = [nodesortlist.index(x) for x in rangex] + + west_nodes = np.sort(np.array(sbp_nodes_y.get())) + + # Make element-aligned glue grid. + dg_side_gg = np.zeros(int(west_nodes.shape[0]/(order+1))+1) + counter = 0 + for i in range(0, west_nodes.shape[0]): + if i % (order+1) == 0: + dg_side_gg[counter] = west_nodes[i] + counter += 1 + + dg_side_gg[-1] = west_nodes[-1] + n_west_elements = int(west_nodes.shape[0] / (order + 1)) + sbp2dg, dg2sbp = sbp_dg_projection(n_sbp_y-1, n_west_elements, + order_sbp, order, dg_side_gg, + west_nodes) + + # Get mapping for western face + base_nodes = discr._volume_discr.nodes() + nsbp_nodes = sbp_bdry_discr.nnodes + nodes_per_element = mesh.groups[0].nodes.shape[2] + west_indices = np.zeros(nsbp_nodes) + count = 0 + # Sweep through first block of indices in the box. + for i in range(0, (nelem-1)*2*nodes_per_element): + if base_nodes[0][i] < 1e-10: + # Make sure we're actually at the edge faces. + if i % (2*nodes_per_element) < nodes_per_element: + west_indices[count] = i + count += 1 + + def rhs(t, u): + # Initialize the entire RHS to 0. + rhs_out = pyopencl.array.zeros(queue, + int(n_sbp_x*n_sbp_y) + int(nnodes), + np.float64) + + # Fill the first part with the SBP half of the domain. + + # Pull the SBP vector out of device array for now. 
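+ # The SBP half is advanced as du/dt = -c . grad(u) - SAT terms, where
+ # each SAT penalty has the form
+ #     d = tau * (h*P)^{-1} e (e^T u - g)
+ # with h the grid spacing in that direction, and weakly enforces the
+ # boundary datum g. On the west, south and north faces g is the analytic
+ # solution; on the east face (the SBP/DG interface) g is the DG trace
+ # projected onto the SBP grid (dg_proj below).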
+ u_sbp_ts = u[0:int(n_sbp_x*n_sbp_y)].get() + + dudx = np.zeros((n_sbp_x*n_sbp_y)) + dudy = np.zeros((n_sbp_x*n_sbp_y)) + + dudx = dudx_mat.dot(u_sbp_ts) + dudy = dudy_mat.dot(u_sbp_ts) + + # Boundary condition + dl_x = np.zeros(n_sbp_x*n_sbp_y) + dr_x = np.zeros(n_sbp_x*n_sbp_y) + dl_y = np.zeros(n_sbp_x*n_sbp_y) + dr_y = np.zeros(n_sbp_x*n_sbp_y) + + # Pull DG solution at western face to project. + u_dg_ts = u[int(n_sbp_x*n_sbp_y):].get() + + dg_west = np.zeros(nsbp_nodes) + for i in range(0, nsbp_nodes): + dg_west[i] = u_dg_ts[int(west_indices[i])] + + # Project DG to SBP: + dg_proj = dg2sbp.dot(dg_west) + + # Need to fill this by looping through each segment. + # X-boundary conditions: + for j in range(0, n_sbp_y): + u_bcx = u_sbp_ts[j*n_sbp_x:((j+1)*n_sbp_x)] + v_l_x = np.transpose(el_x).dot(u_bcx) + v_r_x = np.transpose(er_x).dot(u_bcx) + left_bcx = np.sin(10*(-c.dot( + [x_sbp[0], y_sbp[j]])/norm_c + + norm_c*t)) + # Incorporate DG here. + right_bcx = dg_proj[j] + dl_xbc = c_l_x*(v_l_x - left_bcx) + dr_xbc = c_r_x*(v_r_x - right_bcx) + dl_x[j*n_sbp_x:((j+1)*n_sbp_x)] = dl_xbc + dr_x[j*n_sbp_x:((j+1)*n_sbp_x)] = dr_xbc + # Y-boundary conditions: + for i in range(0, n_sbp_x): + u_bcy = u_sbp_ts[i::n_sbp_x] + v_l_y = np.transpose(el_y).dot(u_bcy) + v_r_y = np.transpose(er_y).dot(u_bcy) + left_bcy = np.sin(10*(-c.dot( + [x_sbp[i], y_sbp[0]])/norm_c + norm_c*t)) + right_bcy = np.sin(10*(-c.dot( + [x_sbp[i], + y_sbp[n_sbp_y-1]])/norm_c + norm_c*t)) + dl_ybc = c_l_y*(v_l_y - left_bcy) + dr_ybc = c_r_y*(v_r_y - right_bcy) + dl_y[i::n_sbp_x] = dl_ybc + dr_y[i::n_sbp_x] = dr_ybc + + # Add these at each point on the SBP half to get the SBP RHS. + rhs_sbp = c[0]*dudx + c[1]*dudy - dl_x - dr_x - dl_y - dr_y + + # Now pop this back into the device RHS vector. + rhs_sbp_dev = pyopencl.array.zeros(queue, (n_sbp_x*n_sbp_y,), np.float64) + rhs_sbp_dev = pyopencl.array.to_device(queue, rhs_sbp) + + rhs_out[0:int(n_sbp_x*n_sbp_y)] = rhs_sbp_dev + + sbp_east = np.zeros(n_sbp_y) + # Pull SBP domain values off of east face. + counter = 0 + for i in range(0, n_sbp_x*n_sbp_y): + if i == n_sbp_x - 1: + sbp_east[counter] = u_sbp_ts[i] + counter += 1 + elif i % n_sbp_x == n_sbp_x - 1: + sbp_east[counter] = u_sbp_ts[i] + counter += 1 + + # Projection from SBP to DG is now a two-step process. + # First: SBP-to-DG. + sbp_proj = sbp2dg.dot(sbp_east) + # Second: Fix the ordering. + sbp_proj = sbp_proj[unsort_args] + + # Grudge DG RHS. + # Critical step - now need to apply projected SBP state to the + # proper nodal locations in u_dg. + rhs_out[int(n_sbp_x*n_sbp_y):] = bound_op(queue, t=t, + u=u[int( + n_sbp_x*n_sbp_y):], + state_from_sbp=sbp_proj) + + return rhs_out + + # Timestepper. + from grudge.shortcuts import set_up_rk4 + + # Make a combined u with the SBP and the DG parts. 
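+ # Both halves are advanced together by the single RK4 stepper below,
+ # using the DG timestep dt = dt_factor*h/order**2 (adjusted to land on
+ # final_time). The error is then measured per half: sym.norm(2, ...) on
+ # the DG part and a discrete dx*dy-weighted L2 sum on the SBP part; the
+ # EOC uses the DG error for positive x-speed and the SBP error
+ # otherwise.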
+ u_comb = pyopencl.array.zeros(queue, int(n_sbp_x*n_sbp_y) + nnodes, + np.float64) + u_comb[0:int(n_sbp_x*n_sbp_y)] = u_sbp + for i in range(int(n_sbp_x*n_sbp_y), int(n_sbp_x*n_sbp_y) + nnodes): + u_comb[i] = u[i - int(n_sbp_x*n_sbp_y)] + dt_stepper = set_up_rk4("u", dt, u_comb, rhs) + + from grudge.shortcuts import make_visualizer + vis = make_visualizer(discr, vis_order=order) + + step = 0 + + # Create mesh for structured grid output + sbp_mesh = np.zeros((2, n_sbp_y, n_sbp_x,)) + for j in range(0, n_sbp_y): + sbp_mesh[0, j, :] = x_sbp + for i in range(0, n_sbp_x): + sbp_mesh[1, :, i] = y_sbp + + for event in dt_stepper.run(t_end=final_time): + if isinstance(event, dt_stepper.StateComputed): + + step += 1 + + last_t = event.t + u_sbp = event.state_component[0:int(n_sbp_x*n_sbp_y)] + u_dg = event.state_component[int(n_sbp_x*n_sbp_y):] + + error_l2_dg = bind(discr, + sym.norm(2, sym.var("u") + - u_analytic(sym.nodes(dim))))( + queue, t=last_t, u=u_dg) + sbp_error = np.zeros((n_sbp_x*n_sbp_y)) + error_l2_sbp = 0 + for j in range(0, n_sbp_y): + for i in range(0, n_sbp_x): + sbp_error[i + j*n_sbp_x] = \ + u_sbp[i + j*n_sbp_x].get() - \ + np.sin(10*(-c.dot([x_sbp[i], y_sbp[j]]) + / norm_c + last_t*norm_c)) + error_l2_sbp = error_l2_sbp + \ + dx*dy*(sbp_error[i + j*n_sbp_x]) ** 2 + + error_l2_sbp = np.sqrt(error_l2_sbp) + + # Write out the DG data + if visualize: + vis.write_vtk_file("eoc_dg-%s-%04d.vtu" % + (nelem, step), + [("u", u_dg)], overwrite=True) + + # Write out the SBP data. + from pyvisfile.vtk import write_structured_grid + if visualize: + filename = "eoc_sbp_%s-%04d.vts" % (nelem, step) + write_structured_grid(filename, sbp_mesh, + point_data=[("u", u_sbp.get())]) + + if c[0] > 0: + eoc_rec.add_data_point(h, error_l2_dg) + else: + eoc_rec.add_data_point(h, error_l2_sbp) + + assert eoc_rec.order_estimate() > (order_sbp/2 + 1) * 0.95 + + @pytest.mark.parametrize("order", [3, 4, 5]) def test_convergence_maxwell(ctx_factory, order): """Test whether 3D Maxwell's actually converges""" -- GitLab From 1262014cd654c57cd907c0e33fa88aa0a2664d96 Mon Sep 17 00:00:00 2001 From: Cory Mikida Date: Fri, 8 May 2020 17:27:36 -0500 Subject: [PATCH 05/12] Placate Flake8 --- test/projection.py | 4 +- test/sbp_operators.py | 86 +++++++++++++++++++++---------------------- 2 files changed, 45 insertions(+), 45 deletions(-) diff --git a/test/projection.py b/test/projection.py index 06b455d4..c3e1b5a5 100644 --- a/test/projection.py +++ b/test/projection.py @@ -533,7 +533,7 @@ def lgl_weights(n, nodes): weights = np.zeros(n) for i in range(0, n): - weights[i] = (2.0 / (n*(n-1) * - (legendre_interval(n-1, -1, 1, nodes[i]))**2)) + weights[i] = (2.0 / (n*(n-1) + * (legendre_interval(n-1, -1, 1, nodes[i]))**2)) return weights diff --git a/test/sbp_operators.py b/test/sbp_operators.py index 8170805d..fdcbbe08 100644 --- a/test/sbp_operators.py +++ b/test/sbp_operators.py @@ -78,6 +78,7 @@ def sbp42(n): return p, q + # Standard FD method: fourth-order def sbp63(n): p = np.zeros((n, n, )) @@ -103,52 +104,52 @@ def sbp63(n): # now the q matrix # upper subblock - q[0,1] = 127/211; - q[0,2] = 35/298; - q[0,3] = -32/83; - q[0,4] = 89/456; - q[0,5] = -2/69; - - q[1,0] = -127/211; - q[1,2] = -1/167; - q[1,3] = 391/334; - q[1,4] = -50/71; - q[1,5] = 14/99; - - q[2,0] = -35/298; - q[2,1] = 1/167; - q[2,3] = -34/79; - q[2,4] = 90/113; - q[2,5] = -69/271; - - q[3,0] = 32/83; - q[3,1] = -391/334; - q[3,2] = 34/79; - q[3,4] = 6/25; - q[3,5] = 31/316; - q[3,6] = 1/60; - - q[4,0] = -89/456; - q[4,1] = 50/71; - q[4,2] = -90/113; 
- q[4,3] = -6/25; - q[4,5] = 37/56; - q[4,6] = -3/20; - q[4,7] = 1/60; - - q[5,0] = 2/69; - q[5,1] = -14/99; - q[5,2] = 69/271; - q[5,3] = -31/316; - q[5,4] = -37/56; - q[5,6] = 3/4; - q[5,7] = -3/20; - q[5,8] = 1/60; + q[0, 1] = 127/211 + q[0, 2] = 35/298 + q[0, 3] = -32/83 + q[0, 4] = 89/456 + q[0, 5] = -2/69 + + q[1, 0] = -127/211 + q[1, 2] = -1/167 + q[1, 3] = 391/334 + q[1, 4] = -50/71 + q[1, 5] = 14/99 + + q[2, 0] = -35/298 + q[2, 1] = 1/167 + q[2, 3] = -34/79 + q[2, 4] = 90/113 + q[2, 5] = -69/271 + + q[3, 0] = 32/83 + q[3, 1] = -391/334 + q[3, 2] = 34/79 + q[3, 4] = 6/25 + q[3, 5] = 31/316 + q[3, 6] = 1/60 + + q[4, 0] = -89/456 + q[4, 1] = 50/71 + q[4, 2] = -90/113 + q[4, 3] = -6/25 + q[4, 5] = 37/56 + q[4, 6] = -3/20 + q[4, 7] = 1/60 + + q[5, 0] = 2/69 + q[5, 1] = -14/99 + q[5, 2] = 69/271 + q[5, 3] = -31/316 + q[5, 4] = -37/56 + q[5, 6] = 3/4 + q[5, 7] = -3/20 + q[5, 8] = 1/60 # lower subblock for i in range(0, 6): for j in range(0, 9): - q[n-1-i,n-1-j] = -q[i,j]; + q[n-1-i, n-1-j] = -q[i, j] # interior for i in range(6, n-6): @@ -160,4 +161,3 @@ def sbp63(n): q[i, i+3] = 1.0/60.0 return p, q - -- GitLab From b1348ca579f2f25fa9babba7b7fa827344f0d005 Mon Sep 17 00:00:00 2001 From: Cory Mikida Date: Tue, 12 May 2020 09:52:59 -0500 Subject: [PATCH 06/12] Conforms with reversion of erroneous meshmode change to nodal array formation --- test/test_grudge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_grudge.py b/test/test_grudge.py index dcdb6cc3..7f1bd2f6 100644 --- a/test/test_grudge.py +++ b/test/test_grudge.py @@ -511,7 +511,7 @@ def test_convergence_sbp_advec(order, order_sbp, spacing_factor, c, visualize=Fa dudy_mat = -np.kron(d_y, np.eye(n_sbp_x)) # Number of nodes in our SBP-DG boundary discretization - sbp_nodes_y = (sbp_bdry_discr.nodes())[1] + sbp_nodes_y = (sbp_bdry_discr.nodes().with_queue(queue))[1] # When projecting, we use nodes sorted in y, but we will have to unsort # afterwards to make sure projected solution is injected into DG BC # in the correct way. 
-- GitLab From 3e7f07695248057da62e5fa01c8fc882194f1b34 Mon Sep 17 00:00:00 2001 From: Cory Mikida Date: Wed, 13 May 2020 11:49:18 -0500 Subject: [PATCH 07/12] Tracks meshmode changes - SBP operator no longer uses BTAG_ALL --- grudge/models/advection.py | 4 ++-- test/test_grudge.py | 39 +++++++++----------------------------- 2 files changed, 11 insertions(+), 32 deletions(-) diff --git a/grudge/models/advection.py b/grudge/models/advection.py index 34ec6655..3bcf781a 100644 --- a/grudge/models/advection.py +++ b/grudge/models/advection.py @@ -131,7 +131,7 @@ class WeakAdvectionSBPOperator(AdvectionOperatorBase): def flux(self, u): return self.weak_flux(u) - def sym_operator(self, sbp_tag=sym.BTAG_NONE): + def sym_operator(self, sbp_tag=sym.BTAG_NONE, std_tag=sym.BTAG_NONE): u = sym.var("u") # boundary conditions ------------------------------------------------- @@ -146,7 +146,7 @@ class WeakAdvectionSBPOperator(AdvectionOperatorBase): self.v, sym.stiffness_t(self.ambient_dim)*u) - sym.FaceMassOperator()( flux(sym.int_tpair(u)) - + flux(sym.bv_tpair(sym.BTAG_ALL, u, bc_in)) + + flux(sym.bv_tpair(std_tag, u, bc_in)) + flux(sym.bv_tpair(sbp_tag, u, sym.var("state_from_sbp", dd=sym.as_dofdesc(sbp_tag)))) diff --git a/test/test_grudge.py b/test/test_grudge.py index 7f1bd2f6..1bb1e913 100644 --- a/test/test_grudge.py +++ b/test/test_grudge.py @@ -360,37 +360,14 @@ def test_convergence_sbp_advec(order, order_sbp, spacing_factor, c, visualize=Fa mesh = generate_regular_rect_mesh(a=(0, -1), b=(1, 1), n=(nelem, nelem), order=order, - boundary_tags=["btag_sbp", - "btag_domain"]) - - from meshmode.mesh import BTAG_ALL, BTAG_REALLY_ALL - - bra_bit = mesh.boundary_tag_bit(BTAG_REALLY_ALL) - ba_bit = mesh.boundary_tag_bit(BTAG_ALL) - bdb_bit = mesh.boundary_tag_bit("btag_domain") - bwb_bit = mesh.boundary_tag_bit("btag_sbp") - - # Tag the domain boundaries - bdry_group = mesh.facial_adjacency_groups[0][None] - bdry_group.neighbors.fill(-(bdb_bit | ba_bit | bra_bit)) - - # Let's focus specifically on the western faces now - - # these will be the interface points. - for idx, elem in enumerate(bdry_group.elements): - node_count = 0 - for j in range(0, mesh.groups[0].nodes.shape[2]): - if mesh.groups[0].nodes[0][elem][j] == 0: - node_count += 1 - if node_count > 1: - # Check that the face in question is facing in - # the left direction instead of down or up... - if bdry_group.element_faces[idx] == 1: - # Node on the leftmost face - tag this element - bdry_group.neighbors[idx] = -(bwb_bit) + boundary_dict={"btag_sbp": ["east"], + "btag_std": ["north", "west", + "south"]}) # Check to make sure this actually worked. from meshmode.mesh import is_boundary_tag_empty assert not is_boundary_tag_empty(mesh, "btag_sbp") + assert not is_boundary_tag_empty(mesh, "btag_std") # Check this new isolating discretization, as well as the inverse. 
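# With this change the DG boundary is partitioned by tag: faces tagged
# "btag_sbp" receive their exterior state from the projected SBP solution
# (state_from_sbp), while faces tagged "btag_std" keep the analytic inflow
# condition, so the operator takes both tags and no longer needs BTAG_ALL.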
discr = DGDiscretizationWithBoundaries(cl_ctx, mesh, order=order) @@ -412,8 +389,9 @@ def test_convergence_sbp_advec(order, order_sbp, spacing_factor, c, visualize=Fa from grudge.models.advection import WeakAdvectionSBPOperator + std_tag = sym.DTAG_BOUNDARY("btag_std") op = WeakAdvectionSBPOperator(c, inflow_u=u_analytic( - sym.nodes(dim, sym.BTAG_ALL)), + sym.nodes(dim, std_tag)), flux_type=flux_type) # Count the number of DG nodes - will need this later @@ -426,7 +404,8 @@ def test_convergence_sbp_advec(order, order_sbp, spacing_factor, c, visualize=Fa nnodes = int(sum(nnodes_grp)) - bound_op = bind(discr, op.sym_operator(sym.DTAG_BOUNDARY("btag_sbp"))) + bound_op = bind(discr, op.sym_operator(sbp_tag=sym.DTAG_BOUNDARY( + "btag_sbp"), std_tag=std_tag)) u = bind(discr, u_analytic(sym.nodes(dim)))(queue, t=0) @@ -537,7 +516,7 @@ def test_convergence_sbp_advec(order, order_sbp, spacing_factor, c, visualize=Fa west_nodes) # Get mapping for western face - base_nodes = discr._volume_discr.nodes() + base_nodes = discr._volume_discr.nodes().with_queue(queue) nsbp_nodes = sbp_bdry_discr.nnodes nodes_per_element = mesh.groups[0].nodes.shape[2] west_indices = np.zeros(nsbp_nodes) -- GitLab From 7afa1c6b05fbddb46807974d01ec4d566faa3644 Mon Sep 17 00:00:00 2001 From: Cory Mikida Date: Thu, 14 May 2020 10:16:26 -0500 Subject: [PATCH 08/12] Tracking meshmode changes --- test/test_grudge.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/test/test_grudge.py b/test/test_grudge.py index 1bb1e913..3d0b1bc8 100644 --- a/test/test_grudge.py +++ b/test/test_grudge.py @@ -360,9 +360,10 @@ def test_convergence_sbp_advec(order, order_sbp, spacing_factor, c, visualize=Fa mesh = generate_regular_rect_mesh(a=(0, -1), b=(1, 1), n=(nelem, nelem), order=order, - boundary_dict={"btag_sbp": ["east"], - "btag_std": ["north", "west", - "south"]}) + face_to_boundary_tag={ + "btag_sbp": ["+x"], + "btag_std": ["+y", "+x", + "-y"]}) # Check to make sure this actually worked. from meshmode.mesh import is_boundary_tag_empty -- GitLab From 9b345ed255d8afd65fb17732f98750065c87d401 Mon Sep 17 00:00:00 2001 From: Cory Mikida Date: Fri, 15 May 2020 15:49:02 -0500 Subject: [PATCH 09/12] Adds test code to illustrate boundary restriction issue --- grudge/models/advection.py | 6 + test/sbp_dg_test.py | 383 +++++++++++++++++++++++++++++++++++++ 2 files changed, 389 insertions(+) create mode 100644 test/sbp_dg_test.py diff --git a/grudge/models/advection.py b/grudge/models/advection.py index 3bcf781a..76f62e62 100644 --- a/grudge/models/advection.py +++ b/grudge/models/advection.py @@ -152,6 +152,12 @@ class WeakAdvectionSBPOperator(AdvectionOperatorBase): )) + def test_restr(self, sbp_tag=sym.BTAG_NONE, std_tag=sym.BTAG_NONE): + u = sym.var("u") + + return sym.bv_tpair(sbp_tag, u, sym.var("state_from_sbp", + dd=sym.as_dofdesc(sbp_tag))).int + # }}} diff --git a/test/sbp_dg_test.py b/test/sbp_dg_test.py new file mode 100644 index 00000000..2a89b75d --- /dev/null +++ b/test/sbp_dg_test.py @@ -0,0 +1,383 @@ +# Copyright (C) 2020 Cory Mikida +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +import numpy as np +import pyopencl as cl # noqa +import pyopencl.array as parray # noqa +import pyopencl.clmath # noqa + +import pytest # noqa + +from pyopencl.tools import ( # noqa + pytest_generate_tests_for_pyopencl as pytest_generate_tests) + +from grudge import bind, DGDiscretizationWithBoundaries +import numpy.linalg as la +from sbp_operators import (sbp21, sbp42, sbp63) +from projection import sbp_dg_projection + +import logging +logger = logging.getLogger(__name__) + + +# Domain: x = [-1,1], y = [-1,1] +# SBP Subdomain: x = [-1,0], y = [0,1] (structured mesh) +# DG Subdomain: x = [0,1], y = [0,1] (unstructured mesh) + +def main(write_output=True, order=4): + cl_ctx = cl.create_some_context() + queue = cl.CommandQueue(cl_ctx) + + # DG Half. + dim = 2 + + from meshmode.mesh.generation import generate_regular_rect_mesh + from grudge import sym + + nelem_x = 20 + nelem_y = 20 + mesh = generate_regular_rect_mesh(a=(0, -1), b=(1, 1), + n=(nelem_x, nelem_y), order=order, + face_to_boundary_tag={"btag_sbp": ["-x"], + "btag_std": ["+y", "+x", "-y"]}) + + # Check to make sure this actually worked. + from meshmode.mesh import is_boundary_tag_empty + assert not is_boundary_tag_empty(mesh, "btag_sbp") + assert not is_boundary_tag_empty(mesh, "btag_std") + + # Check this new isolating discretization, as well as the inverse. + discr = DGDiscretizationWithBoundaries(cl_ctx, mesh, order=order) + sbp_bdry_discr = discr.discr_from_dd(sym.DTAG_BOUNDARY("btag_sbp")) + sbp_bdry_discr.cl_context = cl_ctx + + # Visualize our new boundary discretization + from meshmode.discretization.visualization import make_visualizer + vis = make_visualizer(queue, sbp_bdry_discr, vis_order=order) + vis.write_vtk_file("bdry_disc.vtu", + [], overwrite=True) + + std_bdry_discr = discr.discr_from_dd(sym.DTAG_BOUNDARY("btag_std")) + std_bdry_discr.cl_context = cl_ctx + + # Visualize our new boundary discretization + from meshmode.discretization.visualization import make_visualizer + vis = make_visualizer(queue, std_bdry_discr, vis_order=order) + vis.write_vtk_file("std_bdry_disc.vtu", + [], overwrite=True) + + dt_factor = 4 + h = 1/40 + + c = np.array([0.1, 0.1]) + norm_c = la.norm(c) + + flux_type = "upwind" + + def f(x): + return sym.sin(10*x) + + def u_analytic(x): + return f(-c.dot(x)/norm_c+sym.var("t", sym.DD_SCALAR)*norm_c) + + from grudge.models.advection import WeakAdvectionSBPOperator + + std_tag = sym.DTAG_BOUNDARY("btag_std") + + op = WeakAdvectionSBPOperator(c, inflow_u=u_analytic( + sym.nodes(dim, std_tag)), + flux_type=flux_type) + + # Count the number of DG nodes - will need this later + ngroups = len(mesh.groups) + nnodes_grp = np.ones(ngroups) + for i in range(0, dim): + for j in range(0, ngroups): + nnodes_grp[j] = nnodes_grp[j]*mesh.groups[j].nodes.shape[i*-1 - 1] + + nnodes = int(sum(nnodes_grp)) + + bound_op = bind(discr, op.sym_operator(sbp_tag=sym.DTAG_BOUNDARY( + "btag_sbp"), std_tag=std_tag)) + bound_restr = bind(discr, op.test_restr(sbp_tag=sym.DTAG_BOUNDARY( + "btag_sbp"), std_tag=std_tag)) + + u = bind(discr, u_analytic(sym.nodes(dim)))(queue, t=0) + + final_time = 0.2 + + dt = dt_factor * h/order**2 + nsteps = (final_time // dt) + 1 + dt = final_time/nsteps + 1e-15 + + # SBP Half. + + # First, need to set up the structured mesh. 
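+ # The sbp21/sbp42/sbp63 routines return the diagonal norm P and the
+ # skew-symmetric interior part of Q; the boundary pieces
+ # -1/2*e_l*e_l^T and +1/2*e_r*e_r^T are added when d_x and d_y are
+ # formed below, so the combined operator satisfies the SBP property
+ #     Q + Q^T = diag(-1, 0, ..., 0, 1)
+ # and D = (h*P)^{-1} Q mimics integration by parts, which is what the
+ # SAT penalty terms rely on for stability.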
+ n_sbp_x = 20 + n_sbp_y = 40 + x_sbp = np.linspace(-1, 0, n_sbp_x, endpoint=True) + y_sbp = np.linspace(-1, 1, n_sbp_y, endpoint=True) + dx = x_sbp[1] - x_sbp[0] + dy = y_sbp[1] - y_sbp[0] + + # Set up solution vector: + # For now, timestep is the same as DG. + u_sbp = parray.zeros(queue, int(n_sbp_x*n_sbp_y), np.float64) + + # Initial condition + for j in range(0, n_sbp_y): + for i in range(0, n_sbp_x): + u_sbp[i + j*(n_sbp_x)] = np.sin(10*(-c.dot( + [x_sbp[i], y_sbp[j]])/norm_c)) + + # obtain P and Q + order_sbp = 4 + if order_sbp == 2: + [p_x, q_x] = sbp21(n_sbp_x) + [p_y, q_y] = sbp21(n_sbp_y) + elif order_sbp == 4: + [p_x, q_x] = sbp42(n_sbp_x) + [p_y, q_y] = sbp42(n_sbp_y) + elif order_sbp == 6: + [p_x, q_x] = sbp63(n_sbp_x) + [p_y, q_y] = sbp63(n_sbp_y) + + tau_l = 1 + tau_r = 1 + + # for the boundaries + el_x = np.zeros(n_sbp_x) + er_x = np.zeros(n_sbp_x) + el_x[0] = 1 + er_x[n_sbp_x-1] = 1 + e_l_matx = np.zeros((n_sbp_x, n_sbp_x,)) + e_r_matx = np.zeros((n_sbp_x, n_sbp_x,)) + + for i in range(0, n_sbp_x): + for j in range(0, n_sbp_x): + e_l_matx[i, j] = el_x[i]*el_x[j] + e_r_matx[i, j] = er_x[i]*er_x[j] + + el_y = np.zeros(n_sbp_y) + er_y = np.zeros(n_sbp_y) + el_y[0] = 1 + er_y[n_sbp_y-1] = 1 + e_l_maty = np.zeros((n_sbp_y, n_sbp_y,)) + e_r_maty = np.zeros((n_sbp_y, n_sbp_y,)) + + for i in range(0, n_sbp_y): + for j in range(0, n_sbp_y): + e_l_maty[i, j] = el_y[i]*el_y[j] + e_r_maty[i, j] = er_y[i]*er_y[j] + + # construct the spatial operators + d_x = np.linalg.inv(dx*p_x).dot(q_x - 0.5*e_l_matx + 0.5*e_r_matx) + d_y = np.linalg.inv(dy*p_y).dot(q_y - 0.5*e_l_maty + 0.5*e_r_maty) + + # for the boundaries + c_l_x = np.kron(tau_l, (np.linalg.inv(dx*p_x).dot(el_x))) + c_r_x = np.kron(tau_r, (np.linalg.inv(dx*p_x).dot(er_x))) + c_l_y = np.kron(tau_l, (np.linalg.inv(dy*p_y).dot(el_y))) + c_r_y = np.kron(tau_r, (np.linalg.inv(dy*p_y).dot(er_y))) + + # For speed... + dudx_mat = -np.kron(np.eye(n_sbp_y), d_x) + dudy_mat = -np.kron(d_y, np.eye(n_sbp_x)) + + # Number of nodes in our SBP-DG boundary discretization + sbp_nodes_y = (sbp_bdry_discr.nodes().with_queue(queue))[1] + # When projecting, we use nodes sorted in y, but we will have to unsort + # afterwards to make sure projected solution is injected into DG BC + # in the correct way. + nodesort = np.argsort(sbp_nodes_y.get()) + nodesortlist = nodesort.tolist() + rangex = np.array(range(sbp_nodes_y.get().shape[0])) + unsort_args = [nodesortlist.index(x) for x in rangex] + + west_nodes = np.sort(np.array(sbp_nodes_y.get())) + + # Make element-aligned glue grid. + dg_side_gg = np.zeros(int(west_nodes.shape[0]/(order+1))+1) + counter = 0 + for i in range(0, west_nodes.shape[0]): + if i % (order+1) == 0: + dg_side_gg[counter] = west_nodes[i] + counter += 1 + + dg_side_gg[-1] = west_nodes[-1] + n_west_elements = int(west_nodes.shape[0] / (order + 1)) + sbp2dg, dg2sbp = sbp_dg_projection(n_sbp_y-1, n_west_elements, order_sbp, + order, dg_side_gg, west_nodes) + + def rhs(t, u): + # Initialize the entire RHS to 0. + rhs_out = parray.zeros(queue, + int(n_sbp_x*n_sbp_y) + int(nnodes), np.float64) + + # Fill the first part with the SBP half of the domain. + + # Pull the SBP vector out of device array for now. 
+ u_sbp_ts = u[0:int(n_sbp_x*n_sbp_y)].get() + + dudx = np.zeros((n_sbp_x*n_sbp_y)) + dudy = np.zeros((n_sbp_x*n_sbp_y)) + + dudx = dudx_mat.dot(u_sbp_ts) + dudy = dudy_mat.dot(u_sbp_ts) + + # Boundary condition + dl_x = np.zeros(n_sbp_x*n_sbp_y) + dr_x = np.zeros(n_sbp_x*n_sbp_y) + dl_y = np.zeros(n_sbp_x*n_sbp_y) + dr_y = np.zeros(n_sbp_x*n_sbp_y) + + # Need to fill this by looping through each segment. + # X-boundary conditions: + for j in range(0, n_sbp_y): + u_bcx = u_sbp_ts[j*n_sbp_x:((j+1)*n_sbp_x)] + v_l_x = np.transpose(el_x).dot(u_bcx) + v_r_x = np.transpose(er_x).dot(u_bcx) + left_bcx = np.sin(10*(-c.dot( + [x_sbp[0], y_sbp[j]])/norm_c + norm_c*t)) + right_bcx = np.sin(10*(-c.dot( + [x_sbp[n_sbp_x-1], + y_sbp[j]])/norm_c + norm_c*t)) + dl_xbc = c_l_x*(v_l_x - left_bcx) + dr_xbc = c_r_x*(v_r_x - right_bcx) + dl_x[j*n_sbp_x:((j+1)*n_sbp_x)] = dl_xbc + dr_x[j*n_sbp_x:((j+1)*n_sbp_x)] = dr_xbc + # Y-boundary conditions: + for i in range(0, n_sbp_x): + u_bcy = u_sbp_ts[i::n_sbp_x] + v_l_y = np.transpose(el_y).dot(u_bcy) + v_r_y = np.transpose(er_y).dot(u_bcy) + left_bcy = np.sin(10*(-c.dot( + [x_sbp[i], y_sbp[0]])/norm_c + norm_c*t)) + right_bcy = np.sin(10*(-c.dot( + [x_sbp[i], + y_sbp[n_sbp_y-1]])/norm_c + norm_c*t)) + dl_ybc = c_l_y*(v_l_y - left_bcy) + dr_ybc = c_r_y*(v_r_y - right_bcy) + dl_y[i::n_sbp_x] = dl_ybc + dr_y[i::n_sbp_x] = dr_ybc + + # Add these at each point on the SBP half to get the SBP RHS. + rhs_sbp = c[0]*dudx + c[1]*dudy - dl_x - dr_x - dl_y - dr_y + + # Now pop this back into the device RHS vector. + rhs_sbp_dev = parray.zeros(queue, (n_sbp_x*n_sbp_y,), np.float64) + rhs_sbp_dev = parray.to_device(queue, rhs_sbp) + + rhs_out[0:int(n_sbp_x*n_sbp_y)] = rhs_sbp_dev + + sbp_east = np.zeros(n_sbp_y) + # Pull SBP domain values off of east face. + counter = 0 + for i in range(0, n_sbp_x*n_sbp_y): + if i == n_sbp_x - 1: + sbp_east[counter] = u_sbp_ts[i] + counter += 1 + elif i % n_sbp_x == n_sbp_x - 1: + sbp_east[counter] = u_sbp_ts[i] + counter += 1 + + # Projection from SBP to DG is now a two-step process. + # First: SBP-to-DG. + sbp_proj = sbp2dg.dot(sbp_east) + # Second: Fix the ordering. + sbp_proj = sbp_proj[unsort_args] + + # Grudge DG RHS. + # Critical step - now need to apply projected SBP state to the + # proper nodal locations in u_dg. + rhs_out[int(n_sbp_x*n_sbp_y):] = bound_op(queue, t=t, + u=u[int(n_sbp_x*n_sbp_y):], + state_from_sbp=sbp_proj) + + test_qty = bound_restr(queue, t=t, u=u[int(n_sbp_x*n_sbp_y):], + state_from_sbp=sbp_proj) + print("Restriction of volume to boundary: ", test_qty) + + return rhs_out + + # Timestepper. + from grudge.shortcuts import set_up_rk4 + + # Make a combined u with the SBP and the DG parts. 
+ u_comb = parray.zeros(queue, int(n_sbp_x*n_sbp_y) + nnodes, np.float64) + u_comb[0:int(n_sbp_x*n_sbp_y)] = u_sbp + for i in range(int(n_sbp_x*n_sbp_y), int(n_sbp_x*n_sbp_y) + nnodes): + u_comb[i] = u[i - int(n_sbp_x*n_sbp_y)] + dt_stepper = set_up_rk4("u", dt, u_comb, rhs) + + from grudge.shortcuts import make_visualizer + vis = make_visualizer(discr, vis_order=order) + + step = 0 + + # Create mesh for structured grid output + sbp_mesh = np.zeros((2, n_sbp_y, n_sbp_x)) + for j in range(0, n_sbp_y): + sbp_mesh[0, j, :] = x_sbp + for i in range(0, n_sbp_x): + sbp_mesh[1, :, i] = y_sbp + + for event in dt_stepper.run(t_end=final_time): + if isinstance(event, dt_stepper.StateComputed): + + step += 1 + + last_t = event.t + u_sbp = event.state_component[0:int(n_sbp_x*n_sbp_y)] + u_dg = event.state_component[int(n_sbp_x*n_sbp_y):] + + error_l2_dg = bind(discr, + sym.norm(2, sym.var("u") + - u_analytic(sym.nodes(dim))))( + queue, t=last_t, u=u_dg) + print('DG L2 Error after Step ', step, error_l2_dg) + sbp_error = np.zeros((n_sbp_x*n_sbp_y)) + error_l2_sbp = 0 + for j in range(0, n_sbp_y): + for i in range(0, n_sbp_x): + sbp_error[i + j*n_sbp_x] = u_sbp[i + j*n_sbp_x].get() - \ + np.sin(10*(-c.dot([x_sbp[i], y_sbp[j]])/norm_c + + - last_t*norm_c)) + error_l2_sbp = error_l2_sbp + \ + dx*dy*(sbp_error[i + j*n_sbp_x]) ** 2 + + error_l2_sbp = np.sqrt(error_l2_sbp) + print('SBP L2 Error after Step ', step, error_l2_sbp) + + # Write out the DG data only + vis.write_vtk_file("dg-%04d.vtu" % step, + [("u", u_dg)], overwrite=True) + + # Try writing out a VTK file with the SBP data. + from pyvisfile.vtk import write_structured_grid + + # Overwrite existing files - this is annoying when debugging. + filename = "sbp_%04d.vts" % step + import os + if os.path.exists(filename): + os.remove(filename) + + write_structured_grid(filename, sbp_mesh, + point_data=[("u", u_sbp.get())]) + + +if __name__ == "__main__": + main() -- GitLab From 703d80d8ecd1f7c7f0dd2654c5c4698986f37662 Mon Sep 17 00:00:00 2001 From: Cory Mikida Date: Mon, 18 May 2020 11:26:26 -0500 Subject: [PATCH 10/12] Preliminary SBP tests now pass in Grudge thanks to Andreas --- test/test_grudge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_grudge.py b/test/test_grudge.py index 78d3e0b3..81a1a4ab 100644 --- a/test/test_grudge.py +++ b/test/test_grudge.py @@ -403,7 +403,7 @@ def test_convergence_sbp_advec(order, order_sbp, spacing_factor, c, visualize=Fa mesh = generate_regular_rect_mesh(a=(0, -1), b=(1, 1), n=(nelem, nelem), order=order, face_to_boundary_tag={ - "btag_sbp": ["+x"], + "btag_sbp": ["-x"], "btag_std": ["+y", "+x", "-y"]}) -- GitLab From a867d4452f7a46f16170c40e02b9eee4929c423d Mon Sep 17 00:00:00 2001 From: Cory Mikida Date: Mon, 18 May 2020 15:19:15 -0500 Subject: [PATCH 11/12] Tracking meshmode changes --- test/test_grudge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_grudge.py b/test/test_grudge.py index 81a1a4ab..081e634c 100644 --- a/test/test_grudge.py +++ b/test/test_grudge.py @@ -402,7 +402,7 @@ def test_convergence_sbp_advec(order, order_sbp, spacing_factor, c, visualize=Fa mesh = generate_regular_rect_mesh(a=(0, -1), b=(1, 1), n=(nelem, nelem), order=order, - face_to_boundary_tag={ + boundary_tag_to_face={ "btag_sbp": ["-x"], "btag_std": ["+y", "+x", "-y"]}) -- GitLab From c88f0f06b99bb2e2c1449a9d9929c67d37e2c9e6 Mon Sep 17 00:00:00 2001 From: Cory Mikida Date: Mon, 30 Aug 2021 16:18:20 -0500 Subject: [PATCH 12/12] Point requirements back to main --- 
requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e8a234e7..deb09394 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,4 +7,4 @@ git+https://gitlab.tiker.net/inducer/dagrt.git git+https://gitlab.tiker.net/inducer/leap.git git+https://github.com/inducer/meshpy.git git+https://github.com/inducer/modepy.git -git+https://gitlab.tiker.net/cory/meshmode.git +git+https://github.com/inducer/meshmode.git -- GitLab
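
A minimal sketch, not part of the patch series above: it condenses the SBP-to-DG coupling step that the series introduces, as exercised in test/sbp_dg_test.py. The wrapper function below and its argument list are new and purely illustrative; the objects op, discr, queue, u_sbp_ts, sbp2dg, unsort_args, n_sbp_x, the keyword state_from_sbp, and the boundary tags "btag_sbp"/"btag_std" are assumed to be built exactly as in that test file.

    from grudge import bind, sym


    def coupled_dg_rhs(queue, discr, op, t, u_dg, u_sbp_ts,
                       sbp2dg, unsort_args, n_sbp_x):
        # East face of the structured SBP block: the same nodes the test
        # gathers with its "i % n_sbp_x == n_sbp_x - 1" loop, as a slice.
        sbp_east = u_sbp_ts[n_sbp_x - 1::n_sbp_x]

        # Project onto the DG face nodes through the glue grid, then undo
        # the y-sort so the trace matches grudge's boundary node ordering.
        sbp_trace = sbp2dg.dot(sbp_east)[unsort_args]

        # In the test this bind happens once, outside the RHS; it is
        # repeated here only to keep the sketch self-contained.
        bound_op = bind(discr, op.sym_operator(
            sbp_tag=sym.DTAG_BOUNDARY("btag_sbp"),
            std_tag=sym.DTAG_BOUNDARY("btag_std")))

        # Evaluate the weak DG operator with the projected SBP trace
        # supplied as the "state_from_sbp" boundary state.
        return bound_op(queue, t=t, u=u_dg, state_from_sbp=sbp_trace)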