From bed01166c34d0dbff8f37e13e111f42005782dff Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 26 Jan 2018 14:17:22 -0600 Subject: [PATCH 01/68] NewLinearRecurrenceBasedDerivativeWrangler --- sumpy/expansion/__init__.py | 89 ++++++++++++++++++++++++++++--------- sumpy/tools.py | 75 +++++++++++++++++++++++++++++++ 2 files changed, 144 insertions(+), 20 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 108bc67d..9bd5b2d2 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -372,32 +372,81 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): return LinearRecurrenceBasedMiDerivativeTaker(expr, var_list, self) -class LaplaceDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): +class NewLinearRecurrenceBasedDerivativeWrangler \ + (LinearRecurrenceBasedDerivativeWrangler): - def try_get_recurrence_for_derivative(self, deriv, in_terms_of): - deriv = np.array(deriv, dtype=int) + @memoize_method + def _get_stored_ids_and_coeff_mat(self): + from scipy.linalg.interpolative import interp_decomp + from six import iteritems + from sumpy.tools import nullspace - for dim in np.where(2 <= deriv)[0]: - # Check if we can reduce this dimension in terms of the other - # dimensions. + tol = 1e-13 + stored_identifiers = [] - reduced_deriv = deriv.copy() - reduced_deriv[dim] -= 2 - coeffs = {} + import time + start_time = time.time() + logger.debug("computing recurrence for Taylor coefficients: start") - for other_dim in range(self.dim): - if other_dim == dim: - continue - needed_deriv = reduced_deriv.copy() - needed_deriv[other_dim] += 2 - needed_deriv = tuple(needed_deriv) + pde_dict = self.get_pde_dict() + P = [] + + mis = self.get_full_coefficient_identifiers() + coeff_ident_enumerate_dict = dict((tuple(mi), i) for + (i, mi) in enumerate(mis)) + + for mi in mis: + for pde_mi, coeff in iteritems(pde_dict): + diff = np.array(mi, dtype=int) - pde_mi + if (diff >= 0).all(): + eq = [0]*len(mis) + for pde_mi2, coeff2 in iteritems(pde_dict): + c = tuple(pde_mi2 + diff) + eq[coeff_ident_enumerate_dict[c]] = 1 + P.append(eq) + + P = np.array(P, dtype=np.float64) + N = nullspace(P, atol=tol) + k, idx, _ = interp_decomp(N.T, tol) + idx = idx[:k] + S = np.linalg.solve(N[idx, :].T, N.T).T + stored_identifiers = [mis[i] for i in idx] + + coeff_matrix = defaultdict(lambda: []) + for i in range(S.shape[0]): + for j in range(S.shape[1]): + if np.abs(S[i][j]) > tol: + coeff_matrix[i].append((j, S[i][j])) - if needed_deriv not in in_terms_of: - break + logger.debug("computing recurrence for Taylor coefficients: " + "done after {dur:.2f} seconds" + .format(dur=time.time() - start_time)) - coeffs[needed_deriv] = -1 - else: - return coeffs + logger.debug("number of Taylor coefficients was reduced from {orig} to {red}" + .format(orig=len(self.get_full_coefficient_identifiers()), + red=len(stored_identifiers))) + + return stored_identifiers, coeff_matrix + + @memoize_method + def get_coefficient_matrix(self, rscale): + _, coeff_matrix = self._get_stored_ids_and_coeff_mat() + return coeff_matrix + + def get_derivative_taker(self, expr, var_list): + from sumpy.tools import NewLinearRecurrenceBasedMiDerivativeTaker + return NewLinearRecurrenceBasedMiDerivativeTaker(expr, var_list, self) + + +class LaplaceDerivativeWrangler(NewLinearRecurrenceBasedDerivativeWrangler): + + def get_pde_dict(self): + pde_dict = {} + for d in range(self.dim): + mi = [0]*self.dim + mi[d] = 2 + pde_dict[tuple(mi)] = 1 + return pde_dict class 
HelmholtzDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): diff --git a/sumpy/tools.py b/sumpy/tools.py index 964c64cc..e2f6f86b 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -139,6 +139,37 @@ class LinearRecurrenceBasedMiDerivativeTaker(MiDerivativeTaker): return expr + +class NewLinearRecurrenceBasedMiDerivativeTaker(MiDerivativeTaker): + """ + The derivative taker for expansions that use + :class:`sumpy.expansion.LinearRecurrenceBasedDerivativeWrangler` + """ + + def __init__(self, expr, var_list, wrangler): + super(NewLinearRecurrenceBasedMiDerivativeTaker, self).__init__( + expr, var_list) + self.wrangler = wrangler + + @memoize_method + def diff(self, mi): + """ + :arg mi: a multi-index (tuple) indicating how many x/y derivatives are + to be taken. + """ + try: + expr = self.cache_by_mi[mi] + except KeyError: + closest_mi = self.get_closest_cached_mi(mi) + expr = self.cache_by_mi[closest_mi] + + for next_deriv, next_mi in ( + self.get_derivative_taking_sequence(closest_mi, mi)): + expr = expr.diff(next_deriv) + self.cache_by_mi[next_mi] = expr + + return expr + # }}} @@ -448,4 +479,48 @@ def my_syntactic_subs(expr, subst_dict): return expr +# Source: http://scipy-cookbook.readthedocs.io/items/RankNullspace.html +# Author: SciPy Developers +# License: BSD-3-Clause + +def nullspace(A, atol=1e-13, rtol=0): + """Compute an approximate basis for the nullspace of A. + + The algorithm used by this function is based on the singular value + decomposition of `A`. + + Parameters + ---------- + A : ndarray + A should be at most 2-D. A 1-D array with length k will be treated + as a 2-D with shape (1, k) + atol : float + The absolute tolerance for a zero singular value. Singular values + smaller than `atol` are considered to be zero. + rtol : float + The relative tolerance. Singular values less than rtol*smax are + considered to be zero, where smax is the largest singular value. + + If both `atol` and `rtol` are positive, the combined tolerance is the + maximum of the two; that is:: + tol = max(atol, rtol * smax) + Singular values smaller than `tol` are considered to be zero. + + Return value + ------------ + ns : ndarray + If `A` is an array with shape (m, k), then `ns` will be an array + with shape (k, n), where n is the estimated dimension of the + nullspace of `A`. The columns of `ns` are a basis for the + nullspace; each element in numpy.dot(A, ns) will be approximately + zero. 
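As a quick sanity check of the claim that ``numpy.dot(A, ns)`` is approximately zero, the SVD-based construction can be exercised on a small rank-deficient matrix. This is an illustrative sketch only: it restates the function body that follows so it runs on its own, and the test matrix is made up::

    import numpy as np

    def nullspace(A, atol=1e-13, rtol=0):
        A = np.atleast_2d(A)
        u, s, vh = np.linalg.svd(A)
        tol = max(atol, rtol * s[0])
        nnz = (s >= tol).sum()
        return vh[nnz:].conj().T

    a = np.array([[1.0, 2.0, 3.0],
                  [2.0, 4.0, 6.0]])   # rank 1, so the nullspace is 2-dimensional
    ns = nullspace(a)
    assert ns.shape == (3, 2)
    assert np.allclose(np.dot(a, ns), 0)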
+ """ + from numpy.linalg import svd + A = np.atleast_2d(A) + u, s, vh = svd(A) + tol = max(atol, rtol * s[0]) + nnz = (s >= tol).sum() + ns = vh[nnz:].conj().T + return ns + # vim: fdm=marker -- GitLab From 2d9b3a7df69ba23db1097fac8582624b3e963589 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 26 Jan 2018 15:22:57 -0600 Subject: [PATCH 02/68] Add scipy to requirements --- .test-conda-env-py3.yml | 1 + requirements.txt | 1 + setup.py | 1 + 3 files changed, 3 insertions(+) diff --git a/.test-conda-env-py3.yml b/.test-conda-env-py3.yml index b2b8b1b4..7d69115b 100644 --- a/.test-conda-env-py3.yml +++ b/.test-conda-env-py3.yml @@ -13,4 +13,5 @@ dependencies: - python=3.5 - symengine=0.3.0 - python-symengine=0.3.0 +- scipy # things not in here: loopy boxtree pymbolic pyfmmlib diff --git a/requirements.txt b/requirements.txt index 1e86e610..ab8d542d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ numpy sympy==1.0 +scipy>=0.13 git+https://github.com/inducer/pymbolic git+https://github.com/inducer/islpy git+https://github.com/inducer/pyopencl diff --git a/setup.py b/setup.py index 8d085054..e5f82e37 100644 --- a/setup.py +++ b/setup.py @@ -97,6 +97,7 @@ setup(name="sumpy", "boxtree>=2013.1", "pytest>=2.3", "six", + "scipy>=0.13", # If this causes issues, see: # https://code.google.com/p/sympy/issues/detail?id=3874 -- GitLab From 97477a263ae9c30ffb89affb1847f29aa9145918 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 26 Jan 2018 15:29:54 -0600 Subject: [PATCH 03/68] Fix formatting --- sumpy/expansion/__init__.py | 20 ++++++++++---------- sumpy/tools.py | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 9bd5b2d2..c865b476 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -389,7 +389,7 @@ class NewLinearRecurrenceBasedDerivativeWrangler \ logger.debug("computing recurrence for Taylor coefficients: start") pde_dict = self.get_pde_dict() - P = [] + pde_mat = [] mis = self.get_full_coefficient_identifiers() coeff_ident_enumerate_dict = dict((tuple(mi), i) for @@ -403,20 +403,20 @@ class NewLinearRecurrenceBasedDerivativeWrangler \ for pde_mi2, coeff2 in iteritems(pde_dict): c = tuple(pde_mi2 + diff) eq[coeff_ident_enumerate_dict[c]] = 1 - P.append(eq) + pde_mat.append(eq) - P = np.array(P, dtype=np.float64) - N = nullspace(P, atol=tol) - k, idx, _ = interp_decomp(N.T, tol) + pde_mat = np.array(pde_mat, dtype=np.float64) + n = nullspace(pde_mat, atol=tol) + k, idx, _ = interp_decomp(n.T, tol) idx = idx[:k] - S = np.linalg.solve(N[idx, :].T, N.T).T + s = np.linalg.solve(n[idx, :].T, n.T).T stored_identifiers = [mis[i] for i in idx] coeff_matrix = defaultdict(lambda: []) - for i in range(S.shape[0]): - for j in range(S.shape[1]): - if np.abs(S[i][j]) > tol: - coeff_matrix[i].append((j, S[i][j])) + for i in range(s.shape[0]): + for j in range(s.shape[1]): + if np.abs(s[i][j]) > tol: + coeff_matrix[i].append((j, s[i][j])) logger.debug("computing recurrence for Taylor coefficients: " "done after {dur:.2f} seconds" diff --git a/sumpy/tools.py b/sumpy/tools.py index e2f6f86b..730c52d2 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -483,7 +483,7 @@ def my_syntactic_subs(expr, subst_dict): # Author: SciPy Developers # License: BSD-3-Clause -def nullspace(A, atol=1e-13, rtol=0): +def nullspace(A, atol=1e-13, rtol=0): # noqa:N803 """Compute an approximate basis for the nullspace of A. 
The algorithm used by this function is based on the singular value @@ -516,7 +516,7 @@ def nullspace(A, atol=1e-13, rtol=0): zero. """ from numpy.linalg import svd - A = np.atleast_2d(A) + A = np.atleast_2d(A) # noqa:N806 u, s, vh = svd(A) tol = max(atol, rtol * s[0]) nnz = (s >= tol).sum() -- GitLab From a2dfd8d553e8ab92cc74a94081cf57620f62361b Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 26 Jan 2018 16:12:06 -0600 Subject: [PATCH 04/68] fix for pde_mat = 0 --- sumpy/expansion/__init__.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index c865b476..c060b8da 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -405,12 +405,16 @@ class NewLinearRecurrenceBasedDerivativeWrangler \ eq[coeff_ident_enumerate_dict[c]] = 1 pde_mat.append(eq) - pde_mat = np.array(pde_mat, dtype=np.float64) - n = nullspace(pde_mat, atol=tol) - k, idx, _ = interp_decomp(n.T, tol) - idx = idx[:k] - s = np.linalg.solve(n[idx, :].T, n.T).T - stored_identifiers = [mis[i] for i in idx] + if len(pde_mat) > 0: + pde_mat = np.array(pde_mat, dtype=np.float64) + n = nullspace(pde_mat, atol=tol) + k, idx, _ = interp_decomp(n.T, tol) + idx = idx[:k] + s = np.linalg.solve(n[idx, :].T, n.T).T + stored_identifiers = [mis[i] for i in idx] + else: + s = np.eye(len(mis)) + stored_identifiers = mis coeff_matrix = defaultdict(lambda: []) for i in range(s.shape[0]): -- GitLab From 424e616de85da379287e4b702c8aa865ad1cc40a Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 26 Jan 2018 16:12:53 -0600 Subject: [PATCH 05/68] Use projection matrix --- sumpy/expansion/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index c060b8da..d43379a6 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -408,9 +408,9 @@ class NewLinearRecurrenceBasedDerivativeWrangler \ if len(pde_mat) > 0: pde_mat = np.array(pde_mat, dtype=np.float64) n = nullspace(pde_mat, atol=tol) - k, idx, _ = interp_decomp(n.T, tol) + k, idx, proj = interp_decomp(n.T, tol) + s = np.hstack([np.eye(k), proj])[:, np.argsort(idx)] idx = idx[:k] - s = np.linalg.solve(n[idx, :].T, n.T).T stored_identifiers = [mis[i] for i in idx] else: s = np.eye(len(mis)) -- GitLab From 9f97efb34825d6d5cb2c0b1fdb6c2d54c49f55ca Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 29 Jan 2018 12:56:29 -0600 Subject: [PATCH 06/68] Fix coeff_matrix --- sumpy/expansion/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index d43379a6..f4144a86 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -420,7 +420,7 @@ class NewLinearRecurrenceBasedDerivativeWrangler \ for i in range(s.shape[0]): for j in range(s.shape[1]): if np.abs(s[i][j]) > tol: - coeff_matrix[i].append((j, s[i][j])) + coeff_matrix[j].append((i, s[i][j])) logger.debug("computing recurrence for Taylor coefficients: " "done after {dur:.2f} seconds" -- GitLab From 0d54ec50c2a4456f7e6eca51b1f7cb8dd5388682 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 30 Jan 2018 16:25:10 -0600 Subject: [PATCH 07/68] Introduce deriv_multiplier --- sumpy/expansion/__init__.py | 127 ++++++++---------------------------- 1 file changed, 27 insertions(+), 100 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index f4144a86..f12cde63 100644 --- 
a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -246,6 +246,10 @@ def _spmv(spmat, x, sparse_vectors): class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): + def __init__(self, order, dim, deriv_multiplier): + super(LinearRecurrenceBasedDerivativeWrangler, self).__init__(order, dim) + self.deriv_multiplier = deriv_multiplier + def get_coefficient_identifiers(self): return self.stored_identifiers @@ -304,77 +308,12 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): matrix_row = [] for icol, coeff in row: diff = row_rscale - sum(stored_identifiers[icol]) - matrix_row.append((icol, coeff * rscale**diff)) + mult = (rscale*self.deriv_multiplier)**diff + matrix_row.append((icol, coeff * mult)) matrix_rows.append((irow, matrix_row)) return defaultdict(list, matrix_rows) - @memoize_method - def _get_stored_ids_and_coeff_mat(self): - stored_identifiers = [] - identifiers_so_far = {} - - import time - start_time = time.time() - logger.debug("computing recurrence for Taylor coefficients: start") - - # Sparse matrix, indexed by row - coeff_matrix_transpose = defaultdict(list) - - # Build up the matrix transpose by row. - from six import iteritems - for i, identifier in enumerate(self.get_full_coefficient_identifiers()): - expr = self.try_get_recurrence_for_derivative( - identifier, identifiers_so_far) - - if expr is None: - # Identifier should be stored - coeff_matrix_transpose[len(stored_identifiers)] = [(i, 1)] - stored_identifiers.append(identifier) - else: - expr = dict((identifiers_so_far[ident], val) for ident, val in - iteritems(expr)) - result = _spmv(coeff_matrix_transpose, expr, sparse_vectors=True) - for j, item in iteritems(result): - coeff_matrix_transpose[j].append((i, item)) - - identifiers_so_far[identifier] = i - - stored_identifiers = stored_identifiers - - coeff_matrix = defaultdict(list) - for i, row in iteritems(coeff_matrix_transpose): - for j, val in row: - coeff_matrix[j].append((i, val)) - - logger.debug("computing recurrence for Taylor coefficients: " - "done after {dur:.2f} seconds" - .format(dur=time.time() - start_time)) - - logger.debug("number of Taylor coefficients was reduced from {orig} to {red}" - .format(orig=len(self.get_full_coefficient_identifiers()), - red=len(stored_identifiers))) - - return stored_identifiers, coeff_matrix - - def try_get_recurrence_for_derivative(self, deriv, in_terms_of): - """ - :arg deriv: a tuple of integers identifying a derivative for which - a recurrence is sought - :arg in_terms_of: a container supporting efficient containment testing - indicating availability of derivatives for use in recurrences - """ - - raise NotImplementedError - - def get_derivative_taker(self, expr, var_list): - from sumpy.tools import LinearRecurrenceBasedMiDerivativeTaker - return LinearRecurrenceBasedMiDerivativeTaker(expr, var_list, self) - - -class NewLinearRecurrenceBasedDerivativeWrangler \ - (LinearRecurrenceBasedDerivativeWrangler): - @memoize_method def _get_stored_ids_and_coeff_mat(self): from scipy.linalg.interpolative import interp_decomp @@ -432,17 +371,23 @@ class NewLinearRecurrenceBasedDerivativeWrangler \ return stored_identifiers, coeff_matrix - @memoize_method - def get_coefficient_matrix(self, rscale): - _, coeff_matrix = self._get_stored_ids_and_coeff_mat() - return coeff_matrix + def get_pde_dict(self): + """ + Return the PDE as a dictionary of derivative_identifier: coeff such that + sum(derivative_identifer * coeff) = 0 is the PDE. 
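Concretely, the Laplace wrangler in this series builds ``{(2, 0): 1, (0, 2): 1}`` in 2D, i.e. u_xx + u_yy = 0; for Helmholtz the factor of k**2 is recovered through ``deriv_multiplier`` rather than stored in the dictionary. A small sympy check of that encoding against a harmonic function, as an illustrative sketch that does not use the wrangler classes themselves::

    import sympy as sp

    x, y = sp.symbols("x y")

    def apply_mi(expr, mi):
        # differentiate expr mi[0] times in x and mi[1] times in y
        for var, count in zip((x, y), mi):
            for _ in range(count):
                expr = sp.diff(expr, var)
        return expr

    laplace_2d = {(2, 0): 1, (0, 2): 1}
    u = sp.exp(x) * sp.cos(y)            # a harmonic function
    residual = sum(coeff * apply_mi(u, mi) for mi, coeff in laplace_2d.items())
    assert sp.simplify(residual) == 0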
+ """ + + raise NotImplementedError def get_derivative_taker(self, expr, var_list): from sumpy.tools import NewLinearRecurrenceBasedMiDerivativeTaker return NewLinearRecurrenceBasedMiDerivativeTaker(expr, var_list, self) -class LaplaceDerivativeWrangler(NewLinearRecurrenceBasedDerivativeWrangler): +class LaplaceDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): + + def __init__(self, order, dim): + super(LaplaceDerivativeWrangler, self).__init__(order, dim, 1) def get_pde_dict(self): pde_dict = {} @@ -456,35 +401,17 @@ class LaplaceDerivativeWrangler(NewLinearRecurrenceBasedDerivativeWrangler): class HelmholtzDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): def __init__(self, order, dim, helmholtz_k_name): - super(HelmholtzDerivativeWrangler, self).__init__(order, dim) - self.helmholtz_k_name = helmholtz_k_name - - def try_get_recurrence_for_derivative(self, deriv, in_terms_of): - deriv = np.array(deriv, dtype=int) + multiplier = sym.Symbol(helmholtz_k_name) + super(HelmholtzDerivativeWrangler, self).__init__(order, dim, multiplier) - for dim in np.where(2 <= deriv)[0]: - # Check if we can reduce this dimension in terms of the other - # dimensions. - - reduced_deriv = deriv.copy() - reduced_deriv[dim] -= 2 - coeffs = {} - - for other_dim in range(self.dim): - if other_dim == dim: - continue - needed_deriv = reduced_deriv.copy() - needed_deriv[other_dim] += 2 - needed_deriv = tuple(needed_deriv) - - if needed_deriv not in in_terms_of: - break - - coeffs[needed_deriv] = -1 - else: - k = sym.Symbol(self.helmholtz_k_name) - coeffs[tuple(reduced_deriv)] = -k*k - return coeffs + def get_pde_dict(self, **kwargs): + pde_dict = {} + for d in range(self.dim): + mi = [0]*self.dim + mi[d] = 2 + pde_dict[tuple(mi)] = 1 + pde_dict[tuple([0]*self.dim)] = -1 + return pde_dict # }}} -- GitLab From 97aaf67a4e9f1705219d51c75f453a6f9ae92410 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Thu, 1 Feb 2018 23:16:11 -0600 Subject: [PATCH 08/68] Fix pde_dict generation --- sumpy/expansion/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index f12cde63..50876943 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -341,8 +341,11 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): eq = [0]*len(mis) for pde_mi2, coeff2 in iteritems(pde_dict): c = tuple(pde_mi2 + diff) + if c not in coeff_ident_enumerate_dict: + break eq[coeff_ident_enumerate_dict[c]] = 1 - pde_mat.append(eq) + else: + pde_mat.append(eq) if len(pde_mat) > 0: pde_mat = np.array(pde_mat, dtype=np.float64) -- GitLab From 5c861d4d91364747bcb62021e1c0fae73c256a0f Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 2 Feb 2018 10:38:36 -0600 Subject: [PATCH 09/68] Use get_reduced_coefficients --- sumpy/expansion/__init__.py | 35 +++++++++++++++++++++++++++++++---- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 50876943..5d8c25d6 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -316,7 +316,6 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): @memoize_method def _get_stored_ids_and_coeff_mat(self): - from scipy.linalg.interpolative import interp_decomp from six import iteritems from sumpy.tools import nullspace @@ -350,9 +349,14 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): if len(pde_mat) > 0: pde_mat = np.array(pde_mat, dtype=np.float64) n = 
nullspace(pde_mat, atol=tol) - k, idx, proj = interp_decomp(n.T, tol) - s = np.hstack([np.eye(k), proj])[:, np.argsort(idx)] - idx = idx[:k] + idx = self.get_reduced_coeffs() + if idx is None or len(idx) < n.shape[1]: + from scipy.linalg.interpolative import interp_decomp + k, idx, proj = interp_decomp(n.T, tol) + s = np.hstack([np.eye(k), proj])[:, np.argsort(idx)] + idx = idx[:k] + else: + s = np.linalg.solve(n.T[:, idx], n.T) stored_identifiers = [mis[i] for i in idx] else: s = np.eye(len(mis)) @@ -382,6 +386,15 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): raise NotImplementedError + def get_reduced_coeffs(self): + """ + If the coefficients of the derivatives can be reduced and the reduced + coefficients are known, returns a list of indices. Returning None + indicates the reduced coefficients are unknown and will be calculated + using an Interpolative Decomposition of the PDE matrix + """ + return None + def get_derivative_taker(self, expr, var_list): from sumpy.tools import NewLinearRecurrenceBasedMiDerivativeTaker return NewLinearRecurrenceBasedMiDerivativeTaker(expr, var_list, self) @@ -400,6 +413,13 @@ class LaplaceDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): pde_dict[tuple(mi)] = 1 return pde_dict + def get_reduced_coeffs(self): + idx = [] + for i, mi in enumerate(self.get_full_coefficient_identifiers()): + if mi[-1] < 2: + idx.append(i) + return idx + class HelmholtzDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): @@ -416,6 +436,13 @@ class HelmholtzDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): pde_dict[tuple([0]*self.dim)] = -1 return pde_dict + def get_reduced_coeffs(self): + idx = [] + for i, mi in enumerate(self.get_full_coefficient_identifiers()): + if mi[-1] < 2: + idx.append(i) + return idx + # }}} -- GitLab From e66bdcf269908d67b9c24c4d07eafe71bd7dbf44 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 2 Feb 2018 10:38:54 -0600 Subject: [PATCH 10/68] Revert "Add scipy to requirements" This reverts commit 6e1d51612b7cdee2362d6cbde8a5ec691b0cc38f. 
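For intuition about ``get_reduced_coeffs``: for Laplace-type recurrences only multi-indices whose last entry is 0 or 1 need to be stored, since the PDE rewrites any second or higher derivative in the last dimension in terms of the others. A stand-alone sketch of that index selection (it re-creates the identifier ordering rather than importing the wrangler)::

    from itertools import product

    def full_identifiers(order, dim):
        # all multi-indices with total degree <= order, sorted by degree,
        # mirroring get_full_coefficient_identifiers
        mis = [mi for mi in product(range(order + 1), repeat=dim)
               if sum(mi) <= order]
        return sorted(mis, key=sum)

    mis = full_identifiers(4, 2)
    reduced = [i for i, mi in enumerate(mis) if mi[-1] < 2]
    print(len(mis), len(reduced))   # 15 full coefficients, 9 stored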
--- .test-conda-env-py3.yml | 1 - requirements.txt | 1 - setup.py | 1 - 3 files changed, 3 deletions(-) diff --git a/.test-conda-env-py3.yml b/.test-conda-env-py3.yml index 7d69115b..b2b8b1b4 100644 --- a/.test-conda-env-py3.yml +++ b/.test-conda-env-py3.yml @@ -13,5 +13,4 @@ dependencies: - python=3.5 - symengine=0.3.0 - python-symengine=0.3.0 -- scipy # things not in here: loopy boxtree pymbolic pyfmmlib diff --git a/requirements.txt b/requirements.txt index ab8d542d..1e86e610 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ numpy sympy==1.0 -scipy>=0.13 git+https://github.com/inducer/pymbolic git+https://github.com/inducer/islpy git+https://github.com/inducer/pyopencl diff --git a/setup.py b/setup.py index e5f82e37..8d085054 100644 --- a/setup.py +++ b/setup.py @@ -97,7 +97,6 @@ setup(name="sumpy", "boxtree>=2013.1", "pytest>=2.3", "six", - "scipy>=0.13", # If this causes issues, see: # https://code.google.com/p/sympy/issues/detail?id=3874 -- GitLab From f0dfd4fc9fa4023e938c4ca91356cc40e9c13be4 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 12 Mar 2018 10:09:53 -0500 Subject: [PATCH 11/68] Remove use of ID --- sumpy/expansion/__init__.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 5d8c25d6..afdd9248 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -350,13 +350,8 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): pde_mat = np.array(pde_mat, dtype=np.float64) n = nullspace(pde_mat, atol=tol) idx = self.get_reduced_coeffs() - if idx is None or len(idx) < n.shape[1]: - from scipy.linalg.interpolative import interp_decomp - k, idx, proj = interp_decomp(n.T, tol) - s = np.hstack([np.eye(k), proj])[:, np.argsort(idx)] - idx = idx[:k] - else: - s = np.linalg.solve(n.T[:, idx], n.T) + assert len(idx) >= n.shape[1] + s = np.linalg.solve(n.T[:, idx], n.T) stored_identifiers = [mis[i] for i in idx] else: s = np.eye(len(mis)) @@ -393,7 +388,7 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): indicates the reduced coefficients are unknown and will be calculated using an Interpolative Decomposition of the PDE matrix """ - return None + raise NotImplementedError def get_derivative_taker(self, expr, var_list): from sumpy.tools import NewLinearRecurrenceBasedMiDerivativeTaker -- GitLab From 1b6c66ae44be50acc83abe2c5497ed6a727f46cb Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 18 Apr 2018 22:44:14 -0500 Subject: [PATCH 12/68] Remove LinearRecurrenceBasedMiDerivativeTaker --- sumpy/expansion/__init__.py | 17 +++++++- sumpy/expansion/local.py | 31 +++++++++++++-- sumpy/tools.py | 78 ------------------------------------- 3 files changed, 43 insertions(+), 83 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index afdd9248..8ba654af 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -192,6 +192,13 @@ class DerivativeWrangler(object): res = sorted(gnitstam(self.order, self.dim), key=sum) return res + @memoize_method + def get_wrangler_of_order(self, order): + obj = self.__class__.__new__(self.__class__) + obj.order = order + obj.dim = self.dim + return obj + class FullDerivativeWrangler(DerivativeWrangler): @@ -391,8 +398,14 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): raise NotImplementedError def get_derivative_taker(self, expr, var_list): - from sumpy.tools import NewLinearRecurrenceBasedMiDerivativeTaker - return 
NewLinearRecurrenceBasedMiDerivativeTaker(expr, var_list, self) + from sumpy.tools import MiDerivativeTaker + return MiDerivativeTaker(expr, var_list) + + @memoize_method + def get_wrangler_of_order(self, order): + obj = DerivativeWrangler.get_wrangler_of_order(self, order) + obj.deriv_multiplier = self.deriv_multiplier + return obj class LaplaceDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index 483bc0eb..dc55797c 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -167,19 +167,44 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): from sumpy.tools import add_mi + src_max_sum = max(sum(mi) for mi in + src_expansion.get_coefficient_identifiers()) + tgt_max_sum = max(sum(mi) for mi in + self.get_coefficient_identifiers()) + + max_sum = src_max_sum + tgt_max_sum + new_deriv_wrangler = \ + src_expansion.derivative_wrangler.get_wrangler_of_order(max_sum) + new_coeffs = new_deriv_wrangler.get_coefficient_identifiers() + new_full_coeffs = new_deriv_wrangler.get_full_coefficient_identifiers() + + ident_to_index = dict((ident, i) for i, ident in + enumerate(new_full_coeffs)) + result = [] for deriv in self.get_coefficient_identifiers(): local_result = [] + + full_coeffs = [0] * len(new_full_coeffs) for coeff, term in zip( src_coeff_exprs, src_expansion.get_coefficient_identifiers()): + full_coeffs[ident_to_index[add_mi(deriv, term)]] = coeff + + stored_coeffs = \ + new_deriv_wrangler.get_stored_mpole_coefficients_from_full( + full_coeffs, src_rscale) + for i, coeff in enumerate(stored_coeffs): + if coeff == 0: + continue + nderivatives_for_scaling = sum(new_coeffs[i])-sum(deriv) kernel_deriv = ( src_expansion.get_scaled_multipole( - taker.diff(add_mi(deriv, term)), + taker.diff(new_coeffs[i]), dvec, src_rscale, - nderivatives=sum(deriv) + sum(term), - nderivatives_for_scaling=sum(term))) + nderivatives=sum(new_coeffs[i]), + nderivatives_for_scaling=nderivatives_for_scaling)) local_result.append( coeff * kernel_deriv * tgt_rscale**sum(deriv)) diff --git a/sumpy/tools.py b/sumpy/tools.py index 730c52d2..91c76050 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -92,84 +92,6 @@ class MiDerivativeTaker(object): if (np.array(mi) >= np.array(other_mi)).all()), key=lambda other_mi: sum(self.mi_dist(mi, other_mi))) - -class LinearRecurrenceBasedMiDerivativeTaker(MiDerivativeTaker): - """ - The derivative taker for expansions that use - :class:`sumpy.expansion.LinearRecurrenceBasedDerivativeWrangler` - """ - - def __init__(self, expr, var_list, wrangler): - super(LinearRecurrenceBasedMiDerivativeTaker, self).__init__( - expr, var_list) - self.wrangler = wrangler - - @memoize_method - def diff(self, mi): - """ - :arg mi: a multi-index (tuple) indicating how many x/y derivatives are - to be taken. - """ - try: - expr = self.cache_by_mi[mi] - except KeyError: - from six import iteritems - from sumpy.symbolic import Add - - closest_mi = self.get_closest_cached_mi(mi) - expr = self.cache_by_mi[closest_mi] - - # Try to reduce the derivative using recurrences first, and if that - # fails fall back to derivative taking. 
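The plain ``MiDerivativeTaker`` that replaces this relies on caching: derivatives are built up one variable at a time from an already-computed multi-index, so repeated requests share work. A stripped-down sketch of that caching pattern (not the sumpy implementation; it always recurses from the first nonzero slot)::

    import sympy as sp

    class CachedDerivativeTaker(object):
        def __init__(self, expr, var_list):
            self.var_list = var_list
            self.cache_by_mi = {(0,) * len(var_list): expr}

        def diff(self, mi):
            mi = tuple(mi)
            if mi not in self.cache_by_mi:
                # step down one derivative in the first nonzero slot
                d = next(i for i, m in enumerate(mi) if m > 0)
                lower = mi[:d] + (mi[d] - 1,) + mi[d + 1:]
                self.cache_by_mi[mi] = sp.diff(self.diff(lower),
                                               self.var_list[d])
            return self.cache_by_mi[mi]

    x, y = sp.symbols("x y")
    taker = CachedDerivativeTaker(1 / sp.sqrt(x**2 + y**2), [x, y])
    print(taker.diff((2, 1)))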
- for next_deriv, next_mi in ( - self.get_derivative_taking_sequence(closest_mi, mi)): - - recurrence = ( - self.wrangler.try_get_recurrence_for_derivative( - next_mi, self.cache_by_mi)) - - if recurrence is not None: - expr = Add(*tuple( - coeff * self.cache_by_mi[ident] - for ident, coeff in iteritems(recurrence))) - else: - expr = expr.diff(next_deriv) - - self.cache_by_mi[next_mi] = expr - - return expr - - -class NewLinearRecurrenceBasedMiDerivativeTaker(MiDerivativeTaker): - """ - The derivative taker for expansions that use - :class:`sumpy.expansion.LinearRecurrenceBasedDerivativeWrangler` - """ - - def __init__(self, expr, var_list, wrangler): - super(NewLinearRecurrenceBasedMiDerivativeTaker, self).__init__( - expr, var_list) - self.wrangler = wrangler - - @memoize_method - def diff(self, mi): - """ - :arg mi: a multi-index (tuple) indicating how many x/y derivatives are - to be taken. - """ - try: - expr = self.cache_by_mi[mi] - except KeyError: - closest_mi = self.get_closest_cached_mi(mi) - expr = self.cache_by_mi[closest_mi] - - for next_deriv, next_mi in ( - self.get_derivative_taking_sequence(closest_mi, mi)): - expr = expr.diff(next_deriv) - self.cache_by_mi[next_mi] = expr - - return expr - # }}} -- GitLab From e1d75f538bf4b719fad2208ed5334b776a81669f Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 24 Apr 2018 21:08:14 -0500 Subject: [PATCH 13/68] Docstrings and comments --- sumpy/expansion/__init__.py | 62 +++++++++++++++++++++++++++++++------ 1 file changed, 53 insertions(+), 9 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 8ba654af..7185d9f4 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -34,6 +34,7 @@ from collections import defaultdict __doc__ = """ .. autoclass:: ExpansionBase +.. autoclass:: LinearRecurrenceBasedDerivativeWrangler Expansion Factories ^^^^^^^^^^^^^^^^^^^ @@ -252,8 +253,22 @@ def _spmv(spmat, x, sparse_vectors): class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): + """ + .. automethod:: __init__ + .. automethod:: get_pde_dict + .. automethod:: get_reduced_coeffs + """ def __init__(self, order, dim, deriv_multiplier): + """ + :param order: order of the expansion + :param dim: number of dimensions + :param deriv_multiplier: symbolic expression that should be multiplied + with each coefficient_identifier to recover the linear + recurrence for the PDE. + + .. seealso:: :func:`~LinearRecurrenceBasedDerivativeWrangler.get_pde_dict` + """ super(LinearRecurrenceBasedDerivativeWrangler, self).__init__(order, dim) self.deriv_multiplier = deriv_multiplier @@ -352,8 +367,15 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): eq[coeff_ident_enumerate_dict[c]] = 1 else: pde_mat.append(eq) - if len(pde_mat) > 0: + """ + Find a matrix :math:`s` such that :math:`K = S^T K_{[r]}` + where :math:`r` is the indices of the reduced coefficients and + :math:`K` is a column vector of coefficients. Let :math:`P` be the + PDE matrix and :math:`N` be the nullspace of :math:`P`. + Then, :math:`S^T = N * N_{[r, :]}^{-1}` which implies, + :math:`S = N_{[r, :]}^{-T} N^T = solve(N_{[r, :]}^T, N^T)` + """ pde_mat = np.array(pde_mat, dtype=np.float64) n = nullspace(pde_mat, atol=tol) idx = self.get_reduced_coeffs() @@ -364,7 +386,9 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): s = np.eye(len(mis)) stored_identifiers = mis - coeff_matrix = defaultdict(lambda: []) + # coeff_matrix is a dictionary of lists. 
Each key in the dictionary + # is a row number and the values are pairs of column number and value. + coeff_matrix = defaultdict(list) for i in range(s.shape[0]): for j in range(s.shape[1]): if np.abs(s[i][j]) > tol: @@ -381,19 +405,35 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): return stored_identifiers, coeff_matrix def get_pde_dict(self): - """ - Return the PDE as a dictionary of derivative_identifier: coeff such that - sum(derivative_identifer * coeff) = 0 is the PDE. + r""" + Returns the PDE as a dictionary of (mi, coeff) such that mi + is the multi index of the derivative and the PDE is represented by, + + .. math:: + + \sum(\frac{mi \times coeff}{deriv\_multiplier^{sum(mi)}}) = 0 + + Note that coeff should be numeric instead of symbolic to enable use of + numeric linear algebra routines. `deriv_multiplier` can be symbolic + and should be used when the PDE has symbolic values as coefficients + for the derivatives. + + :Example: + + :math:`\Delta u - k^2 u = 0` for 2D can be written as, + + .. math:: + + \frac{(2, 0) \times 1}{k^{sum((2, 0))}} + + \frac{(0, 2) \times 1}{k^{sum((0, 2))}} + + \frac{(0, 0) \times -1}{k^{sum((0, 0))}} = 0 """ raise NotImplementedError def get_reduced_coeffs(self): """ - If the coefficients of the derivatives can be reduced and the reduced - coefficients are known, returns a list of indices. Returning None - indicates the reduced coefficients are unknown and will be calculated - using an Interpolative Decomposition of the PDE matrix + Returns the indices of the reduced set of derivatives which are stored. """ raise NotImplementedError @@ -424,6 +464,8 @@ class LaplaceDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): def get_reduced_coeffs(self): idx = [] for i, mi in enumerate(self.get_full_coefficient_identifiers()): + # Return only the derivatives with the order of the last dimension + # 0 or 1. Higher order derivatives can be reduced down to these. if mi[-1] < 2: idx.append(i) return idx @@ -447,6 +489,8 @@ class HelmholtzDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): def get_reduced_coeffs(self): idx = [] for i, mi in enumerate(self.get_full_coefficient_identifiers()): + # Return only the derivatives with the order of the last dimension + # 0 or 1. Higher order derivatives can be reduced down to these. 
if mi[-1] < 2: idx.append(i) return idx -- GitLab From ce3a1aeb09379be3b9be5f1df6057a237c3b4d78 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 24 Apr 2018 21:23:47 -0500 Subject: [PATCH 14/68] refactor get_wrangler_of_order->copy --- sumpy/expansion/__init__.py | 23 +++++++++++++++-------- sumpy/expansion/local.py | 2 +- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 7185d9f4..70449074 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -194,11 +194,8 @@ class DerivativeWrangler(object): return res @memoize_method - def get_wrangler_of_order(self, order): - obj = self.__class__.__new__(self.__class__) - obj.order = order - obj.dim = self.dim - return obj + def copy(self, order): + raise NotImplementedError class FullDerivativeWrangler(DerivativeWrangler): @@ -216,6 +213,12 @@ class FullDerivativeWrangler(DerivativeWrangler): get_stored_mpole_coefficients_from_full = ( get_full_kernel_derivatives_from_stored) + @memoize_method + def copy(self, **kwargs): + order = kwargs.pop('order', self.order) + dim = kwargs.pop('dim', self.dim) + return type(self)(order, dim) + # {{{ sparse matrix-vector multiplication @@ -442,9 +445,13 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): return MiDerivativeTaker(expr, var_list) @memoize_method - def get_wrangler_of_order(self, order): - obj = DerivativeWrangler.get_wrangler_of_order(self, order) - obj.deriv_multiplier = self.deriv_multiplier + def copy(self, **kwargs): + obj = type(self).__new__(type(self)) + order = kwargs.pop('order', self.order) + dim = kwargs.pop('dim', self.dim) + deriv_multiplier = kwargs.pop('deriv_multiplier', self.deriv_multiplier) + LinearRecurrenceBasedDerivativeWrangler.__init__(obj, order, + dim, deriv_multiplier) return obj diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index dc55797c..b9c5d0a6 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -174,7 +174,7 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): max_sum = src_max_sum + tgt_max_sum new_deriv_wrangler = \ - src_expansion.derivative_wrangler.get_wrangler_of_order(max_sum) + src_expansion.derivative_wrangler.copy(order=max_sum) new_coeffs = new_deriv_wrangler.get_coefficient_identifiers() new_full_coeffs = new_deriv_wrangler.get_full_coefficient_identifiers() -- GitLab From 6d22542d97d389e40c39c7dc36e206692758f76f Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 24 Apr 2018 23:10:45 -0500 Subject: [PATCH 15/68] Fix Helmholtz PDE --- sumpy/expansion/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 70449074..b335917c 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -490,7 +490,7 @@ class HelmholtzDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): mi = [0]*self.dim mi[d] = 2 pde_dict[tuple(mi)] = 1 - pde_dict[tuple([0]*self.dim)] = -1 + pde_dict[tuple([0]*self.dim)] = 1 return pde_dict def get_reduced_coeffs(self): -- GitLab From 71571ba877f1a05d43c1280767a0bb52c38d2872 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Mon, 30 Apr 2018 16:47:57 -0400 Subject: [PATCH 16/68] Various readability/doc/convention improvements --- sumpy/expansion/__init__.py | 75 +++++++++++++++++++------------------ sumpy/expansion/local.py | 3 ++ 2 files changed, 41 insertions(+), 37 deletions(-) diff --git a/sumpy/expansion/__init__.py 
b/sumpy/expansion/__init__.py index b335917c..7ab2125c 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -217,6 +217,11 @@ class FullDerivativeWrangler(DerivativeWrangler): def copy(self, **kwargs): order = kwargs.pop('order', self.order) dim = kwargs.pop('dim', self.dim) + + if kwargs: + raise TypeError("unexpected keyword arguments '%s'" + % ", ".join(kwargs)) + return type(self)(order, dim) @@ -263,14 +268,14 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): """ def __init__(self, order, dim, deriv_multiplier): - """ + r""" :param order: order of the expansion :param dim: number of dimensions - :param deriv_multiplier: symbolic expression that should be multiplied - with each coefficient_identifier to recover the linear - recurrence for the PDE. - - .. seealso:: :func:`~LinearRecurrenceBasedDerivativeWrangler.get_pde_dict` + :param deriv_multiplier: a symbolic expression that is used to 'normalize out' + constant coefficients in the PDE in + :func:`~LinearRecurrenceBasedDerivativeWrangler.get_pde_dict`, so that the + Taylor coefficient with multi-index :math:`\nu` as seen by that representation + of the PDE is :math:`\text{coeff} / {\text{deriv\_multiplier}^{|\nu|}}`. """ super(LinearRecurrenceBasedDerivativeWrangler, self).__init__(order, dim) self.deriv_multiplier = deriv_multiplier @@ -306,19 +311,19 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): Return a matrix that expresses every derivative in terms of a set of "stored" derivatives. - For example, for the recurrence + For example, for the recurrence:: u_xx + u_yy + u_zz = 0 - the coefficient matrix features the following entries: + the coefficient matrix features the following entries:: - ... u_xx u_yy ... <= cols = only stored derivatives - ================== - ...| ... ... ... ... - | - u_zz| ... -1 -1 ... + ... u_xx u_yy ... <= cols = only stored derivatives + ================== + ...| ... ... ... ... + | + u_zz| ... -1 -1 ... - ^ rows = one for every derivative + ^ rows = one for every derivative """ stored_identifiers, coeff_matrix = self._get_stored_ids_and_coeff_mat() @@ -347,9 +352,8 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): tol = 1e-13 stored_identifiers = [] - import time - start_time = time.time() - logger.debug("computing recurrence for Taylor coefficients: start") + from pytools import ProcessLogger + plog = ProcessLogger(logger, "compute recurrence for Taylor coefficients") pde_dict = self.get_pde_dict() pde_mat = [] @@ -371,13 +375,18 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): else: pde_mat.append(eq) if len(pde_mat) > 0: - """ + r""" Find a matrix :math:`s` such that :math:`K = S^T K_{[r]}` where :math:`r` is the indices of the reduced coefficients and :math:`K` is a column vector of coefficients. Let :math:`P` be the - PDE matrix and :math:`N` be the nullspace of :math:`P`. + PDE matrix, i.e. the matrix obtained by applying the PDE + as an identity to each of the Taylor coefficients. + (Realize that those, as functions of space, each still satisfy the PDE.) + As a result, if :math:`\mathbf\alpha` is a vector of Taylor coefficients, + one would expect :math:`P\mathbf\alpha = \mathbf 0`. + Further, let :math:`N` be the nullspace of :math:`P`. Then, :math:`S^T = N * N_{[r, :]}^{-1}` which implies, - :math:`S = N_{[r, :]}^{-T} N^T = solve(N_{[r, :]}^T, N^T)` + :math:`S = N_{[r, :]}^{-T} N^T = N_{[r, :]}^{-T} N^T`. 
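The construction in this comment can be reproduced numerically in a few lines. The sketch below uses 2D Laplace at order 2; the identifier ordering, the choice of stored indices (last entry 0 or 1) and the tolerance are assumptions of the sketch, not taken from the wrangler::

    import numpy as np

    mis = [(0, 0), (1, 0), (0, 1), (2, 0), (1, 1), (0, 2)]
    pde_dict = {(2, 0): 1, (0, 2): 1}           # Laplace: u_xx + u_yy = 0

    def add_mi(a, b):
        return tuple(x + y for x, y in zip(a, b))

    # PDE matrix P: one row per shifted application of the PDE
    pde_mat = []
    for mi in mis:
        for pde_mi in pde_dict:
            shift = tuple(m - p for m, p in zip(mi, pde_mi))
            if min(shift) >= 0:
                eq = [0] * len(mis)
                for pde_mi2, coeff in pde_dict.items():
                    eq[mis.index(add_mi(pde_mi2, shift))] = coeff
                pde_mat.append(eq)
    pde_mat = np.array(pde_mat, dtype=np.float64)

    # nullspace N of P (same SVD construction as sumpy.tools.nullspace)
    _, sv, vh = np.linalg.svd(pde_mat)
    n = vh[(sv >= 1e-13).sum():].conj().T

    # stored coefficients r: last index 0 or 1; S = N_{[r,:]}^{-T} N^T
    idx = [i for i, mi in enumerate(mis) if mi[-1] < 2]
    s = np.linalg.solve(n.T[:, idx], n.T)

    # any alpha with P alpha = 0 is recovered from its stored entries
    alpha = n.dot(np.random.rand(n.shape[1]))
    assert np.allclose(alpha, s.T.dot(alpha[idx]))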
""" pde_mat = np.array(pde_mat, dtype=np.float64) n = nullspace(pde_mat, atol=tol) @@ -397,9 +406,7 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): if np.abs(s[i][j]) > tol: coeff_matrix[j].append((i, s[i][j])) - logger.debug("computing recurrence for Taylor coefficients: " - "done after {dur:.2f} seconds" - .format(dur=time.time() - start_time)) + plog.done() logger.debug("number of Taylor coefficients was reduced from {orig} to {red}" .format(orig=len(self.get_full_coefficient_identifiers()), @@ -410,14 +417,18 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): def get_pde_dict(self): r""" Returns the PDE as a dictionary of (mi, coeff) such that mi - is the multi index of the derivative and the PDE is represented by, + is the multi-index of the derivative and the PDE is represented by, .. math:: - \sum(\frac{mi \times coeff}{deriv\_multiplier^{sum(mi)}}) = 0 + \sum_{\nu,c_\nu\in \text{pde\_dict}} + \frac{c_\nu\cdot \alpha_\nu} + {\text{deriv\_multiplier}^{\sum \text{mi}}} = 0, - Note that coeff should be numeric instead of symbolic to enable use of - numeric linear algebra routines. `deriv_multiplier` can be symbolic + where :math:`\mathbf\alpha` is a coefficient vector. + + Note that *coeff* should be a number (not symbolic) to enable use of + numeric linear algebra routines. *deriv_multiplier* can be symbolic and should be used when the PDE has symbolic values as coefficients for the derivatives. @@ -444,16 +455,6 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): from sumpy.tools import MiDerivativeTaker return MiDerivativeTaker(expr, var_list) - @memoize_method - def copy(self, **kwargs): - obj = type(self).__new__(type(self)) - order = kwargs.pop('order', self.order) - dim = kwargs.pop('dim', self.dim) - deriv_multiplier = kwargs.pop('deriv_multiplier', self.deriv_multiplier) - LinearRecurrenceBasedDerivativeWrangler.__init__(obj, order, - dim, deriv_multiplier) - return obj - class LaplaceDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index b9c5d0a6..0336ef43 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -169,8 +169,10 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): src_max_sum = max(sum(mi) for mi in src_expansion.get_coefficient_identifiers()) + assert src_max_sum == src_expansion.order tgt_max_sum = max(sum(mi) for mi in self.get_coefficient_identifiers()) + assert tgt_max_sum == tgt_expansion.order max_sum = src_max_sum + tgt_max_sum new_deriv_wrangler = \ @@ -209,6 +211,7 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): local_result.append( coeff * kernel_deriv * tgt_rscale**sum(deriv)) result.append(sym.Add(*local_result)) + else: from sumpy.tools import MiDerivativeTaker expr = src_expansion.evaluate(src_coeff_exprs, dvec, rscale=src_rscale) -- GitLab From edd50e7336e30474e31cfa1b6f289f2c83aace1b Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 30 Apr 2018 18:09:54 -0500 Subject: [PATCH 17/68] Rename derivative wrangler -> expansion terms wrangler, improve understandability of wrangled M2L --- sumpy/expansion/__init__.py | 105 +++++++++++++++-------------------- sumpy/expansion/local.py | 58 +++++++++---------- sumpy/expansion/multipole.py | 8 +-- 3 files changed, 80 insertions(+), 91 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 7ab2125c..22223b47 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -28,13 
+28,12 @@ import numpy as np import logging from pytools import memoize_method import sumpy.symbolic as sym -from sumpy.tools import MiDerivativeTaker from collections import defaultdict __doc__ = """ .. autoclass:: ExpansionBase -.. autoclass:: LinearRecurrenceBasedDerivativeWrangler +.. autoclass:: LinearRecurrenceBasedExpansionTermsWrangler Expansion Factories ^^^^^^^^^^^^^^^^^^^ @@ -159,9 +158,9 @@ class ExpansionBase(object): # }}} -# {{{ derivative wrangler +# {{{ expansion terms wrangler -class DerivativeWrangler(object): +class ExpansionTermsWrangler(object): def __init__(self, order, dim): self.order = order @@ -178,9 +177,6 @@ class DerivativeWrangler(object): rscale): raise NotImplementedError - def get_derivative_taker(self, expr, var_list): - raise NotImplementedError - @memoize_method def get_full_coefficient_identifiers(self): """ @@ -193,18 +189,21 @@ class DerivativeWrangler(object): res = sorted(gnitstam(self.order, self.dim), key=sum) return res - @memoize_method - def copy(self, order): - raise NotImplementedError + def copy(self, **kwargs): + order = kwargs.pop('order', self.order) + dim = kwargs.pop('dim', self.dim) + + if kwargs: + raise TypeError("unexpected keyword arguments '%s'" + % ", ".join(kwargs)) + return type(self)(order, dim) -class FullDerivativeWrangler(DerivativeWrangler): - def get_derivative_taker(self, expr, dvec): - return MiDerivativeTaker(expr, dvec) +class FullExpansionTermsWrangler(ExpansionTermsWrangler): get_coefficient_identifiers = ( - DerivativeWrangler.get_full_coefficient_identifiers) + ExpansionTermsWrangler.get_full_coefficient_identifiers) def get_full_kernel_derivatives_from_stored(self, stored_kernel_derivatives, rscale): @@ -213,17 +212,6 @@ class FullDerivativeWrangler(DerivativeWrangler): get_stored_mpole_coefficients_from_full = ( get_full_kernel_derivatives_from_stored) - @memoize_method - def copy(self, **kwargs): - order = kwargs.pop('order', self.order) - dim = kwargs.pop('dim', self.dim) - - if kwargs: - raise TypeError("unexpected keyword arguments '%s'" - % ", ".join(kwargs)) - - return type(self)(order, dim) - # {{{ sparse matrix-vector multiplication @@ -260,7 +248,7 @@ def _spmv(spmat, x, sparse_vectors): # }}} -class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): +class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): """ .. automethod:: __init__ .. automethod:: get_pde_dict @@ -271,13 +259,14 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): r""" :param order: order of the expansion :param dim: number of dimensions - :param deriv_multiplier: a symbolic expression that is used to 'normalize out' - constant coefficients in the PDE in - :func:`~LinearRecurrenceBasedDerivativeWrangler.get_pde_dict`, so that the - Taylor coefficient with multi-index :math:`\nu` as seen by that representation - of the PDE is :math:`\text{coeff} / {\text{deriv\_multiplier}^{|\nu|}}`. + :param deriv_multiplier: a symbolic expression that is used to + 'normalize out' constant coefficients in the PDE in + :func:`~LinearRecurrenceBasedExpansionTermsWrangler.get_pde_dict`, so that + the Taylor coefficient with multi-index :math:`\nu` as seen by that + representation of the PDE is :math:`\text{coeff} / + {\text{deriv\_multiplier}^{|\nu|}}`. 
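A quick symbolic check of this normalization for the Helmholtz case, with unit dictionary entries, ``deriv_multiplier = k``, and the sign convention from the "Fix Helmholtz PDE" commit: dividing each entry by k**|nu| and clearing denominators recovers u_xx + u_yy + k**2 u = 0. Illustrative sketch only::

    import sympy as sp

    x, y, k = sp.symbols("x y k")
    u = sp.Function("u")(x, y)

    def apply_mi(expr, mi):
        for var, count in zip((x, y), mi):
            for _ in range(count):
                expr = sp.diff(expr, var)
        return expr

    pde_dict = {(2, 0): 1, (0, 2): 1, (0, 0): 1}   # unit entries
    k_mult = k                                     # plays the deriv_multiplier role

    lhs = sum(coeff * apply_mi(u, mi) / k_mult**sum(mi)
              for mi, coeff in pde_dict.items())
    helmholtz = sp.diff(u, x, 2) + sp.diff(u, y, 2) + k**2 * u
    assert sp.expand(lhs * k**2 - helmholtz) == 0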
""" - super(LinearRecurrenceBasedDerivativeWrangler, self).__init__(order, dim) + super(LinearRecurrenceBasedExpansionTermsWrangler, self).__init__(order, dim) self.deriv_multiplier = deriv_multiplier def get_coefficient_identifiers(self): @@ -451,15 +440,11 @@ class LinearRecurrenceBasedDerivativeWrangler(DerivativeWrangler): """ raise NotImplementedError - def get_derivative_taker(self, expr, var_list): - from sumpy.tools import MiDerivativeTaker - return MiDerivativeTaker(expr, var_list) - -class LaplaceDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): +class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): def __init__(self, order, dim): - super(LaplaceDerivativeWrangler, self).__init__(order, dim, 1) + super(LaplaceExpansionTermsWrangler, self).__init__(order, dim, 1) def get_pde_dict(self): pde_dict = {} @@ -479,11 +464,11 @@ class LaplaceDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): return idx -class HelmholtzDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): +class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): def __init__(self, order, dim, helmholtz_k_name): multiplier = sym.Symbol(helmholtz_k_name) - super(HelmholtzDerivativeWrangler, self).__init__(order, dim, multiplier) + super(HelmholtzExpansionTermsWrangler, self).__init__(order, dim, multiplier) def get_pde_dict(self, **kwargs): pde_dict = {} @@ -511,31 +496,33 @@ class HelmholtzDerivativeWrangler(LinearRecurrenceBasedDerivativeWrangler): class VolumeTaylorExpansionBase(object): @classmethod - def get_or_make_derivative_wrangler(cls, *key): + def get_or_make_expansion_terms_wrangler(cls, *key): """ - This stores the derivative wrangler at the class attribute level because - precomputing the derivative wrangler may become expensive. + This stores the expansion terms wrangler at the class attribute level + because recreating the expansion terms wrangler implicitly empties its + caches. """ try: - wrangler = cls.derivative_wrangler_cache[key] + wrangler = cls.expansion_terms_wrangler_cache[key] except KeyError: - wrangler = cls.derivative_wrangler_class(*key) - cls.derivative_wrangler_cache[key] = wrangler + wrangler = cls.expansion_terms_wrangler_class(*key) + cls.expansion_terms_wrangler_cache[key] = wrangler return wrangler @property - def derivative_wrangler(self): - return self.get_or_make_derivative_wrangler(*self.derivative_wrangler_key) + def expansion_terms_wrangler(self): + return self.get_or_make_expansion_terms_wrangler( + *self.expansion_terms_wrangler_key) def get_coefficient_identifiers(self): """ Returns the identifiers of the coefficients that actually get stored. 
""" - return self.derivative_wrangler.get_coefficient_identifiers() + return self.expansion_terms_wrangler.get_coefficient_identifiers() def get_full_coefficient_identifiers(self): - return self.derivative_wrangler.get_full_coefficient_identifiers() + return self.expansion_terms_wrangler.get_full_coefficient_identifiers() @property @memoize_method @@ -549,33 +536,33 @@ class VolumeTaylorExpansionBase(object): class VolumeTaylorExpansion(VolumeTaylorExpansionBase): - derivative_wrangler_class = FullDerivativeWrangler - derivative_wrangler_cache = {} + expansion_terms_wrangler_class = FullExpansionTermsWrangler + expansion_terms_wrangler_cache = {} # not user-facing, be strict about having to pass use_rscale def __init__(self, kernel, order, use_rscale): - self.derivative_wrangler_key = (order, kernel.dim) + self.expansion_terms_wrangler_key = (order, kernel.dim) class LaplaceConformingVolumeTaylorExpansion(VolumeTaylorExpansionBase): - derivative_wrangler_class = LaplaceDerivativeWrangler - derivative_wrangler_cache = {} + expansion_terms_wrangler_class = LaplaceExpansionTermsWrangler + expansion_terms_wrangler_cache = {} # not user-facing, be strict about having to pass use_rscale def __init__(self, kernel, order, use_rscale): - self.derivative_wrangler_key = (order, kernel.dim) + self.expansion_terms_wrangler_key = (order, kernel.dim) class HelmholtzConformingVolumeTaylorExpansion(VolumeTaylorExpansionBase): - derivative_wrangler_class = HelmholtzDerivativeWrangler - derivative_wrangler_cache = {} + expansion_terms_wrangler_class = HelmholtzExpansionTermsWrangler + expansion_terms_wrangler_cache = {} # not user-facing, be strict about having to pass use_rscale def __init__(self, kernel, order, use_rscale): helmholtz_k_name = kernel.get_base_kernel().helmholtz_k_name - self.derivative_wrangler_key = (order, kernel.dim, helmholtz_k_name) + self.expansion_terms_wrangler_key = (order, kernel.dim, helmholtz_k_name) # }}} diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index 0336ef43..716fa93f 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -128,7 +128,7 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): def evaluate(self, coeffs, bvec, rscale): from sumpy.tools import mi_power, mi_factorial evaluated_coeffs = ( - self.derivative_wrangler.get_full_kernel_derivatives_from_stored( + self.expansion_terms_wrangler.get_full_kernel_derivatives_from_stored( coeffs, rscale)) bvec = bvec * rscale**-1 result = sum( @@ -167,50 +167,52 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): from sumpy.tools import add_mi - src_max_sum = max(sum(mi) for mi in - src_expansion.get_coefficient_identifiers()) - assert src_max_sum == src_expansion.order - tgt_max_sum = max(sum(mi) for mi in - self.get_coefficient_identifiers()) - assert tgt_max_sum == tgt_expansion.order + # Create a expansion terms wrangler for derivatives up to order + # (tgt order)+(src order). 
+ tgtplusderiv_exp_terms_wrangler = \ + src_expansion.expansion_terms_wrangler.copy( + order=self.order + src_expansion.order) + tgtplusderiv_coeff_ids = \ + tgtplusderiv_exp_terms_wrangler.get_coefficient_identifiers() + tgtplusderiv_full_coeff_ids = \ + tgtplusderiv_exp_terms_wrangler.get_full_coefficient_identifiers() - max_sum = src_max_sum + tgt_max_sum - new_deriv_wrangler = \ - src_expansion.derivative_wrangler.copy(order=max_sum) - new_coeffs = new_deriv_wrangler.get_coefficient_identifiers() - new_full_coeffs = new_deriv_wrangler.get_full_coefficient_identifiers() - - ident_to_index = dict((ident, i) for i, ident in - enumerate(new_full_coeffs)) + tgtplusderiv_ident_to_index = dict((ident, i) for i, ident in + enumerate(tgtplusderiv_full_coeff_ids)) result = [] - for deriv in self.get_coefficient_identifiers(): - local_result = [] + for lexp_mi in self.get_coefficient_identifiers(): + lexp_mi_terms = [] - full_coeffs = [0] * len(new_full_coeffs) + # Embed the source coefficients in the coefficient array + # for the (tgt order)+(src order) wrangler, offset by lexp_mi. + embedded_coeffs = [0] * len(tgtplusderiv_full_coeff_ids) for coeff, term in zip( src_coeff_exprs, src_expansion.get_coefficient_identifiers()): - full_coeffs[ident_to_index[add_mi(deriv, term)]] = coeff + embedded_coeffs[ + tgtplusderiv_ident_to_index[add_mi(lexp_mi, term)]] \ + = coeff - stored_coeffs = \ - new_deriv_wrangler.get_stored_mpole_coefficients_from_full( - full_coeffs, src_rscale) + stored_coeffs = tgtplusderiv_exp_terms_wrangler \ + .get_stored_mpole_coefficients_from_full( + embedded_coeffs, src_rscale) for i, coeff in enumerate(stored_coeffs): if coeff == 0: continue - nderivatives_for_scaling = sum(new_coeffs[i])-sum(deriv) + nderivatives_for_scaling = \ + sum(tgtplusderiv_coeff_ids[i])-sum(lexp_mi) kernel_deriv = ( src_expansion.get_scaled_multipole( - taker.diff(new_coeffs[i]), + taker.diff(tgtplusderiv_coeff_ids[i]), dvec, src_rscale, - nderivatives=sum(new_coeffs[i]), + nderivatives=sum(tgtplusderiv_coeff_ids[i]), nderivatives_for_scaling=nderivatives_for_scaling)) - local_result.append( - coeff * kernel_deriv * tgt_rscale**sum(deriv)) - result.append(sym.Add(*local_result)) + lexp_mi_terms.append( + coeff * kernel_deriv * tgt_rscale**sum(lexp_mi)) + result.append(sym.Add(*lexp_mi_terms)) else: from sumpy.tools import MiDerivativeTaker diff --git a/sumpy/expansion/multipole.py b/sumpy/expansion/multipole.py index 92ae80f3..25488952 100644 --- a/sumpy/expansion/multipole.py +++ b/sumpy/expansion/multipole.py @@ -94,7 +94,7 @@ class VolumeTaylorMultipoleExpansionBase(MultipoleExpansionBase): mi_power(avec, mi) / mi_factorial(mi) for mi in self.get_full_coefficient_identifiers()] return ( - self.derivative_wrangler.get_stored_mpole_coefficients_from_full( + self.expansion_terms_wrangler.get_stored_mpole_coefficients_from_full( result, rscale)) def get_scaled_multipole(self, expr, bvec, rscale, nderivatives, @@ -127,8 +127,8 @@ class VolumeTaylorMultipoleExpansionBase(MultipoleExpansionBase): return result def get_kernel_derivative_taker(self, bvec): - return (self.derivative_wrangler.get_derivative_taker( - self.kernel.get_expression(bvec), bvec)) + from sumpy.tools import MiDerivativeTaker + return MiDerivativeTaker(self.kernel.get_expression(bvec), bvec) def translate_from(self, src_expansion, src_coeff_exprs, src_rscale, dvec, tgt_rscale): @@ -188,7 +188,7 @@ class VolumeTaylorMultipoleExpansionBase(MultipoleExpansionBase): logger.info("building translation operator: done") return ( - 
self.derivative_wrangler.get_stored_mpole_coefficients_from_full( + self.expansion_terms_wrangler.get_stored_mpole_coefficients_from_full( result, tgt_rscale)) -- GitLab From 11b7d29b99367eb4a0e2ae1fa73ce04c183e75a8 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 30 Apr 2018 18:27:34 -0500 Subject: [PATCH 18/68] More M2L summary comments --- sumpy/expansion/local.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index 716fa93f..2f494a66 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -194,10 +194,12 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): tgtplusderiv_ident_to_index[add_mi(lexp_mi, term)]] \ = coeff + # Compress the embedded coefficient set stored_coeffs = tgtplusderiv_exp_terms_wrangler \ .get_stored_mpole_coefficients_from_full( embedded_coeffs, src_rscale) + # Sum the embeeded coefficient set for i, coeff in enumerate(stored_coeffs): if coeff == 0: continue -- GitLab From f4394132f3324c1ff3d8727ee6ad1a53c63a87bd Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 30 Apr 2018 19:09:48 -0500 Subject: [PATCH 19/68] Do away with embedding construction in M2L --- sumpy/expansion/local.py | 70 ++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 45 deletions(-) diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index 2f494a66..ad29ea05 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -157,64 +157,44 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): # # coeff0 * diff(kernel, mi0) + coeff1 * diff(kernel, mi1) + ... # - # To get the local expansion coefficients, we take derivatives of - # the multipole expansion. + # The local is formed by differentiating the mpole, i.e. the + # target variable is in the mpole basis--so that is what the + # derivatives for the local will hit. # - # This code speeds up derivative taking by caching all kernel - # derivatives. + # We group the calculation here so that we take all derivatives + # in one big gulp, which ends up being more efficient than + # evaluating the mpole (as a giant sum) and then differentiating + # that. + # + # In particular, this speeds up derivative taking by allowing all + # kernel derivatives to be cached. taker = src_expansion.get_kernel_derivative_taker(dvec) from sumpy.tools import add_mi - # Create a expansion terms wrangler for derivatives up to order - # (tgt order)+(src order). - tgtplusderiv_exp_terms_wrangler = \ - src_expansion.expansion_terms_wrangler.copy( - order=self.order + src_expansion.order) - tgtplusderiv_coeff_ids = \ - tgtplusderiv_exp_terms_wrangler.get_coefficient_identifiers() - tgtplusderiv_full_coeff_ids = \ - tgtplusderiv_exp_terms_wrangler.get_full_coefficient_identifiers() - - tgtplusderiv_ident_to_index = dict((ident, i) for i, ident in - enumerate(tgtplusderiv_full_coeff_ids)) + src_exp_terms_wrangler = src_expansion.expansion_terms_wrangler + src_exp_stored_coeff_ids = \ + src_exp_terms_wrangler.get_coefficient_identifiers() result = [] - for lexp_mi in self.get_coefficient_identifiers(): - lexp_mi_terms = [] - - # Embed the source coefficients in the coefficient array - # for the (tgt order)+(src order) wrangler, offset by lexp_mi. 
- embedded_coeffs = [0] * len(tgtplusderiv_full_coeff_ids) - for coeff, term in zip( - src_coeff_exprs, - src_expansion.get_coefficient_identifiers()): - embedded_coeffs[ - tgtplusderiv_ident_to_index[add_mi(lexp_mi, term)]] \ - = coeff - - # Compress the embedded coefficient set - stored_coeffs = tgtplusderiv_exp_terms_wrangler \ - .get_stored_mpole_coefficients_from_full( - embedded_coeffs, src_rscale) - - # Sum the embeeded coefficient set - for i, coeff in enumerate(stored_coeffs): - if coeff == 0: - continue - nderivatives_for_scaling = \ - sum(tgtplusderiv_coeff_ids[i])-sum(lexp_mi) + for tgt_coeff_id in self.get_coefficient_identifiers(): + tgt_coeff_terms = [] + + for src_coeff_id, coeff in zip( + src_exp_stored_coeff_ids, src_coeff_exprs): + nderivatives_for_scaling = sum(src_coeff_id) + term_mi = add_mi(src_coeff_id, tgt_coeff_id) kernel_deriv = ( src_expansion.get_scaled_multipole( - taker.diff(tgtplusderiv_coeff_ids[i]), + taker.diff(term_mi), dvec, src_rscale, - nderivatives=sum(tgtplusderiv_coeff_ids[i]), + nderivatives=sum(term_mi), nderivatives_for_scaling=nderivatives_for_scaling)) - lexp_mi_terms.append( - coeff * kernel_deriv * tgt_rscale**sum(lexp_mi)) - result.append(sym.Add(*lexp_mi_terms)) + tgt_coeff_terms.append( + coeff * kernel_deriv * tgt_rscale**sum(tgt_coeff_id)) + result.append(sym.Add(*tgt_coeff_terms)) else: from sumpy.tools import MiDerivativeTaker -- GitLab From e6729cedfe0910f9a5341e5dcd7269319743c7cf Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 30 Apr 2018 19:14:53 -0500 Subject: [PATCH 20/68] Remove ExpansionTermsWrangler.copy, fix docstring formatting --- sumpy/expansion/__init__.py | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 22223b47..5b6ac7c2 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -189,16 +189,6 @@ class ExpansionTermsWrangler(object): res = sorted(gnitstam(self.order, self.dim), key=sum) return res - def copy(self, **kwargs): - order = kwargs.pop('order', self.order) - dim = kwargs.pop('dim', self.dim) - - if kwargs: - raise TypeError("unexpected keyword arguments '%s'" - % ", ".join(kwargs)) - - return type(self)(order, dim) - class FullExpansionTermsWrangler(ExpansionTermsWrangler): @@ -261,9 +251,9 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): :param dim: number of dimensions :param deriv_multiplier: a symbolic expression that is used to 'normalize out' constant coefficients in the PDE in - :func:`~LinearRecurrenceBasedExpansionTermsWrangler.get_pde_dict`, so that - the Taylor coefficient with multi-index :math:`\nu` as seen by that - representation of the PDE is :math:`\text{coeff} / + :func:`~LinearRecurrenceBasedExpansionTermsWrangler.get_pde_dict`, + so that the Taylor coefficient with multi-index :math:`\nu` as seen + by that representation of the PDE is :math:`\text{coeff} / {\text{deriv\_multiplier}^{|\nu|}}`. """ super(LinearRecurrenceBasedExpansionTermsWrangler, self).__init__(order, dim) -- GitLab From b2400584b2908475c4502569d1fe4418d646acc1 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 30 Apr 2018 22:59:36 -0500 Subject: [PATCH 21/68] Revert "Do away with embedding construction in M2L" This reverts commit f4394132f3324c1ff3d8727ee6ad1a53c63a87bd. 
--- sumpy/expansion/local.py | 70 ++++++++++++++++++++++++++-------------- 1 file changed, 45 insertions(+), 25 deletions(-) diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index ad29ea05..2f494a66 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -157,44 +157,64 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): # # coeff0 * diff(kernel, mi0) + coeff1 * diff(kernel, mi1) + ... # - # The local is formed by differentiating the mpole, i.e. the - # target variable is in the mpole basis--so that is what the - # derivatives for the local will hit. + # To get the local expansion coefficients, we take derivatives of + # the multipole expansion. # - # We group the calculation here so that we take all derivatives - # in one big gulp, which ends up being more efficient than - # evaluating the mpole (as a giant sum) and then differentiating - # that. - # - # In particular, this speeds up derivative taking by allowing all - # kernel derivatives to be cached. + # This code speeds up derivative taking by caching all kernel + # derivatives. taker = src_expansion.get_kernel_derivative_taker(dvec) from sumpy.tools import add_mi - src_exp_terms_wrangler = src_expansion.expansion_terms_wrangler - src_exp_stored_coeff_ids = \ - src_exp_terms_wrangler.get_coefficient_identifiers() + # Create a expansion terms wrangler for derivatives up to order + # (tgt order)+(src order). + tgtplusderiv_exp_terms_wrangler = \ + src_expansion.expansion_terms_wrangler.copy( + order=self.order + src_expansion.order) + tgtplusderiv_coeff_ids = \ + tgtplusderiv_exp_terms_wrangler.get_coefficient_identifiers() + tgtplusderiv_full_coeff_ids = \ + tgtplusderiv_exp_terms_wrangler.get_full_coefficient_identifiers() - result = [] - for tgt_coeff_id in self.get_coefficient_identifiers(): - tgt_coeff_terms = [] + tgtplusderiv_ident_to_index = dict((ident, i) for i, ident in + enumerate(tgtplusderiv_full_coeff_ids)) - for src_coeff_id, coeff in zip( - src_exp_stored_coeff_ids, src_coeff_exprs): - nderivatives_for_scaling = sum(src_coeff_id) - term_mi = add_mi(src_coeff_id, tgt_coeff_id) + result = [] + for lexp_mi in self.get_coefficient_identifiers(): + lexp_mi_terms = [] + + # Embed the source coefficients in the coefficient array + # for the (tgt order)+(src order) wrangler, offset by lexp_mi. 
+ embedded_coeffs = [0] * len(tgtplusderiv_full_coeff_ids) + for coeff, term in zip( + src_coeff_exprs, + src_expansion.get_coefficient_identifiers()): + embedded_coeffs[ + tgtplusderiv_ident_to_index[add_mi(lexp_mi, term)]] \ + = coeff + + # Compress the embedded coefficient set + stored_coeffs = tgtplusderiv_exp_terms_wrangler \ + .get_stored_mpole_coefficients_from_full( + embedded_coeffs, src_rscale) + + # Sum the embeeded coefficient set + for i, coeff in enumerate(stored_coeffs): + if coeff == 0: + continue + nderivatives_for_scaling = \ + sum(tgtplusderiv_coeff_ids[i])-sum(lexp_mi) kernel_deriv = ( src_expansion.get_scaled_multipole( - taker.diff(term_mi), + taker.diff(tgtplusderiv_coeff_ids[i]), dvec, src_rscale, - nderivatives=sum(term_mi), + nderivatives=sum(tgtplusderiv_coeff_ids[i]), nderivatives_for_scaling=nderivatives_for_scaling)) - tgt_coeff_terms.append( - coeff * kernel_deriv * tgt_rscale**sum(tgt_coeff_id)) - result.append(sym.Add(*tgt_coeff_terms)) + lexp_mi_terms.append( + coeff * kernel_deriv * tgt_rscale**sum(lexp_mi)) + result.append(sym.Add(*lexp_mi_terms)) else: from sumpy.tools import MiDerivativeTaker -- GitLab From 7e003fb3f22c72c025b98d1c4690009dfca85006 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 30 Apr 2018 22:59:39 -0500 Subject: [PATCH 22/68] Revert "Remove ExpansionTermsWrangler.copy, fix docstring formatting" This reverts commit e6729cedfe0910f9a5341e5dcd7269319743c7cf. --- sumpy/expansion/__init__.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 5b6ac7c2..22223b47 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -189,6 +189,16 @@ class ExpansionTermsWrangler(object): res = sorted(gnitstam(self.order, self.dim), key=sum) return res + def copy(self, **kwargs): + order = kwargs.pop('order', self.order) + dim = kwargs.pop('dim', self.dim) + + if kwargs: + raise TypeError("unexpected keyword arguments '%s'" + % ", ".join(kwargs)) + + return type(self)(order, dim) + class FullExpansionTermsWrangler(ExpansionTermsWrangler): @@ -251,9 +261,9 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): :param dim: number of dimensions :param deriv_multiplier: a symbolic expression that is used to 'normalize out' constant coefficients in the PDE in - :func:`~LinearRecurrenceBasedExpansionTermsWrangler.get_pde_dict`, - so that the Taylor coefficient with multi-index :math:`\nu` as seen - by that representation of the PDE is :math:`\text{coeff} / + :func:`~LinearRecurrenceBasedExpansionTermsWrangler.get_pde_dict`, so that + the Taylor coefficient with multi-index :math:`\nu` as seen by that + representation of the PDE is :math:`\text{coeff} / {\text{deriv\_multiplier}^{|\nu|}}`. 
""" super(LinearRecurrenceBasedExpansionTermsWrangler, self).__init__(order, dim) -- GitLab From b471e3a4dcef9319b649f57203c3b19c69843390 Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 30 Apr 2018 23:00:23 -0500 Subject: [PATCH 23/68] Comment/docstring fixes --- sumpy/expansion/__init__.py | 6 +++--- sumpy/expansion/local.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 22223b47..413d35ab 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -261,9 +261,9 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): :param dim: number of dimensions :param deriv_multiplier: a symbolic expression that is used to 'normalize out' constant coefficients in the PDE in - :func:`~LinearRecurrenceBasedExpansionTermsWrangler.get_pde_dict`, so that - the Taylor coefficient with multi-index :math:`\nu` as seen by that - representation of the PDE is :math:`\text{coeff} / + :func:`~LinearRecurrenceBasedExpansionTermsWrangler.get_pde_dict`, so + that the Taylor coefficient with multi-index :math:`\nu` as seen by + that representation of the PDE is :math:`\text{coeff} / {\text{deriv\_multiplier}^{|\nu|}}`. """ super(LinearRecurrenceBasedExpansionTermsWrangler, self).__init__(order, dim) diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index 2f494a66..11b1368a 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -199,7 +199,7 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): .get_stored_mpole_coefficients_from_full( embedded_coeffs, src_rscale) - # Sum the embeeded coefficient set + # Sum the embedded coefficient set for i, coeff in enumerate(stored_coeffs): if coeff == 0: continue -- GitLab From 5988025708b67ab3b95cc649d85b6ef25de9c66c Mon Sep 17 00:00:00 2001 From: Andreas Kloeckner Date: Mon, 30 Apr 2018 23:14:00 -0500 Subject: [PATCH 24/68] Untangle ExpansionTermsWrangler.copy implementation --- sumpy/expansion/__init__.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 413d35ab..4f0a3a01 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -162,6 +162,8 @@ class ExpansionBase(object): class ExpansionTermsWrangler(object): + init_arg_names = ("order", "dim") + def __init__(self, order, dim): self.order = order self.dim = dim @@ -190,14 +192,18 @@ class ExpansionTermsWrangler(object): return res def copy(self, **kwargs): - order = kwargs.pop('order', self.order) - dim = kwargs.pop('dim', self.dim) + new_kwargs = dict( + (name, getattr(self, name)) + for name in self.init_arg_names) + + new_kwargs["order"] = kwargs.pop("order", self.order) + new_kwargs["dim"] = kwargs.pop("dim", self.dim) if kwargs: raise TypeError("unexpected keyword arguments '%s'" % ", ".join(kwargs)) - return type(self)(order, dim) + return type(self)(**new_kwargs) class FullExpansionTermsWrangler(ExpansionTermsWrangler): @@ -255,6 +261,8 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): .. 
automethod:: get_reduced_coeffs """ + init_arg_names = ("order", "dim", "deriv_multiplier") + def __init__(self, order, dim, deriv_multiplier): r""" :param order: order of the expansion @@ -412,7 +420,9 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): \sum_{\nu,c_\nu\in \text{pde\_dict}} \frac{c_\nu\cdot \alpha_\nu} - {\text{deriv\_multiplier}^{\sum \text{mi}}} = 0, + {\text{deriv\_multiplier}^{ + \sum \text{mi} + }} = 0, where :math:`\mathbf\alpha` is a coefficient vector. @@ -443,6 +453,8 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): + init_arg_names = ("order", "dim") + def __init__(self, order, dim): super(LaplaceExpansionTermsWrangler, self).__init__(order, dim, 1) @@ -466,7 +478,11 @@ class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler) class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): + init_arg_names = ("order", "dim", "helmholtz_k_name") + def __init__(self, order, dim, helmholtz_k_name): + self.helmholtz_k_name = helmholtz_k_name + multiplier = sym.Symbol(helmholtz_k_name) super(HelmholtzExpansionTermsWrangler, self).__init__(order, dim, multiplier) -- GitLab From 75ecfc917f7c1e1f339f1a20e61740f08f0d6580 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 2 May 2018 14:09:34 -0500 Subject: [PATCH 25/68] Use symbolic nullspace --- sumpy/expansion/__init__.py | 47 +++++++++++-------- sumpy/expansion/local.py | 9 +++- sumpy/tools.py | 93 ++++++++++++++++++++----------------- 3 files changed, 86 insertions(+), 63 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 4f0a3a01..e1da810e 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -162,11 +162,12 @@ class ExpansionBase(object): class ExpansionTermsWrangler(object): - init_arg_names = ("order", "dim") + init_arg_names = ("order", "dim", "max_mi") - def __init__(self, order, dim): + def __init__(self, order, dim, max_mi=None): self.order = order self.dim = dim + self.max_mi = max_mi def get_coefficient_identifiers(self): raise NotImplementedError @@ -189,6 +190,16 @@ class ExpansionTermsWrangler(object): as gnitstam) res = sorted(gnitstam(self.order, self.dim), key=sum) + + def filter_tuple(tup): + if self.max_mi is None: + return True + for a, b in zip(tup, self.max_mi): + if a > b: + return False + return True + + res = list(filter(filter_tuple, res)) return res def copy(self, **kwargs): @@ -196,8 +207,8 @@ class ExpansionTermsWrangler(object): (name, getattr(self, name)) for name in self.init_arg_names) - new_kwargs["order"] = kwargs.pop("order", self.order) - new_kwargs["dim"] = kwargs.pop("dim", self.dim) + for name in self.init_arg_names: + new_kwargs[name] = kwargs.pop(name, getattr(self, name)) if kwargs: raise TypeError("unexpected keyword arguments '%s'" @@ -261,9 +272,9 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): .. 
automethod:: get_reduced_coeffs """ - init_arg_names = ("order", "dim", "deriv_multiplier") + init_arg_names = ("order", "dim", "deriv_multiplier", "max_mi") - def __init__(self, order, dim, deriv_multiplier): + def __init__(self, order, dim, deriv_multiplier, max_mi=None): r""" :param order: order of the expansion :param dim: number of dimensions @@ -274,7 +285,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): that representation of the PDE is :math:`\text{coeff} / {\text{deriv\_multiplier}^{|\nu|}}`. """ - super(LinearRecurrenceBasedExpansionTermsWrangler, self).__init__(order, dim) + super(LinearRecurrenceBasedExpansionTermsWrangler, self).__init__(order, dim, max_mi) self.deriv_multiplier = deriv_multiplier def get_coefficient_identifiers(self): @@ -354,7 +365,6 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): pde_dict = self.get_pde_dict() pde_mat = [] - mis = self.get_full_coefficient_identifiers() coeff_ident_enumerate_dict = dict((tuple(mi), i) for (i, mi) in enumerate(mis)) @@ -385,11 +395,10 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): Then, :math:`S^T = N * N_{[r, :]}^{-1}` which implies, :math:`S = N_{[r, :]}^{-T} N^T = N_{[r, :]}^{-T} N^T`. """ - pde_mat = np.array(pde_mat, dtype=np.float64) - n = nullspace(pde_mat, atol=tol) + n = nullspace(pde_mat) idx = self.get_reduced_coeffs() assert len(idx) >= n.shape[1] - s = np.linalg.solve(n.T[:, idx], n.T) + s = n.T[:, idx].solve(n.T) stored_identifiers = [mis[i] for i in idx] else: s = np.eye(len(mis)) @@ -400,8 +409,8 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): coeff_matrix = defaultdict(list) for i in range(s.shape[0]): for j in range(s.shape[1]): - if np.abs(s[i][j]) > tol: - coeff_matrix[j].append((i, s[i][j])) + if np.abs(s[i, j]) > tol: + coeff_matrix[j].append((i, s[i, j])) plog.done() @@ -453,10 +462,10 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): - init_arg_names = ("order", "dim") + init_arg_names = ("order", "dim", "max_mi") - def __init__(self, order, dim): - super(LaplaceExpansionTermsWrangler, self).__init__(order, dim, 1) + def __init__(self, order, dim, max_mi=None): + super(LaplaceExpansionTermsWrangler, self).__init__(order, dim, 1, max_mi) def get_pde_dict(self): pde_dict = {} @@ -478,13 +487,13 @@ class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler) class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): - init_arg_names = ("order", "dim", "helmholtz_k_name") + init_arg_names = ("order", "dim", "helmholtz_k_name", "max_mi") - def __init__(self, order, dim, helmholtz_k_name): + def __init__(self, order, dim, helmholtz_k_name, max_mi=None): self.helmholtz_k_name = helmholtz_k_name multiplier = sym.Symbol(helmholtz_k_name) - super(HelmholtzExpansionTermsWrangler, self).__init__(order, dim, multiplier) + super(HelmholtzExpansionTermsWrangler, self).__init__(order, dim, multiplier, max_mi) def get_pde_dict(self, **kwargs): pde_dict = {} diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index 11b1368a..cf94ebd6 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -167,11 +167,18 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): from sumpy.tools import add_mi + max_mi = [0]*self.dim + for i in range(self.dim): + max_mi[i] = max(mi[i] for mi in + 
src_expansion.get_coefficient_identifiers()) + max_mi[i] += max(mi[i] for mi in + self.get_coefficient_identifiers()) + # Create a expansion terms wrangler for derivatives up to order # (tgt order)+(src order). tgtplusderiv_exp_terms_wrangler = \ src_expansion.expansion_terms_wrangler.copy( - order=self.order + src_expansion.order) + order=self.order + src_expansion.order, max_mi=tuple(max_mi)) tgtplusderiv_coeff_ids = \ tgtplusderiv_exp_terms_wrangler.get_coefficient_identifiers() tgtplusderiv_full_coeff_ids = \ diff --git a/sumpy/tools.py b/sumpy/tools.py index 91c76050..8916e33c 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -401,48 +401,55 @@ def my_syntactic_subs(expr, subst_dict): return expr -# Source: http://scipy-cookbook.readthedocs.io/items/RankNullspace.html -# Author: SciPy Developers -# License: BSD-3-Clause - -def nullspace(A, atol=1e-13, rtol=0): # noqa:N803 - """Compute an approximate basis for the nullspace of A. - - The algorithm used by this function is based on the singular value - decomposition of `A`. - - Parameters - ---------- - A : ndarray - A should be at most 2-D. A 1-D array with length k will be treated - as a 2-D with shape (1, k) - atol : float - The absolute tolerance for a zero singular value. Singular values - smaller than `atol` are considered to be zero. - rtol : float - The relative tolerance. Singular values less than rtol*smax are - considered to be zero, where smax is the largest singular value. - - If both `atol` and `rtol` are positive, the combined tolerance is the - maximum of the two; that is:: - tol = max(atol, rtol * smax) - Singular values smaller than `tol` are considered to be zero. - - Return value - ------------ - ns : ndarray - If `A` is an array with shape (m, k), then `ns` will be an array - with shape (k, n), where n is the estimated dimension of the - nullspace of `A`. The columns of `ns` are a basis for the - nullspace; each element in numpy.dot(A, ns) will be approximately - zero. 
- """ - from numpy.linalg import svd - A = np.atleast_2d(A) # noqa:N806 - u, s, vh = svd(A) - tol = max(atol, rtol * s[0]) - nnz = (s >= tol).sum() - ns = vh[nnz:].conj().T - return ns +def rref(mat): + rows = len(mat) + cols = len(mat[0]) + col = 0 + pivot_cols = [] + + for row in range(rows): + if col >= cols: + break + i = row + while mat[i][col] == 0: + i += 1 + if i == rows: + i = row + col += 1 + if col == cols: + return mat, pivot_cols + + pivot_cols.append(col) + mat[i], mat[row] = mat[row], mat[i] + + piv = mat[row][col] + for c in range(col, cols): + mat[row][c] /= piv + + for r in range(rows): + if r == row: + continue + piv = mat[r][col] + for c in range(col, cols): + mat[r][c] -= piv * mat[row][c] + col += 1 + return mat, pivot_cols + +def nullspace(m): + m2 = [[sym.sympify(col) for col in row] for row in m] + mat, pivot_cols = rref(m2) + cols = len(mat[0]) + + free_vars = [i for i in range(cols) if i not in pivot_cols] + + n = [] + for free_var in free_vars: + vec = [0]*cols + vec[free_var] = 1 + for piv_row, piv_col in enumerate(pivot_cols): + for pos in pivot_cols[piv_row+1:] + [free_var]: + vec[piv_col] -= mat[piv_row][pos] + n.append(vec) + return sym.Matrix(n).T # vim: fdm=marker -- GitLab From 5a66f4829ede51ddfebb3e75417591557677c274 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 2 May 2018 14:13:16 -0500 Subject: [PATCH 26/68] Fix constructing pde matrix --- sumpy/expansion/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index e1da810e..00849b56 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -378,7 +378,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): c = tuple(pde_mi2 + diff) if c not in coeff_ident_enumerate_dict: break - eq[coeff_ident_enumerate_dict[c]] = 1 + eq[coeff_ident_enumerate_dict[c]] = coeff else: pde_mat.append(eq) if len(pde_mat) > 0: -- GitLab From ccfe82e67e1d91bd88ff20e01e83fdba8e0bbaf4 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 2 May 2018 15:08:09 -0500 Subject: [PATCH 27/68] Fix formatting --- sumpy/expansion/__init__.py | 6 ++++-- sumpy/tools.py | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 00849b56..f680347c 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -285,7 +285,8 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): that representation of the PDE is :math:`\text{coeff} / {\text{deriv\_multiplier}^{|\nu|}}`. 
""" - super(LinearRecurrenceBasedExpansionTermsWrangler, self).__init__(order, dim, max_mi) + super(LinearRecurrenceBasedExpansionTermsWrangler, self).__init__(order, dim, + max_mi) self.deriv_multiplier = deriv_multiplier def get_coefficient_identifiers(self): @@ -493,7 +494,8 @@ class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangle self.helmholtz_k_name = helmholtz_k_name multiplier = sym.Symbol(helmholtz_k_name) - super(HelmholtzExpansionTermsWrangler, self).__init__(order, dim, multiplier, max_mi) + super(HelmholtzExpansionTermsWrangler, self).__init__(order, dim, multiplier, + max_mi) def get_pde_dict(self, **kwargs): pde_dict = {} diff --git a/sumpy/tools.py b/sumpy/tools.py index 8916e33c..dd98325b 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -435,6 +435,7 @@ def rref(mat): col += 1 return mat, pivot_cols + def nullspace(m): m2 = [[sym.sympify(col) for col in row] for row in m] mat, pivot_cols = rref(m2) -- GitLab From 41c0c0cf18a3ca3e875796138d74d843d00ea416 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 4 Sep 2018 11:53:40 -0500 Subject: [PATCH 28/68] Re-implement rref python ints are not converted to symbolic unless necessary making the calculation much faster --- sumpy/expansion/__init__.py | 5 ++- sumpy/tools.py | 90 ++++++++++++++++++++++--------------- 2 files changed, 58 insertions(+), 37 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index f680347c..68a99197 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -356,7 +356,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): @memoize_method def _get_stored_ids_and_coeff_mat(self): from six import iteritems - from sumpy.tools import nullspace + from sumpy.tools import nullspace, solve_symbolic tol = 1e-13 stored_identifiers = [] @@ -382,6 +382,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): eq[coeff_ident_enumerate_dict[c]] = coeff else: pde_mat.append(eq) + if len(pde_mat) > 0: r""" Find a matrix :math:`s` such that :math:`K = S^T K_{[r]}` @@ -399,7 +400,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): n = nullspace(pde_mat) idx = self.get_reduced_coeffs() assert len(idx) >= n.shape[1] - s = n.T[:, idx].solve(n.T) + s = solve_symbolic(n.T[:,idx], n.T) stored_identifiers = [mis[i] for i in idx] else: s = np.eye(len(mis)) diff --git a/sumpy/tools.py b/sumpy/tools.py index c5438320..996f75f0 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -657,45 +657,56 @@ def my_syntactic_subs(expr, subst_dict): return expr -def rref(mat): - rows = len(mat) - cols = len(mat[0]) - col = 0 - pivot_cols = [] - for row in range(rows): - if col >= cols: +def rref(m): + l = np.array(m, dtype=object) + index = 0 + nrows = l.shape[0] + ncols = l.shape[1] + pivot_cols = [] + for i in range(ncols): + if index == nrows: break - i = row - while mat[i][col] == 0: - i += 1 - if i == rows: - i = row - col += 1 - if col == cols: - return mat, pivot_cols - - pivot_cols.append(col) - mat[i], mat[row] = mat[row], mat[i] - - piv = mat[row][col] - for c in range(col, cols): - mat[row][c] /= piv - - for r in range(rows): - if r == row: + pivot = nrows + for k in range(index, nrows): + if l[k, i] != 0 and pivot == nrows: + pivot = k + if abs(l[k, i]) == 1: + pivot = k + break + if pivot == nrows: + continue + if pivot != index: + l[pivot,:], l[index,:] = l[index,:].copy(), l[pivot,:].copy() + + pivot_cols.append(i) + scale = l[index, i] + t = l[index,:]//scale + 
not_exact = (t * scale != l[index, :]) + if (np.any(not_exact)): + for j in range(ncols): + if not_exact[j]: + t[j] = sym.sympify(l[index, j])/scale + + l[index,:] = t + + for j in range(nrows): + if (j == index): continue - piv = mat[r][col] - for c in range(col, cols): - mat[r][c] -= piv * mat[row][c] - col += 1 - return mat, pivot_cols + + scale = l[j, i] + if scale != 0: + l[j,:] = l[j,:] - l[index,:]*scale + + index = index + 1 + + return l, pivot_cols def nullspace(m): - m2 = [[sym.sympify(col) for col in row] for row in m] - mat, pivot_cols = rref(m2) - cols = len(mat[0]) + mat, pivot_cols = rref(m) + pivot_cols = list(pivot_cols) + cols = mat.shape[1] free_vars = [i for i in range(cols) if i not in pivot_cols] @@ -705,8 +716,17 @@ def nullspace(m): vec[free_var] = 1 for piv_row, piv_col in enumerate(pivot_cols): for pos in pivot_cols[piv_row+1:] + [free_var]: - vec[piv_col] -= mat[piv_row][pos] + vec[piv_col] -= mat[piv_row,pos] n.append(vec) - return sym.Matrix(n).T + return np.array(n, dtype=object).T + + +def solve_symbolic(A, b): + if isinstance(A, sym.Matrix): + big = A.row_join(b) + else: + big = np.hstack((A, b)) + red = rref(big)[0] + return red[:,big.shape[0]:] # vim: fdm=marker -- GitLab From 8fc85da6507f8bb11658dc58348b7bb037d106e6 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Sun, 9 Sep 2018 22:06:53 -0500 Subject: [PATCH 29/68] Fix formatting --- benchmarks/bench_translations.py | 10 +---- .../translations/PDE-reduction-symbolic.ipynb | 2 +- sumpy/expansion/__init__.py | 2 +- sumpy/tools.py | 42 +++++++++---------- 4 files changed, 25 insertions(+), 31 deletions(-) diff --git a/benchmarks/bench_translations.py b/benchmarks/bench_translations.py index 8d6cfdd8..621ac275 100644 --- a/benchmarks/bench_translations.py +++ b/benchmarks/bench_translations.py @@ -1,13 +1,10 @@ import numpy as np -import pytest -import pyopencl as cl from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) from sumpy.expansion.multipole import ( VolumeTaylorMultipoleExpansion, H2DMultipoleExpansion, - VolumeTaylorMultipoleExpansionBase, LaplaceConformingVolumeTaylorMultipoleExpansion, HelmholtzConformingVolumeTaylorMultipoleExpansion) from sumpy.expansion.local import ( @@ -15,13 +12,11 @@ from sumpy.expansion.local import ( LaplaceConformingVolumeTaylorLocalExpansion, HelmholtzConformingVolumeTaylorLocalExpansion) -from sumpy.kernel import (LaplaceKernel, HelmholtzKernel, AxisTargetDerivative, - DirectionalSourceDerivative) +from sumpy.kernel import LaplaceKernel, HelmholtzKernel import logging logger = logging.getLogger(__name__) -import sympy import six import pymbolic.mapper.flop_counter @@ -29,6 +24,7 @@ import sumpy.symbolic as sym from sumpy.assignment_collection import SymbolicAssignmentCollection from sumpy.codegen import to_loopy_insns + class Param: def __init__(self, dim, order): self.dim = dim @@ -125,5 +121,3 @@ class Helmholtz2DTranslation(TranslationBenchmarkSuite): Param(2, 15), Param(2, 20), ] - - diff --git a/contrib/translations/PDE-reduction-symbolic.ipynb b/contrib/translations/PDE-reduction-symbolic.ipynb index bffcdf9e..3550b079 100644 --- a/contrib/translations/PDE-reduction-symbolic.ipynb +++ b/contrib/translations/PDE-reduction-symbolic.ipynb @@ -125,7 +125,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.4" + "version": "3.6.5" } }, "nbformat": 4, diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 68a99197..a40aafde 100644 --- 
a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -400,7 +400,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): n = nullspace(pde_mat) idx = self.get_reduced_coeffs() assert len(idx) >= n.shape[1] - s = solve_symbolic(n.T[:,idx], n.T) + s = solve_symbolic(n.T[:, idx], n.T) stored_identifiers = [mis[i] for i in idx] else: s = np.eye(len(mis)) diff --git a/sumpy/tools.py b/sumpy/tools.py index 996f75f0..32d14bcb 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -657,50 +657,50 @@ def my_syntactic_subs(expr, subst_dict): return expr - def rref(m): - l = np.array(m, dtype=object) + mat = np.array(m, dtype=object) index = 0 - nrows = l.shape[0] - ncols = l.shape[1] + nrows = mat.shape[0] + ncols = mat.shape[1] pivot_cols = [] for i in range(ncols): if index == nrows: break pivot = nrows for k in range(index, nrows): - if l[k, i] != 0 and pivot == nrows: + if mat[k, i] != 0 and pivot == nrows: pivot = k - if abs(l[k, i]) == 1: + if abs(mat[k, i]) == 1: pivot = k break if pivot == nrows: continue if pivot != index: - l[pivot,:], l[index,:] = l[index,:].copy(), l[pivot,:].copy() + mat[[pivot, index], :] = mat[[index, pivot], :] pivot_cols.append(i) - scale = l[index, i] - t = l[index,:]//scale - not_exact = (t * scale != l[index, :]) + scale = mat[index, i] + t = mat[index, :]//scale + not_exact = (t * scale != mat[index, :]) + if (np.any(not_exact)): for j in range(ncols): if not_exact[j]: - t[j] = sym.sympify(l[index, j])/scale + t[j] = sym.sympify(mat[index, j])/scale - l[index,:] = t + mat[index, :] = t for j in range(nrows): if (j == index): continue - scale = l[j, i] + scale = mat[j, i] if scale != 0: - l[j,:] = l[j,:] - l[index,:]*scale + mat[j, :] = mat[j, :] - mat[index, :]*scale index = index + 1 - return l, pivot_cols + return mat, pivot_cols def nullspace(m): @@ -716,17 +716,17 @@ def nullspace(m): vec[free_var] = 1 for piv_row, piv_col in enumerate(pivot_cols): for pos in pivot_cols[piv_row+1:] + [free_var]: - vec[piv_col] -= mat[piv_row,pos] + vec[piv_col] -= mat[piv_row, pos] n.append(vec) return np.array(n, dtype=object).T -def solve_symbolic(A, b): - if isinstance(A, sym.Matrix): - big = A.row_join(b) +def solve_symbolic(a, b): + if isinstance(a, sym.Matrix): + big = a.row_join(b) else: - big = np.hstack((A, b)) + big = np.hstack((a, b)) red = rref(big)[0] - return red[:,big.shape[0]:] + return red[:, big.shape[0]:] # vim: fdm=marker -- GitLab From d85e90d2c6e77624d0a7f4a440101c519056478a Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Thu, 4 Oct 2018 21:45:06 -0500 Subject: [PATCH 30/68] automatically figure out the reduced coefficients --- sumpy/expansion/__init__.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index a40aafde..54dbfd75 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -398,7 +398,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): :math:`S = N_{[r, :]}^{-T} N^T = N_{[r, :]}^{-T} N^T`. 
""" n = nullspace(pde_mat) - idx = self.get_reduced_coeffs() + idx = self.get_reduced_coeffs(n) assert len(idx) >= n.shape[1] s = solve_symbolic(n.T[:, idx], n.T) stored_identifiers = [mis[i] for i in idx] @@ -455,11 +455,25 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): raise NotImplementedError - def get_reduced_coeffs(self): + def get_reduced_coeffs(self, nullspace): """ Returns the indices of the reduced set of derivatives which are stored. """ - raise NotImplementedError + mat = nullspace.copy() + nrows = mat.shape[0] + ncols = mat.shape[1] + rows = [] + for i in range(nrows): + for j in range(ncols): + if mat[i, j] != 0: + col = j + break + else: + continue + rows.append(i) + for j in range(i+1, nrows): + mat[j, :] = mat[i, col]*mat[j, :] - mat[i, :]*mat[j, col] + return rows class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): @@ -477,7 +491,7 @@ class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler) pde_dict[tuple(mi)] = 1 return pde_dict - def get_reduced_coeffs(self): + def get_reduced_coeffs(self, nullspace): idx = [] for i, mi in enumerate(self.get_full_coefficient_identifiers()): # Return only the derivatives with the order of the last dimension @@ -507,7 +521,7 @@ class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangle pde_dict[tuple([0]*self.dim)] = 1 return pde_dict - def get_reduced_coeffs(self): + def get_reduced_coeffs(self, nullspace): idx = [] for i, mi in enumerate(self.get_full_coefficient_identifiers()): # Return only the derivatives with the order of the last dimension -- GitLab From 56cca9ebf72189ea18d1ba43bc32b04592c16568 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Thu, 4 Oct 2018 22:10:01 -0500 Subject: [PATCH 31/68] Comment get_reduced_coeffs specializations --- sumpy/expansion/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 54dbfd75..480f53fc 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -491,7 +491,7 @@ class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler) pde_dict[tuple(mi)] = 1 return pde_dict - def get_reduced_coeffs(self, nullspace): + def _get_reduced_coeffs(self, nullspace): idx = [] for i, mi in enumerate(self.get_full_coefficient_identifiers()): # Return only the derivatives with the order of the last dimension @@ -521,7 +521,7 @@ class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangle pde_dict[tuple([0]*self.dim)] = 1 return pde_dict - def get_reduced_coeffs(self, nullspace): + def _get_reduced_coeffs(self, nullspace): idx = [] for i, mi in enumerate(self.get_full_coefficient_identifiers()): # Return only the derivatives with the order of the last dimension -- GitLab From 0751aeb9b2bec93fb6fa4d98638a710803e1500c Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 5 Oct 2018 12:40:32 -0500 Subject: [PATCH 32/68] Add a comment --- sumpy/expansion/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 480f53fc..647ebb01 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -458,6 +458,12 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): def get_reduced_coeffs(self, nullspace): """ Returns the indices of the reduced set of derivatives which are stored. + Override this method if the reduced set is known analytically. 
+ + This method does elementary row operations to figure out which rows are + linearly dependent on the previous rows. Partial pivoting is not done + to preserve the order so that a row is not linearly dependent on a row + that came after in the original row order. """ mat = nullspace.copy() nrows = mat.shape[0] -- GitLab From 3221c321f199149acb72ac30c6c3ffdc8ae4efd3 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 8 Oct 2018 16:11:42 -0500 Subject: [PATCH 33/68] test m2m full vs reduced --- test/test_m2m.py | 254 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 254 insertions(+) create mode 100644 test/test_m2m.py diff --git a/test/test_m2m.py b/test/test_m2m.py new file mode 100644 index 00000000..44799bf2 --- /dev/null +++ b/test/test_m2m.py @@ -0,0 +1,254 @@ +from __future__ import division, absolute_import, print_function + +__copyright__ = "Copyright (C) 2012 Andreas Kloeckner" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + +from six.moves import range + +import numpy as np +import numpy.linalg as la +import sys + +import pytest +import pyopencl as cl +from pyopencl.tools import ( # noqa + pytest_generate_tests_for_pyopencl as pytest_generate_tests) + +from sumpy.expansion.multipole import ( + VolumeTaylorMultipoleExpansion, H2DMultipoleExpansion, + VolumeTaylorMultipoleExpansionBase, + LaplaceConformingVolumeTaylorMultipoleExpansion, + HelmholtzConformingVolumeTaylorMultipoleExpansion) +from sumpy.expansion.local import ( + VolumeTaylorLocalExpansion, H2DLocalExpansion, + LaplaceConformingVolumeTaylorLocalExpansion, + HelmholtzConformingVolumeTaylorLocalExpansion) +from sumpy.kernel import (LaplaceKernel, HelmholtzKernel, AxisTargetDerivative, + DirectionalSourceDerivative) +from pytools.convergence import PConvergenceVerifier + +import logging +logger = logging.getLogger(__name__) + +try: + import faulthandler +except ImportError: + pass +else: + faulthandler.enable() + +import matplotlib.pyplot as plt + +def test_m2m(ctx_getter, order=5): + logging.basicConfig(level=logging.INFO) + + from sympy.core.cache import clear_cache + clear_cache() + + ctx = ctx_getter() + queue = cl.CommandQueue(ctx) + + np.random.seed(17) + + res = 200 + nsources = 150 + + knl = HelmholtzKernel(2) + out_kernels = [knl] + + extra_kwargs = {} + if isinstance(knl, HelmholtzKernel): + extra_kwargs["k"] = 0.05 + + # Just to make sure things also work away from the origin + origin = np.array([2, 1], np.float64) + sources = (0.7*(-0.5+np.random.rand(knl.dim, nsources).astype(np.float64)) + + origin[:, np.newaxis]) + strengths = np.ones(nsources, dtype=np.float64) * (1/nsources) + + pconv_verifier_p2m2p = PConvergenceVerifier() + pconv_verifier_p2m2m2p = PConvergenceVerifier() + pconv_verifier_p2m2m2l2p = PConvergenceVerifier() + pconv_verifier_full = PConvergenceVerifier() + + from sumpy.visualization import FieldPlotter + + eval_offset = np.array([5.5, 0.0]) + + centers = (np.array( + [ + # box 0: particles, first mpole here + [0, 0], + + # box 1: second mpole here + np.array([-0.2, 0.1], np.float64), + + # box 2: first local here + eval_offset + np.array([0.3, -0.2], np.float64), + + # box 3: second local and eval here + eval_offset + ], + dtype=np.float64) + origin).T.copy() + + del eval_offset + + nboxes = centers.shape[-1] + + def eval_at(e2p, source_box_nr, rscale): + e2p_target_boxes = np.array([source_box_nr], dtype=np.int32) + + # These are indexed by global box numbers. 
+ e2p_box_target_starts = np.array([0, 0, 0, 0], dtype=np.int32) + e2p_box_target_counts_nonchild = np.array([0, 0, 0, 0], + dtype=np.int32) + e2p_box_target_counts_nonchild[source_box_nr] = ntargets + + evt, (pot,) = e2p( + queue, + + src_expansions=mpoles, + src_base_ibox=0, + + target_boxes=e2p_target_boxes, + box_target_starts=e2p_box_target_starts, + box_target_counts_nonchild=e2p_box_target_counts_nonchild, + centers=centers, + targets=targets, + + rscale=rscale, + + out_host=True, **extra_kwargs + ) + + return pot + + if isinstance(knl, LaplaceKernel): + mpole_expn_classes = [LaplaceConformingVolumeTaylorMultipoleExpansion, VolumeTaylorMultipoleExpansion] + local_expn_classes = [LaplaceConformingVolumeTaylorLocalExpansion, VolumeTaylorLocalExpansion] + elif isinstance(knl, HelmholtzKernel): + mpole_expn_classes = [HelmholtzConformingVolumeTaylorMultipoleExpansion, VolumeTaylorMultipoleExpansion] + local_expn_classes = [HelmholtzConformingVolumeTaylorLocalExpansion, VolumeTaylorLocalExpansion] + + h_values = 1000*2.0**np.arange(-5, 3) + for order in [6]: + h_errs = [] + for h in h_values: + m2m_vals = [] + for i, (mpole_expn_class, local_expn_class) in enumerate(zip(mpole_expn_classes, local_expn_classes)): + m_expn = mpole_expn_class(knl, order=order) + l_expn = local_expn_class(knl, order=order) + + from sumpy import P2EFromSingleBox, E2PFromSingleBox, P2P, E2EFromCSR + p2m = P2EFromSingleBox(ctx, m_expn) + m2m = E2EFromCSR(ctx, m_expn, m_expn) + m2p = E2PFromSingleBox(ctx, m_expn, out_kernels) + p2p = P2P(ctx, out_kernels, exclude_self=False) + + fp = FieldPlotter(centers[:, -1], extent=h, npoints=res) + targets = fp.points + + m1_rscale = 0.5 + m2_rscale = 0.25 + + # {{{ apply P2M + + p2m_source_boxes = np.array([0], dtype=np.int32) + + # These are indexed by global box numbers. 
+ p2m_box_source_starts = np.array([0, 0, 0, 0], dtype=np.int32) + p2m_box_source_counts_nonchild = np.array([nsources, 0, 0, 0], + dtype=np.int32) + + evt, (mpoles,) = p2m(queue, + source_boxes=p2m_source_boxes, + box_source_starts=p2m_box_source_starts, + box_source_counts_nonchild=p2m_box_source_counts_nonchild, + centers=centers, + sources=sources, + strengths=strengths, + nboxes=nboxes, + rscale=m1_rscale, + + tgt_base_ibox=0, + + #flags="print_hl_wrapper", + out_host=True, **extra_kwargs) + + # }}} + + ntargets = targets.shape[-1] + + # {{{ apply M2M + + m2m_target_boxes = np.array([1], dtype=np.int32) + m2m_src_box_starts = np.array([0, 1], dtype=np.int32) + m2m_src_box_lists = np.array([0], dtype=np.int32) + + evt, (mpoles,) = m2m(queue, + src_expansions=mpoles, + src_base_ibox=0, + tgt_base_ibox=0, + ntgt_level_boxes=mpoles.shape[0], + + target_boxes=m2m_target_boxes, + + src_box_starts=m2m_src_box_starts, + src_box_lists=m2m_src_box_lists, + centers=centers, + + src_rscale=m1_rscale, + tgt_rscale=m2_rscale, + + #flags="print_hl_cl", + out_host=True, **extra_kwargs) + + # }}} + + pot = eval_at(m2p, 1, m2_rscale) + + evt, (pot_direct,) = p2p( + queue, + targets, sources, (strengths,), + out_host=True, **extra_kwargs) + #if (i == 0): + # m2m_vals.append(pot_direct) + #else: + # m2m_vals.append(pot) + m2m_vals.append(pot) + + err = la.norm(m2m_vals[1] - m2m_vals[0])/la.norm(m2m_vals[0]) + print(err) + h_errs.append(abs(err)) + a, b = np.polyfit(np.log(h_values), np.log(h_errs), 1) + plt.loglog(h_values, h_errs, label="order={}".format(order), marker='x') + plt.loglog(h_values, h_values**a / h_values[-3]**a * h_errs[-3], label="h**%.2f" % a) + plt.loglog(h_values, h_values**(-order-1) / h_values[-3]**(-order-1) * h_errs[-3], label="h**-%.2f" % (order+1)) + plt.xlabel("h") + plt.ylabel("Error between reduced and full") + plt.legend() + plt.show() + +if __name__ == "__main__": + test_m2m(cl.create_some_context) + +# vim: fdm=marker -- GitLab From 6f1004e80489db838e72e0417f8963c069e8940b Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 17 Oct 2018 12:42:40 -0500 Subject: [PATCH 34/68] Fix calculating distances --- test/test_m2m.py | 198 +++++++++++++++++++++++------------------------ 1 file changed, 98 insertions(+), 100 deletions(-) diff --git a/test/test_m2m.py b/test/test_m2m.py index 44799bf2..1f657206 100644 --- a/test/test_m2m.py +++ b/test/test_m2m.py @@ -58,7 +58,7 @@ else: import matplotlib.pyplot as plt -def test_m2m(ctx_getter, order=5): +def test_m2m(ctx_getter, order): logging.basicConfig(level=logging.INFO) from sympy.core.cache import clear_cache @@ -77,12 +77,10 @@ def test_m2m(ctx_getter, order=5): extra_kwargs = {} if isinstance(knl, HelmholtzKernel): - extra_kwargs["k"] = 0.05 + extra_kwargs["k"] = 0.1 # Just to make sure things also work away from the origin origin = np.array([2, 1], np.float64) - sources = (0.7*(-0.5+np.random.rand(knl.dim, nsources).astype(np.float64)) - + origin[:, np.newaxis]) strengths = np.ones(nsources, dtype=np.float64) * (1/nsources) pconv_verifier_p2m2p = PConvergenceVerifier() @@ -149,106 +147,106 @@ def test_m2m(ctx_getter, order=5): mpole_expn_classes = [HelmholtzConformingVolumeTaylorMultipoleExpansion, VolumeTaylorMultipoleExpansion] local_expn_classes = [HelmholtzConformingVolumeTaylorLocalExpansion, VolumeTaylorLocalExpansion] - h_values = 1000*2.0**np.arange(-5, 3) - for order in [6]: - h_errs = [] - for h in h_values: - m2m_vals = [] - for i, (mpole_expn_class, local_expn_class) in enumerate(zip(mpole_expn_classes, 
local_expn_classes)): - m_expn = mpole_expn_class(knl, order=order) - l_expn = local_expn_class(knl, order=order) - - from sumpy import P2EFromSingleBox, E2PFromSingleBox, P2P, E2EFromCSR - p2m = P2EFromSingleBox(ctx, m_expn) - m2m = E2EFromCSR(ctx, m_expn, m_expn) - m2p = E2PFromSingleBox(ctx, m_expn, out_kernels) - p2p = P2P(ctx, out_kernels, exclude_self=False) - - fp = FieldPlotter(centers[:, -1], extent=h, npoints=res) - targets = fp.points - - m1_rscale = 0.5 - m2_rscale = 0.25 - - # {{{ apply P2M - - p2m_source_boxes = np.array([0], dtype=np.int32) - - # These are indexed by global box numbers. - p2m_box_source_starts = np.array([0, 0, 0, 0], dtype=np.int32) - p2m_box_source_counts_nonchild = np.array([nsources, 0, 0, 0], - dtype=np.int32) - - evt, (mpoles,) = p2m(queue, - source_boxes=p2m_source_boxes, - box_source_starts=p2m_box_source_starts, - box_source_counts_nonchild=p2m_box_source_counts_nonchild, - centers=centers, - sources=sources, - strengths=strengths, - nboxes=nboxes, - rscale=m1_rscale, - - tgt_base_ibox=0, - - #flags="print_hl_wrapper", - out_host=True, **extra_kwargs) - - # }}} - - ntargets = targets.shape[-1] - - # {{{ apply M2M - - m2m_target_boxes = np.array([1], dtype=np.int32) - m2m_src_box_starts = np.array([0, 1], dtype=np.int32) - m2m_src_box_lists = np.array([0], dtype=np.int32) - - evt, (mpoles,) = m2m(queue, - src_expansions=mpoles, - src_base_ibox=0, - tgt_base_ibox=0, - ntgt_level_boxes=mpoles.shape[0], - - target_boxes=m2m_target_boxes, - - src_box_starts=m2m_src_box_starts, - src_box_lists=m2m_src_box_lists, - centers=centers, - - src_rscale=m1_rscale, - tgt_rscale=m2_rscale, - - #flags="print_hl_cl", - out_host=True, **extra_kwargs) - - # }}} - - pot = eval_at(m2p, 1, m2_rscale) - - evt, (pot_direct,) = p2p( - queue, - targets, sources, (strengths,), - out_host=True, **extra_kwargs) - #if (i == 0): - # m2m_vals.append(pot_direct) - #else: - # m2m_vals.append(pot) - m2m_vals.append(pot) - - err = la.norm(m2m_vals[1] - m2m_vals[0])/la.norm(m2m_vals[0]) - print(err) - h_errs.append(abs(err)) - a, b = np.polyfit(np.log(h_values), np.log(h_errs), 1) - plt.loglog(h_values, h_errs, label="order={}".format(order), marker='x') - plt.loglog(h_values, h_values**a / h_values[-3]**a * h_errs[-3], label="h**%.2f" % a) - plt.loglog(h_values, h_values**(-order-1) / h_values[-3]**(-order-1) * h_errs[-3], label="h**-%.2f" % (order+1)) - plt.xlabel("h") + h_values = 2.0**np.arange(-3, 7) + distances = [] + errs = [] + for h in h_values: + sources = (h*(-0.5+np.random.rand(knl.dim, nsources).astype(np.float64)) + + origin[:, np.newaxis]) + distances.append(np.max(np.abs(sources - centers[:, 1].reshape(2, 1)))) + m2m_vals = [] + for i, (mpole_expn_class, local_expn_class) in enumerate(zip(mpole_expn_classes, local_expn_classes)): + m_expn = mpole_expn_class(knl, order=order) + l_expn = local_expn_class(knl, order=order) + + from sumpy import P2EFromSingleBox, E2PFromSingleBox, P2P, E2EFromCSR + p2m = P2EFromSingleBox(ctx, m_expn) + m2m = E2EFromCSR(ctx, m_expn, m_expn) + m2p = E2PFromSingleBox(ctx, m_expn, out_kernels) + p2p = P2P(ctx, out_kernels, exclude_self=False) + + fp = FieldPlotter(centers[:, -1], extent=0.1, npoints=res) + targets = fp.points + + m1_rscale = 0.5 + m2_rscale = 0.25 + + # {{{ apply P2M + + p2m_source_boxes = np.array([0], dtype=np.int32) + + # These are indexed by global box numbers. 
+ p2m_box_source_starts = np.array([0, 0, 0, 0], dtype=np.int32) + p2m_box_source_counts_nonchild = np.array([nsources, 0, 0, 0], + dtype=np.int32) + + evt, (mpoles,) = p2m(queue, + source_boxes=p2m_source_boxes, + box_source_starts=p2m_box_source_starts, + box_source_counts_nonchild=p2m_box_source_counts_nonchild, + centers=centers, + sources=sources, + strengths=strengths, + nboxes=nboxes, + rscale=m1_rscale, + + tgt_base_ibox=0, + out_host=True, **extra_kwargs) + + # }}} + + ntargets = targets.shape[-1] + + # {{{ apply M2M + + m2m_target_boxes = np.array([1], dtype=np.int32) + m2m_src_box_starts = np.array([0, 1], dtype=np.int32) + m2m_src_box_lists = np.array([0], dtype=np.int32) + + evt, (mpoles,) = m2m(queue, + src_expansions=mpoles, + src_base_ibox=0, + tgt_base_ibox=0, + ntgt_level_boxes=mpoles.shape[0], + + target_boxes=m2m_target_boxes, + + src_box_starts=m2m_src_box_starts, + src_box_lists=m2m_src_box_lists, + centers=centers, + + src_rscale=m1_rscale, + tgt_rscale=m2_rscale, + + #flags="print_hl_cl", + out_host=True, **extra_kwargs) + + # }}} + + pot = eval_at(m2p, 1, m2_rscale) + + evt, (pot_direct,) = p2p( + queue, + targets, sources, (strengths,), + out_host=True, **extra_kwargs) + m2m_vals.append(pot) + + err = la.norm(m2m_vals[1] - m2m_vals[0])/la.norm(m2m_vals[1]) + print(err) + errs.append(abs(err)) + + p1 = 5 + distances = np.array(distances) + plt.loglog(distances, errs, label="order={}".format(order), marker='x') + plt.loglog(distances, distances**(order+1) / distances[p1]**(order+1) * errs[p1], label="h**%.2f" % (order+1)) + plt.loglog(distances, distances**(order) / distances[p1]**(order) * errs[p1], label="h**%.2f" % (order)) + + plt.xlabel("Distance between furthest source and center") plt.ylabel("Error between reduced and full") plt.legend() plt.show() if __name__ == "__main__": - test_m2m(cl.create_some_context) + test_m2m(cl.create_some_context, 8) # vim: fdm=marker -- GitLab From a092c63453f55c7ad168d71ce2ee40cd751df192 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 17 Oct 2018 12:47:05 -0500 Subject: [PATCH 35/68] Rename file and fix formatting --- test/test_m2m.py => examples/check_m2m.py | 43 ++++++++++------------- 1 file changed, 19 insertions(+), 24 deletions(-) rename test/test_m2m.py => examples/check_m2m.py (88%) diff --git a/test/test_m2m.py b/examples/check_m2m.py similarity index 88% rename from test/test_m2m.py rename to examples/check_m2m.py index 1f657206..ea09cee0 100644 --- a/test/test_m2m.py +++ b/examples/check_m2m.py @@ -21,30 +21,22 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" - -from six.moves import range - import numpy as np import numpy.linalg as la -import sys -import pytest import pyopencl as cl from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) from sumpy.expansion.multipole import ( - VolumeTaylorMultipoleExpansion, H2DMultipoleExpansion, - VolumeTaylorMultipoleExpansionBase, + VolumeTaylorMultipoleExpansion, LaplaceConformingVolumeTaylorMultipoleExpansion, HelmholtzConformingVolumeTaylorMultipoleExpansion) from sumpy.expansion.local import ( - VolumeTaylorLocalExpansion, H2DLocalExpansion, + VolumeTaylorLocalExpansion, LaplaceConformingVolumeTaylorLocalExpansion, HelmholtzConformingVolumeTaylorLocalExpansion) -from sumpy.kernel import (LaplaceKernel, HelmholtzKernel, AxisTargetDerivative, - DirectionalSourceDerivative) -from pytools.convergence import PConvergenceVerifier +from sumpy.kernel import LaplaceKernel, HelmholtzKernel import logging logger = logging.getLogger(__name__) @@ -58,6 +50,7 @@ else: import matplotlib.pyplot as plt + def test_m2m(ctx_getter, order): logging.basicConfig(level=logging.INFO) @@ -83,11 +76,6 @@ def test_m2m(ctx_getter, order): origin = np.array([2, 1], np.float64) strengths = np.ones(nsources, dtype=np.float64) * (1/nsources) - pconv_verifier_p2m2p = PConvergenceVerifier() - pconv_verifier_p2m2m2p = PConvergenceVerifier() - pconv_verifier_p2m2m2l2p = PConvergenceVerifier() - pconv_verifier_full = PConvergenceVerifier() - from sumpy.visualization import FieldPlotter eval_offset = np.array([5.5, 0.0]) @@ -141,11 +129,15 @@ def test_m2m(ctx_getter, order): return pot if isinstance(knl, LaplaceKernel): - mpole_expn_classes = [LaplaceConformingVolumeTaylorMultipoleExpansion, VolumeTaylorMultipoleExpansion] - local_expn_classes = [LaplaceConformingVolumeTaylorLocalExpansion, VolumeTaylorLocalExpansion] + mpole_expn_classes = [LaplaceConformingVolumeTaylorMultipoleExpansion, + VolumeTaylorMultipoleExpansion] + local_expn_classes = [LaplaceConformingVolumeTaylorLocalExpansion, + VolumeTaylorLocalExpansion] elif isinstance(knl, HelmholtzKernel): - mpole_expn_classes = [HelmholtzConformingVolumeTaylorMultipoleExpansion, VolumeTaylorMultipoleExpansion] - local_expn_classes = [HelmholtzConformingVolumeTaylorLocalExpansion, VolumeTaylorLocalExpansion] + mpole_expn_classes = [HelmholtzConformingVolumeTaylorMultipoleExpansion, + VolumeTaylorMultipoleExpansion] + local_expn_classes = [HelmholtzConformingVolumeTaylorLocalExpansion, + VolumeTaylorLocalExpansion] h_values = 2.0**np.arange(-3, 7) distances = [] @@ -155,9 +147,9 @@ def test_m2m(ctx_getter, order): + origin[:, np.newaxis]) distances.append(np.max(np.abs(sources - centers[:, 1].reshape(2, 1)))) m2m_vals = [] - for i, (mpole_expn_class, local_expn_class) in enumerate(zip(mpole_expn_classes, local_expn_classes)): + for i, (mpole_expn_class, local_expn_class) in \ + enumerate(zip(mpole_expn_classes, local_expn_classes)): m_expn = mpole_expn_class(knl, order=order) - l_expn = local_expn_class(knl, order=order) from sumpy import P2EFromSingleBox, E2PFromSingleBox, P2P, E2EFromCSR p2m = P2EFromSingleBox(ctx, m_expn) @@ -238,14 +230,17 @@ def test_m2m(ctx_getter, order): p1 = 5 distances = np.array(distances) plt.loglog(distances, errs, label="order={}".format(order), marker='x') - plt.loglog(distances, distances**(order+1) / distances[p1]**(order+1) * errs[p1], label="h**%.2f" % (order+1)) - plt.loglog(distances, distances**(order) / distances[p1]**(order) * errs[p1], label="h**%.2f" % (order)) + plt.loglog(distances, 
distances**(order+1) / distances[p1]**(order+1) * errs[p1], + label="h**%.2f" % (order+1)) + plt.loglog(distances, distances**(order) / distances[p1]**(order) * errs[p1], + label="h**%.2f" % (order)) plt.xlabel("Distance between furthest source and center") plt.ylabel("Error between reduced and full") plt.legend() plt.show() + if __name__ == "__main__": test_m2m(cl.create_some_context, 8) -- GitLab From 82103baf3e56421cebc09d6409c28dc0375e47a4 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 27 Mar 2019 18:38:44 -0500 Subject: [PATCH 36/68] Add StokesConformingVolumeTaylorExpansion --- sumpy/expansion/__init__.py | 109 ++++++++++++++++++++++++++--------- sumpy/expansion/local.py | 12 +++- sumpy/expansion/multipole.py | 12 +++- sumpy/tools.py | 4 ++ test/test_kernels.py | 19 ++++-- 5 files changed, 123 insertions(+), 33 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 647ebb01..f9ffc4f5 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -29,6 +29,7 @@ import logging from pytools import memoize_method import sumpy.symbolic as sym from collections import defaultdict +from sumpy.tools import CoeffIdentifier, add_mi __doc__ = """ @@ -364,24 +365,23 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): from pytools import ProcessLogger plog = ProcessLogger(logger, "compute recurrence for Taylor coefficients") - pde_dict = self.get_pde_dict() + pdes, iexpr, nexpr = self.get_pdes() pde_mat = [] mis = self.get_full_coefficient_identifiers() coeff_ident_enumerate_dict = dict((tuple(mi), i) for (i, mi) in enumerate(mis)) + offset = len(mis) for mi in mis: - for pde_mi, coeff in iteritems(pde_dict): - diff = np.array(mi, dtype=int) - pde_mi - if (diff >= 0).all(): - eq = [0]*len(mis) - for pde_mi2, coeff2 in iteritems(pde_dict): - c = tuple(pde_mi2 + diff) - if c not in coeff_ident_enumerate_dict: - break - eq[coeff_ident_enumerate_dict[c]] = coeff - else: - pde_mat.append(eq) + for pde_dict in pdes: + eq = [0]*(len(mis)*nexpr) + for ident, coeff in iteritems(pde_dict): + c = tuple(add_mi(ident.mi, mi)) + if c not in coeff_ident_enumerate_dict: + break + eq[offset*ident.iexpr + coeff_ident_enumerate_dict[c]] = coeff + else: + pde_mat.append(eq) if len(pde_mat) > 0: r""" @@ -398,8 +398,9 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): :math:`S = N_{[r, :]}^{-T} N^T = N_{[r, :]}^{-T} N^T`. """ n = nullspace(pde_mat) + n = n[offset*iexpr:offset*(iexpr+1), :] idx = self.get_reduced_coeffs(n) - assert len(idx) >= n.shape[1] + n = n[:,:len(idx)] s = solve_symbolic(n.T[:, idx], n.T) stored_identifiers = [mis[i] for i in idx] else: @@ -422,10 +423,12 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): return stored_identifiers, coeff_matrix - def get_pde_dict(self): + def get_pdes(self): r""" - Returns the PDE as a dictionary of (mi, coeff) such that mi - is the multi-index of the derivative and the PDE is represented by, + Returns a list of PDEs, expression number, number of expressions. + A PDE is a dictionary of (ident, coeff) such that ident is a + namedtuple of (mi, iexpr) where mi is the multi-index of the + derivative, iexpr is the expression number and the PDE is represented by, .. 
math:: @@ -487,15 +490,16 @@ class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler) init_arg_names = ("order", "dim", "max_mi") def __init__(self, order, dim, max_mi=None): - super(LaplaceExpansionTermsWrangler, self).__init__(order, dim, 1, max_mi) + super(LaplaceExpansionTermsWrangler, self).__init__(order=order, dim=dim, + deriv_multiplier=1, max_mi=max_mi) - def get_pde_dict(self): + def get_pdes(self): pde_dict = {} for d in range(self.dim): mi = [0]*self.dim mi[d] = 2 - pde_dict[tuple(mi)] = 1 - return pde_dict + pde_dict[CoeffIdentifier(tuple(mi), 0)] = 1 + return [pde_dict], 0, 1 def _get_reduced_coeffs(self, nullspace): idx = [] @@ -515,17 +519,17 @@ class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangle self.helmholtz_k_name = helmholtz_k_name multiplier = sym.Symbol(helmholtz_k_name) - super(HelmholtzExpansionTermsWrangler, self).__init__(order, dim, multiplier, - max_mi) + super(HelmholtzExpansionTermsWrangler, self).__init__(order=order, dim=dim, + deriv_multiplier=multiplier, max_mi=max_mi) - def get_pde_dict(self, **kwargs): + def get_pdes(self, **kwargs): pde_dict = {} for d in range(self.dim): mi = [0]*self.dim mi[d] = 2 - pde_dict[tuple(mi)] = 1 - pde_dict[tuple([0]*self.dim)] = 1 - return pde_dict + pde_dict[CoeffIdentifier(tuple(mi), 0)] = 1 + pde_dict[CoeffIdentifier(tuple([0]*self.dim), 0)] = 1 + return [pde_dict], 0, 1 def _get_reduced_coeffs(self, nullspace): idx = [] @@ -536,6 +540,46 @@ class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangle idx.append(i) return idx + +class StokesExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): + + init_arg_names = ("order", "dim", "icomp", "viscosity_mu_name", "max_mi") + + def __init__(self, order, dim, icomp, viscosity_mu_name, max_mi=None): + self.viscosity_mu_name = viscosity_mu_name + self.icomp = icomp + multiplier = 1/sym.Symbol(viscosity_mu_name) + super(StokesExpansionTermsWrangler, self).__init__(order=order, dim=dim, + deriv_multiplier=multiplier, max_mi=max_mi) + + def get_pdes(self, **kwargs): + pdes = [] + # velocity + for eq in range(self.dim): + pde_dict = {} + for d in range(self.dim): + mi = [0]*self.dim + mi[d] = 2 + pde_dict[CoeffIdentifier(tuple(mi), eq)] = 1 + mi = [0]*self.dim + mi[eq] = 1 + pde_dict[CoeffIdentifier(tuple(mi), self.dim)] = -1 + pdes.append(pde_dict) + # pressure + pde_dict = {} + for d in range(self.dim): + mi = [0]*self.dim + mi[d] = 2 + pde_dict[CoeffIdentifier(tuple(mi), self.dim)] = 1 + pdes.append(pde_dict) + # divergence + pde_dict = {} + for d in range(self.dim): + mi = [0]*self.dim + mi[d] = 1 + pde_dict[CoeffIdentifier(tuple(mi), d)] = 1 + pdes.append(pde_dict) + return pdes, self.icomp, self.dim+1 # }}} @@ -612,6 +656,19 @@ class HelmholtzConformingVolumeTaylorExpansion(VolumeTaylorExpansionBase): helmholtz_k_name = kernel.get_base_kernel().helmholtz_k_name self.expansion_terms_wrangler_key = (order, kernel.dim, helmholtz_k_name) + +class StokesConformingVolumeTaylorExpansion(VolumeTaylorExpansionBase): + + expansion_terms_wrangler_class = StokesExpansionTermsWrangler + expansion_terms_wrangler_cache = {} + + # not user-facing, be strict about having to pass use_rscale + def __init__(self, kernel, order, use_rscale): + icomp = kernel.get_base_kernel().icomp + viscosity_mu_name = kernel.get_base_kernel().viscosity_mu_name + self.expansion_terms_wrangler_key = (order, kernel.dim, + icomp, viscosity_mu_name) + # }}} diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index 
cf94ebd6..f080b746 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -27,7 +27,7 @@ import sumpy.symbolic as sym from sumpy.expansion import ( ExpansionBase, VolumeTaylorExpansion, LaplaceConformingVolumeTaylorExpansion, - HelmholtzConformingVolumeTaylorExpansion) + HelmholtzConformingVolumeTaylorExpansion, StokesConformingVolumeTaylorExpansion) class LocalExpansionBase(ExpansionBase): @@ -269,6 +269,16 @@ class HelmholtzConformingVolumeTaylorLocalExpansion( HelmholtzConformingVolumeTaylorExpansion.__init__( self, kernel, order, use_rscale) + +class StokesConformingVolumeTaylorLocalExpansion( + StokesConformingVolumeTaylorExpansion, + VolumeTaylorLocalExpansionBase): + + def __init__(self, kernel, order, use_rscale=None): + VolumeTaylorLocalExpansionBase.__init__(self, kernel, order, use_rscale) + StokesConformingVolumeTaylorExpansion.__init__( + self, kernel, order, use_rscale) + # }}} diff --git a/sumpy/expansion/multipole.py b/sumpy/expansion/multipole.py index 25488952..07f2eeb6 100644 --- a/sumpy/expansion/multipole.py +++ b/sumpy/expansion/multipole.py @@ -28,7 +28,7 @@ import sumpy.symbolic as sym # noqa from sumpy.symbolic import vector_xreplace from sumpy.expansion import ( ExpansionBase, VolumeTaylorExpansion, LaplaceConformingVolumeTaylorExpansion, - HelmholtzConformingVolumeTaylorExpansion) + HelmholtzConformingVolumeTaylorExpansion, StokesConformingVolumeTaylorExpansion) import logging logger = logging.getLogger(__name__) @@ -220,6 +220,16 @@ class HelmholtzConformingVolumeTaylorMultipoleExpansion( HelmholtzConformingVolumeTaylorExpansion.__init__( self, kernel, order, use_rscale) + +class StokesConformingVolumeTaylorMultipoleExpansion( + StokesConformingVolumeTaylorExpansion, + VolumeTaylorMultipoleExpansionBase): + + def __init__(self, kernel, order, use_rscale=None): + VolumeTaylorMultipoleExpansionBase.__init__(self, kernel, order, use_rscale) + StokesConformingVolumeTaylorExpansion.__init__( + self, kernel, order, use_rscale) + # }}} diff --git a/sumpy/tools.py b/sumpy/tools.py index 32d14bcb..c13a04b6 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -27,6 +27,7 @@ THE SOFTWARE. 
import six from six.moves import range, zip +from collections import namedtuple from pytools import memoize_method, memoize_in import numpy as np import sumpy.symbolic as sym @@ -729,4 +730,7 @@ def solve_symbolic(a, b): red = rref(big)[0] return red[:, big.shape[0]:] + +CoeffIdentifier = namedtuple('CoeffIdentifier', ['mi', 'iexpr']) + # vim: fdm=marker diff --git a/test/test_kernels.py b/test/test_kernels.py index 3fcb195b..6a96d1ad 100644 --- a/test/test_kernels.py +++ b/test/test_kernels.py @@ -37,13 +37,15 @@ from sumpy.expansion.multipole import ( VolumeTaylorMultipoleExpansion, H2DMultipoleExpansion, VolumeTaylorMultipoleExpansionBase, LaplaceConformingVolumeTaylorMultipoleExpansion, - HelmholtzConformingVolumeTaylorMultipoleExpansion) + HelmholtzConformingVolumeTaylorMultipoleExpansion, + StokesConformingVolumeTaylorMultipoleExpansion) from sumpy.expansion.local import ( VolumeTaylorLocalExpansion, H2DLocalExpansion, LaplaceConformingVolumeTaylorLocalExpansion, - HelmholtzConformingVolumeTaylorLocalExpansion) + HelmholtzConformingVolumeTaylorLocalExpansion, + StokesConformingVolumeTaylorLocalExpansion) from sumpy.kernel import (LaplaceKernel, HelmholtzKernel, AxisTargetDerivative, - DirectionalSourceDerivative) + DirectionalSourceDerivative, StokesletKernel) from pytools.convergence import PConvergenceVerifier import logging @@ -155,6 +157,8 @@ def test_p2e2p(ctx_getter, base_knl, expn_class, order, with_source_derivative): extra_kwargs["k"] = 0.2 * (0.707 + 0.707j) else: extra_kwargs["k"] = 0.2 + if isinstance(base_knl, StokesletKernel): + extra_kwargs["mu"] = 0.2 if with_source_derivative: knl = DirectionalSourceDerivative(base_knl, "dir_vec") @@ -342,7 +346,10 @@ def test_p2e2p(ctx_getter, base_knl, expn_class, order, with_source_derivative): (HelmholtzKernel(2), VolumeTaylorLocalExpansion, VolumeTaylorMultipoleExpansion), (HelmholtzKernel(2), HelmholtzConformingVolumeTaylorLocalExpansion, HelmholtzConformingVolumeTaylorMultipoleExpansion), - (HelmholtzKernel(2), H2DLocalExpansion, H2DMultipoleExpansion) + (HelmholtzKernel(2), H2DLocalExpansion, H2DMultipoleExpansion), + (StokesletKernel(2, 0, 0), VolumeTaylorLocalExpansion, VolumeTaylorMultipoleExpansion), + (StokesletKernel(2, 0, 0), StokesConformingVolumeTaylorLocalExpansion, + StokesConformingVolumeTaylorMultipoleExpansion), ]) def test_translations(ctx_getter, knl, local_expn_class, mpole_expn_class): logging.basicConfig(level=logging.INFO) @@ -363,6 +370,8 @@ def test_translations(ctx_getter, knl, local_expn_class, mpole_expn_class): extra_kwargs = {} if isinstance(knl, HelmholtzKernel): extra_kwargs["k"] = 0.05 + if isinstance(knl, StokesletKernel): + extra_kwargs["mu"] = 0.05 # Just to make sure things also work away from the origin origin = np.array([2, 1], np.float64) @@ -404,7 +413,7 @@ def test_translations(ctx_getter, knl, local_expn_class, mpole_expn_class): # FIXME: Embarrassing--but we run out of memory for higher orders. 
orders = [2, 3] else: - orders = [2, 3, 4] + orders = [3, 4, 5] nboxes = centers.shape[-1] def eval_at(e2p, source_box_nr, rscale): -- GitLab From 75dc33f30c6e8058e6372b53cdd920cc6fbe41fe Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 27 Mar 2019 20:36:01 -0500 Subject: [PATCH 37/68] Fix tests --- sumpy/expansion/__init__.py | 4 ++-- test/test_kernels.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index f9ffc4f5..85d9a71b 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -400,7 +400,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): n = nullspace(pde_mat) n = n[offset*iexpr:offset*(iexpr+1), :] idx = self.get_reduced_coeffs(n) - n = n[:,:len(idx)] + n = n[:, :len(idx)] s = solve_symbolic(n.T[:, idx], n.T) stored_identifiers = [mis[i] for i in idx] else: @@ -417,7 +417,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): plog.done() - logger.debug("number of Taylor coefficients was reduced from {orig} to {red}" + print("number of Taylor coefficients was reduced from {orig} to {red}" .format(orig=len(self.get_full_coefficient_identifiers()), red=len(stored_identifiers))) diff --git a/test/test_kernels.py b/test/test_kernels.py index 6a96d1ad..a1fa8230 100644 --- a/test/test_kernels.py +++ b/test/test_kernels.py @@ -412,8 +412,10 @@ def test_translations(ctx_getter, knl, local_expn_class, mpole_expn_class): issubclass(local_expn_class, VolumeTaylorExpansionBase): # FIXME: Embarrassing--but we run out of memory for higher orders. orders = [2, 3] - else: + elif isinstance(knl, StokesletKernel): orders = [3, 4, 5] + else: + orders = [2, 3, 4] nboxes = centers.shape[-1] def eval_at(e2p, source_box_nr, rscale): -- GitLab From 65963f7246294ecb7a2dc5716d453aa1d888ac52 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 29 Mar 2019 18:48:28 -0500 Subject: [PATCH 38/68] Fix creating coefficient matrix for multi expr kernels --- sumpy/expansion/__init__.py | 26 +++++++++++++++++++++----- test/test_kernels.py | 2 -- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 85d9a71b..c4e8e9ac 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -398,11 +398,27 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): :math:`S = N_{[r, :]}^{-T} N^T = N_{[r, :]}^{-T} N^T`. 
""" n = nullspace(pde_mat) - n = n[offset*iexpr:offset*(iexpr+1), :] - idx = self.get_reduced_coeffs(n) - n = n[:, :len(idx)] - s = solve_symbolic(n.T[:, idx], n.T) - stored_identifiers = [mis[i] for i in idx] + + # Move the rows corresponding to this expression to the front + rearrange = list(range(offset*iexpr, offset*(iexpr+1))) + for i in range(nexpr*offset): + if i < offset*iexpr or i >= offset*(iexpr+1): + rearrange.append(i) + n = n[rearrange, :] + + # Get the indices of the reduced coefficients + idx_all_exprs = self.get_reduced_coeffs(n) + + s = solve_symbolic(n.T[:, idx_all_exprs], n.T) + + # Filter out coefficients belonging to this expression + indices = [] + for idx in idx_all_exprs: + if idx >= offset*iexpr and idx < offset*(iexpr+1): + indices.append(idx) + s = s[:len(indices), offset*iexpr:offset*(iexpr+1)] + + stored_identifiers = [mis[i] for i in indices] else: s = np.eye(len(mis)) stored_identifiers = mis diff --git a/test/test_kernels.py b/test/test_kernels.py index fa42a5ee..04ab51ea 100644 --- a/test/test_kernels.py +++ b/test/test_kernels.py @@ -413,8 +413,6 @@ def test_translations(ctx_getter, knl, local_expn_class, mpole_expn_class): issubclass(local_expn_class, VolumeTaylorExpansionBase): # FIXME: Embarrassing--but we run out of memory for higher orders. orders = [2, 3] - elif isinstance(knl, StokesletKernel): - orders = [3, 4, 5] else: orders = [2, 3, 4] nboxes = centers.shape[-1] -- GitLab From 45fa8d5f486a9de42d3613d78ee5dbb3f901e184 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 2 Apr 2019 21:00:20 -0500 Subject: [PATCH 39/68] Make PDE spec cute and symbolic --- sumpy/expansion/__init__.py | 139 +++++++++++++++++++++++++----------- 1 file changed, 99 insertions(+), 40 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index c4e8e9ac..e2513e3c 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -373,7 +373,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): offset = len(mis) for mi in mis: - for pde_dict in pdes: + for pde_dict in pdes.eqs: eq = [0]*(len(mis)*nexpr) for ident, coeff in iteritems(pde_dict): c = tuple(add_mi(ident.mi, mi)) @@ -501,6 +501,96 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): return rows +class PDE(object): + def __init__(self, dim, nexprs=None, eqs=None): + if nexprs is not None: + eqs = [] + for iexpr in range(nexprs): + mi = [0]*dim + eq = dict() + eq[CoeffIdentifier(tuple(mi), iexpr)] = 1 + eqs.append(eq) + self.dim = dim + self.eqs = eqs + + def __mul__(self, param): + eqs = self.eqs.copy() + for eq in eqs: + for k, v in eq.items(): + eq[k] = eq[k] * param + return PDE(self.dim, eqs=eqs) + + __rmul__ = __mul__ + + def __add__(self, other_pde): + assert self.dim == other_pde.dim + assert len(self.eqs) == len(other_pde.eqs) + eqs = [] + for eq1, eq2 in zip(self.eqs, other_pde.eqs): + eq = defaultdict(lambda: 0) + for k, v in eq1.items(): + eq[k] += v + for k, v in eq2.items(): + eq[k] += v + eqs.append(dict(eq)) + return PDE(self.dim, eqs=eqs) + + __radd__ = __add__ + + def __sub__(self, other_pde): + return self + (-1)*other_pde + + def laplacian(self): + eqs = [] + for eq in self.eqs: + new_eq = defaultdict(lambda: 0) + for ident, v in eq.items(): + for d in range(self.dim): + mi = list(ident.mi) + mi[d] += 2 + new_ident = CoeffIdentifier(tuple(mi), ident.iexpr) + new_eq[new_ident] += v + eqs.append(dict(new_eq)) + return PDE(self.dim, eqs=eqs) + + def __or__(self, other_pde): + assert self.dim == 
other_pde.dim + eqs = self.eqs + other_pde.eqs + return PDE(self.dim, eqs=eqs) + + def __getitem__(self, key): + eqs = self.eqs.__getitem__(key) + if not isinstance(eqs, list): + eqs = [eqs] + return PDE(self.dim, eqs=eqs) + + def grad(self): + assert len(self.eqs) == 1 + eqs = [] + for d in range(self.dim): + new_eq = defaultdict(lambda: 0) + for ident, v in self.eqs[0].items(): + mi = list(ident.mi) + mi[d] += 1 + new_ident = CoeffIdentifier(tuple(mi), ident.iexpr) + new_eq[new_ident] += v + eqs.append(dict(new_eq)) + return PDE(self.dim, eqs=eqs) + + def div(self): + result = defaultdict(lambda: 0) + for d, eq in enumerate(self.eqs): + for ident, v in eq.items(): + mi = list(ident.mi) + mi[d] += 1 + new_ident = CoeffIdentifier(tuple(mi), ident.iexpr) + result[new_ident] += v + return PDE(self.dim, eqs=[dict(result)]) + + def __repr__(self): + return repr(self.eqs) + + class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): init_arg_names = ("order", "dim", "max_mi") @@ -510,12 +600,8 @@ class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler) deriv_multiplier=1, max_mi=max_mi) def get_pdes(self): - pde_dict = {} - for d in range(self.dim): - mi = [0]*self.dim - mi[d] = 2 - pde_dict[CoeffIdentifier(tuple(mi), 0)] = 1 - return [pde_dict], 0, 1 + w = PDE(dim=self.dim, nexprs=1) + return w.laplacian(), 0, 1 def _get_reduced_coeffs(self, nullspace): idx = [] @@ -539,13 +625,8 @@ class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangle deriv_multiplier=multiplier, max_mi=max_mi) def get_pdes(self, **kwargs): - pde_dict = {} - for d in range(self.dim): - mi = [0]*self.dim - mi[d] = 2 - pde_dict[CoeffIdentifier(tuple(mi), 0)] = 1 - pde_dict[CoeffIdentifier(tuple([0]*self.dim), 0)] = 1 - return [pde_dict], 0, 1 + w = PDE(dim=self.dim, nexprs=1) + return (w.laplacian() + w), 0, 1 def _get_reduced_coeffs(self, nullspace): idx = [] @@ -569,32 +650,10 @@ class StokesExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): deriv_multiplier=multiplier, max_mi=max_mi) def get_pdes(self, **kwargs): - pdes = [] - # velocity - for eq in range(self.dim): - pde_dict = {} - for d in range(self.dim): - mi = [0]*self.dim - mi[d] = 2 - pde_dict[CoeffIdentifier(tuple(mi), eq)] = 1 - mi = [0]*self.dim - mi[eq] = 1 - pde_dict[CoeffIdentifier(tuple(mi), self.dim)] = -1 - pdes.append(pde_dict) - # pressure - pde_dict = {} - for d in range(self.dim): - mi = [0]*self.dim - mi[d] = 2 - pde_dict[CoeffIdentifier(tuple(mi), self.dim)] = 1 - pdes.append(pde_dict) - # divergence - pde_dict = {} - for d in range(self.dim): - mi = [0]*self.dim - mi[d] = 1 - pde_dict[CoeffIdentifier(tuple(mi), d)] = 1 - pdes.append(pde_dict) + w = PDE(dim=self.dim, nexprs=self.dim+1) + u = w[:self.dim] + p = w[-1] + pdes = (u.laplacian() - p.grad() | u.div()) return pdes, self.icomp, self.dim+1 # }}} -- GitLab From 1d77a54f50bbd30d0a91e4a5757720d77827939f Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 2 Apr 2019 22:55:06 -0500 Subject: [PATCH 40/68] simplify laplacian --- sumpy/expansion/__init__.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index e2513e3c..673b595b 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -541,17 +541,10 @@ class PDE(object): return self + (-1)*other_pde def laplacian(self): - eqs = [] - for eq in self.eqs: - new_eq = defaultdict(lambda: 0) - for ident, v in eq.items(): - for d in range(self.dim): - 
mi = list(ident.mi) - mi[d] += 2 - new_ident = CoeffIdentifier(tuple(mi), ident.iexpr) - new_eq[new_ident] += v - eqs.append(dict(new_eq)) - return PDE(self.dim, eqs=eqs) + p = PDE(self.dim, eqs=[]) + for j in range(len(self.eqs)): + p = p | self[j].grad().div() + return p def __or__(self, other_pde): assert self.dim == other_pde.dim -- GitLab From 467773585d8ded349ddc144cbbe44fa2dac894d4 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 3 Apr 2019 00:01:48 -0500 Subject: [PATCH 41/68] Fix list copying --- sumpy/expansion/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 673b595b..1010483f 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -514,7 +514,7 @@ class PDE(object): self.eqs = eqs def __mul__(self, param): - eqs = self.eqs.copy() + eqs = self.eqs[:] for eq in eqs: for k, v in eq.items(): eq[k] = eq[k] * param -- GitLab From bd6110dee72ff1b9b2a759df53deb3c381406c48 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 3 Apr 2019 00:47:25 -0500 Subject: [PATCH 42/68] Only one constructor for PDE --- sumpy/expansion/__init__.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 1010483f..d344d9e8 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -502,14 +502,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): class PDE(object): - def __init__(self, dim, nexprs=None, eqs=None): - if nexprs is not None: - eqs = [] - for iexpr in range(nexprs): - mi = [0]*dim - eq = dict() - eq[CoeffIdentifier(tuple(mi), iexpr)] = 1 - eqs.append(eq) + def __init__(self, dim, eqs): self.dim = dim self.eqs = eqs @@ -584,6 +577,16 @@ class PDE(object): return repr(self.eqs) +def make_pde_syms(dim, nexprs): + eqs = [] + for iexpr in range(nexprs): + mi = [0]*dim + eq = dict() + eq[CoeffIdentifier(tuple(mi), iexpr)] = 1 + eqs.append(eq) + return PDE(dim, eqs=eqs) + + class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): init_arg_names = ("order", "dim", "max_mi") @@ -593,7 +596,7 @@ class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler) deriv_multiplier=1, max_mi=max_mi) def get_pdes(self): - w = PDE(dim=self.dim, nexprs=1) + w = make_pde_syms(self.dim, 1) return w.laplacian(), 0, 1 def _get_reduced_coeffs(self, nullspace): @@ -618,7 +621,7 @@ class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangle deriv_multiplier=multiplier, max_mi=max_mi) def get_pdes(self, **kwargs): - w = PDE(dim=self.dim, nexprs=1) + w = make_pde_syms(self.dim, 1) return (w.laplacian() + w), 0, 1 def _get_reduced_coeffs(self, nullspace): @@ -643,7 +646,7 @@ class StokesExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): deriv_multiplier=multiplier, max_mi=max_mi) def get_pdes(self, **kwargs): - w = PDE(dim=self.dim, nexprs=self.dim+1) + w = make_pde_syms(self.dim, self.dim+1) u = w[:self.dim] p = w[-1] pdes = (u.laplacian() - p.grad() | u.div()) -- GitLab From b192d1f375ce4177ab19be354f804ece0706346c Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 3 Apr 2019 13:04:27 -0500 Subject: [PATCH 43/68] Make deriv_multiplier automatic --- sumpy/expansion/__init__.py | 85 ++++++++++++++++++++++++++----------- sumpy/tools.py | 12 ++++++ 2 files changed, 73 insertions(+), 24 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py 
index d344d9e8..3ac64e9e 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -29,7 +29,7 @@ import logging from pytools import memoize_method import sumpy.symbolic as sym from collections import defaultdict -from sumpy.tools import CoeffIdentifier, add_mi +from sumpy.tools import CoeffIdentifier, add_mi, nth_root_assume_positive __doc__ = """ @@ -269,26 +269,19 @@ def _spmv(spmat, x, sparse_vectors): class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): """ .. automethod:: __init__ - .. automethod:: get_pde_dict + .. automethod:: get_pdes .. automethod:: get_reduced_coeffs """ - init_arg_names = ("order", "dim", "deriv_multiplier", "max_mi") + init_arg_names = ("order", "dim", "max_mi") - def __init__(self, order, dim, deriv_multiplier, max_mi=None): + def __init__(self, order, dim, max_mi=None): r""" :param order: order of the expansion :param dim: number of dimensions - :param deriv_multiplier: a symbolic expression that is used to - 'normalize out' constant coefficients in the PDE in - :func:`~LinearRecurrenceBasedExpansionTermsWrangler.get_pde_dict`, so - that the Taylor coefficient with multi-index :math:`\nu` as seen by - that representation of the PDE is :math:`\text{coeff} / - {\text{deriv\_multiplier}^{|\nu|}}`. """ super(LinearRecurrenceBasedExpansionTermsWrangler, self).__init__(order, dim, max_mi) - self.deriv_multiplier = deriv_multiplier def get_coefficient_identifiers(self): return self.stored_identifiers @@ -339,6 +332,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): full_coeffs = self.get_full_coefficient_identifiers() matrix_rows = [] + _, deriv_multiplier, _, _ = self._get_pdes() for irow, row in six.iteritems(coeff_matrix): # For eg: (u_xxx / rscale**3) = (u_yy / rscale**2) * coeff1 + # (u_xx / rscale**2) * coeff2 @@ -348,7 +342,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): matrix_row = [] for icol, coeff in row: diff = row_rscale - sum(stored_identifiers[icol]) - mult = (rscale*self.deriv_multiplier)**diff + mult = (rscale*deriv_multiplier)**diff matrix_row.append((icol, coeff * mult)) matrix_rows.append((irow, matrix_row)) @@ -365,7 +359,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): from pytools import ProcessLogger plog = ProcessLogger(logger, "compute recurrence for Taylor coefficients") - pdes, iexpr, nexpr = self.get_pdes() + pdes, _, iexpr, nexpr = self._get_pdes() pde_mat = [] mis = self.get_full_coefficient_identifiers() coeff_ident_enumerate_dict = dict((tuple(mi), i) for @@ -444,7 +438,16 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): Returns a list of PDEs, expression number, number of expressions. A PDE is a dictionary of (ident, coeff) such that ident is a namedtuple of (mi, iexpr) where mi is the multi-index of the - derivative, iexpr is the expression number and the PDE is represented by, + derivative, iexpr is the expression number + """ + + raise NotImplementedError + + @memoize_method + def _get_pdes(self): + r""" + Returns a list of `pde_dict`s, a multiplier, expression number, + number of expressions such that each PDE is represented by, .. math:: @@ -457,7 +460,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): where :math:`\mathbf\alpha` is a coefficient vector. Note that *coeff* should be a number (not symbolic) to enable use of - numeric linear algebra routines. *deriv_multiplier* can be symbolic + fast linear algebra routines. 
*deriv_multiplier* can be symbolic and should be used when the PDE has symbolic values as coefficients for the derivatives. @@ -471,8 +474,9 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): \frac{(0, 2) \times 1}{k^{sum((0, 2))}} + \frac{(0, 0) \times -1}{k^{sum((0, 0))}} = 0 """ - - raise NotImplementedError + pde, iexpr, nexpr = self.get_pdes() + pde, multiplier = process_pde(pde) + return pde, multiplier, iexpr, nexpr def get_reduced_coeffs(self, nullspace): """ @@ -577,7 +581,42 @@ class PDE(object): return repr(self.eqs) +def process_pde(pde): + """ + Process a PDE object to return a PDE and a multiplier such that + the sum of multiplier ** order * derivative * coefficient gives the + original PDE `pde`. + """ + multiplier = None + for eq in pde.eqs: + for ident1, val1 in eq.items(): + for ident2, val2 in eq.items(): + s1 = sum(ident1.mi) + s2 = sum(ident2.mi) + if s1 == s2: + continue + m = nth_root_assume_positive(val1/val2, s2 - s1) + if multiplier is None: + multiplier = m + elif multiplier != m: + return pde, 1 + eqs = [] + for eq in pde.eqs: + new_eq = dict() + for i, (k, v) in enumerate(eq.items()): + new_eq[k] = v * multiplier**sum(k.mi) + if i == 0: + val = new_eq[k] + new_eq[k] /= val + eqs.append(new_eq) + return PDE(pde.dim, eqs=eqs), multiplier + + def make_pde_syms(dim, nexprs): + """ + Returns a list of expressions of size `nexprs` to create a PDE + of dimension `dim`. + """ eqs = [] for iexpr in range(nexprs): mi = [0]*dim @@ -593,7 +632,7 @@ class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler) def __init__(self, order, dim, max_mi=None): super(LaplaceExpansionTermsWrangler, self).__init__(order=order, dim=dim, - deriv_multiplier=1, max_mi=max_mi) + max_mi=max_mi) def get_pdes(self): w = make_pde_syms(self.dim, 1) @@ -615,10 +654,8 @@ class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangle def __init__(self, order, dim, helmholtz_k_name, max_mi=None): self.helmholtz_k_name = helmholtz_k_name - - multiplier = sym.Symbol(helmholtz_k_name) super(HelmholtzExpansionTermsWrangler, self).__init__(order=order, dim=dim, - deriv_multiplier=multiplier, max_mi=max_mi) + max_mi=max_mi) def get_pdes(self, **kwargs): w = make_pde_syms(self.dim, 1) @@ -641,15 +678,15 @@ class StokesExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): def __init__(self, order, dim, icomp, viscosity_mu_name, max_mi=None): self.viscosity_mu_name = viscosity_mu_name self.icomp = icomp - multiplier = 1/sym.Symbol(viscosity_mu_name) super(StokesExpansionTermsWrangler, self).__init__(order=order, dim=dim, - deriv_multiplier=multiplier, max_mi=max_mi) + max_mi=max_mi) def get_pdes(self, **kwargs): w = make_pde_syms(self.dim, self.dim+1) + mu = sym.Symbol(self.viscosity_mu_name) u = w[:self.dim] p = w[-1] - pdes = (u.laplacian() - p.grad() | u.div()) + pdes = (mu * u.laplacian() - p.grad() | u.div()) return pdes, self.icomp, self.dim+1 # }}} diff --git a/sumpy/tools.py b/sumpy/tools.py index 7216c834..c5103c34 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -738,4 +738,16 @@ def solve_symbolic(a, b): CoeffIdentifier = namedtuple('CoeffIdentifier', ['mi', 'iexpr']) + +def nth_root_assume_positive(expr, n): + """ + Get the nth root of a symbolic expression assuming that + the symbols are positive. 
+ """ + expr = sym.sympify(expr) + if expr.is_Pow: + return expr.base ** (expr.exp / n) + else: + return expr ** (sym.Integer(1)/n) + # vim: fdm=marker -- GitLab From e6d4fdd83b34d5feb3896a7601bb4b9c972f3c44 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 3 Apr 2019 13:22:00 -0500 Subject: [PATCH 44/68] Fix for Laplace --- sumpy/expansion/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 3ac64e9e..05233960 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -600,6 +600,8 @@ def process_pde(pde): multiplier = m elif multiplier != m: return pde, 1 + if multiplier is None: + return pde, 1 eqs = [] for eq in pde.eqs: new_eq = dict() -- GitLab From fe5e257451f9316468f1dce30e0168442497f074 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 3 Apr 2019 13:27:02 -0500 Subject: [PATCH 45/68] Fix copying --- sumpy/expansion/__init__.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 05233960..e116d828 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -511,10 +511,12 @@ class PDE(object): self.eqs = eqs def __mul__(self, param): - eqs = self.eqs[:] - for eq in eqs: + eqs = [] + for eq in self.eqs: + new_eq = dict() for k, v in eq.items(): - eq[k] = eq[k] * param + new_eq[k] = eq[k] * param + eqs.append(new_eq) return PDE(self.dim, eqs=eqs) __rmul__ = __mul__ -- GitLab From f4e187836b64da2e1609a5fd2b8bfff12f155c88 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 3 Apr 2019 13:46:25 -0500 Subject: [PATCH 46/68] Fix Helmholtz --- sumpy/expansion/__init__.py | 7 +++++-- sumpy/tools.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index e116d828..8eb16434 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -611,7 +611,9 @@ def process_pde(pde): new_eq[k] = v * multiplier**sum(k.mi) if i == 0: val = new_eq[k] - new_eq[k] /= val + new_eq[k] /= sym.sympify(val) + if isinstance(new_eq[k], sym.Integer): + new_eq[k] = int(new_eq[k]) eqs.append(new_eq) return PDE(pde.dim, eqs=eqs), multiplier @@ -663,7 +665,8 @@ class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangle def get_pdes(self, **kwargs): w = make_pde_syms(self.dim, 1) - return (w.laplacian() + w), 0, 1 + k = sym.Symbol(self.helmholtz_k_name) + return (w.laplacian() + k**2 * w), 0, 1 def _get_reduced_coeffs(self, nullspace): idx = [] diff --git a/sumpy/tools.py b/sumpy/tools.py index c5103c34..e734d7dc 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -746,7 +746,7 @@ def nth_root_assume_positive(expr, n): """ expr = sym.sympify(expr) if expr.is_Pow: - return expr.base ** (expr.exp / n) + return expr.base ** (expr.exp / sym.Integer(n)) else: return expr ** (sym.Integer(1)/n) -- GitLab From 72d4aa954a517a40c36cad06b15560a802be38c5 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 3 Apr 2019 14:10:31 -0500 Subject: [PATCH 47/68] symengine compat --- sumpy/tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sumpy/tools.py b/sumpy/tools.py index e734d7dc..c7387603 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -746,7 +746,7 @@ def nth_root_assume_positive(expr, n): """ expr = sym.sympify(expr) if expr.is_Pow: - return expr.base ** (expr.exp / sym.Integer(n)) + return expr.args[0] ** (expr.args[1] / sym.Integer(n)) else: return expr ** (sym.Integer(1)/n) 
-- GitLab From 227af3e26e725ae95b67ec6b27b44622600e7a34 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 9 Apr 2019 03:34:25 -0500 Subject: [PATCH 48/68] Document new functions in tools.py --- sumpy/tools.py | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/sumpy/tools.py b/sumpy/tools.py index c7387603..695694bf 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -663,7 +663,15 @@ def my_syntactic_subs(expr, subst_dict): return expr -def rref(m): +def reduced_row_echelon_form(m): + """Calculates a reduced row echelon form of a + matrix `m`. + + :arg m: a 2D :class:`numpy.ndarray` or a list of lists or a sympy Matrix + :return: reduced row echelon form as a 2D :class:`numpy.ndarray` + and a list of pivots + """ + mat = np.array(m, dtype=object) index = 0 nrows = mat.shape[0] @@ -710,7 +718,12 @@ def rref(m): def nullspace(m): - mat, pivot_cols = rref(m) + """Calculates the nullspace of a matrix `m`. + + :arg m: a 2D :class:`numpy.ndarray` or a list of lists or a sympy Matrix + :return: nullspace of `m` as a 2D :class:`numpy.ndarray` + """ + mat, pivot_cols = reduced_row_echelon_form(m) pivot_cols = list(pivot_cols) cols = mat.shape[1] @@ -727,12 +740,12 @@ def nullspace(m): return np.array(n, dtype=object).T -def solve_symbolic(a, b): - if isinstance(a, sym.Matrix): - big = a.row_join(b) +def solve_symbolic(A, b): # noqa: N803 + if isinstance(A, sym.Matrix): + big = A.row_join(b) else: - big = np.hstack((a, b)) - red = rref(big)[0] + big = np.hstack((A, b)) + red = reduced_row_echelon_form(big)[0] return red[:, big.shape[0]:] -- GitLab From 758b705f93cb1c37143e5333fd41eb619f2071aa Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 9 Apr 2019 03:36:10 -0500 Subject: [PATCH 49/68] Clarify creating a new expansion terms wrangler --- sumpy/expansion/local.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index cfb660e7..e2150bfe 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -175,7 +175,7 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): self.get_coefficient_identifiers()) # Create a expansion terms wrangler for derivatives up to order - # (tgt order)+(src order). + # (tgt order)+(src order) including a corresponding reduction matrix tgtplusderiv_exp_terms_wrangler = \ src_expansion.expansion_terms_wrangler.copy( order=self.order + src_expansion.order, max_mi=tuple(max_mi)) -- GitLab From 5988f2982204e49437cdc40e07bc047d26755bac Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 9 Apr 2019 03:37:34 -0500 Subject: [PATCH 50/68] LinearRecurrence -> LinearPDE --- sumpy/expansion/__init__.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 8eb16434..871c5b81 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -34,7 +34,7 @@ from sumpy.tools import CoeffIdentifier, add_mi, nth_root_assume_positive __doc__ = """ .. autoclass:: ExpansionBase -.. autoclass:: LinearRecurrenceBasedExpansionTermsWrangler +.. autoclass:: LinearPDEBasedExpansionTermsWrangler Expansion Factories ^^^^^^^^^^^^^^^^^^^ @@ -266,7 +266,7 @@ def _spmv(spmat, x, sparse_vectors): # }}} -class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): +class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): """ .. automethod:: __init__ .. 
automethod:: get_pdes @@ -280,7 +280,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): :param order: order of the expansion :param dim: number of dimensions """ - super(LinearRecurrenceBasedExpansionTermsWrangler, self).__init__(order, dim, + super(LinearPDEBasedExpansionTermsWrangler, self).__init__(order, dim, max_mi) def get_coefficient_identifiers(self): @@ -314,7 +314,7 @@ class LinearRecurrenceBasedExpansionTermsWrangler(ExpansionTermsWrangler): Return a matrix that expresses every derivative in terms of a set of "stored" derivatives. - For example, for the recurrence:: + For example, for the PDE:: u_xx + u_yy + u_zz = 0 @@ -632,7 +632,7 @@ def make_pde_syms(dim, nexprs): return PDE(dim, eqs=eqs) -class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): +class LaplaceExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): init_arg_names = ("order", "dim", "max_mi") @@ -654,7 +654,7 @@ class LaplaceExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler) return idx -class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): +class HelmholtzExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): init_arg_names = ("order", "dim", "helmholtz_k_name", "max_mi") @@ -678,7 +678,7 @@ class HelmholtzExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangle return idx -class StokesExpansionTermsWrangler(LinearRecurrenceBasedExpansionTermsWrangler): +class StokesExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): init_arg_names = ("order", "dim", "icomp", "viscosity_mu_name", "max_mi") -- GitLab From 82c68486e3d9614e12c2fb38a463e1f75b102a6a Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 9 Apr 2019 04:10:18 -0500 Subject: [PATCH 51/68] Document PDE class --- sumpy/expansion/__init__.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 871c5b81..a0abecfb 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -506,7 +506,24 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): class PDE(object): + r""" + Represents a system of PDEs of dimension `dim`. It is represented by a + list of dictionaries with each dictionary representing a single PDE. + Each dictionary maps a :class:`CoeffIdentifier` object to a value, + + .. math:: + + \sum_{(mi, ident), c \text{pde\_dict}} + \frac{\partial^{\sum(mi)}}{\partial u^mi} c = 0, + + where :math:`u` is the solution vector of the PDE. + """ def __init__(self, dim, eqs): + """ + :arg dim: dimension of the PDE + :arg eqs: list of dictionaries mapping a :class:`CoeffIdentifier` to a + value. 
+ """ self.dim = dim self.eqs = eqs -- GitLab From decacbed8e4442fe98ece4d4643ef2076210be99 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Sun, 19 May 2019 18:23:26 -0500 Subject: [PATCH 52/68] fix issues found with Maxwell's --- sumpy/expansion/__init__.py | 45 ++++++++++++++++++++++++++++++------- sumpy/tools.py | 25 ++++++++++++--------- 2 files changed, 52 insertions(+), 18 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index a0abecfb..ddf7b899 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -406,11 +406,8 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): s = solve_symbolic(n.T[:, idx_all_exprs], n.T) # Filter out coefficients belonging to this expression - indices = [] - for idx in idx_all_exprs: - if idx >= offset*iexpr and idx < offset*(iexpr+1): - indices.append(idx) - s = s[:len(indices), offset*iexpr:offset*(iexpr+1)] + indices = list(filter(lambda x: x < offset, idx_all_exprs)) + s = s[:len(indices), :offset] stored_identifiers = [mis[i] for i in indices] else: @@ -586,6 +583,39 @@ class PDE(object): eqs.append(dict(new_eq)) return PDE(self.dim, eqs=eqs) + def diff(self, mi): + eqs = [] + for eq in self.eqs: + new_eq = defaultdict(lambda: 0) + for ident, v in eq.items(): + new_mi = add_mi(ident.mi, mi) + new_ident = CoeffIdentifier(tuple(new_mi), ident.iexpr) + new_eq[new_ident] += v + eqs.append(dict(new_eq)) + return PDE(self.dim, eqs=eqs) + + def curl(self): + assert len(self.eqs) == self.dim + if self.dim == 2: + f1, f2 = self[0], self[1] + return f2.diff((1, 0)) - f1.diff((0, 1)) + + assert self.dim == 3 + eqs = None + for d in range(3): + f1, f2 = self[(d+1) % 3], self[(d+2) % 3] + mi1 = [0, 0, 0] + mi1[(d+1) % 3] = 1 + mi2 = [0, 0, 0] + mi2[(d+2) % 3] = 1 + new_eqs = f2.diff(mi1) - f1.diff(mi2) + if eqs is None: + eqs = new_eqs + else: + eqs = eqs | new_eqs + + return eqs + def div(self): result = defaultdict(lambda: 0) for d, eq in enumerate(self.eqs): @@ -615,10 +645,9 @@ def process_pde(pde): if s1 == s2: continue m = nth_root_assume_positive(val1/val2, s2 - s1) - if multiplier is None: + if multiplier is None and not isinstance(m, (int, sym.Integer)): multiplier = m - elif multiplier != m: - return pde, 1 + if multiplier is None: return pde, 1 eqs = [] diff --git a/sumpy/tools.py b/sumpy/tools.py index 695694bf..7f09fb70 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -694,15 +694,17 @@ def reduced_row_echelon_form(m): pivot_cols.append(i) scale = mat[index, i] - t = mat[index, :]//scale - not_exact = (t * scale != mat[index, :]) - - if (np.any(not_exact)): - for j in range(ncols): - if not_exact[j]: - t[j] = sym.sympify(mat[index, j])/scale - - mat[index, :] = t + if isinstance(scale, (int, sym.Integer)): + scale = int(scale) + + for j in range(mat.shape[1]): + elem = mat[index, j] + if isinstance(scale, int) and isinstance(elem, (int, sym.Integer)): + quo = int(elem) // scale + if quo * scale == elem: + mat[index, j] = quo + continue + mat[index, j] = sym.sympify(elem)/scale for j in range(nrows): if (j == index): @@ -735,7 +737,10 @@ def nullspace(m): vec[free_var] = 1 for piv_row, piv_col in enumerate(pivot_cols): for pos in pivot_cols[piv_row+1:] + [free_var]: - vec[piv_col] -= mat[piv_row, pos] + if isinstance(mat[piv_row, pos], sym.Integer): + vec[piv_col] -= int(mat[piv_row, pos]) + else: + vec[piv_col] -= mat[piv_row, pos] n.append(vec) return np.array(n, dtype=object).T -- GitLab From 07479232e4fe851dc39fa408896970eef622cd71 Mon Sep 17 00:00:00 2001 From: Isuru 
Fernando Date: Sun, 21 Apr 2019 17:59:15 -0500 Subject: [PATCH 53/68] reconstruction matrix --- sumpy/expansion/__init__.py | 96 +++++++++++++++++++++++++++++++++---- test/test_kernels.py | 24 +++++----- 2 files changed, 98 insertions(+), 22 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index ddf7b899..02ba9fec 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -266,6 +266,19 @@ def _spmv(spmat, x, sparse_vectors): # }}} +def _reconstruct(reconstruct_matrix, stored): + res = [0] * len(reconstruct_matrix) + stored_idx = 0 + for row, deps in reconstruct_matrix: + if len(deps) == 0: + res[row] = stored[stored_idx] + stored_idx += 1 + continue + for k, v in deps.items(): + res[row] += res[k] * v + return res + + class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): """ .. automethod:: __init__ @@ -288,15 +301,18 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): def get_full_kernel_derivatives_from_stored(self, stored_kernel_derivatives, rscale): - coeff_matrix = self.get_coefficient_matrix(rscale) - return _spmv(coeff_matrix, stored_kernel_derivatives, + coeff_matrix, reconstruct_matrix, use_reconstruct = \ + self.get_coefficient_matrix(rscale) + if not use_reconstruct: + return _spmv(coeff_matrix, stored_kernel_derivatives, sparse_vectors=False) + return _reconstruct(reconstruct_matrix, stored_kernel_derivatives) def get_stored_mpole_coefficients_from_full(self, full_mpole_coefficients, rscale): # = M^T x, where M = coeff matrix - coeff_matrix = self.get_coefficient_matrix(rscale) + coeff_matrix, _, _ = self.get_coefficient_matrix(rscale) result = [0] * len(self.stored_identifiers) for row, coeff in enumerate(full_mpole_coefficients): for col, val in coeff_matrix[row]: @@ -305,7 +321,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): @property def stored_identifiers(self): - stored_identifiers, coeff_matrix = self._get_stored_ids_and_coeff_mat() + stored_identifiers, coeff_matrix, _ = self.get_stored_ids_and_coeff_mat() return stored_identifiers @memoize_method @@ -328,11 +344,13 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): ^ rows = one for every derivative """ - stored_identifiers, coeff_matrix = self._get_stored_ids_and_coeff_mat() + stored_identifiers, coeff_matrix, reconstruct_matrix = \ + self.get_stored_ids_and_coeff_mat() full_coeffs = self.get_full_coefficient_identifiers() matrix_rows = [] _, deriv_multiplier, _, _ = self._get_pdes() + count_nonzero_coeff = 0 for irow, row in six.iteritems(coeff_matrix): # For eg: (u_xxx / rscale**3) = (u_yy / rscale**2) * coeff1 + # (u_xx / rscale**2) * coeff2 @@ -344,9 +362,37 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): diff = row_rscale - sum(stored_identifiers[icol]) mult = (rscale*deriv_multiplier)**diff matrix_row.append((icol, coeff * mult)) + count_nonzero_coeff += len(row) matrix_rows.append((irow, matrix_row)) - return defaultdict(list, matrix_rows) + if reconstruct_matrix is None: + return defaultdict(list, matrix_rows), None, False + + reconstruct_matrix_with_rscale = [] + count_nonzero_reconstruct = 0 + for row, deps in reconstruct_matrix: + # For eg: (u_xxx / rscale**3) = (u_yy / rscale**2) * coeff1 + + # (u_xx / rscale**2) * coeff2 + # is converted to u_xxx = u_yy * (rscale * coeff1) + + # u_xx * (rscale * coeff2) + row_rscale = sum(full_coeffs[row]) + matrix_row = [] + deps_with_rscale = {} + for k, coeff in deps.items(): + diff = row_rscale - sum(full_coeffs[k]) 
+ mult = (rscale*deriv_multiplier)**diff + deps_with_rscale[k] = coeff * mult + count_nonzero_reconstruct += len(deps) + reconstruct_matrix_with_rscale.append((row, deps_with_rscale)) + + use_reconstruct = count_nonzero_reconstruct < count_nonzero_coeff + + return defaultdict(list, matrix_rows), reconstruct_matrix_with_rscale, \ + use_reconstruct + + @memoize_method + def get_stored_ids_and_coeff_mat(self): + return self._get_stored_ids_and_coeff_mat()[:3] @memoize_method def _get_stored_ids_and_coeff_mat(self): @@ -357,7 +403,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): stored_identifiers = [] from pytools import ProcessLogger - plog = ProcessLogger(logger, "compute recurrence for Taylor coefficients") + plog = ProcessLogger(logger, "compute PDE for Taylor coefficients") pdes, _, iexpr, nexpr = self._get_pdes() pde_mat = [] @@ -377,6 +423,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): else: pde_mat.append(eq) + reconstruct_matrix = None if len(pde_mat) > 0: r""" Find a matrix :math:`s` such that :math:`K = S^T K_{[r]}` @@ -404,15 +451,18 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): idx_all_exprs = self.get_reduced_coeffs(n) s = solve_symbolic(n.T[:, idx_all_exprs], n.T) - # Filter out coefficients belonging to this expression indices = list(filter(lambda x: x < offset, idx_all_exprs)) s = s[:len(indices), :offset] stored_identifiers = [mis[i] for i in indices] + if nexpr == 1: + reconstruct_matrix = self._get_reconstruct_matrix(pde_mat, + indices) else: s = np.eye(len(mis)) stored_identifiers = mis + idx_all_exprs = list(range(len(mis))) # coeff_matrix is a dictionary of lists. Each key in the dictionary # is a row number and the values are pairs of column number and value. @@ -428,7 +478,35 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): .format(orig=len(self.get_full_coefficient_identifiers()), red=len(stored_identifiers))) - return stored_identifiers, coeff_matrix + return stored_identifiers, coeff_matrix, reconstruct_matrix, pde_mat, \ + idx_all_exprs + + def _get_reconstruct_matrix(self, pde_mat, stored): + constructed = [False]*len(pde_mat[0]) + reconstruct = [] + for i in stored: + reconstruct.append((i, dict())) + constructed[i] = True + found = True + while found: + found = False + for j in range(len(pde_mat)): + deps = np.nonzero(pde_mat[j])[0] + c = [not constructed[d] for d in deps] + if np.count_nonzero(c) == 1: + new_index = deps[np.nonzero(c)[0]][0] + recurrence = {} + for d in deps: + if not constructed[d]: + assert d == new_index + continue + recurrence[d] = -pde_mat[j][d]/pde_mat[j][new_index] + reconstruct.append((new_index, recurrence)) + constructed[new_index] = True + found = True + if np.all(constructed): + return reconstruct + return None def get_pdes(self): r""" diff --git a/test/test_kernels.py b/test/test_kernels.py index 04ab51ea..313fdd63 100644 --- a/test/test_kernels.py +++ b/test/test_kernels.py @@ -22,8 +22,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" -from six.moves import range - import numpy as np import numpy.linalg as la import sys @@ -186,7 +184,7 @@ def test_p2e2p(ctx_getter, base_knl, expn_class, order, with_source_derivative): else: h_values = [1/2, 1/3, 1/5] - center = np.array([2, 1], np.float64) + center = np.array([2, 1, 0][:knl.dim], np.float64) sources = (0.7*(-0.5+np.random.rand(knl.dim, nsources).astype(np.float64)) + center[:, np.newaxis]) @@ -206,15 +204,15 @@ def test_p2e2p(ctx_getter, base_knl, expn_class, order, with_source_derivative): for h in h_values: if issubclass(expn_class, LocalExpansionBase): - loc_center = np.array([5.5, 0.0]) + center + loc_center = np.array([5.5, 0.0, 0.0][:knl.dim]) + center centers = np.array(loc_center, dtype=np.float64).reshape(knl.dim, 1) fp = FieldPlotter(loc_center, extent=h, npoints=res) else: - eval_center = np.array([1/h, 0.0]) + center + eval_center = np.array([1/h, 0.0, 0.0][:knl.dim]) + center fp = FieldPlotter(eval_center, extent=0.1, npoints=res) - centers = ( - np.array([0.0, 0.0], dtype=np.float64).reshape(knl.dim, 1) - + center[:, np.newaxis]) + centers = (np.array([0.0, 0.0, 0.0][:knl.dim], + dtype=np.float64).reshape(knl.dim, 1) + + center[:, np.newaxis]) targets = fp.points @@ -375,7 +373,7 @@ def test_translations(ctx_getter, knl, local_expn_class, mpole_expn_class): extra_kwargs["mu"] = 0.05 # Just to make sure things also work away from the origin - origin = np.array([2, 1], np.float64) + origin = np.array([2, 1, 0][:knl.dim], np.float64) sources = (0.7*(-0.5+np.random.rand(knl.dim, nsources).astype(np.float64)) + origin[:, np.newaxis]) strengths = np.ones(nsources, dtype=np.float64) * (1/nsources) @@ -387,18 +385,18 @@ def test_translations(ctx_getter, knl, local_expn_class, mpole_expn_class): from sumpy.visualization import FieldPlotter - eval_offset = np.array([5.5, 0.0]) + eval_offset = np.array([5.5, 0.0, 0][:knl.dim]) centers = (np.array( [ # box 0: particles, first mpole here - [0, 0], + [0, 0, 0][:knl.dim], # box 1: second mpole here - np.array([-0.2, 0.1], np.float64), + np.array([-0.2, 0.1, 0][:knl.dim], np.float64), # box 2: first local here - eval_offset + np.array([0.3, -0.2], np.float64), + eval_offset + np.array([0.3, -0.2, 0][:knl.dim], np.float64), # box 3: second local and eval here eval_offset -- GitLab From 6a2bccb004ea98d19138de506fd695cc2d5ce619 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 21 May 2019 18:58:23 -0500 Subject: [PATCH 54/68] New API for PDE --- sumpy/expansion/__init__.py | 189 ++------------------------------- sumpy/expansion/pde_utils.py | 200 +++++++++++++++++++++++++++++++++++ 2 files changed, 207 insertions(+), 182 deletions(-) create mode 100644 sumpy/expansion/pde_utils.py diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 02ba9fec..888ba26d 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -29,8 +29,9 @@ import logging from pytools import memoize_method import sumpy.symbolic as sym from collections import defaultdict -from sumpy.tools import CoeffIdentifier, add_mi, nth_root_assume_positive - +from sumpy.tools import add_mi +from .pde_utils import (make_pde_syms, process_pde, laplacian, div, curl, grad, + PDE) __doc__ = """ .. 
autoclass:: ExpansionBase @@ -399,7 +400,6 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): from six import iteritems from sumpy.tools import nullspace, solve_symbolic - tol = 1e-13 stored_identifiers = [] from pytools import ProcessLogger @@ -469,7 +469,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): coeff_matrix = defaultdict(list) for i in range(s.shape[0]): for j in range(s.shape[1]): - if np.abs(s[i, j]) > tol: + if s[i, j] != 0: coeff_matrix[j].append((i, s[i, j])) plog.done() @@ -580,181 +580,6 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): return rows -class PDE(object): - r""" - Represents a system of PDEs of dimension `dim`. It is represented by a - list of dictionaries with each dictionary representing a single PDE. - Each dictionary maps a :class:`CoeffIdentifier` object to a value, - - .. math:: - - \sum_{(mi, ident), c \text{pde\_dict}} - \frac{\partial^{\sum(mi)}}{\partial u^mi} c = 0, - - where :math:`u` is the solution vector of the PDE. - """ - def __init__(self, dim, eqs): - """ - :arg dim: dimension of the PDE - :arg eqs: list of dictionaries mapping a :class:`CoeffIdentifier` to a - value. - """ - self.dim = dim - self.eqs = eqs - - def __mul__(self, param): - eqs = [] - for eq in self.eqs: - new_eq = dict() - for k, v in eq.items(): - new_eq[k] = eq[k] * param - eqs.append(new_eq) - return PDE(self.dim, eqs=eqs) - - __rmul__ = __mul__ - - def __add__(self, other_pde): - assert self.dim == other_pde.dim - assert len(self.eqs) == len(other_pde.eqs) - eqs = [] - for eq1, eq2 in zip(self.eqs, other_pde.eqs): - eq = defaultdict(lambda: 0) - for k, v in eq1.items(): - eq[k] += v - for k, v in eq2.items(): - eq[k] += v - eqs.append(dict(eq)) - return PDE(self.dim, eqs=eqs) - - __radd__ = __add__ - - def __sub__(self, other_pde): - return self + (-1)*other_pde - - def laplacian(self): - p = PDE(self.dim, eqs=[]) - for j in range(len(self.eqs)): - p = p | self[j].grad().div() - return p - - def __or__(self, other_pde): - assert self.dim == other_pde.dim - eqs = self.eqs + other_pde.eqs - return PDE(self.dim, eqs=eqs) - - def __getitem__(self, key): - eqs = self.eqs.__getitem__(key) - if not isinstance(eqs, list): - eqs = [eqs] - return PDE(self.dim, eqs=eqs) - - def grad(self): - assert len(self.eqs) == 1 - eqs = [] - for d in range(self.dim): - new_eq = defaultdict(lambda: 0) - for ident, v in self.eqs[0].items(): - mi = list(ident.mi) - mi[d] += 1 - new_ident = CoeffIdentifier(tuple(mi), ident.iexpr) - new_eq[new_ident] += v - eqs.append(dict(new_eq)) - return PDE(self.dim, eqs=eqs) - - def diff(self, mi): - eqs = [] - for eq in self.eqs: - new_eq = defaultdict(lambda: 0) - for ident, v in eq.items(): - new_mi = add_mi(ident.mi, mi) - new_ident = CoeffIdentifier(tuple(new_mi), ident.iexpr) - new_eq[new_ident] += v - eqs.append(dict(new_eq)) - return PDE(self.dim, eqs=eqs) - - def curl(self): - assert len(self.eqs) == self.dim - if self.dim == 2: - f1, f2 = self[0], self[1] - return f2.diff((1, 0)) - f1.diff((0, 1)) - - assert self.dim == 3 - eqs = None - for d in range(3): - f1, f2 = self[(d+1) % 3], self[(d+2) % 3] - mi1 = [0, 0, 0] - mi1[(d+1) % 3] = 1 - mi2 = [0, 0, 0] - mi2[(d+2) % 3] = 1 - new_eqs = f2.diff(mi1) - f1.diff(mi2) - if eqs is None: - eqs = new_eqs - else: - eqs = eqs | new_eqs - - return eqs - - def div(self): - result = defaultdict(lambda: 0) - for d, eq in enumerate(self.eqs): - for ident, v in eq.items(): - mi = list(ident.mi) - mi[d] += 1 - new_ident = CoeffIdentifier(tuple(mi), 
ident.iexpr) - result[new_ident] += v - return PDE(self.dim, eqs=[dict(result)]) - - def __repr__(self): - return repr(self.eqs) - - -def process_pde(pde): - """ - Process a PDE object to return a PDE and a multiplier such that - the sum of multiplier ** order * derivative * coefficient gives the - original PDE `pde`. - """ - multiplier = None - for eq in pde.eqs: - for ident1, val1 in eq.items(): - for ident2, val2 in eq.items(): - s1 = sum(ident1.mi) - s2 = sum(ident2.mi) - if s1 == s2: - continue - m = nth_root_assume_positive(val1/val2, s2 - s1) - if multiplier is None and not isinstance(m, (int, sym.Integer)): - multiplier = m - - if multiplier is None: - return pde, 1 - eqs = [] - for eq in pde.eqs: - new_eq = dict() - for i, (k, v) in enumerate(eq.items()): - new_eq[k] = v * multiplier**sum(k.mi) - if i == 0: - val = new_eq[k] - new_eq[k] /= sym.sympify(val) - if isinstance(new_eq[k], sym.Integer): - new_eq[k] = int(new_eq[k]) - eqs.append(new_eq) - return PDE(pde.dim, eqs=eqs), multiplier - - -def make_pde_syms(dim, nexprs): - """ - Returns a list of expressions of size `nexprs` to create a PDE - of dimension `dim`. - """ - eqs = [] - for iexpr in range(nexprs): - mi = [0]*dim - eq = dict() - eq[CoeffIdentifier(tuple(mi), iexpr)] = 1 - eqs.append(eq) - return PDE(dim, eqs=eqs) - class LaplaceExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): @@ -766,7 +591,7 @@ class LaplaceExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): def get_pdes(self): w = make_pde_syms(self.dim, 1) - return w.laplacian(), 0, 1 + return laplacian(w), 0, 1 def _get_reduced_coeffs(self, nullspace): idx = [] @@ -790,7 +615,7 @@ class HelmholtzExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): def get_pdes(self, **kwargs): w = make_pde_syms(self.dim, 1) k = sym.Symbol(self.helmholtz_k_name) - return (w.laplacian() + k**2 * w), 0, 1 + return (laplacian(w) + k**2 * w), 0, 1 def _get_reduced_coeffs(self, nullspace): idx = [] @@ -817,7 +642,7 @@ class StokesExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): mu = sym.Symbol(self.viscosity_mu_name) u = w[:self.dim] p = w[-1] - pdes = (mu * u.laplacian() - p.grad() | u.div()) + pdes = PDE(self.dim, mu * laplacian(u) - grad(p), div(u)) return pdes, self.icomp, self.dim+1 # }}} diff --git a/sumpy/expansion/pde_utils.py b/sumpy/expansion/pde_utils.py new file mode 100644 index 00000000..1b26e038 --- /dev/null +++ b/sumpy/expansion/pde_utils.py @@ -0,0 +1,200 @@ +from __future__ import division, absolute_import + +__copyright__ = "Copyright (C) 2019 Isuru Fernando" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +from collections import defaultdict +from sumpy.tools import CoeffIdentifier, add_mi, nth_root_assume_positive +import sumpy.symbolic as sym + +class PDE(object): + r""" + Represents a system of PDEs of dimension `dim`. It is represented by a + list of dictionaries with each dictionary representing a single PDE. + Each dictionary maps a :class:`CoeffIdentifier` object to a value, + + .. math:: + + \sum_{(mi, ident), c \text{pde\_dict}} + \frac{\partial^{\sum(mi)}}{\partial u^mi} c = 0, + + where :math:`u` is the solution vector of the PDE. + """ + def __init__(self, dim, *eqs): + """ + :arg dim: dimension of the PDE + :arg eqs: list of dictionaries mapping a :class:`CoeffIdentifier` to a + value or PDE instance + """ + self.dim = dim + self.eqs = [] + for obj in eqs: + if isinstance(obj, PDE): + self.eqs.extend(obj.eqs) + else: + self.eqs.append(obj) + + def __mul__(self, param): + eqs = [] + for eq in self.eqs: + new_eq = dict() + for k, v in eq.items(): + new_eq[k] = eq[k] * param + eqs.append(new_eq) + return PDE(self.dim, *eqs) + + __rmul__ = __mul__ + + def __add__(self, other_pde): + assert self.dim == other_pde.dim + assert len(self.eqs) == len(other_pde.eqs) + eqs = [] + for eq1, eq2 in zip(self.eqs, other_pde.eqs): + eq = defaultdict(lambda: 0) + for k, v in eq1.items(): + eq[k] += v + for k, v in eq2.items(): + eq[k] += v + eqs.append(dict(eq)) + return PDE(self.dim, *eqs) + + __radd__ = __add__ + + def __sub__(self, other_pde): + return self + (-1)*other_pde + + def __getitem__(self, key): + eqs = self.eqs.__getitem__(key) + if not isinstance(eqs, list): + eqs = [eqs] + return PDE(self.dim, *eqs) + + def __repr__(self): + return repr(self.eqs) + + +def laplacian(pde): + p = PDE(pde.dim) + for j in range(len(pde.eqs)): + p.eqs.append(div(grad(pde[j])).eqs[0]) + return p + + +def diff(pde, mi): + eqs = [] + for eq in pde.eqs: + new_eq = defaultdict(lambda: 0) + for ident, v in eq.items(): + new_mi = add_mi(ident.mi, mi) + new_ident = CoeffIdentifier(tuple(new_mi), ident.iexpr) + new_eq[new_ident] += v + eqs.append(dict(new_eq)) + return PDE(pde.dim, *eqs) + + +def grad(pde): + assert len(pde.eqs) == 1 + eqs = [] + for d in range(pde.dim): + mi = [0]*pde.dim + mi[d] += 1 + eqs.append(diff(pde, mi).eqs[0]) + return PDE(pde.dim, *eqs) + + +def curl(pde): + assert len(pde.eqs) == pde.dim + if pde.dim == 2: + f1, f2 = pde[0], pde[1] + return diff(f2, (1, 0)) - diff(f1, (0, 1)) + + assert pde.dim == 3 + eqs = [] + for d in range(3): + f1, f2 = pde[(d+1) % 3], pde[(d+2) % 3] + mi1 = [0, 0, 0] + mi1[(d+1) % 3] = 1 + mi2 = [0, 0, 0] + mi2[(d+2) % 3] = 1 + new_eqs = diff(f2, mi1) - diff(f1, mi2) + eqs.extend(new_eqs.eqs) + return PDE(pde.dim, *eqs) + + +def div(pde): + result = defaultdict(lambda: 0) + for d, eq in enumerate(pde.eqs): + for ident, v in eq.items(): + mi = list(ident.mi) + mi[d] += 1 + new_ident = CoeffIdentifier(tuple(mi), ident.iexpr) + result[new_ident] += v + return PDE(pde.dim, dict(result)) + + +def process_pde(pde): + """ + Process a PDE object to return a PDE and a multiplier such that + the sum of multiplier ** order * derivative * coefficient gives the + original PDE `pde`. 
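
The PDE container and the free functions above (grad, div, curl, diff,
laplacian) all act on one underlying representation: every equation is a
dictionary mapping a derivative multi-index to its coefficient. The following
self-contained sketch uses plain tuples in place of CoeffIdentifier, with
made-up helper names (grad_, div_) that are not part of sumpy's API, to show
how composing a gradient with a divergence yields the Laplace operator in
that representation:

    from collections import defaultdict

    def grad_(eq, dim):
        # d/dx_d applied to one scalar equation -> one equation per dimension
        eqs = []
        for d in range(dim):
            new_eq = defaultdict(int)
            for mi, coeff in eq.items():
                new_mi = list(mi)
                new_mi[d] += 1
                new_eq[tuple(new_mi)] += coeff
            eqs.append(dict(new_eq))
        return eqs

    def div_(eqs):
        # sum_d of d/dx_d applied to the d-th component -> one scalar equation
        result = defaultdict(int)
        for d, eq in enumerate(eqs):
            for mi, coeff in eq.items():
                new_mi = list(mi)
                new_mi[d] += 1
                result[tuple(new_mi)] += coeff
        return dict(result)

    u = {(0, 0): 1}             # the unknown u itself (2D)
    print(div_(grad_(u, 2)))    # {(2, 0): 1, (0, 2): 1}, i.e. u_xx + u_yy = 0

This mirrors what laplacian(make_pde_syms(2, 1)) produces for the Laplace
wrangler below, with CoeffIdentifier keys instead of bare tuples.
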
+ """ + multiplier = None + for eq in pde.eqs: + for ident1, val1 in eq.items(): + for ident2, val2 in eq.items(): + s1 = sum(ident1.mi) + s2 = sum(ident2.mi) + if s1 == s2: + continue + m = nth_root_assume_positive(val1/val2, s2 - s1) + if multiplier is None and not isinstance(m, (int, sym.Integer)): + multiplier = m + + if multiplier is None: + return pde, 1 + eqs = [] + for eq in pde.eqs: + new_eq = dict() + for i, (k, v) in enumerate(eq.items()): + new_eq[k] = v * multiplier**sum(k.mi) + if i == 0: + val = new_eq[k] + new_eq[k] /= sym.sympify(val) + if isinstance(new_eq[k], sym.Integer): + new_eq[k] = int(new_eq[k]) + eqs.append(new_eq) + return PDE(pde.dim, *eqs), multiplier + + +def make_pde_syms(dim, nexprs): + """ + Returns a list of expressions of size `nexprs` to create a PDE + of dimension `dim`. + """ + eqs = [] + for iexpr in range(nexprs): + mi = [0]*dim + eq = dict() + eq[CoeffIdentifier(tuple(mi), iexpr)] = 1 + eqs.append(eq) + return PDE(dim, *eqs) + -- GitLab From bf4cb2deeee82f477f83ddfe72d25f8b57a6a38e Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 21 May 2019 19:08:26 -0500 Subject: [PATCH 55/68] Don't modify PDE objects --- sumpy/expansion/__init__.py | 3 +-- sumpy/expansion/pde_utils.py | 8 ++++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 888ba26d..a2d5c6f2 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -30,7 +30,7 @@ from pytools import memoize_method import sumpy.symbolic as sym from collections import defaultdict from sumpy.tools import add_mi -from .pde_utils import (make_pde_syms, process_pde, laplacian, div, curl, grad, +from .pde_utils import (make_pde_syms, process_pde, laplacian, div, grad, PDE) __doc__ = """ @@ -580,7 +580,6 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): return rows - class LaplaceExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): init_arg_names = ("order", "dim", "max_mi") diff --git a/sumpy/expansion/pde_utils.py b/sumpy/expansion/pde_utils.py index 1b26e038..bd856a62 100644 --- a/sumpy/expansion/pde_utils.py +++ b/sumpy/expansion/pde_utils.py @@ -26,6 +26,7 @@ from collections import defaultdict from sumpy.tools import CoeffIdentifier, add_mi, nth_root_assume_positive import sumpy.symbolic as sym + class PDE(object): r""" Represents a system of PDEs of dimension `dim`. 
It is represented by a @@ -93,10 +94,10 @@ class PDE(object): def laplacian(pde): - p = PDE(pde.dim) + eqs = [] for j in range(len(pde.eqs)): - p.eqs.append(div(grad(pde[j])).eqs[0]) - return p + eqs.append(div(grad(pde[j])).eqs[0]) + return PDE(pde.dim, *eqs) def diff(pde, mi): @@ -197,4 +198,3 @@ def make_pde_syms(dim, nexprs): eq[CoeffIdentifier(tuple(mi), iexpr)] = 1 eqs.append(eq) return PDE(dim, *eqs) - -- GitLab From 65e5457d6b7f9da4ae11b3dda0be9fd91af11d4e Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 21 May 2019 19:09:44 -0500 Subject: [PATCH 56/68] simplify code --- sumpy/expansion/pde_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sumpy/expansion/pde_utils.py b/sumpy/expansion/pde_utils.py index bd856a62..23152e9a 100644 --- a/sumpy/expansion/pde_utils.py +++ b/sumpy/expansion/pde_utils.py @@ -96,7 +96,7 @@ class PDE(object): def laplacian(pde): eqs = [] for j in range(len(pde.eqs)): - eqs.append(div(grad(pde[j])).eqs[0]) + eqs.append(div(grad(pde[j]))) return PDE(pde.dim, *eqs) @@ -118,7 +118,7 @@ def grad(pde): for d in range(pde.dim): mi = [0]*pde.dim mi[d] += 1 - eqs.append(diff(pde, mi).eqs[0]) + eqs.append(diff(pde, mi)) return PDE(pde.dim, *eqs) -- GitLab From ca5693328e5c7e5f07be23d2b0990d3012f312be Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 24 Jun 2019 19:52:17 +0200 Subject: [PATCH 57/68] Add method to extract single pde --- sumpy/expansion/__init__.py | 167 +++++++++++------------------------- sumpy/tools.py | 26 ++++++ 2 files changed, 78 insertions(+), 115 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index a2d5c6f2..384fe64a 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -29,7 +29,7 @@ import logging from pytools import memoize_method import sumpy.symbolic as sym from collections import defaultdict -from sumpy.tools import add_mi +from sumpy.tools import add_mi, find_linear_independent_row, CoeffIdentifier from .pde_utils import (make_pde_syms, process_pde, laplacian, div, grad, PDE) @@ -350,7 +350,6 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): full_coeffs = self.get_full_coefficient_identifiers() matrix_rows = [] - _, deriv_multiplier, _, _ = self._get_pdes() count_nonzero_coeff = 0 for irow, row in six.iteritems(coeff_matrix): # For eg: (u_xxx / rscale**3) = (u_yy / rscale**2) * coeff1 + @@ -361,7 +360,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): matrix_row = [] for icol, coeff in row: diff = row_rscale - sum(stored_identifiers[icol]) - mult = (rscale*deriv_multiplier)**diff + mult = rscale**diff matrix_row.append((icol, coeff * mult)) count_nonzero_coeff += len(row) matrix_rows.append((irow, matrix_row)) @@ -381,7 +380,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): deps_with_rscale = {} for k, coeff in deps.items(): diff = row_rscale - sum(full_coeffs[k]) - mult = (rscale*deriv_multiplier)**diff + mult = rscale**diff deps_with_rscale[k] = coeff * mult count_nonzero_reconstruct += len(deps) reconstruct_matrix_with_rscale.append((row, deps_with_rscale)) @@ -393,19 +392,13 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): @memoize_method def get_stored_ids_and_coeff_mat(self): - return self._get_stored_ids_and_coeff_mat()[:3] - - @memoize_method - def _get_stored_ids_and_coeff_mat(self): from six import iteritems from sumpy.tools import nullspace, solve_symbolic - stored_identifiers = [] - from pytools import ProcessLogger plog = 
ProcessLogger(logger, "compute PDE for Taylor coefficients") - pdes, _, iexpr, nexpr = self._get_pdes() + pdes, iexpr, nexpr = self.get_pdes() pde_mat = [] mis = self.get_full_coefficient_identifiers() coeff_ident_enumerate_dict = dict((tuple(mi), i) for @@ -423,7 +416,6 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): else: pde_mat.append(eq) - reconstruct_matrix = None if len(pde_mat) > 0: r""" Find a matrix :math:`s` such that :math:`K = S^T K_{[r]}` @@ -456,9 +448,6 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): s = s[:len(indices), :offset] stored_identifiers = [mis[i] for i in indices] - if nexpr == 1: - reconstruct_matrix = self._get_reconstruct_matrix(pde_mat, - indices) else: s = np.eye(len(mis)) stored_identifiers = mis @@ -478,35 +467,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): .format(orig=len(self.get_full_coefficient_identifiers()), red=len(stored_identifiers))) - return stored_identifiers, coeff_matrix, reconstruct_matrix, pde_mat, \ - idx_all_exprs - - def _get_reconstruct_matrix(self, pde_mat, stored): - constructed = [False]*len(pde_mat[0]) - reconstruct = [] - for i in stored: - reconstruct.append((i, dict())) - constructed[i] = True - found = True - while found: - found = False - for j in range(len(pde_mat)): - deps = np.nonzero(pde_mat[j])[0] - c = [not constructed[d] for d in deps] - if np.count_nonzero(c) == 1: - new_index = deps[np.nonzero(c)[0]][0] - recurrence = {} - for d in deps: - if not constructed[d]: - assert d == new_index - continue - recurrence[d] = -pde_mat[j][d]/pde_mat[j][new_index] - reconstruct.append((new_index, recurrence)) - constructed[new_index] = True - found = True - if np.all(constructed): - return reconstruct - return None + return stored_identifiers, coeff_matrix, pde_mat def get_pdes(self): r""" @@ -519,65 +480,59 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): raise NotImplementedError @memoize_method - def _get_pdes(self): - r""" - Returns a list of `pde_dict`s, a multiplier, expression number, - number of expressions such that each PDE is represented by, - - .. math:: - - \sum_{\nu,c_\nu\in \text{pde\_dict}} - \frac{c_\nu\cdot \alpha_\nu} - {\text{deriv\_multiplier}^{ - \sum \text{mi} - }} = 0, - - where :math:`\mathbf\alpha` is a coefficient vector. - - Note that *coeff* should be a number (not symbolic) to enable use of - fast linear algebra routines. *deriv_multiplier* can be symbolic - and should be used when the PDE has symbolic values as coefficients - for the derivatives. + def get_single_pde(self): + from six import iteritems + from sumpy.tools import nullspace - :Example: + pdes, iexpr, nexpr = self.get_pdes() + pdes, multiplier = process_pde(pdes) + if nexpr == 1: + return pdes - :math:`\Delta u - k^2 u = 0` for 2D can be written as, + from pytools import ProcessLogger + plog = ProcessLogger(logger, "computing single PDE for multiple PDEs") - .. 
math:: + from pytools import ( + generate_nonnegative_integer_tuples_summing_to_at_most + as gnitstam) - \frac{(2, 0) \times 1}{k^{sum((2, 0))}} + - \frac{(0, 2) \times 1}{k^{sum((0, 2))}} + - \frac{(0, 0) \times -1}{k^{sum((0, 0))}} = 0 - """ - pde, iexpr, nexpr = self.get_pdes() - pde, multiplier = process_pde(pde) - return pde, multiplier, iexpr, nexpr + for order in range(1, 100): + mis = sorted(gnitstam(order, self.dim), key=sum) + + pde_mat = [] + coeff_ident_enumerate_dict = dict((tuple(mi), i) for + (i, mi) in enumerate(mis)) + offset = len(mis) + + for mi in mis: + for pde_dict in pdes.eqs: + eq = [0]*(len(mis)*nexpr) + for ident, coeff in iteritems(pde_dict): + c = tuple(add_mi(ident.mi, mi)) + if c not in coeff_ident_enumerate_dict: + break + idx = offset*ident.iexpr + coeff_ident_enumerate_dict[c] + eq[idx] = coeff + else: + pde_mat.append(eq) + + if len(pde_mat) == 0: + continue - def get_reduced_coeffs(self, nullspace): - """ - Returns the indices of the reduced set of derivatives which are stored. - Override this method if the reduced set is known analytically. + n = nullspace(pde_mat)[offset*iexpr:offset*(iexpr+1), :] + indep_row = find_linear_independent_row(n) + if len(indep_row) > 0: + pde_dict = {} + min_order = min(sum(mis[k]) for k in indep_row.keys()) + for k, v in indep_row.items(): + mi = mis[k] + mult = multiplier**(sum(mi)-min_order) + pde_dict[CoeffIdentifier(mi, 0)] = v * mult + plog.done() + return PDE(self.dim, pde_dict) - This method does elementary row operations to figure out which rows are - linearly dependent on the previous rows. Partial pivoting is not done - to preserve the order so that a row is not linearly dependent on a row - that came after in the original row order. - """ - mat = nullspace.copy() - nrows = mat.shape[0] - ncols = mat.shape[1] - rows = [] - for i in range(nrows): - for j in range(ncols): - if mat[i, j] != 0: - col = j - break - else: - continue - rows.append(i) - for j in range(i+1, nrows): - mat[j, :] = mat[i, col]*mat[j, :] - mat[i, :]*mat[j, col] - return rows + plog.done() + assert False class LaplaceExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): @@ -592,15 +547,6 @@ class LaplaceExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): w = make_pde_syms(self.dim, 1) return laplacian(w), 0, 1 - def _get_reduced_coeffs(self, nullspace): - idx = [] - for i, mi in enumerate(self.get_full_coefficient_identifiers()): - # Return only the derivatives with the order of the last dimension - # 0 or 1. Higher order derivatives can be reduced down to these. - if mi[-1] < 2: - idx.append(i) - return idx - class HelmholtzExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): @@ -616,15 +562,6 @@ class HelmholtzExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): k = sym.Symbol(self.helmholtz_k_name) return (laplacian(w) + k**2 * w), 0, 1 - def _get_reduced_coeffs(self, nullspace): - idx = [] - for i, mi in enumerate(self.get_full_coefficient_identifiers()): - # Return only the derivatives with the order of the last dimension - # 0 or 1. Higher order derivatives can be reduced down to these. 
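
The analytic shortcut being dropped in this hunk encodes the reduction idea
directly: for a solution of the Laplace equation, every derivative whose
last-dimension order is 2 or more can be rewritten through u_yy = -u_xx (for
Helmholtz, through u_yy = -u_xx - k**2 u), so only multi-indices ending in 0
or 1 need to be stored. A quick sympy sanity check of that identity on a
concrete harmonic polynomial, independent of sumpy:

    import sympy as sp

    x, y = sp.symbols("x y", real=True)
    u = x**5 - 10*x**3*y**2 + 5*x*y**4       # Re((x + I*y)**5), harmonic

    # u satisfies the Laplace equation ...
    assert sp.simplify(sp.diff(u, x, 2) + sp.diff(u, y, 2)) == 0
    # ... so u_yy = -u_xx, and iterating gives u_yyyy = u_xxxx
    print(sp.simplify(sp.diff(u, y, 2) + sp.diff(u, x, 2)))   # 0
    print(sp.simplify(sp.diff(u, y, 4) - sp.diff(u, x, 4)))   # 0
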
- if mi[-1] < 2: - idx.append(i) - return idx - class StokesExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): diff --git a/sumpy/tools.py b/sumpy/tools.py index 7f09fb70..38a021d4 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -768,4 +768,30 @@ def nth_root_assume_positive(expr, n): else: return expr ** (sym.Integer(1)/n) + +def find_linear_independent_row(nullspace): + """ + This method does elementary row operations to figure out the first row + which is linearly dependent on the previous rows. Partial pivoting is not done + to find the row with the lowest degree. + """ + ncols = nullspace.shape[1] + nrows = min(nullspace.shape[0], ncols+1) + augment = np.eye(nrows, nrows, dtype=nullspace.dtype) + mat = np.hstack((nullspace[:nrows, :], augment)) + for i in range(nrows): + for j in range(ncols): + if mat[i, j] != 0: + col = j + break + else: + pde_dict = {} + for col in range(ncols, ncols+nrows): + if mat[i, col] != 0: + pde_dict[col-ncols] = mat[i, col] + return pde_dict + for j in range(i+1, nrows): + mat[j, :] = mat[j, :]*mat[i, col] - mat[i, :]*mat[j, col] + return {} + # vim: fdm=marker -- GitLab From 49721d12b0faa489f7fea0a31b531ab2d46744fd Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 24 Jun 2019 20:12:08 +0200 Subject: [PATCH 58/68] Remove deriv multiplier --- sumpy/expansion/__init__.py | 9 +++------ sumpy/expansion/pde_utils.py | 37 +----------------------------------- sumpy/tools.py | 12 ------------ 3 files changed, 4 insertions(+), 54 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 384fe64a..60f6eb5b 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -30,7 +30,7 @@ from pytools import memoize_method import sumpy.symbolic as sym from collections import defaultdict from sumpy.tools import add_mi, find_linear_independent_row, CoeffIdentifier -from .pde_utils import (make_pde_syms, process_pde, laplacian, div, grad, +from .pde_utils import (make_pde_syms, laplacian, div, grad, PDE) __doc__ = """ @@ -485,7 +485,6 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): from sumpy.tools import nullspace pdes, iexpr, nexpr = self.get_pdes() - pdes, multiplier = process_pde(pdes) if nexpr == 1: return pdes @@ -523,11 +522,9 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): indep_row = find_linear_independent_row(n) if len(indep_row) > 0: pde_dict = {} - min_order = min(sum(mis[k]) for k in indep_row.keys()) + mult = indep_row[max(indep_row.keys())] for k, v in indep_row.items(): - mi = mis[k] - mult = multiplier**(sum(mi)-min_order) - pde_dict[CoeffIdentifier(mi, 0)] = v * mult + pde_dict[CoeffIdentifier(mis[k], 0)] = v / mult plog.done() return PDE(self.dim, pde_dict) diff --git a/sumpy/expansion/pde_utils.py b/sumpy/expansion/pde_utils.py index 23152e9a..34c669ac 100644 --- a/sumpy/expansion/pde_utils.py +++ b/sumpy/expansion/pde_utils.py @@ -23,8 +23,7 @@ THE SOFTWARE. """ from collections import defaultdict -from sumpy.tools import CoeffIdentifier, add_mi, nth_root_assume_positive -import sumpy.symbolic as sym +from sumpy.tools import CoeffIdentifier, add_mi class PDE(object): @@ -152,40 +151,6 @@ def div(pde): return PDE(pde.dim, dict(result)) -def process_pde(pde): - """ - Process a PDE object to return a PDE and a multiplier such that - the sum of multiplier ** order * derivative * coefficient gives the - original PDE `pde`. 
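
find_linear_independent_row, added to sumpy/tools.py in the previous patch,
can be exercised on its own: it forward-eliminates without pivoting and, as
soon as a row is annihilated, reads the dependency coefficients off the
identity-augmented columns. A self-contained re-implementation (the function
name here is made up for illustration) together with a small check:

    import numpy as np

    def find_first_dependent_row(mat_in):
        ncols = mat_in.shape[1]
        nrows = min(mat_in.shape[0], ncols + 1)
        # augment with the identity so the elimination history is recorded
        mat = np.hstack((mat_in[:nrows, :].astype(float), np.eye(nrows)))
        for i in range(nrows):
            for j in range(ncols):
                if mat[i, j] != 0:
                    col = j
                    break
            else:
                # row i vanished: the augmented part holds the vanishing
                # combination of the original rows (not normalized)
                return {c - ncols: mat[i, c]
                        for c in range(ncols, ncols + nrows)
                        if mat[i, c] != 0}
            for j in range(i + 1, nrows):
                mat[j, :] = mat[j, :]*mat[i, col] - mat[i, :]*mat[j, col]
        return {}

    rows = np.array([[1., 0., 0.],
                     [0., 1., 0.],
                     [2., -1., 0.]])                 # row 2 == 2*row 0 - row 1
    dep = find_first_dependent_row(rows)
    print(dep)                                       # {0: -2.0, 1: 1.0, 2: 1.0}
    print(sum(v*rows[k] for k, v in dep.items()))    # [0. 0. 0.]

get_single_pde normalizes such a dictionary and maps its keys back to
derivative multi-indices, which is how it recovers a scalar PDE for one
component from the nullspace of the full system.
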
- """ - multiplier = None - for eq in pde.eqs: - for ident1, val1 in eq.items(): - for ident2, val2 in eq.items(): - s1 = sum(ident1.mi) - s2 = sum(ident2.mi) - if s1 == s2: - continue - m = nth_root_assume_positive(val1/val2, s2 - s1) - if multiplier is None and not isinstance(m, (int, sym.Integer)): - multiplier = m - - if multiplier is None: - return pde, 1 - eqs = [] - for eq in pde.eqs: - new_eq = dict() - for i, (k, v) in enumerate(eq.items()): - new_eq[k] = v * multiplier**sum(k.mi) - if i == 0: - val = new_eq[k] - new_eq[k] /= sym.sympify(val) - if isinstance(new_eq[k], sym.Integer): - new_eq[k] = int(new_eq[k]) - eqs.append(new_eq) - return PDE(pde.dim, *eqs), multiplier - - def make_pde_syms(dim, nexprs): """ Returns a list of expressions of size `nexprs` to create a PDE diff --git a/sumpy/tools.py b/sumpy/tools.py index 38a021d4..345b176f 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -757,18 +757,6 @@ def solve_symbolic(A, b): # noqa: N803 CoeffIdentifier = namedtuple('CoeffIdentifier', ['mi', 'iexpr']) -def nth_root_assume_positive(expr, n): - """ - Get the nth root of a symbolic expression assuming that - the symbols are positive. - """ - expr = sym.sympify(expr) - if expr.is_Pow: - return expr.args[0] ** (expr.args[1] / sym.Integer(n)) - else: - return expr ** (sym.Integer(1)/n) - - def find_linear_independent_row(nullspace): """ This method does elementary row operations to figure out the first row -- GitLab From 3ee9624c7b90b532708fb11ef08af1f361cfc47c Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 25 Jun 2019 17:19:31 +0200 Subject: [PATCH 59/68] Get rid of M = N * N_top^-1 and use algorithm in new proof --- sumpy/expansion/__init__.py | 93 ++++++++++++++++--------------------- 1 file changed, 40 insertions(+), 53 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 60f6eb5b..5e925f44 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -24,7 +24,6 @@ THE SOFTWARE. from six.moves import range import six -import numpy as np import logging from pytools import memoize_method import sumpy.symbolic as sym @@ -393,73 +392,61 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): @memoize_method def get_stored_ids_and_coeff_mat(self): from six import iteritems - from sumpy.tools import nullspace, solve_symbolic from pytools import ProcessLogger plog = ProcessLogger(logger, "compute PDE for Taylor coefficients") pdes, iexpr, nexpr = self.get_pdes() - pde_mat = [] mis = self.get_full_coefficient_identifiers() coeff_ident_enumerate_dict = dict((tuple(mi), i) for (i, mi) in enumerate(mis)) - offset = len(mis) - - for mi in mis: - for pde_dict in pdes.eqs: - eq = [0]*(len(mis)*nexpr) - for ident, coeff in iteritems(pde_dict): - c = tuple(add_mi(ident.mi, mi)) - if c not in coeff_ident_enumerate_dict: - break - eq[offset*ident.iexpr + coeff_ident_enumerate_dict[c]] = coeff - else: - pde_mat.append(eq) - - if len(pde_mat) > 0: - r""" - Find a matrix :math:`s` such that :math:`K = S^T K_{[r]}` - where :math:`r` is the indices of the reduced coefficients and - :math:`K` is a column vector of coefficients. Let :math:`P` be the - PDE matrix, i.e. the matrix obtained by applying the PDE - as an identity to each of the Taylor coefficients. - (Realize that those, as functions of space, each still satisfy the PDE.) - As a result, if :math:`\mathbf\alpha` is a vector of Taylor coefficients, - one would expect :math:`P\mathbf\alpha = \mathbf 0`. - Further, let :math:`N` be the nullspace of :math:`P`. 
- Then, :math:`S^T = N * N_{[r, :]}^{-1}` which implies, - :math:`S = N_{[r, :]}^{-T} N^T = N_{[r, :]}^{-T} N^T`. - """ - n = nullspace(pde_mat) - # Move the rows corresponding to this expression to the front - rearrange = list(range(offset*iexpr, offset*(iexpr+1))) - for i in range(nexpr*offset): - if i < offset*iexpr or i >= offset*(iexpr+1): - rearrange.append(i) - n = n[rearrange, :] + pde = self.get_single_pde() + assert len(pde.eqs) == 1 + pde_dict = pde.eqs[0] + max_mi_idx = max(coeff_ident_enumerate_dict[ident.mi] for + ident in pde_dict.keys()) + max_mi = mis[max_mi_idx] + max_mi_coeff = pde_dict[CoeffIdentifier(max_mi, 0)] + max_mi_mult = -1/sym.sympify(max_mi_coeff) - # Get the indices of the reduced coefficients - idx_all_exprs = self.get_reduced_coeffs(n) + def is_stored(mi): + """ + A multi_index mi is not stored if mi >= max_mi + """ + return any(mi[d] < max_mi[d] for d in range(self.dim)) - s = solve_symbolic(n.T[:, idx_all_exprs], n.T) - # Filter out coefficients belonging to this expression - indices = list(filter(lambda x: x < offset, idx_all_exprs)) - s = s[:len(indices), :offset] + stored_identifiers = [mi for mi in mis if is_stored(mi)] + stored_ident_enumerate_dict = dict((tuple(mi), i) for + (i, mi) in enumerate(stored_identifiers)) - stored_identifiers = [mis[i] for i in indices] - else: - s = np.eye(len(mis)) - stored_identifiers = mis - idx_all_exprs = list(range(len(mis))) + coeff_matrix_dict = defaultdict(lambda: defaultdict(lambda: 0)) + reconstruct_matrix = defaultdict(list) + for i, mi in enumerate(mis): + if is_stored(mi): + coeff_matrix_dict[i][stored_ident_enumerate_dict[mi]] = 1 + continue + diff = [mi[d] - max_mi[d] for d in range(self.dim)] + for other_mi, coeff in iteritems(pde_dict): + j = coeff_ident_enumerate_dict[add_mi(other_mi.mi, diff)] + if i == j: + continue + # PDE might not have max_mi_coeff = -1, divide by -max_mi_coeff + # to get a relation of the form, u_zz = - u_xx - u_yy for Laplace 3D. + reconstruct_matrix[i].append((j, coeff*max_mi_mult)) + # j can be a stored or a non-stored multi-index + # Look at the coeff_matrix of j to get the j as a linear combination + # of stored coefficients. + for dep, other_coeff in iteritems(coeff_matrix_dict[j]): + coeff_matrix_dict[i][dep] += other_coeff*coeff*max_mi_mult # coeff_matrix is a dictionary of lists. Each key in the dictionary # is a row number and the values are pairs of column number and value. 
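
To see what this direct construction produces, here is a self-contained sketch
specialized to the 2D Laplace equation, i.e. the PDE dictionary
{(2, 0): 1, (0, 2): 1} with max_mi taken as (0, 2), so that the recurrence
reads u_mi = -u_{mi + (2, -2)}. The multi-index ordering and the helper names
are chosen for the illustration only and do not claim to match sumpy's
internal ordering:

    from collections import defaultdict

    order = 4
    pde = {(2, 0): 1, (0, 2): 1}
    max_mi = (0, 2)

    # order the multi-indices so that each recurrence dependency (which has
    # lower y-order) is processed before the coefficient that needs it
    mis = sorted(((i, j)
                  for i in range(order + 1)
                  for j in range(order + 1) if i + j <= order),
                 key=lambda mi: (sum(mi), mi[1]))

    def is_stored(mi):
        # a coefficient is dropped once mi >= max_mi componentwise
        return any(mi[d] < max_mi[d] for d in range(2))

    stored = [mi for mi in mis if is_stored(mi)]
    stored_idx = {mi: i for i, mi in enumerate(stored)}

    # row (full multi-index) -> {stored column: coefficient}
    coeff = defaultdict(lambda: defaultdict(lambda: 0))
    for mi in mis:
        if is_stored(mi):
            coeff[mi][stored_idx[mi]] = 1
            continue
        diff = (mi[0] - max_mi[0], mi[1] - max_mi[1])
        for other_mi, c in pde.items():
            src = (other_mi[0] + diff[0], other_mi[1] + diff[1])
            if src == mi:
                continue
            # src is already expressed in terms of stored coefficients
            for col, c2 in coeff[src].items():
                coeff[mi][col] += -c * c2

    print(len(mis), "->", len(stored))       # 15 -> 9 coefficients at order 4
    print({stored[col]: v for col, v in coeff[(0, 4)].items()})
    # {(4, 0): 1}: u_yyyy is reconstructed as u_xxxx, consistent with Laplace

For order 4 this cuts the 15 Taylor coefficients down to 9 stored ones, the
familiar 2*order + 1 count for the 2D Laplace kernel.
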
coeff_matrix = defaultdict(list) - for i in range(s.shape[0]): - for j in range(s.shape[1]): - if s[i, j] != 0: - coeff_matrix[j].append((i, s[i, j])) + for row, deps in iteritems(coeff_matrix_dict): + for col, val in iteritems(deps): + if val != 0: + coeff_matrix[row].append((col, val)) plog.done() @@ -467,7 +454,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): .format(orig=len(self.get_full_coefficient_identifiers()), red=len(stored_identifiers))) - return stored_identifiers, coeff_matrix, pde_mat + return stored_identifiers, coeff_matrix, None def get_pdes(self): r""" -- GitLab From e937212a179a2718e296b5616595acf599beb969 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 26 Jun 2019 08:26:17 +0200 Subject: [PATCH 60/68] Use faster spmv --- sumpy/expansion/__init__.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 5e925f44..83daa8bb 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -266,16 +266,18 @@ def _spmv(spmat, x, sparse_vectors): # }}} -def _reconstruct(reconstruct_matrix, stored): +def _fast_spmv(reconstruct_matrix, stored, sac): res = [0] * len(reconstruct_matrix) stored_idx = 0 for row, deps in reconstruct_matrix: if len(deps) == 0: res[row] = stored[stored_idx] stored_idx += 1 - continue - for k, v in deps.items(): - res[row] += res[k] * v + else: + for k, v in deps.items(): + res[row] += res[k] * v + new_sym = sym.Symbol(sac.assign_unique("expr", res[row])) + res[row] = new_sym return res @@ -300,18 +302,17 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): return self.stored_identifiers def get_full_kernel_derivatives_from_stored(self, stored_kernel_derivatives, - rscale): + rscale, sac=None): coeff_matrix, reconstruct_matrix, use_reconstruct = \ self.get_coefficient_matrix(rscale) - if not use_reconstruct: + if not use_reconstruct or sac is None: return _spmv(coeff_matrix, stored_kernel_derivatives, sparse_vectors=False) - return _reconstruct(reconstruct_matrix, stored_kernel_derivatives) + return _fast_spmv(reconstruct_matrix, stored_kernel_derivatives, sac) def get_stored_mpole_coefficients_from_full(self, full_mpole_coefficients, rscale): # = M^T x, where M = coeff matrix - coeff_matrix, _, _ = self.get_coefficient_matrix(rscale) result = [0] * len(self.stored_identifiers) for row, coeff in enumerate(full_mpole_coefficients): @@ -369,7 +370,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): reconstruct_matrix_with_rscale = [] count_nonzero_reconstruct = 0 - for row, deps in reconstruct_matrix: + for row, deps in six.iteritems(reconstruct_matrix): # For eg: (u_xxx / rscale**3) = (u_yy / rscale**2) * coeff1 + # (u_xx / rscale**2) * coeff2 # is converted to u_xxx = u_yy * (rscale * coeff1) + @@ -377,7 +378,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): row_rscale = sum(full_coeffs[row]) matrix_row = [] deps_with_rscale = {} - for k, coeff in deps.items(): + for k, coeff in deps: diff = row_rscale - sum(full_coeffs[k]) mult = rscale**diff deps_with_rscale[k] = coeff * mult @@ -454,7 +455,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): .format(orig=len(self.get_full_coefficient_identifiers()), red=len(stored_identifiers))) - return stored_identifiers, coeff_matrix, None + return stored_identifiers, coeff_matrix, reconstruct_matrix def get_pdes(self): r""" -- GitLab From 93b73161ecf01c06183169c4cb74efb360b1c6e1 Mon Sep 
17 00:00:00 2001 From: Isuru Fernando Date: Wed, 26 Jun 2019 08:54:25 +0200 Subject: [PATCH 61/68] add fast spmv transpose as well --- sumpy/expansion/__init__.py | 68 +++++++++++++++++++++++-------------- 1 file changed, 43 insertions(+), 25 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 83daa8bb..37ada3c0 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -266,19 +266,32 @@ def _spmv(spmat, x, sparse_vectors): # }}} -def _fast_spmv(reconstruct_matrix, stored, sac): - res = [0] * len(reconstruct_matrix) - stored_idx = 0 - for row, deps in reconstruct_matrix: - if len(deps) == 0: - res[row] = stored[stored_idx] - stored_idx += 1 - else: - for k, v in deps.items(): - res[row] += res[k] * v - new_sym = sym.Symbol(sac.assign_unique("expr", res[row])) - res[row] = new_sym - return res +def _fast_spmv(reconstruct_matrix, vec, sac, transpose=False): + if not transpose: + res = [0] * len(reconstruct_matrix) + stored_idx = 0 + for row, deps in enumerate(reconstruct_matrix): + if len(deps) == 0: + res[row] = vec[stored_idx] + stored_idx += 1 + else: + for k, v in deps: + res[row] += res[k] * v + new_sym = sym.Symbol(sac.assign_unique("expr", res[row])) + res[row] = new_sym + return res + else: + res = [] + expr_all = vec.copy() + for row, deps in reversed(enumerate(reconstruct_matrix)): + if len(deps) == 0: + res.append(expr_all[row]) + continue + new_sym = sym.Symbol(sac.assign_unique("expr", expr_all[row])) + for k, v in deps: + expr_all[k] += new_sym * v + res.reverse() + return res class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): @@ -311,14 +324,18 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): return _fast_spmv(reconstruct_matrix, stored_kernel_derivatives, sac) def get_stored_mpole_coefficients_from_full(self, full_mpole_coefficients, - rscale): + rscale, sac=None): # = M^T x, where M = coeff matrix - coeff_matrix, _, _ = self.get_coefficient_matrix(rscale) - result = [0] * len(self.stored_identifiers) - for row, coeff in enumerate(full_mpole_coefficients): - for col, val in coeff_matrix[row]: - result[col] += coeff * val - return result + coeff_matrix, reconstruct_matrix, use_reconstruct = \ + self.get_coefficient_matrix(rscale) + if not use_reconstruct or sac is None: + result = [0] * len(self.stored_identifiers) + for row, coeff in enumerate(full_mpole_coefficients): + for col, val in coeff_matrix[row]: + result[col] += coeff * val + return result + return _fast_spmv(reconstruct_matrix, full_mpole_coefficients, sac, + tranpose=True) @property def stored_identifiers(self): @@ -370,20 +387,20 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): reconstruct_matrix_with_rscale = [] count_nonzero_reconstruct = 0 - for row, deps in six.iteritems(reconstruct_matrix): + for row, deps in enumerate(reconstruct_matrix): # For eg: (u_xxx / rscale**3) = (u_yy / rscale**2) * coeff1 + # (u_xx / rscale**2) * coeff2 # is converted to u_xxx = u_yy * (rscale * coeff1) + # u_xx * (rscale * coeff2) row_rscale = sum(full_coeffs[row]) matrix_row = [] - deps_with_rscale = {} + deps_with_rscale = [] for k, coeff in deps: diff = row_rscale - sum(full_coeffs[k]) mult = rscale**diff - deps_with_rscale[k] = coeff * mult + deps_with_rscale.append((k, coeff * mult)) count_nonzero_reconstruct += len(deps) - reconstruct_matrix_with_rscale.append((row, deps_with_rscale)) + reconstruct_matrix_with_rscale.append(deps_with_rscale) use_reconstruct = count_nonzero_reconstruct < count_nonzero_coeff 
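
Stripped of the assignment collection (which only gives intermediate results
names so each one is emitted once), _fast_spmv is a forward recurrence, and
its transpose=True branch is the matching adjoint scatter. A purely numeric
sketch of both, on toy data rather than sumpy expressions, with a check that
the two really are transposes of each other:

    def reconstruct(recon, stored):
        # rows with no dependencies consume the next stored value; every
        # other row is rebuilt from rows computed earlier
        res = [0]*len(recon)
        next_stored = 0
        for row, deps in enumerate(recon):
            if not deps:
                res[row] = stored[next_stored]
                next_stored += 1
            else:
                res[row] = sum(res[j]*c for j, c in deps)
        return res

    def reconstruct_t(recon, vec):
        # adjoint: walk the rows backwards and scatter each entry onto the
        # rows it was built from; the dependency-free rows collect M^T vec
        work = list(vec)
        out = []
        for row in reversed(range(len(recon))):
            if not recon[row]:
                out.append(work[row])
            else:
                for j, c in recon[row]:
                    work[j] += work[row]*c
        out.reverse()
        return out

    def dot(u, v):
        return sum(ui*vi for ui, vi in zip(u, v))

    # toy recurrence: entries 0, 1 stored; entry 2 = -entry 0;
    # entry 3 = 2*entry 1 - entry 2
    recon = [[], [], [(0, -1.0)], [(1, 2.0), (2, -1.0)]]
    s = [10.0, 3.0]
    y = [1.0, 2.0, 3.0, 4.0]
    print(reconstruct(recon, s))             # [10.0, 3.0, -10.0, 16.0]
    print(dot(reconstruct(recon, s), y),
          dot(s, reconstruct_t(recon, y)))   # 50.0 50.0
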
@@ -422,8 +439,9 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): (i, mi) in enumerate(stored_identifiers)) coeff_matrix_dict = defaultdict(lambda: defaultdict(lambda: 0)) - reconstruct_matrix = defaultdict(list) + reconstruct_matrix = [] for i, mi in enumerate(mis): + reconstruct_matrix.append([]) if is_stored(mi): coeff_matrix_dict[i][stored_ident_enumerate_dict[mi]] = 1 continue -- GitLab From d0d365ca48128f5f48bd97cccd71b04bcc690e99 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 28 Jun 2019 17:38:58 -0400 Subject: [PATCH 62/68] Fix test error --- sumpy/expansion/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 37ada3c0..2e741eec 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -422,8 +422,8 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): pde = self.get_single_pde() assert len(pde.eqs) == 1 pde_dict = pde.eqs[0] - max_mi_idx = max(coeff_ident_enumerate_dict[ident.mi] for - ident in pde_dict.keys()) + max_mi_idx = max(coeff_ident_enumerate_dict[ident.mi] for ident in + pde_dict.keys() if ident.mi in coeff_ident_enumerate_dict) max_mi = mis[max_mi_idx] max_mi_coeff = pde_dict[CoeffIdentifier(max_mi, 0)] max_mi_mult = -1/sym.sympify(max_mi_coeff) -- GitLab From 91362ee1053f592f4b23cdcdb155fc39bc588828 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 28 Jun 2019 18:19:16 -0400 Subject: [PATCH 63/68] Use fast spmv in p2m --- sumpy/expansion/__init__.py | 29 ++++++++++++++++++++--------- sumpy/expansion/local.py | 6 +++--- sumpy/expansion/multipole.py | 6 +++--- sumpy/p2e.py | 6 +++--- 4 files changed, 29 insertions(+), 18 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 2e741eec..baf09da4 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -115,12 +115,14 @@ class ExpansionBase(object): """ raise NotImplementedError - def coefficients_from_source(self, avec, bvec, rscale): + def coefficients_from_source(self, avec, bvec, rscale, sac=None): """Form an expansion from a source point. :arg avec: vector from source to center. :arg bvec: vector from center to target. Not usually necessary, except for line-Taylor expansion. + :arg sac: a symbolic assignment collection where temporary + expressions are stored. :returns: a list of :mod:`sympy` expressions representing the coefficients of the expansion. 
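
The reason for threading sac through coefficients_from_source (and, later in
the series, through evaluate) is that the recurrences above can hand each
intermediate result to the assignment collection and refer to it by name
afterwards, instead of inlining the same subexpression into every coefficient
that uses it. A minimal mock of the pattern; MockSAC is a stand-in invented
for this sketch, and only the assign_unique call shape is taken from the
surrounding diffs:

    import sympy as sp

    class MockSAC(object):
        def __init__(self):
            self.assignments = {}
            self.counter = 0

        def assign_unique(self, name_base, expr):
            # store the expression once and hand back a fresh name for it
            name = "%s%d" % (name_base, self.counter)
            self.counter += 1
            self.assignments[name] = expr
            return name

    def reconstruct_with_sac(recon, stored, sac):
        res = [0]*len(recon)
        next_stored = 0
        for row, deps in enumerate(recon):
            if not deps:
                res[row] = stored[next_stored]
                next_stored += 1
            else:
                expr = sum(res[j]*c for j, c in deps)
                # emit the intermediate once, then only its name circulates
                res[row] = sp.Symbol(sac.assign_unique("expr", expr))
        return res

    a, b = sp.symbols("a b")
    sac = MockSAC()
    recon = [[], [], [(0, -1)], [(1, 2), (2, -1)]]
    print(reconstruct_with_sac(recon, [a, b], sac))   # [a, b, expr0, expr1]
    print(sac.assignments)        # {'expr0': -a, 'expr1': 2*b - expr0}
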
@@ -174,11 +176,11 @@ class ExpansionTermsWrangler(object): raise NotImplementedError def get_full_kernel_derivatives_from_stored(self, stored_kernel_derivatives, - rscale): + rscale, sac=None): raise NotImplementedError def get_stored_mpole_coefficients_from_full(self, full_mpole_coefficients, - rscale): + rscale, sac=None): raise NotImplementedError @memoize_method @@ -224,7 +226,7 @@ class FullExpansionTermsWrangler(ExpansionTermsWrangler): ExpansionTermsWrangler.get_full_coefficient_identifiers) def get_full_kernel_derivatives_from_stored(self, stored_kernel_derivatives, - rscale): + rscale, sac=None): return stored_kernel_derivatives get_stored_mpole_coefficients_from_full = ( @@ -283,7 +285,7 @@ def _fast_spmv(reconstruct_matrix, vec, sac, transpose=False): else: res = [] expr_all = vec.copy() - for row, deps in reversed(enumerate(reconstruct_matrix)): + for row, deps in reversed(list(enumerate(reconstruct_matrix))): if len(deps) == 0: res.append(expr_all[row]) continue @@ -335,7 +337,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): result[col] += coeff * val return result return _fast_spmv(reconstruct_matrix, full_mpole_coefficients, sac, - tranpose=True) + transpose=True) @property def stored_identifiers(self): @@ -367,7 +369,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): full_coeffs = self.get_full_coefficient_identifiers() matrix_rows = [] - count_nonzero_coeff = 0 + count_nonzero_coeff = -len(stored_identifiers) for irow, row in six.iteritems(coeff_matrix): # For eg: (u_xxx / rscale**3) = (u_yy / rscale**2) * coeff1 + # (u_xx / rscale**2) * coeff2 @@ -422,8 +424,17 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): pde = self.get_single_pde() assert len(pde.eqs) == 1 pde_dict = pde.eqs[0] - max_mi_idx = max(coeff_ident_enumerate_dict[ident.mi] for ident in - pde_dict.keys() if ident.mi in coeff_ident_enumerate_dict) + for ident in pde_dict.keys(): + if ident.mi not in coeff_ident_enumerate_dict: + coeff_matrix = defaultdict(list) + reconstruct_matrix = [] + for i in range(len(mis)): + coeff_matrix[i] = [(i, 1)] + reconstruct_matrix.append([]) + return mis, coeff_matrix, reconstruct_matrix + + max_mi_idx = max(coeff_ident_enumerate_dict[ident.mi] for + ident in pde_dict.keys()) max_mi = mis[max_mi_idx] max_mi_coeff = pde_dict[CoeffIdentifier(max_mi, 0)] max_mi_mult = -1/sym.sympify(max_mi_coeff) diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index e2150bfe..526da89d 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -57,7 +57,7 @@ class LineTaylorLocalExpansion(LocalExpansionBase): def get_coefficient_identifiers(self): return list(range(self.order+1)) - def coefficients_from_source(self, avec, bvec, rscale): + def coefficients_from_source(self, avec, bvec, rscale, sac=None): # no point in heeding rscale here--just ignore it if bvec is None: raise RuntimeError("cannot use line-Taylor expansions in a setting " @@ -115,7 +115,7 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): Coefficients represent derivative values of the kernel. 
""" - def coefficients_from_source(self, avec, bvec, rscale): + def coefficients_from_source(self, avec, bvec, rscale, sac=None): from sumpy.tools import MiDerivativeTaker ppkernel = self.kernel.postprocess_at_source( self.kernel.get_expression(avec), avec) @@ -291,7 +291,7 @@ class _FourierBesselLocalExpansion(LocalExpansionBase): def get_coefficient_identifiers(self): return list(range(-self.order, self.order+1)) - def coefficients_from_source(self, avec, bvec, rscale): + def coefficients_from_source(self, avec, bvec, rscale, sac=None): if not self.use_rscale: rscale = 1 diff --git a/sumpy/expansion/multipole.py b/sumpy/expansion/multipole.py index 6f31164b..c602e356 100644 --- a/sumpy/expansion/multipole.py +++ b/sumpy/expansion/multipole.py @@ -54,7 +54,7 @@ class VolumeTaylorMultipoleExpansionBase(MultipoleExpansionBase): Coefficients represent the terms in front of the kernel derivatives. """ - def coefficients_from_source(self, avec, bvec, rscale): + def coefficients_from_source(self, avec, bvec, rscale, sac=None): from sumpy.kernel import DirectionalSourceDerivative kernel = self.kernel @@ -95,7 +95,7 @@ class VolumeTaylorMultipoleExpansionBase(MultipoleExpansionBase): for mi in self.get_full_coefficient_identifiers()] return ( self.expansion_terms_wrangler.get_stored_mpole_coefficients_from_full( - result, rscale)) + result, rscale, sac=sac)) def get_scaled_multipole(self, expr, bvec, rscale, nderivatives, nderivatives_for_scaling=None): @@ -242,7 +242,7 @@ class _HankelBased2DMultipoleExpansion(MultipoleExpansionBase): def get_coefficient_identifiers(self): return list(range(-self.order, self.order+1)) - def coefficients_from_source(self, avec, bvec, rscale): + def coefficients_from_source(self, avec, bvec, rscale, sac=None): if not self.use_rscale: rscale = 1 diff --git a/sumpy/p2e.py b/sumpy/p2e.py index daa7d93d..f5472fc4 100644 --- a/sumpy/p2e.py +++ b/sumpy/p2e.py @@ -86,9 +86,9 @@ class P2EBase(KernelCacheWrapper): sac = SymbolicAssignmentCollection() coeff_names = [ - sac.assign_unique("coeff%d" % i, coeff_i) - for i, coeff_i in enumerate( - self.expansion.coefficients_from_source(avec, None, rscale))] + sac.assign_unique("coeff%d" % i, coeff_i) + for i, coeff_i in enumerate( + self.expansion.coefficients_from_source(avec, None, rscale, sac))] sac.run_global_cse() -- GitLab From 9ba7f4872904dc327067c54fa5519c5f72c22ae3 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 28 Jun 2019 19:27:00 -0400 Subject: [PATCH 64/68] Fix copying list --- sumpy/expansion/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index baf09da4..aca5ac7d 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -284,7 +284,7 @@ def _fast_spmv(reconstruct_matrix, vec, sac, transpose=False): return res else: res = [] - expr_all = vec.copy() + expr_all = list(vec) for row, deps in reversed(list(enumerate(reconstruct_matrix))): if len(deps) == 0: res.append(expr_all[row]) -- GitLab From 7ac23bf59e64303d4d5dd10e5fb48468a7719d8a Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 5 Jul 2019 12:37:02 -0500 Subject: [PATCH 65/68] single_pde -> scalar_pde --- sumpy/expansion/__init__.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index aca5ac7d..160563c1 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -421,7 +421,7 @@ class 
LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): coeff_ident_enumerate_dict = dict((tuple(mi), i) for (i, mi) in enumerate(mis)) - pde = self.get_single_pde() + pde = self.get_scalar_pde() assert len(pde.eqs) == 1 pde_dict = pde.eqs[0] for ident in pde_dict.keys(): @@ -497,7 +497,10 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): raise NotImplementedError @memoize_method - def get_single_pde(self): + def get_scalar_pde(self): + r""" + Returns a scalar PDE corresponding to the component `iexpr`. + """ from six import iteritems from sumpy.tools import nullspace -- GitLab From 061fa4449187fec93a05b4b5a2d6d4af18201b1c Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Sun, 7 Jul 2019 14:12:54 -0500 Subject: [PATCH 66/68] Use fast spmv in L2P --- sumpy/e2p.py | 2 +- sumpy/expansion/__init__.py | 2 +- sumpy/expansion/local.py | 8 ++++---- sumpy/expansion/multipole.py | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/sumpy/e2p.py b/sumpy/e2p.py index 7b0072ad..408b7364 100644 --- a/sumpy/e2p.py +++ b/sumpy/e2p.py @@ -91,7 +91,7 @@ class E2PBase(KernelCacheWrapper): coeff_exprs = [sym.Symbol("coeff%d" % i) for i in range(len(self.expansion.get_coefficient_identifiers()))] - value = self.expansion.evaluate(coeff_exprs, bvec, rscale) + value = self.expansion.evaluate(coeff_exprs, bvec, rscale, sac=sac) result_names = [ sac.assign_unique("result_%d_p" % i, diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 160563c1..04880219 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -129,7 +129,7 @@ class ExpansionBase(object): """ raise NotImplementedError - def evaluate(self, coeffs, bvec, rscale): + def evaluate(self, coeffs, bvec, rscale, sac=None): """ :return: a :mod:`sympy` expression corresponding to the evaluated expansion with the coefficients diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index 526da89d..6b368a95 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -98,7 +98,7 @@ class LineTaylorLocalExpansion(LocalExpansionBase): .subs("tau", 0) for i in self.get_coefficient_identifiers()] - def evaluate(self, coeffs, bvec, rscale): + def evaluate(self, coeffs, bvec, rscale, sac=None): # no point in heeding rscale here--just ignore it from pytools import factorial return sym.Add(*( @@ -125,11 +125,11 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): taker.diff(mi) * rscale ** sum(mi) for mi in self.get_coefficient_identifiers()] - def evaluate(self, coeffs, bvec, rscale): + def evaluate(self, coeffs, bvec, rscale, sac=None): from sumpy.tools import mi_power, mi_factorial evaluated_coeffs = ( self.expansion_terms_wrangler.get_full_kernel_derivatives_from_stored( - coeffs, rscale)) + coeffs, rscale, sac=sac)) bvec = bvec * rscale**-1 result = sum( coeff @@ -309,7 +309,7 @@ class _FourierBesselLocalExpansion(LocalExpansionBase): * sym.exp(sym.I * l * source_angle_rel_center), avec) for l in self.get_coefficient_identifiers()] - def evaluate(self, coeffs, bvec, rscale): + def evaluate(self, coeffs, bvec, rscale, sac=None): if not self.use_rscale: rscale = 1 diff --git a/sumpy/expansion/multipole.py b/sumpy/expansion/multipole.py index c602e356..c4dcb217 100644 --- a/sumpy/expansion/multipole.py +++ b/sumpy/expansion/multipole.py @@ -113,7 +113,7 @@ class VolumeTaylorMultipoleExpansionBase(MultipoleExpansionBase): else: return (rscale**nderivatives_for_scaling * expr) - def evaluate(self, coeffs, bvec, rscale): + def evaluate(self, coeffs, bvec, 
rscale, sac=None): if not self.use_rscale: rscale = 1 @@ -262,7 +262,7 @@ class _HankelBased2DMultipoleExpansion(MultipoleExpansionBase): avec) for l in self.get_coefficient_identifiers()] - def evaluate(self, coeffs, bvec, rscale): + def evaluate(self, coeffs, bvec, rscale, sac=None): if not self.use_rscale: rscale = 1 -- GitLab From cd76b90c6e57d7a7758a6753561e0489e29440cc Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 11 Feb 2020 02:49:45 -0600 Subject: [PATCH 67/68] Fix tests --- sumpy/expansion/local.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index d24dd5a1..d82d5124 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -125,7 +125,7 @@ class VolumeTaylorLocalExpansionBase(LocalExpansionBase): taker.diff(mi) * rscale ** sum(mi) for mi in self.get_coefficient_identifiers()] - def evaluate(self, coeffs, bvec, rscale): + def evaluate(self, coeffs, bvec, rscale, sac=None): from pytools import factorial evaluated_coeffs = ( -- GitLab From 3806f72e0aab42fa678b4722338b4155e51403d6 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Thu, 13 Feb 2020 17:54:30 -0600 Subject: [PATCH 68/68] Remove systems of PDEs --- examples/check_m2m.py | 247 ----------------------------------- sumpy/expansion/__init__.py | 166 ++++++++--------------- sumpy/expansion/local.py | 9 +- sumpy/expansion/multipole.py | 9 +- sumpy/expansion/pde_utils.py | 146 +++++---------------- sumpy/tools.py | 30 ----- test/test_kernels.py | 8 +- 7 files changed, 103 insertions(+), 512 deletions(-) delete mode 100644 examples/check_m2m.py diff --git a/examples/check_m2m.py b/examples/check_m2m.py deleted file mode 100644 index ea09cee0..00000000 --- a/examples/check_m2m.py +++ /dev/null @@ -1,247 +0,0 @@ -from __future__ import division, absolute_import, print_function - -__copyright__ = "Copyright (C) 2012 Andreas Kloeckner" - -__license__ = """ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
-""" -import numpy as np -import numpy.linalg as la - -import pyopencl as cl -from pyopencl.tools import ( # noqa - pytest_generate_tests_for_pyopencl as pytest_generate_tests) - -from sumpy.expansion.multipole import ( - VolumeTaylorMultipoleExpansion, - LaplaceConformingVolumeTaylorMultipoleExpansion, - HelmholtzConformingVolumeTaylorMultipoleExpansion) -from sumpy.expansion.local import ( - VolumeTaylorLocalExpansion, - LaplaceConformingVolumeTaylorLocalExpansion, - HelmholtzConformingVolumeTaylorLocalExpansion) -from sumpy.kernel import LaplaceKernel, HelmholtzKernel - -import logging -logger = logging.getLogger(__name__) - -try: - import faulthandler -except ImportError: - pass -else: - faulthandler.enable() - -import matplotlib.pyplot as plt - - -def test_m2m(ctx_getter, order): - logging.basicConfig(level=logging.INFO) - - from sympy.core.cache import clear_cache - clear_cache() - - ctx = ctx_getter() - queue = cl.CommandQueue(ctx) - - np.random.seed(17) - - res = 200 - nsources = 150 - - knl = HelmholtzKernel(2) - out_kernels = [knl] - - extra_kwargs = {} - if isinstance(knl, HelmholtzKernel): - extra_kwargs["k"] = 0.1 - - # Just to make sure things also work away from the origin - origin = np.array([2, 1], np.float64) - strengths = np.ones(nsources, dtype=np.float64) * (1/nsources) - - from sumpy.visualization import FieldPlotter - - eval_offset = np.array([5.5, 0.0]) - - centers = (np.array( - [ - # box 0: particles, first mpole here - [0, 0], - - # box 1: second mpole here - np.array([-0.2, 0.1], np.float64), - - # box 2: first local here - eval_offset + np.array([0.3, -0.2], np.float64), - - # box 3: second local and eval here - eval_offset - ], - dtype=np.float64) + origin).T.copy() - - del eval_offset - - nboxes = centers.shape[-1] - - def eval_at(e2p, source_box_nr, rscale): - e2p_target_boxes = np.array([source_box_nr], dtype=np.int32) - - # These are indexed by global box numbers. 
- e2p_box_target_starts = np.array([0, 0, 0, 0], dtype=np.int32) - e2p_box_target_counts_nonchild = np.array([0, 0, 0, 0], - dtype=np.int32) - e2p_box_target_counts_nonchild[source_box_nr] = ntargets - - evt, (pot,) = e2p( - queue, - - src_expansions=mpoles, - src_base_ibox=0, - - target_boxes=e2p_target_boxes, - box_target_starts=e2p_box_target_starts, - box_target_counts_nonchild=e2p_box_target_counts_nonchild, - centers=centers, - targets=targets, - - rscale=rscale, - - out_host=True, **extra_kwargs - ) - - return pot - - if isinstance(knl, LaplaceKernel): - mpole_expn_classes = [LaplaceConformingVolumeTaylorMultipoleExpansion, - VolumeTaylorMultipoleExpansion] - local_expn_classes = [LaplaceConformingVolumeTaylorLocalExpansion, - VolumeTaylorLocalExpansion] - elif isinstance(knl, HelmholtzKernel): - mpole_expn_classes = [HelmholtzConformingVolumeTaylorMultipoleExpansion, - VolumeTaylorMultipoleExpansion] - local_expn_classes = [HelmholtzConformingVolumeTaylorLocalExpansion, - VolumeTaylorLocalExpansion] - - h_values = 2.0**np.arange(-3, 7) - distances = [] - errs = [] - for h in h_values: - sources = (h*(-0.5+np.random.rand(knl.dim, nsources).astype(np.float64)) - + origin[:, np.newaxis]) - distances.append(np.max(np.abs(sources - centers[:, 1].reshape(2, 1)))) - m2m_vals = [] - for i, (mpole_expn_class, local_expn_class) in \ - enumerate(zip(mpole_expn_classes, local_expn_classes)): - m_expn = mpole_expn_class(knl, order=order) - - from sumpy import P2EFromSingleBox, E2PFromSingleBox, P2P, E2EFromCSR - p2m = P2EFromSingleBox(ctx, m_expn) - m2m = E2EFromCSR(ctx, m_expn, m_expn) - m2p = E2PFromSingleBox(ctx, m_expn, out_kernels) - p2p = P2P(ctx, out_kernels, exclude_self=False) - - fp = FieldPlotter(centers[:, -1], extent=0.1, npoints=res) - targets = fp.points - - m1_rscale = 0.5 - m2_rscale = 0.25 - - # {{{ apply P2M - - p2m_source_boxes = np.array([0], dtype=np.int32) - - # These are indexed by global box numbers. 
- p2m_box_source_starts = np.array([0, 0, 0, 0], dtype=np.int32) - p2m_box_source_counts_nonchild = np.array([nsources, 0, 0, 0], - dtype=np.int32) - - evt, (mpoles,) = p2m(queue, - source_boxes=p2m_source_boxes, - box_source_starts=p2m_box_source_starts, - box_source_counts_nonchild=p2m_box_source_counts_nonchild, - centers=centers, - sources=sources, - strengths=strengths, - nboxes=nboxes, - rscale=m1_rscale, - - tgt_base_ibox=0, - out_host=True, **extra_kwargs) - - # }}} - - ntargets = targets.shape[-1] - - # {{{ apply M2M - - m2m_target_boxes = np.array([1], dtype=np.int32) - m2m_src_box_starts = np.array([0, 1], dtype=np.int32) - m2m_src_box_lists = np.array([0], dtype=np.int32) - - evt, (mpoles,) = m2m(queue, - src_expansions=mpoles, - src_base_ibox=0, - tgt_base_ibox=0, - ntgt_level_boxes=mpoles.shape[0], - - target_boxes=m2m_target_boxes, - - src_box_starts=m2m_src_box_starts, - src_box_lists=m2m_src_box_lists, - centers=centers, - - src_rscale=m1_rscale, - tgt_rscale=m2_rscale, - - #flags="print_hl_cl", - out_host=True, **extra_kwargs) - - # }}} - - pot = eval_at(m2p, 1, m2_rscale) - - evt, (pot_direct,) = p2p( - queue, - targets, sources, (strengths,), - out_host=True, **extra_kwargs) - m2m_vals.append(pot) - - err = la.norm(m2m_vals[1] - m2m_vals[0])/la.norm(m2m_vals[1]) - print(err) - errs.append(abs(err)) - - p1 = 5 - distances = np.array(distances) - plt.loglog(distances, errs, label="order={}".format(order), marker='x') - plt.loglog(distances, distances**(order+1) / distances[p1]**(order+1) * errs[p1], - label="h**%.2f" % (order+1)) - plt.loglog(distances, distances**(order) / distances[p1]**(order) * errs[p1], - label="h**%.2f" % (order)) - - plt.xlabel("Distance between furthest source and center") - plt.ylabel("Error between reduced and full") - plt.legend() - plt.show() - - -if __name__ == "__main__": - test_m2m(cl.create_some_context, 8) - -# vim: fdm=marker diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py index 04880219..85c93850 100644 --- a/sumpy/expansion/__init__.py +++ b/sumpy/expansion/__init__.py @@ -28,9 +28,8 @@ import logging from pytools import memoize_method import sumpy.symbolic as sym from collections import defaultdict -from sumpy.tools import add_mi, find_linear_independent_row, CoeffIdentifier -from .pde_utils import (make_pde_syms, laplacian, div, grad, - PDE) +from sumpy.tools import add_mi +from .pde_utils import make_pde_sym, laplacian __doc__ = """ .. autoclass:: ExpansionBase @@ -299,7 +298,7 @@ def _fast_spmv(reconstruct_matrix, vec, sac, transpose=False): class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): """ .. automethod:: __init__ - .. automethod:: get_pdes + .. automethod:: get_pde .. 
automethod:: get_reduced_coeffs """ @@ -416,16 +415,13 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): from pytools import ProcessLogger plog = ProcessLogger(logger, "compute PDE for Taylor coefficients") - pdes, iexpr, nexpr = self.get_pdes() mis = self.get_full_coefficient_identifiers() coeff_ident_enumerate_dict = dict((tuple(mi), i) for (i, mi) in enumerate(mis)) - pde = self.get_scalar_pde() - assert len(pde.eqs) == 1 - pde_dict = pde.eqs[0] + pde_dict = self.get_pde().eq for ident in pde_dict.keys(): - if ident.mi not in coeff_ident_enumerate_dict: + if ident not in coeff_ident_enumerate_dict: coeff_matrix = defaultdict(list) reconstruct_matrix = [] for i in range(len(mis)): @@ -433,10 +429,10 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): reconstruct_matrix.append([]) return mis, coeff_matrix, reconstruct_matrix - max_mi_idx = max(coeff_ident_enumerate_dict[ident.mi] for + max_mi_idx = max(coeff_ident_enumerate_dict[ident] for ident in pde_dict.keys()) max_mi = mis[max_mi_idx] - max_mi_coeff = pde_dict[CoeffIdentifier(max_mi, 0)] + max_mi_coeff = pde_dict[max_mi] max_mi_mult = -1/sym.sympify(max_mi_coeff) def is_stored(mi): @@ -458,7 +454,7 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): continue diff = [mi[d] - max_mi[d] for d in range(self.dim)] for other_mi, coeff in iteritems(pde_dict): - j = coeff_ident_enumerate_dict[add_mi(other_mi.mi, diff)] + j = coeff_ident_enumerate_dict[add_mi(other_mi, diff)] if i == j: continue # PDE might not have max_mi_coeff = -1, divide by -max_mi_coeff @@ -486,71 +482,15 @@ class LinearPDEBasedExpansionTermsWrangler(ExpansionTermsWrangler): return stored_identifiers, coeff_matrix, reconstruct_matrix - def get_pdes(self): + def get_pde(self): r""" - Returns a list of PDEs, expression number, number of expressions. - A PDE is a dictionary of (ident, coeff) such that ident is a - namedtuple of (mi, iexpr) where mi is the multi-index of the - derivative, iexpr is the expression number + Returns a PDE. A PDE stores a dictionary of (mi, coeff) + where mi is the multi-index of the derivative and coeff is the + coefficient """ raise NotImplementedError - @memoize_method - def get_scalar_pde(self): - r""" - Returns a scalar PDE corresponding to the component `iexpr`. 
- """ - from six import iteritems - from sumpy.tools import nullspace - - pdes, iexpr, nexpr = self.get_pdes() - if nexpr == 1: - return pdes - - from pytools import ProcessLogger - plog = ProcessLogger(logger, "computing single PDE for multiple PDEs") - - from pytools import ( - generate_nonnegative_integer_tuples_summing_to_at_most - as gnitstam) - - for order in range(1, 100): - mis = sorted(gnitstam(order, self.dim), key=sum) - - pde_mat = [] - coeff_ident_enumerate_dict = dict((tuple(mi), i) for - (i, mi) in enumerate(mis)) - offset = len(mis) - - for mi in mis: - for pde_dict in pdes.eqs: - eq = [0]*(len(mis)*nexpr) - for ident, coeff in iteritems(pde_dict): - c = tuple(add_mi(ident.mi, mi)) - if c not in coeff_ident_enumerate_dict: - break - idx = offset*ident.iexpr + coeff_ident_enumerate_dict[c] - eq[idx] = coeff - else: - pde_mat.append(eq) - - if len(pde_mat) == 0: - continue - - n = nullspace(pde_mat)[offset*iexpr:offset*(iexpr+1), :] - indep_row = find_linear_independent_row(n) - if len(indep_row) > 0: - pde_dict = {} - mult = indep_row[max(indep_row.keys())] - for k, v in indep_row.items(): - pde_dict[CoeffIdentifier(mis[k], 0)] = v / mult - plog.done() - return PDE(self.dim, pde_dict) - - plog.done() - assert False - class LaplaceExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): @@ -560,9 +500,9 @@ class LaplaceExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): super(LaplaceExpansionTermsWrangler, self).__init__(order=order, dim=dim, max_mi=max_mi) - def get_pdes(self): - w = make_pde_syms(self.dim, 1) - return laplacian(w), 0, 1 + def get_pde(self): + w = make_pde_sym(self.dim) + return laplacian(w) class HelmholtzExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): @@ -574,29 +514,23 @@ class HelmholtzExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): super(HelmholtzExpansionTermsWrangler, self).__init__(order=order, dim=dim, max_mi=max_mi) - def get_pdes(self, **kwargs): - w = make_pde_syms(self.dim, 1) + def get_pde(self, **kwargs): + w = make_pde_sym(self.dim) k = sym.Symbol(self.helmholtz_k_name) - return (laplacian(w) + k**2 * w), 0, 1 + return (laplacian(w) + k**2 * w) -class StokesExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): +class BiharmonicExpansionTermsWrangler(LinearPDEBasedExpansionTermsWrangler): - init_arg_names = ("order", "dim", "icomp", "viscosity_mu_name", "max_mi") + init_arg_names = ("order", "dim", "max_mi") - def __init__(self, order, dim, icomp, viscosity_mu_name, max_mi=None): - self.viscosity_mu_name = viscosity_mu_name - self.icomp = icomp - super(StokesExpansionTermsWrangler, self).__init__(order=order, dim=dim, + def __init__(self, order, dim, max_mi=None): + super(BiharmonicExpansionTermsWrangler, self).__init__(order=order, dim=dim, max_mi=max_mi) - def get_pdes(self, **kwargs): - w = make_pde_syms(self.dim, self.dim+1) - mu = sym.Symbol(self.viscosity_mu_name) - u = w[:self.dim] - p = w[-1] - pdes = PDE(self.dim, mu * laplacian(u) - grad(p), div(u)) - return pdes, self.icomp, self.dim+1 + def get_pde(self, **kwargs): + w = make_pde_sym(self.dim) + return laplacian(laplacian(w)) # }}} @@ -674,17 +608,14 @@ class HelmholtzConformingVolumeTaylorExpansion(VolumeTaylorExpansionBase): self.expansion_terms_wrangler_key = (order, kernel.dim, helmholtz_k_name) -class StokesConformingVolumeTaylorExpansion(VolumeTaylorExpansionBase): +class BiharmonicConformingVolumeTaylorExpansion(VolumeTaylorExpansionBase): - expansion_terms_wrangler_class = StokesExpansionTermsWrangler + 
expansion_terms_wrangler_class = BiharmonicExpansionTermsWrangler expansion_terms_wrangler_cache = {} # not user-facing, be strict about having to pass use_rscale def __init__(self, kernel, order, use_rscale): - icomp = kernel.get_base_kernel().icomp - viscosity_mu_name = kernel.get_base_kernel().viscosity_mu_name - self.expansion_terms_wrangler_key = (order, kernel.dim, - icomp, viscosity_mu_name) + self.expansion_terms_wrangler_key = (order, kernel.dim) # }}} @@ -734,49 +665,58 @@ class DefaultExpansionFactory(ExpansionFactoryBase): def get_local_expansion_class(self, base_kernel): """Returns a subclass of :class:`ExpansionBase` suitable for *base_kernel*. """ - from sumpy.kernel import HelmholtzKernel, LaplaceKernel, YukawaKernel + from sumpy.kernel import (HelmholtzKernel, LaplaceKernel, YukawaKernel, + BiharmonicKernel, StokesletKernel, StressletKernel) + + from sumpy.expansion.local import (H2DLocalExpansion, Y2DLocalExpansion, + HelmholtzConformingVolumeTaylorLocalExpansion, + LaplaceConformingVolumeTaylorLocalExpansion, + BiharmonicConformingVolumeTaylorLocalExpansion, + VolumeTaylorLocalExpansion) + if (isinstance(base_kernel.get_base_kernel(), HelmholtzKernel) and base_kernel.dim == 2): - from sumpy.expansion.local import H2DLocalExpansion return H2DLocalExpansion elif (isinstance(base_kernel.get_base_kernel(), YukawaKernel) and base_kernel.dim == 2): - from sumpy.expansion.local import Y2DLocalExpansion return Y2DLocalExpansion elif isinstance(base_kernel.get_base_kernel(), HelmholtzKernel): - from sumpy.expansion.local import \ - HelmholtzConformingVolumeTaylorLocalExpansion return HelmholtzConformingVolumeTaylorLocalExpansion elif isinstance(base_kernel.get_base_kernel(), LaplaceKernel): - from sumpy.expansion.local import \ - LaplaceConformingVolumeTaylorLocalExpansion return LaplaceConformingVolumeTaylorLocalExpansion + elif isinstance(base_kernel.get_base_kernel(), + (BiharmonicKernel, StokesletKernel, StressletKernel)): + return BiharmonicConformingVolumeTaylorLocalExpansion else: - from sumpy.expansion.local import VolumeTaylorLocalExpansion return VolumeTaylorLocalExpansion def get_multipole_expansion_class(self, base_kernel): """Returns a subclass of :class:`ExpansionBase` suitable for *base_kernel*. 
""" - from sumpy.kernel import HelmholtzKernel, LaplaceKernel, YukawaKernel + from sumpy.kernel import (HelmholtzKernel, LaplaceKernel, YukawaKernel, + BiharmonicKernel, StokesletKernel, StressletKernel) + + from sumpy.expansion.multipole import (H2DMultipoleExpansion, + Y2DMultipoleExpansion, + LaplaceConformingVolumeTaylorMultipoleExpansion, + HelmholtzConformingVolumeTaylorMultipoleExpansion, + BiharmonicConformingVolumeTaylorMultipoleExpansion, + VolumeTaylorMultipoleExpansion) + if (isinstance(base_kernel.get_base_kernel(), HelmholtzKernel) and base_kernel.dim == 2): - from sumpy.expansion.multipole import H2DMultipoleExpansion return H2DMultipoleExpansion elif (isinstance(base_kernel.get_base_kernel(), YukawaKernel) and base_kernel.dim == 2): - from sumpy.expansion.multipole import Y2DMultipoleExpansion return Y2DMultipoleExpansion elif isinstance(base_kernel.get_base_kernel(), LaplaceKernel): - from sumpy.expansion.multipole import ( - LaplaceConformingVolumeTaylorMultipoleExpansion) return LaplaceConformingVolumeTaylorMultipoleExpansion elif isinstance(base_kernel.get_base_kernel(), HelmholtzKernel): - from sumpy.expansion.multipole import ( - HelmholtzConformingVolumeTaylorMultipoleExpansion) return HelmholtzConformingVolumeTaylorMultipoleExpansion + elif isinstance(base_kernel.get_base_kernel(), + (BiharmonicKernel, StokesletKernel, StressletKernel)): + return BiharmonicConformingVolumeTaylorMultipoleExpansion else: - from sumpy.expansion.multipole import VolumeTaylorMultipoleExpansion return VolumeTaylorMultipoleExpansion # }}} diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py index d82d5124..a58c5981 100644 --- a/sumpy/expansion/local.py +++ b/sumpy/expansion/local.py @@ -27,7 +27,8 @@ import sumpy.symbolic as sym from sumpy.expansion import ( ExpansionBase, VolumeTaylorExpansion, LaplaceConformingVolumeTaylorExpansion, - HelmholtzConformingVolumeTaylorExpansion, StokesConformingVolumeTaylorExpansion) + HelmholtzConformingVolumeTaylorExpansion, + BiharmonicConformingVolumeTaylorExpansion) class LocalExpansionBase(ExpansionBase): @@ -347,13 +348,13 @@ class HelmholtzConformingVolumeTaylorLocalExpansion( self, kernel, order, use_rscale) -class StokesConformingVolumeTaylorLocalExpansion( - StokesConformingVolumeTaylorExpansion, +class BiharmonicConformingVolumeTaylorLocalExpansion( + BiharmonicConformingVolumeTaylorExpansion, VolumeTaylorLocalExpansionBase): def __init__(self, kernel, order, use_rscale=None): VolumeTaylorLocalExpansionBase.__init__(self, kernel, order, use_rscale) - StokesConformingVolumeTaylorExpansion.__init__( + BiharmonicConformingVolumeTaylorExpansion.__init__( self, kernel, order, use_rscale) # }}} diff --git a/sumpy/expansion/multipole.py b/sumpy/expansion/multipole.py index 4d261ccc..1a53aa40 100644 --- a/sumpy/expansion/multipole.py +++ b/sumpy/expansion/multipole.py @@ -28,7 +28,8 @@ import sumpy.symbolic as sym # noqa from sumpy.symbolic import vector_xreplace from sumpy.expansion import ( ExpansionBase, VolumeTaylorExpansion, LaplaceConformingVolumeTaylorExpansion, - HelmholtzConformingVolumeTaylorExpansion, StokesConformingVolumeTaylorExpansion) + HelmholtzConformingVolumeTaylorExpansion, + BiharmonicConformingVolumeTaylorExpansion) from pytools import cartesian_product import logging @@ -272,13 +273,13 @@ class HelmholtzConformingVolumeTaylorMultipoleExpansion( self, kernel, order, use_rscale) -class StokesConformingVolumeTaylorMultipoleExpansion( - StokesConformingVolumeTaylorExpansion, +class 
BiharmonicConformingVolumeTaylorMultipoleExpansion( + BiharmonicConformingVolumeTaylorExpansion, VolumeTaylorMultipoleExpansionBase): def __init__(self, kernel, order, use_rscale=None): VolumeTaylorMultipoleExpansionBase.__init__(self, kernel, order, use_rscale) - StokesConformingVolumeTaylorExpansion.__init__( + BiharmonicConformingVolumeTaylorExpansion.__init__( self, kernel, order, use_rscale) # }}} diff --git a/sumpy/expansion/pde_utils.py b/sumpy/expansion/pde_utils.py index 34c669ac..146bd510 100644 --- a/sumpy/expansion/pde_utils.py +++ b/sumpy/expansion/pde_utils.py @@ -22,144 +22,70 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ -from collections import defaultdict -from sumpy.tools import CoeffIdentifier, add_mi +from sumpy.tools import add_mi class PDE(object): r""" - Represents a system of PDEs of dimension `dim`. It is represented by a - list of dictionaries with each dictionary representing a single PDE. - Each dictionary maps a :class:`CoeffIdentifier` object to a value, - - .. math:: - - \sum_{(mi, ident), c \text{pde\_dict}} - \frac{\partial^{\sum(mi)}}{\partial u^mi} c = 0, - - where :math:`u` is the solution vector of the PDE. + Represents a scalar PDE of dimension `dim`. It is represented by a + dictionary that maps a multi-index, given as a tuple, + to the coefficient. """ - def __init__(self, dim, *eqs): """ :arg dim: dimension of the PDE - :arg eqs: list of dictionaries mapping a :class:`CoeffIdentifier` to a - value or PDE instance + :arg eq: A dictionary mapping a multi-index to a value """ self.dim = dim - self.eqs = [] - for obj in eqs: - if isinstance(obj, PDE): - self.eqs.extend(obj.eqs) - else: - self.eqs.append(obj) + self.eq = eq def __mul__(self, param): - eqs = [] - for eq in self.eqs: - new_eq = dict() - for k, v in eq.items(): - new_eq[k] = eq[k] * param - eqs.append(new_eq) - return PDE(self.dim, *eqs) + res = PDE(self.dim, {}) + for k, v in self.eq.items(): + res.eq[k] = v * param + return res __rmul__ = __mul__ def __add__(self, other_pde): assert self.dim == other_pde.dim - assert len(self.eqs) == len(other_pde.eqs) - eqs = [] - for eq1, eq2 in zip(self.eqs, other_pde.eqs): - eq = defaultdict(lambda: 0) - for k, v in eq1.items(): - eq[k] += v - for k, v in eq2.items(): - eq[k] += v - eqs.append(dict(eq)) - return PDE(self.dim, *eqs) + res = PDE(self.dim, self.eq.copy()) + for k, v in other_pde.eq.items(): + if k in res.eq: + res.eq[k] += v + else: + res.eq[k] = v + return res __radd__ = __add__ def __sub__(self, other_pde): return self + (-1)*other_pde - def __getitem__(self, key): - eqs = self.eqs.__getitem__(key) - if not isinstance(eqs, list): - eqs = [eqs] - return PDE(self.dim, *eqs) - def __repr__(self): - return repr(self.eqs) + return repr(self.eq) def laplacian(pde): - eqs = [] - for j in range(len(pde.eqs)): - eqs.append(div(grad(pde[j]))) - return PDE(pde.dim, *eqs) + dim = pde.dim + res = PDE(dim, {}) + for j in range(dim): + mi = [0]*dim + mi[j] = 2 + res = res + diff(pde, tuple(mi)) + return res def diff(pde, mi): - eqs = [] - for eq in pde.eqs: - new_eq = defaultdict(lambda: 0) - for ident, v in eq.items(): - new_mi = add_mi(ident.mi, mi) - new_ident = CoeffIdentifier(tuple(new_mi), ident.iexpr) - new_eq[new_ident] += v - eqs.append(dict(new_eq)) - return PDE(pde.dim, *eqs) - - -def grad(pde): - assert len(pde.eqs) == 1 - eqs = [] - for d in range(pde.dim): - mi = [0]*pde.dim - mi[d] += 1 - eqs.append(diff(pde, mi)) - return PDE(pde.dim, *eqs) - - 
-def curl(pde): - assert len(pde.eqs) == pde.dim - if pde.dim == 2: - f1, f2 = pde[0], pde[1] - return diff(f2, (1, 0)) - diff(f1, (0, 1)) - - assert pde.dim == 3 - eqs = [] - for d in range(3): - f1, f2 = pde[(d+1) % 3], pde[(d+2) % 3] - mi1 = [0, 0, 0] - mi1[(d+1) % 3] = 1 - mi2 = [0, 0, 0] - mi2[(d+2) % 3] = 1 - new_eqs = diff(f2, mi1) - diff(f1, mi2) - eqs.extend(new_eqs.eqs) - return PDE(pde.dim, *eqs) - - -def div(pde): - result = defaultdict(lambda: 0) - for d, eq in enumerate(pde.eqs): - for ident, v in eq.items(): - mi = list(ident.mi) - mi[d] += 1 - new_ident = CoeffIdentifier(tuple(mi), ident.iexpr) - result[new_ident] += v - return PDE(pde.dim, dict(result)) - - -def make_pde_syms(dim, nexprs): + res = PDE(pde.dim, {}) + for eq_mi, v in pde.eq.items(): + res.eq[add_mi(eq_mi, mi)] = v + return res + + +def make_pde_sym(dim): """ - Returns a list of expressions of size `nexprs` to create a PDE - of dimension `dim`. + Returns a PDE u = 0 """ - eqs = [] - for iexpr in range(nexprs): - mi = [0]*dim - eq = dict() - eq[CoeffIdentifier(tuple(mi), iexpr)] = 1 - eqs.append(eq) - return PDE(dim, *eqs) + mi = tuple([0]*dim) + return PDE(dim, {mi: 1}) diff --git a/sumpy/tools.py b/sumpy/tools.py index 1c0c0785..2719f43a 100644 --- a/sumpy/tools.py +++ b/sumpy/tools.py @@ -27,7 +27,6 @@ THE SOFTWARE. import six from six.moves import range, zip -from collections import namedtuple from pytools import memoize_method, memoize_in import numpy as np import sumpy.symbolic as sym @@ -761,33 +760,4 @@ def solve_symbolic(A, b): # noqa: N803 red = reduced_row_echelon_form(big)[0] return red[:, big.shape[0]:] - -CoeffIdentifier = namedtuple('CoeffIdentifier', ['mi', 'iexpr']) - - -def find_linear_independent_row(nullspace): - """ - This method does elementary row operations to figure out the first row - which is linearly dependent on the previous rows. Partial pivoting is not done - to find the row with the lowest degree. 
- """ - ncols = nullspace.shape[1] - nrows = min(nullspace.shape[0], ncols+1) - augment = np.eye(nrows, nrows, dtype=nullspace.dtype) - mat = np.hstack((nullspace[:nrows, :], augment)) - for i in range(nrows): - for j in range(ncols): - if mat[i, j] != 0: - col = j - break - else: - pde_dict = {} - for col in range(ncols, ncols+nrows): - if mat[i, col] != 0: - pde_dict[col-ncols] = mat[i, col] - return pde_dict - for j in range(i+1, nrows): - mat[j, :] = mat[j, :]*mat[i, col] - mat[i, :]*mat[j, col] - return {} - # vim: fdm=marker diff --git a/test/test_kernels.py b/test/test_kernels.py index 1a9d0f3f..8225723c 100644 --- a/test/test_kernels.py +++ b/test/test_kernels.py @@ -36,12 +36,12 @@ from sumpy.expansion.multipole import ( VolumeTaylorMultipoleExpansionBase, LaplaceConformingVolumeTaylorMultipoleExpansion, HelmholtzConformingVolumeTaylorMultipoleExpansion, - StokesConformingVolumeTaylorMultipoleExpansion) + BiharmonicConformingVolumeTaylorMultipoleExpansion) from sumpy.expansion.local import ( VolumeTaylorLocalExpansion, H2DLocalExpansion, LaplaceConformingVolumeTaylorLocalExpansion, HelmholtzConformingVolumeTaylorLocalExpansion, - StokesConformingVolumeTaylorLocalExpansion) + BiharmonicConformingVolumeTaylorLocalExpansion) from sumpy.kernel import (LaplaceKernel, HelmholtzKernel, AxisTargetDerivative, DirectionalSourceDerivative, BiharmonicKernel, StokesletKernel) from pytools.convergence import PConvergenceVerifier @@ -356,8 +356,8 @@ def test_p2e2p(ctx_getter, base_knl, expn_class, order, with_source_derivative): (HelmholtzKernel(2), H2DLocalExpansion, H2DMultipoleExpansion), (StokesletKernel(2, 0, 0), VolumeTaylorLocalExpansion, VolumeTaylorMultipoleExpansion), - (StokesletKernel(2, 0, 0), StokesConformingVolumeTaylorLocalExpansion, - StokesConformingVolumeTaylorMultipoleExpansion), + (StokesletKernel(2, 0, 0), BiharmonicConformingVolumeTaylorLocalExpansion, + BiharmonicConformingVolumeTaylorMultipoleExpansion), ]) def test_translations(ctx_getter, knl, local_expn_class, mpole_expn_class): logging.basicConfig(level=logging.INFO) -- GitLab