From edd4536cbf0f7778fa0d214eaa523a4d140d3528 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 28 May 2018 15:14:48 -0600 Subject: [PATCH 01/19] Add benchmarks for translations --- .gitignore | 1 + asv.conf.json | 160 +++++++++++++++++++++++++++++++ benchmarks/__init__.py | 0 benchmarks/bench_translations.py | 71 ++++++++++++++ benchmarks/run_asv.sh | 16 ++++ 5 files changed, 248 insertions(+) create mode 100644 asv.conf.json create mode 100644 benchmarks/__init__.py create mode 100644 benchmarks/bench_translations.py create mode 100644 benchmarks/run_asv.sh diff --git a/.gitignore b/.gitignore index 9bae8f40..e0287a53 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,4 @@ doc/_build .pytest_cache sumpy/_git_rev.py +.asv diff --git a/asv.conf.json b/asv.conf.json new file mode 100644 index 00000000..029a0be1 --- /dev/null +++ b/asv.conf.json @@ -0,0 +1,160 @@ +{ + // The version of the config file format. Do not change, unless + // you know what you are doing. + "version": 1, + + // The name of the project being benchmarked + "project": "project", + + // The project's homepage + "project_url": "https://documen.tician.de/sumpy", + + // The URL or local path of the source code repository for the + // project being benchmarked + "repo": ".", + + // The Python project's subdirectory in your repo. If missing or + // the empty string, the project is assumed to be located at the root + // of the repository. + // "repo_subdir": "", + + // List of branches to benchmark. If not provided, defaults to "master" + // (for git) or "default" (for mercurial). + // "branches": ["master"], // for git + // "branches": ["default"], // for mercurial + + // The DVCS being used. If not set, it will be automatically + // determined from "repo" by looking at the protocol in the URL + // (if remote), or by looking for special directories, such as + // ".git" (if local). + // "dvcs": "git", + + // The tool to use to create environments. May be "conda", + // "virtualenv" or other value depending on the plugins in use. + // If missing or the empty string, the tool will be automatically + // determined by looking for tools on the PATH environment + // variable. + "environment_type": "conda", + + // timeout in seconds for installing any dependencies in environment + // defaults to 10 min + //"install_timeout": 600, + + // the base URL to show a commit for the project. + // "show_commit_url": "http://github.com/owner/project/commit/", + + // The Pythons you'd like to test against. If not provided, defaults + // to the current version of Python used to run `asv`. + // "pythons": ["2.7", "3.6"], + + // The list of conda channel names to be searched for benchmark + // dependency packages in the specified order + "conda_channels": ["conda-forge", "defaults"], + + // The matrix of dependencies to test. Each key is the name of a + // package (in PyPI) and the values are version numbers. An empty + // list or empty string indicates to just test against the default + // (latest) version. null indicates that the package is to not be + // installed. If the package to be tested is only available from + // PyPi, and the 'environment_type' is conda, then you can preface + // the package name by 'pip+', and the package will be installed via + // pip (with all the conda available packages installed first, + // followed by the pip installed packages). 
+ // + // "matrix": { + // "numpy": ["1.6", "1.7"], + // "six": ["", null], // test with and without six installed + // "pip+emcee": [""], // emcee is only available for install with pip. + // }, + "matrix": { + "numpy" : [""], + "sympy" : ["1.0"], + "pyopencl" : [""], + "islpy" : [""], + "pocl" : [""], + "pip+git+https://github.com/inducer/pymbolic": [""], + "pip+git+https://gitlab.tiker.net/inducer/boxtree": [""], + "pip+git+https://github.com/inducer/loopy": [""], + }, + + // Combinations of libraries/python versions can be excluded/included + // from the set to test. Each entry is a dictionary containing additional + // key-value pairs to include/exclude. + // + // An exclude entry excludes entries where all values match. The + // values are regexps that should match the whole string. + // + // An include entry adds an environment. Only the packages listed + // are installed. The 'python' key is required. The exclude rules + // do not apply to includes. + // + // In addition to package names, the following keys are available: + // + // - python + // Python version, as in the *pythons* variable above. + // - environment_type + // Environment type, as above. + // - sys_platform + // Platform, as in sys.platform. Possible values for the common + // cases: 'linux2', 'win32', 'cygwin', 'darwin'. + // + // "exclude": [ + // {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows + // {"environment_type": "conda", "six": null}, // don't run without six on conda + // ], + // + // "include": [ + // // additional env for python2.7 + // {"python": "2.7", "numpy": "1.8"}, + // // additional env if run on windows+conda + // {"platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""}, + // ], + + // The directory (relative to the current directory) that benchmarks are + // stored in. If not provided, defaults to "benchmarks" + // "benchmark_dir": "benchmarks", + + // The directory (relative to the current directory) to cache the Python + // environments in. If not provided, defaults to "env" + "env_dir": ".asv/env", + + // The directory (relative to the current directory) that raw benchmark + // results are stored in. If not provided, defaults to "results". + "results_dir": ".asv/results", + + // The directory (relative to the current directory) that the html tree + // should be written to. If not provided, defaults to "html". + "html_dir": ".asv/html", + + // The number of characters to retain in the commit hashes. + // "hash_length": 8, + + // `asv` will cache wheels of the recent builds in each + // environment, making them faster to install next time. This is + // number of builds to keep, per environment. + // "wheel_cache_size": 0 + + // The commits after which the regression search in `asv publish` + // should start looking for regressions. Dictionary whose keys are + // regexps matching to benchmark names, and values corresponding to + // the commit (exclusive) after which to start looking for + // regressions. The default is to start from the first commit + // with results. If the commit is `null`, regression detection is + // skipped for the matching benchmark. + // + // "regressions_first_commits": { + // "some_benchmark": "352cdf", // Consider regressions only after this commit + // "another_benchmark": null, // Skip regression detection altogether + // } + + // The thresholds for relative change in results, after which `asv + // publish` starts reporting regressions. 
Dictionary of the same + // form as in ``regressions_first_commits``, with values + // indicating the thresholds. If multiple entries match, the + // maximum is taken. If no entry matches, the default is 5%. + // + // "regressions_thresholds": { + // "some_benchmark": 0.01, // Threshold of 1% + // "another_benchmark": 0.5, // Threshold of 50% + // } +} diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/benchmarks/bench_translations.py b/benchmarks/bench_translations.py new file mode 100644 index 00000000..8578a4b8 --- /dev/null +++ b/benchmarks/bench_translations.py @@ -0,0 +1,71 @@ +import numpy as np + +import pytest +import pyopencl as cl +from pyopencl.tools import ( # noqa + pytest_generate_tests_for_pyopencl as pytest_generate_tests) + +from sumpy.expansion.multipole import ( + VolumeTaylorMultipoleExpansion, H2DMultipoleExpansion, + VolumeTaylorMultipoleExpansionBase, + LaplaceConformingVolumeTaylorMultipoleExpansion, + HelmholtzConformingVolumeTaylorMultipoleExpansion) +from sumpy.expansion.local import ( + VolumeTaylorLocalExpansion, H2DLocalExpansion, + LaplaceConformingVolumeTaylorLocalExpansion, + HelmholtzConformingVolumeTaylorLocalExpansion) + +from sumpy.kernel import (LaplaceKernel, HelmholtzKernel, AxisTargetDerivative, + DirectionalSourceDerivative) + +import logging +logger = logging.getLogger(__name__) + +import sympy +import sumpy.symbolic as sym + + +class Param: + def __init__(self, knl, local_expn_class, mpole_expn_class): + self.knl = knl + self.local_expn_class = local_expn_class + self.mpole_expn_class = mpole_expn_class + + def __repr__(self): + return "{}_{}_{}".format(self.knl, self.local_expn_class.__name__, self.mpole_expn_class.__name__) + + +class TranslationSuite: + + params = [ + Param(LaplaceKernel(2), VolumeTaylorLocalExpansion, VolumeTaylorMultipoleExpansion), + Param(LaplaceKernel(2), LaplaceConformingVolumeTaylorLocalExpansion, + LaplaceConformingVolumeTaylorMultipoleExpansion), + Param(HelmholtzKernel(2), VolumeTaylorLocalExpansion, VolumeTaylorMultipoleExpansion), + Param(HelmholtzKernel(2), HelmholtzConformingVolumeTaylorLocalExpansion, + HelmholtzConformingVolumeTaylorMultipoleExpansion), + Param(HelmholtzKernel(2), H2DLocalExpansion, H2DMultipoleExpansion) + ] + param_names = ['translation'] + + def setup(self, param): + logging.basicConfig(level=logging.INFO) + self.ctx = cl.create_some_context() + self.queue = cl.CommandQueue(self.ctx) + np.random.seed(17) + + def track_m2l_op_count(self, param): + knl = param.knl + m_expn = param.mpole_expn_class(knl, order=3) + l_expn = param.local_expn_class(knl, order=3) + + src_coeff_exprs = [sym.Symbol("src_coeff%d" % i) + for i in range(len(m_expn))] + dvec = sym.make_sym_vector("d", knl.dim) + src_rscale = sym.Symbol("src_rscale") + tgt_rscale = sym.Symbol("tgt_rscale") + result = l_expn.translate_from(m_expn, src_coeff_exprs, src_rscale, + dvec, tgt_rscale) + return sympy.count_ops(result) + + track_m2l_op_count.unit = "ops" diff --git a/benchmarks/run_asv.sh b/benchmarks/run_asv.sh new file mode 100644 index 00000000..f9a7dcfc --- /dev/null +++ b/benchmarks/run_asv.sh @@ -0,0 +1,16 @@ +pip install asv +asv setup --verbose +master_commit=`git rev-parse master` +test_commit=`git rev-parse HEAD` + +export PYOPENCL_CTX=0 + +asv run $master_commit...$master_commit~ --skip-existing --verbose +asv run $test_commit...$test_commit~ --skip-existing --verbose + +output=`asv compare $master_commit $test_commit --factor 1 -s` +echo "$output" + 
+if [[ "$output" = *"worse"* ]]; then + echo "Some of the benchmarks have gotten worse" +fi -- GitLab From 11077d882bf5a668c6d759d583e8cb13ccc4d592 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 21 Aug 2018 17:21:22 -0500 Subject: [PATCH 02/19] Use pocl in benchmarks and store results in .sumpy --- benchmarks/run_asv.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/benchmarks/run_asv.sh b/benchmarks/run_asv.sh index f9a7dcfc..f1db6495 100644 --- a/benchmarks/run_asv.sh +++ b/benchmarks/run_asv.sh @@ -1,9 +1,15 @@ pip install asv + +if [[ ! -z "$CI" ]]; then + mkdir -p ~/.sumpy/asv + ln -s ~/.sumpy/asv .asv +fi + asv setup --verbose master_commit=`git rev-parse master` test_commit=`git rev-parse HEAD` -export PYOPENCL_CTX=0 +export PYOPENCL_CTX=port asv run $master_commit...$master_commit~ --skip-existing --verbose asv run $test_commit...$test_commit~ --skip-existing --verbose @@ -13,4 +19,5 @@ echo "$output" if [[ "$output" = *"worse"* ]]; then echo "Some of the benchmarks have gotten worse" + exit 1 fi -- GitLab From 2914f6055abc7d64bf504519e213fb2f11ab23f3 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 21 Aug 2018 17:24:45 -0500 Subject: [PATCH 03/19] Add new benchmarks job --- .gitlab-ci.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 946ff3b1..e19f549c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -99,3 +99,16 @@ Flake8: - python3.5 except: - tags + +Benchmarks: + script: + - CONDA_ENVIRONMENT=.test-conda-env-py3.yml + - REQUIREMENTS_TXT=.test-conda-env-py3-requirements.txt + - rm -rf test + - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project-within-miniconda.sh + - ". ./build-and-test-py-project-within-miniconda.sh" + - ./benchmarks/run_asv.sh + tags: + - linux + except: + - tags -- GitLab From 6c569dad4af9c0e0e86d9d18c74b8395b1cd8df7 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 21 Aug 2018 17:30:53 -0500 Subject: [PATCH 04/19] Make run_asv.sh executable --- benchmarks/run_asv.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 benchmarks/run_asv.sh diff --git a/benchmarks/run_asv.sh b/benchmarks/run_asv.sh old mode 100644 new mode 100755 -- GitLab From 7ec6a2e2a25abaaf1dbd00a96e5a17574b3f17f7 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 21 Aug 2018 17:39:38 -0500 Subject: [PATCH 05/19] Add conda channels --- .gitlab-ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e19f549c..90795628 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -107,6 +107,8 @@ Benchmarks: - rm -rf test - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project-within-miniconda.sh - ". ./build-and-test-py-project-within-miniconda.sh" + - conda config --add channels conda-forge + - conda config --add channels inducer - ./benchmarks/run_asv.sh tags: - linux -- GitLab From 1a2b7e7e395cb1e64dd256b6fe7c87bc65ed451e Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 21 Aug 2018 18:57:32 -0500 Subject: [PATCH 06/19] Run asv machine --- benchmarks/run_asv.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/benchmarks/run_asv.sh b/benchmarks/run_asv.sh index f1db6495..a8643b37 100755 --- a/benchmarks/run_asv.sh +++ b/benchmarks/run_asv.sh @@ -5,6 +5,7 @@ if [[ ! 
-z "$CI" ]]; then ln -s ~/.sumpy/asv .asv fi +asv machine --yes asv setup --verbose master_commit=`git rev-parse master` test_commit=`git rev-parse HEAD` -- GitLab From 1c54b2c5348c6b7fbc527827020661ad9166f1fa Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 21 Aug 2018 19:08:23 -0500 Subject: [PATCH 07/19] Use benchmark tag for benchmark CI job --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 90795628..93e4c216 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -112,5 +112,6 @@ Benchmarks: - ./benchmarks/run_asv.sh tags: - linux + - benchmark except: - tags -- GitLab From d5b7fbf79a140188dc737b766a9fec478921e546 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 21 Aug 2018 19:37:21 -0500 Subject: [PATCH 08/19] Publish results as html --- benchmarks/run_asv.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/benchmarks/run_asv.sh b/benchmarks/run_asv.sh index a8643b37..8489fb60 100755 --- a/benchmarks/run_asv.sh +++ b/benchmarks/run_asv.sh @@ -22,3 +22,7 @@ if [[ "$output" = *"worse"* ]]; then echo "Some of the benchmarks have gotten worse" exit 1 fi + +if [[ ! -z "$CI" ]]; then + asv publish --html-dir ~/.scicomp-benchmarks/asv/sumpy +fi -- GitLab From 1a92620b2c1831d9d1447ae65cdd3df078be9fba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Tue, 21 Aug 2018 23:14:39 -0400 Subject: [PATCH 09/19] Add a readme with a link to the benchmarks --- README.rst | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 README.rst diff --git a/README.rst b/README.rst new file mode 100644 index 00000000..6ad38100 --- /dev/null +++ b/README.rst @@ -0,0 +1,35 @@ +sumpy: n-body kernels and translation operators +=============================================== + +.. image:: https://gitlab.tiker.net/inducer/sumpy/badges/master/pipeline.svg + :target: https://gitlab.tiker.net/inducer/sumpy/commits/master +.. image:: https://badge.fury.io/py/sumpy.png + :target: http://pypi.python.org/pypi/sumpy + +Sumpy is mainly a 'scaffolding' package for Fast Multipole and quadrature methods. +If you're building one of those and need code generation for the required Multipole +and local expansions, come right on in. Together with boxtree, there is a full, +symbolically kernel-independent FMM implementation here. + +Sumpy relies on + +* `numpy `_ for arrays +* `boxtree `_ for FMM tree building +* `sumpy `_ for expansions and analytical routines +* `loopy `_ for fast array operations +* `pytest `_ for automated testing + +and, indirectly, + +* `PyOpenCL `_ as computational infrastructure + +PyOpenCL is likely the only package you'll have to install +by hand, all the others will be installed automatically. + +Resources: + +* `documentation `_ +* `source code via git `_ + +If you can see inside the UIUC firewall, you may browse +`benchmark results `_. 
\ No newline at end of file -- GitLab From 95bd3028b9c0402b8353d80cfa7912c513aab718 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Tue, 21 Aug 2018 23:16:58 -0400 Subject: [PATCH 10/19] Tweak asv config file --- asv.conf.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/asv.conf.json b/asv.conf.json index 029a0be1..6642e26f 100644 --- a/asv.conf.json +++ b/asv.conf.json @@ -4,7 +4,7 @@ "version": 1, // The name of the project being benchmarked - "project": "project", + "project": "sumpy", // The project's homepage "project_url": "https://documen.tician.de/sumpy", @@ -41,7 +41,7 @@ //"install_timeout": 600, // the base URL to show a commit for the project. - // "show_commit_url": "http://github.com/owner/project/commit/", + "show_commit_url": "http://gitlab.tiker.net/inducer/sumpy/commits/", // The Pythons you'd like to test against. If not provided, defaults // to the current version of Python used to run `asv`. -- GitLab From d1225897de46a1365c073de17c938bd8b0da2afc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kl=C3=B6ckner?= Date: Wed, 22 Aug 2018 00:12:12 -0400 Subject: [PATCH 11/19] Fix benchmarks URL --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 6ad38100..1f6e76d1 100644 --- a/README.rst +++ b/README.rst @@ -32,4 +32,4 @@ Resources: * `source code via git `_ If you can see inside the UIUC firewall, you may browse -`benchmark results `_. \ No newline at end of file +`benchmark results `_. \ No newline at end of file -- GitLab From 56b25f07833a5567da796c8040f764b471c81643 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Fri, 24 Aug 2018 08:41:35 -0500 Subject: [PATCH 12/19] Use ci-support benchmark script --- .gitlab-ci.yml | 9 +++------ benchmarks/run_asv.sh | 28 ---------------------------- 2 files changed, 3 insertions(+), 34 deletions(-) delete mode 100755 benchmarks/run_asv.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ed8f5f40..d767ba51 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -104,12 +104,9 @@ Benchmarks: script: - CONDA_ENVIRONMENT=.test-conda-env-py3.yml - REQUIREMENTS_TXT=.test-conda-env-py3-requirements.txt - - rm -rf test - - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project-within-miniconda.sh - - ". ./build-and-test-py-project-within-miniconda.sh" - - conda config --add channels conda-forge - - conda config --add channels inducer - - ./benchmarks/run_asv.sh + - PROJECT=sumpy + - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-benchmark-py-project.sh + - ". ./build-and-benchmark-py-project.sh" tags: - linux - benchmark diff --git a/benchmarks/run_asv.sh b/benchmarks/run_asv.sh deleted file mode 100755 index 8489fb60..00000000 --- a/benchmarks/run_asv.sh +++ /dev/null @@ -1,28 +0,0 @@ -pip install asv - -if [[ ! -z "$CI" ]]; then - mkdir -p ~/.sumpy/asv - ln -s ~/.sumpy/asv .asv -fi - -asv machine --yes -asv setup --verbose -master_commit=`git rev-parse master` -test_commit=`git rev-parse HEAD` - -export PYOPENCL_CTX=port - -asv run $master_commit...$master_commit~ --skip-existing --verbose -asv run $test_commit...$test_commit~ --skip-existing --verbose - -output=`asv compare $master_commit $test_commit --factor 1 -s` -echo "$output" - -if [[ "$output" = *"worse"* ]]; then - echo "Some of the benchmarks have gotten worse" - exit 1 -fi - -if [[ ! 
-z "$CI" ]]; then - asv publish --html-dir ~/.scicomp-benchmarks/asv/sumpy -fi -- GitLab From 1e607cb6ec433ab1b961490b4ec619ca501571a5 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Sat, 25 Aug 2018 08:00:27 -0500 Subject: [PATCH 13/19] Set PYOPENCL_TEST --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d767ba51..97c8ee24 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -105,6 +105,7 @@ Benchmarks: - CONDA_ENVIRONMENT=.test-conda-env-py3.yml - REQUIREMENTS_TXT=.test-conda-env-py3-requirements.txt - PROJECT=sumpy + - PYOPENCL_TEST=portable - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-benchmark-py-project.sh - ". ./build-and-benchmark-py-project.sh" tags: -- GitLab From 827977d9397e154fc9e9e9463110b41fe6752874 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 27 Aug 2018 11:20:27 -0500 Subject: [PATCH 14/19] get flop count after doing cse and killing trivial assignments --- benchmarks/bench_translations.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/benchmarks/bench_translations.py b/benchmarks/bench_translations.py index 8578a4b8..d6e6ee5d 100644 --- a/benchmarks/bench_translations.py +++ b/benchmarks/bench_translations.py @@ -22,8 +22,12 @@ import logging logger = logging.getLogger(__name__) import sympy -import sumpy.symbolic as sym +import six +import pymbolic.mapper.flop_counter +import sumpy.symbolic as sym +from sumpy.assignment_collection import SymbolicAssignmentCollection +from sumpy.codegen import to_loopy_insns class Param: def __init__(self, knl, local_expn_class, mpole_expn_class): @@ -66,6 +70,13 @@ class TranslationSuite: tgt_rscale = sym.Symbol("tgt_rscale") result = l_expn.translate_from(m_expn, src_coeff_exprs, src_rscale, dvec, tgt_rscale) - return sympy.count_ops(result) + sac = SymbolicAssignmentCollection() + for i, expr in enumerate(result): + sac.assign_unique("coeff%d" % i, expr) + sac.run_global_cse() + insns = to_loopy_insns(six.iteritems(sac.assignments)) + counter = pymbolic.mapper.flop_counter.FlopCounter() + + return sum([counter.rec(insn.expression) for insn in insns]) track_m2l_op_count.unit = "ops" -- GitLab From 9235bbd4c971e3a6a8d584cf2a377f9b7f075104 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 27 Aug 2018 11:22:34 -0500 Subject: [PATCH 15/19] count assignment --- benchmarks/bench_translations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/bench_translations.py b/benchmarks/bench_translations.py index d6e6ee5d..98ab2325 100644 --- a/benchmarks/bench_translations.py +++ b/benchmarks/bench_translations.py @@ -77,6 +77,6 @@ class TranslationSuite: insns = to_loopy_insns(six.iteritems(sac.assignments)) counter = pymbolic.mapper.flop_counter.FlopCounter() - return sum([counter.rec(insn.expression) for insn in insns]) + return sum([counter.rec(insn.expression)+1 for insn in insns]) track_m2l_op_count.unit = "ops" -- GitLab From 8c6315f38ab3cdbabcac0d882e6271de5601589b Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Mon, 27 Aug 2018 13:16:16 -0500 Subject: [PATCH 16/19] Add different dims, orders for benchmarks --- benchmarks/bench_translations.py | 96 +++++++++++++++++++++++++------- 1 file changed, 77 insertions(+), 19 deletions(-) diff --git a/benchmarks/bench_translations.py b/benchmarks/bench_translations.py index 98ab2325..539b09b1 100644 --- a/benchmarks/bench_translations.py +++ b/benchmarks/bench_translations.py @@ -30,38 +30,39 @@ from 
sumpy.assignment_collection import SymbolicAssignmentCollection from sumpy.codegen import to_loopy_insns class Param: - def __init__(self, knl, local_expn_class, mpole_expn_class): - self.knl = knl - self.local_expn_class = local_expn_class - self.mpole_expn_class = mpole_expn_class + def __init__(self, dim, order): + self.dim = dim + self.order = order def __repr__(self): - return "{}_{}_{}".format(self.knl, self.local_expn_class.__name__, self.mpole_expn_class.__name__) + return "{}D_order_{}".format(self.dim, self.order) -class TranslationSuite: +class TranslationBenchmarkSuite: params = [ - Param(LaplaceKernel(2), VolumeTaylorLocalExpansion, VolumeTaylorMultipoleExpansion), - Param(LaplaceKernel(2), LaplaceConformingVolumeTaylorLocalExpansion, - LaplaceConformingVolumeTaylorMultipoleExpansion), - Param(HelmholtzKernel(2), VolumeTaylorLocalExpansion, VolumeTaylorMultipoleExpansion), - Param(HelmholtzKernel(2), HelmholtzConformingVolumeTaylorLocalExpansion, - HelmholtzConformingVolumeTaylorMultipoleExpansion), - Param(HelmholtzKernel(2), H2DLocalExpansion, H2DMultipoleExpansion) + Param(2, 10), + Param(2, 15), + Param(2, 20), + Param(3, 5), + Param(3, 10), ] - param_names = ['translation'] + + param_names = ['order'] def setup(self, param): logging.basicConfig(level=logging.INFO) - self.ctx = cl.create_some_context() - self.queue = cl.CommandQueue(self.ctx) np.random.seed(17) + if self.__class__ == TranslationBenchmarkSuite: + raise NotImplementedError + mpole_expn_class = self.mpole_expn_class + if param.order == 3 and H2DMultipoleExpansion == mpole_expn_class: + raise NotImplementedError def track_m2l_op_count(self, param): - knl = param.knl - m_expn = param.mpole_expn_class(knl, order=3) - l_expn = param.local_expn_class(knl, order=3) + knl = self.knl(param.dim) + m_expn = self.mpole_expn_class(knl, order=param.order) + l_expn = self.local_expn_class(knl, order=param.order) src_coeff_exprs = [sym.Symbol("src_coeff%d" % i) for i in range(len(m_expn))] @@ -80,3 +81,60 @@ class TranslationSuite: return sum([counter.rec(insn.expression)+1 for insn in insns]) track_m2l_op_count.unit = "ops" + + +class LaplaceVolumeTaylorTranslation(TranslationBenchmarkSuite): + knl = LaplaceKernel + local_expn_class = VolumeTaylorLocalExpansion + mpole_expn_class = VolumeTaylorMultipoleExpansion + params = [ + Param(2, 10), + Param(3, 5), + ] + + +class LaplaceConformingVolumeTaylorTranslation(TranslationBenchmarkSuite): + knl = LaplaceKernel + local_expn_class = LaplaceConformingVolumeTaylorLocalExpansion + mpole_expn_class = LaplaceConformingVolumeTaylorMultipoleExpansion + params = [ + Param(2, 10), + Param(2, 15), + Param(2, 20), + Param(3, 5), + ] + + +class HelmholtzVolumeTaylorTranslation(TranslationBenchmarkSuite): + knl = HelmholtzKernel + local_expn_class = VolumeTaylorLocalExpansion + mpole_expn_class = VolumeTaylorMultipoleExpansion + params = [ + Param(2, 10), + Param(3, 5), + ] + + +class HelmholtzConformingVolumeTaylorTranslation(TranslationBenchmarkSuite): + knl = HelmholtzKernel + local_expn_class = HelmholtzConformingVolumeTaylorLocalExpansion + mpole_expn_class = HelmholtzConformingVolumeTaylorMultipoleExpansion + params = [ + Param(2, 10), + Param(2, 15), + Param(2, 20), + Param(3, 5), + ] + + +class Helmholtz2DTranslation(TranslationBenchmarkSuite): + knl = HelmholtzKernel + local_expn_class = H2DLocalExpansion + mpole_expn_class = H2DMultipoleExpansion + params = [ + Param(2, 10), + Param(2, 15), + Param(2, 20), + ] + + -- GitLab From a922003211c7d2de2794d3a1d9279e8880444471 Mon Sep 
17 00:00:00 2001 From: Isuru Fernando Date: Mon, 27 Aug 2018 14:58:20 -0500 Subject: [PATCH 17/19] Increase timeout and remove long running benchmarks --- benchmarks/bench_translations.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/benchmarks/bench_translations.py b/benchmarks/bench_translations.py index 539b09b1..ad7686bb 100644 --- a/benchmarks/bench_translations.py +++ b/benchmarks/bench_translations.py @@ -81,6 +81,7 @@ class TranslationBenchmarkSuite: return sum([counter.rec(insn.expression)+1 for insn in insns]) track_m2l_op_count.unit = "ops" + track_m2l_op_count.timeout = 200.0 class LaplaceVolumeTaylorTranslation(TranslationBenchmarkSuite): @@ -121,8 +122,6 @@ class HelmholtzConformingVolumeTaylorTranslation(TranslationBenchmarkSuite): mpole_expn_class = HelmholtzConformingVolumeTaylorMultipoleExpansion params = [ Param(2, 10), - Param(2, 15), - Param(2, 20), Param(3, 5), ] @@ -134,7 +133,6 @@ class Helmholtz2DTranslation(TranslationBenchmarkSuite): params = [ Param(2, 10), Param(2, 15), - Param(2, 20), ] -- GitLab From 0b862ec05b6236c4e9cad898147521b4ec95e350 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 28 Aug 2018 08:30:29 -0500 Subject: [PATCH 18/19] Add more benchmark params --- benchmarks/bench_translations.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/benchmarks/bench_translations.py b/benchmarks/bench_translations.py index ad7686bb..07dd8aec 100644 --- a/benchmarks/bench_translations.py +++ b/benchmarks/bench_translations.py @@ -98,12 +98,6 @@ class LaplaceConformingVolumeTaylorTranslation(TranslationBenchmarkSuite): knl = LaplaceKernel local_expn_class = LaplaceConformingVolumeTaylorLocalExpansion mpole_expn_class = LaplaceConformingVolumeTaylorMultipoleExpansion - params = [ - Param(2, 10), - Param(2, 15), - Param(2, 20), - Param(3, 5), - ] class HelmholtzVolumeTaylorTranslation(TranslationBenchmarkSuite): @@ -120,10 +114,6 @@ class HelmholtzConformingVolumeTaylorTranslation(TranslationBenchmarkSuite): knl = HelmholtzKernel local_expn_class = HelmholtzConformingVolumeTaylorLocalExpansion mpole_expn_class = HelmholtzConformingVolumeTaylorMultipoleExpansion - params = [ - Param(2, 10), - Param(3, 5), - ] class Helmholtz2DTranslation(TranslationBenchmarkSuite): @@ -133,6 +123,7 @@ class Helmholtz2DTranslation(TranslationBenchmarkSuite): params = [ Param(2, 10), Param(2, 15), + Param(2, 20), ] -- GitLab From f689043c31a221f87ade1a9e660c4664ac4d35bb Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Tue, 28 Aug 2018 08:37:11 -0500 Subject: [PATCH 19/19] Use CSEAwareFlopCounter --- benchmarks/bench_translations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/bench_translations.py b/benchmarks/bench_translations.py index 07dd8aec..8d6cfdd8 100644 --- a/benchmarks/bench_translations.py +++ b/benchmarks/bench_translations.py @@ -76,7 +76,7 @@ class TranslationBenchmarkSuite: sac.assign_unique("coeff%d" % i, expr) sac.run_global_cse() insns = to_loopy_insns(six.iteritems(sac.assignments)) - counter = pymbolic.mapper.flop_counter.FlopCounter() + counter = pymbolic.mapper.flop_counter.CSEAwareFlopCounter() return sum([counter.rec(insn.expression)+1 for insn in insns]) -- GitLab
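The suites above use asv's "track_" convention: asv imports the benchmark classes, calls setup(param) once per entry in params, records whatever track_m2l_op_count returns as the metric, and labels it with the unit attribute. For a quick local sanity check the same methods can also be called as plain Python, without going through `asv run`. The sketch below is not part of the patch series; it assumes sumpy and the dependencies pinned in asv.conf.json are installed and that it is run from the benchmarks/ directory (adjust the import otherwise).

    # Minimal local check of one op-count benchmark, bypassing asv.
    # Assumes the final benchmarks/bench_translations.py from this series
    # is importable (e.g. run from the benchmarks/ directory).
    from bench_translations import (
        LaplaceConformingVolumeTaylorTranslation, Param)

    suite = LaplaceConformingVolumeTaylorTranslation()
    param = Param(2, 10)        # 2D Laplace kernel, expansion order 10
    suite.setup(param)          # seeds numpy and configures logging, as asv would
    ops = suite.track_m2l_op_count(param)
    print("M2L op count for %s: %d" % (param, ops))

Under asv itself these per-commit values land in .asv/results (see asv.conf.json), and the `asv compare` step in run_asv.sh (before patch 12 moved the benchmark driving into the shared build-and-benchmark-py-project.sh script) is what flagged benchmarks that have gotten worse.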