From edd4536cbf0f7778fa0d214eaa523a4d140d3528 Mon Sep 17 00:00:00 2001
From: Isuru Fernando <isuruf@gmail.com>
Date: Mon, 28 May 2018 15:14:48 -0600
Subject: [PATCH] Add benchmarks for translations

---
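The new TranslationSuite follows airspeed velocity's (asv's) benchmark
discovery conventions: params/param_names parametrize the suite, setup()
runs before each benchmark, and a track_* method returns an arbitrary
tracked quantity whose unit is given by its .unit attribute.  The
computation in track_m2l_op_count is purely symbolic and does not use the
OpenCL context that setup() creates, so it can also be reproduced outside
asv.  A minimal standalone sketch, mirroring the benchmark body (assumes
sumpy, sympy and their dependencies are importable):

    import sympy
    import sumpy.symbolic as sym
    from sumpy.kernel import LaplaceKernel
    from sumpy.expansion.multipole import VolumeTaylorMultipoleExpansion
    from sumpy.expansion.local import VolumeTaylorLocalExpansion

    knl = LaplaceKernel(2)
    m_expn = VolumeTaylorMultipoleExpansion(knl, order=3)
    l_expn = VolumeTaylorLocalExpansion(knl, order=3)

    # Symbolic multipole coefficients and translation geometry
    src_coeff_exprs = [sym.Symbol("src_coeff%d" % i)
                       for i in range(len(m_expn))]
    dvec = sym.make_sym_vector("d", knl.dim)
    src_rscale = sym.Symbol("src_rscale")
    tgt_rscale = sym.Symbol("tgt_rscale")

    # Multipole-to-local (M2L) translation, measured by operation count
    result = l_expn.translate_from(m_expn, src_coeff_exprs, src_rscale,
                                   dvec, tgt_rscale)
    print(sympy.count_ops(result))
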
 .gitignore                       |   1 +
 asv.conf.json                    | 160 +++++++++++++++++++++++++++++++
 benchmarks/__init__.py           |   0
 benchmarks/bench_translations.py |  71 ++++++++++++++
 benchmarks/run_asv.sh            |  17 ++++
 5 files changed, 249 insertions(+)
 create mode 100644 asv.conf.json
 create mode 100644 benchmarks/__init__.py
 create mode 100644 benchmarks/bench_translations.py
 create mode 100644 benchmarks/run_asv.sh

diff --git a/.gitignore b/.gitignore
index 9bae8f40..e0287a53 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,3 +19,4 @@ doc/_build
 .pytest_cache
 
 sumpy/_git_rev.py
+.asv
diff --git a/asv.conf.json b/asv.conf.json
new file mode 100644
index 00000000..029a0be1
--- /dev/null
+++ b/asv.conf.json
@@ -0,0 +1,160 @@
+{
+    // The version of the config file format.  Do not change, unless
+    // you know what you are doing.
+    "version": 1,
+
+    // The name of the project being benchmarked
+    "project": "project",
+
+    // The project's homepage
+    "project_url": "https://documen.tician.de/sumpy",
+
+    // The URL or local path of the source code repository for the
+    // project being benchmarked
+    "repo": ".",
+
+    // The Python project's subdirectory in your repo.  If missing or
+    // the empty string, the project is assumed to be located at the root
+    // of the repository.
+    // "repo_subdir": "",
+
+    // List of branches to benchmark. If not provided, defaults to "master"
+    // (for git) or "default" (for mercurial).
+    // "branches": ["master"], // for git
+    // "branches": ["default"],    // for mercurial
+
+    // The DVCS being used.  If not set, it will be automatically
+    // determined from "repo" by looking at the protocol in the URL
+    // (if remote), or by looking for special directories, such as
+    // ".git" (if local).
+    // "dvcs": "git",
+
+    // The tool to use to create environments.  May be "conda",
+    // "virtualenv" or other value depending on the plugins in use.
+    // If missing or the empty string, the tool will be automatically
+    // determined by looking for tools on the PATH environment
+    // variable.
+    "environment_type": "conda",
+
+    // timeout in seconds for installing any dependencies in environment
+    // defaults to 10 min
+    //"install_timeout": 600,
+
+    // the base URL to show a commit for the project.
+    // "show_commit_url": "http://github.com/owner/project/commit/",
+
+    // The Pythons you'd like to test against.  If not provided, defaults
+    // to the current version of Python used to run `asv`.
+    // "pythons": ["2.7", "3.6"],
+
+    // The list of conda channel names to be searched for benchmark
+    // dependency packages in the specified order
+    "conda_channels": ["conda-forge", "defaults"],
+
+    // The matrix of dependencies to test.  Each key is the name of a
+    // package (in PyPI) and the values are version numbers.  An empty
+    // list or empty string indicates to just test against the default
+    // (latest) version. null indicates that the package is to not be
+    // installed. If the package to be tested is only available from
+    // PyPI, and the 'environment_type' is conda, then you can preface
+    // the package name by 'pip+', and the package will be installed via
+    // pip (with all the conda available packages installed first,
+    // followed by the pip installed packages).
+    //
+    // "matrix": {
+    //     "numpy": ["1.6", "1.7"],
+    //     "six": ["", null],        // test with and without six installed
+    //     "pip+emcee": [""],   // emcee is only available for install with pip.
+    // },
+    "matrix": {
+        "numpy" : [""],
+        "sympy" : ["1.0"],
+        "pyopencl" : [""],
+        "islpy" : [""],
+        "pocl" : [""],
+        "pip+git+https://github.com/inducer/pymbolic": [""],
+        "pip+git+https://gitlab.tiker.net/inducer/boxtree": [""],
+        "pip+git+https://github.com/inducer/loopy": [""],
+    },
+
+    // Combinations of libraries/python versions can be excluded/included
+    // from the set to test. Each entry is a dictionary containing additional
+    // key-value pairs to include/exclude.
+    //
+    // An exclude entry excludes entries where all values match. The
+    // values are regexps that should match the whole string.
+    //
+    // An include entry adds an environment. Only the packages listed
+    // are installed. The 'python' key is required. The exclude rules
+    // do not apply to includes.
+    //
+    // In addition to package names, the following keys are available:
+    //
+    // - python
+    //     Python version, as in the *pythons* variable above.
+    // - environment_type
+    //     Environment type, as above.
+    // - sys_platform
+    //     Platform, as in sys.platform. Possible values for the common
+    //     cases: 'linux2', 'win32', 'cygwin', 'darwin'.
+    //
+    // "exclude": [
+    //     {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows
+    //     {"environment_type": "conda", "six": null}, // don't run without six on conda
+    // ],
+    //
+    // "include": [
+    //     // additional env for python2.7
+    //     {"python": "2.7", "numpy": "1.8"},
+    //     // additional env if run on windows+conda
+    //     {"platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""},
+    // ],
+
+    // The directory (relative to the current directory) that benchmarks are
+    // stored in.  If not provided, defaults to "benchmarks"
+    // "benchmark_dir": "benchmarks",
+
+    // The directory (relative to the current directory) to cache the Python
+    // environments in.  If not provided, defaults to "env"
+    "env_dir": ".asv/env",
+
+    // The directory (relative to the current directory) that raw benchmark
+    // results are stored in.  If not provided, defaults to "results".
+    "results_dir": ".asv/results",
+
+    // The directory (relative to the current directory) that the html tree
+    // should be written to.  If not provided, defaults to "html".
+    "html_dir": ".asv/html",
+
+    // The number of characters to retain in the commit hashes.
+    // "hash_length": 8,
+
+    // `asv` will cache wheels of the recent builds in each
+    // environment, making them faster to install next time.  This is
+    // number of builds to keep, per environment.
+    // "wheel_cache_size": 0
+
+    // The commits after which the regression search in `asv publish`
+    // should start looking for regressions. Dictionary whose keys are
+    // regexps matching to benchmark names, and values corresponding to
+    // the commit (exclusive) after which to start looking for
+    // regressions.  The default is to start from the first commit
+    // with results. If the commit is `null`, regression detection is
+    // skipped for the matching benchmark.
+    //
+    // "regressions_first_commits": {
+    //    "some_benchmark": "352cdf",  // Consider regressions only after this commit
+    //    "another_benchmark": null,   // Skip regression detection altogether
+    // }
+
+    // The thresholds for relative change in results, after which `asv
+    // publish` starts reporting regressions. Dictionary of the same
+    // form as in ``regressions_first_commits``, with values
+    // indicating the thresholds.  If multiple entries match, the
+    // maximum is taken. If no entry matches, the default is 5%.
+    //
+    // "regressions_thresholds": {
+    //    "some_benchmark": 0.01,     // Threshold of 1%
+    //    "another_benchmark": 0.5,   // Threshold of 50%
+    // }
+}
diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/benchmarks/bench_translations.py b/benchmarks/bench_translations.py
new file mode 100644
index 00000000..8578a4b8
--- /dev/null
+++ b/benchmarks/bench_translations.py
@@ -0,0 +1,71 @@
+import numpy as np
+
+import pytest
+import pyopencl as cl
+from pyopencl.tools import (  # noqa
+        pytest_generate_tests_for_pyopencl as pytest_generate_tests)
+
+from sumpy.expansion.multipole import (
+        VolumeTaylorMultipoleExpansion, H2DMultipoleExpansion,
+        VolumeTaylorMultipoleExpansionBase,
+        LaplaceConformingVolumeTaylorMultipoleExpansion,
+        HelmholtzConformingVolumeTaylorMultipoleExpansion)
+from sumpy.expansion.local import (
+        VolumeTaylorLocalExpansion, H2DLocalExpansion,
+        LaplaceConformingVolumeTaylorLocalExpansion,
+        HelmholtzConformingVolumeTaylorLocalExpansion)
+
+from sumpy.kernel import (LaplaceKernel, HelmholtzKernel, AxisTargetDerivative,
+        DirectionalSourceDerivative)
+
+import logging
+logger = logging.getLogger(__name__)
+
+import sympy
+import sumpy.symbolic as sym
+
+
+class Param:
+    def __init__(self, knl, local_expn_class, mpole_expn_class):
+        self.knl = knl
+        self.local_expn_class = local_expn_class
+        self.mpole_expn_class = mpole_expn_class
+
+    def __repr__(self):
+        return "{}_{}_{}".format(self.knl, self.local_expn_class.__name__, self.mpole_expn_class.__name__)
+
+
+class TranslationSuite:
+
+    params = [
+        Param(LaplaceKernel(2), VolumeTaylorLocalExpansion, VolumeTaylorMultipoleExpansion),
+        Param(LaplaceKernel(2), LaplaceConformingVolumeTaylorLocalExpansion,
+         LaplaceConformingVolumeTaylorMultipoleExpansion),
+        Param(HelmholtzKernel(2), VolumeTaylorLocalExpansion, VolumeTaylorMultipoleExpansion),
+        Param(HelmholtzKernel(2), HelmholtzConformingVolumeTaylorLocalExpansion,
+         HelmholtzConformingVolumeTaylorMultipoleExpansion),
+        Param(HelmholtzKernel(2), H2DLocalExpansion, H2DMultipoleExpansion)
+    ]
+    param_names = ['translation']
+
+    def setup(self, param):
+        logging.basicConfig(level=logging.INFO)
+        self.ctx = cl.create_some_context()
+        self.queue = cl.CommandQueue(self.ctx)
+        np.random.seed(17)
+
+    def track_m2l_op_count(self, param):
+        knl = param.knl
+        m_expn = param.mpole_expn_class(knl, order=3)
+        l_expn = param.local_expn_class(knl, order=3)
+
+        src_coeff_exprs = [sym.Symbol("src_coeff%d" % i)
+                for i in range(len(m_expn))]
+        dvec = sym.make_sym_vector("d", knl.dim)
+        src_rscale = sym.Symbol("src_rscale")
+        tgt_rscale = sym.Symbol("tgt_rscale")
+        result = l_expn.translate_from(m_expn, src_coeff_exprs, src_rscale,
+                                       dvec, tgt_rscale)
+        return sympy.count_ops(result)
+
+    track_m2l_op_count.unit = "ops"
diff --git a/benchmarks/run_asv.sh b/benchmarks/run_asv.sh
new file mode 100644
index 00000000..f9a7dcfc
--- /dev/null
+++ b/benchmarks/run_asv.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+pip install asv
+asv setup --verbose
+master_commit=$(git rev-parse master)
+test_commit=$(git rev-parse HEAD)
+
+export PYOPENCL_CTX=0
+
+asv run $master_commit...$master_commit~ --skip-existing --verbose
+asv run $test_commit...$test_commit~ --skip-existing --verbose
+
+output=$(asv compare "$master_commit" "$test_commit" --factor 1 -s)
+echo "$output"
+
+if [[ "$output" = *"worse"* ]]; then
+  echo "Some of the benchmarks have gotten worse"
+fi
-- 
GitLab
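
For local exploration, a minimal invocation of just this suite might look
as follows (subcommand and flag names per the asv documentation; adjust to
the installed asv version). HEAD^! restricts the run to the current commit,
and --bench filters benchmarks by regex:

    pip install asv
    asv run --bench TranslationSuite HEAD^! --verbose
    asv publish && asv preview    # serves the report written to .asv/html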