diff --git a/.gitignore b/.gitignore
index 9bae8f4083047f33b6d4306be469ffa8451f977b..e0287a537d33cf2e5d567c9ee2d2463955238248 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,3 +19,4 @@ doc/_build
 .pytest_cache
 
 sumpy/_git_rev.py
+.asv
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 8a2048727bcd5d7a36c55f006c8851c2e18cf7dc..97c8ee2404035df17dc5dade007d6593a117b3a1 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -99,3 +99,20 @@ Flake8:
   - python3.5
   except:
   - tags
+
+Benchmarks:
+  script:
+  - CONDA_ENVIRONMENT=.test-conda-env-py3.yml
+  - REQUIREMENTS_TXT=.test-conda-env-py3-requirements.txt
+  - PROJECT=sumpy
+  - PYOPENCL_TEST=portable
+  - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-benchmark-py-project.sh
+  - ". ./build-and-benchmark-py-project.sh"
+  tags:
+  - linux
+  - benchmark
+  except:
+  - tags
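+  # To reproduce these measurements locally, running "asv run" followed by
+  # "asv publish" from the repository root should work, assuming airspeed
+  # velocity (asv, https://asv.readthedocs.io) is installed.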
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1f6e76d1584fbdfd2075d965b492507a742c92a0
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,62 @@
+sumpy: n-body kernels and translation operators
+===============================================
+
+.. image:: https://gitlab.tiker.net/inducer/sumpy/badges/master/pipeline.svg
+   :target: https://gitlab.tiker.net/inducer/sumpy/commits/master
+.. image:: https://badge.fury.io/py/sumpy.png
+   :target: http://pypi.python.org/pypi/sumpy
+
+Sumpy is mainly a 'scaffolding' package for Fast Multipole and quadrature methods.
+If you're building one of those and need code generation for the required multipole
+and local expansions, come right on in. Together with boxtree, there is a full,
+symbolically kernel-independent FMM implementation here.
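+
+For instance, here is a minimal sketch (adapted from the benchmark suite in
+this repository; names and the expansion order are illustrative) that
+symbolically forms a multipole-to-local (M2L) translation for the 2D Laplace
+kernel::
+
+    import sumpy.symbolic as sym
+    from sumpy.kernel import LaplaceKernel
+    from sumpy.expansion.multipole import VolumeTaylorMultipoleExpansion
+    from sumpy.expansion.local import VolumeTaylorLocalExpansion
+
+    knl = LaplaceKernel(2)
+    m_expn = VolumeTaylorMultipoleExpansion(knl, order=4)
+    l_expn = VolumeTaylorLocalExpansion(knl, order=4)
+
+    # Symbolic multipole coefficients and center-to-center distance vector
+    src_coeffs = [sym.Symbol("src_coeff%d" % i) for i in range(len(m_expn))]
+    dvec = sym.make_sym_vector("d", knl.dim)
+
+    # Expressions for the local expansion coefficients at the target center
+    local_coeffs = l_expn.translate_from(
+        m_expn, src_coeffs, sym.Symbol("src_rscale"),
+        dvec, sym.Symbol("tgt_rscale"))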
+
+Sumpy relies on
+
+* `numpy <http://pypi.python.org/pypi/numpy>`_ for arrays
+* `boxtree <http://pypi.python.org/pypi/boxtree>`_ for FMM tree building
+* `sympy <http://pypi.python.org/pypi/sympy>`_ for symbolic computation
+* `loopy <http://pypi.python.org/pypi/loo.py>`_ for fast array operations
+* `pytest <http://pypi.python.org/pypi/pytest>`_ for automated testing
+
+and, indirectly,
+
+* `PyOpenCL <http://pypi.python.org/pypi/pyopencl>`_ as computational infrastructure
+
+PyOpenCL is likely the only package you'll have to install
+by hand; all the others will be installed automatically.
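+
+For instance::
+
+    pip install sumpy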
+
+Resources:
+
+* `documentation <http://documen.tician.de/sumpy>`_
+* `source code via git <http://github.com/inducer/sumpy>`_
+
+If you can see inside the UIUC firewall, you may browse
+`benchmark results <http://koelsch.d.tiker.net/benchmarks/asv/sumpy/>`_.
diff --git a/asv.conf.json b/asv.conf.json
new file mode 100644
index 0000000000000000000000000000000000000000..6642e26fe4bb7db29060fdfe8f02df466a4f1876
--- /dev/null
+++ b/asv.conf.json
@@ -0,0 +1,160 @@
+{
+    // The version of the config file format.  Do not change, unless
+    // you know what you are doing.
+    "version": 1,
+
+    // The name of the project being benchmarked
+    "project": "sumpy",
+
+    // The project's homepage
+    "project_url": "https://documen.tician.de/sumpy",
+
+    // The URL or local path of the source code repository for the
+    // project being benchmarked
+    "repo": ".",
+
+    // The Python project's subdirectory in your repo.  If missing or
+    // the empty string, the project is assumed to be located at the root
+    // of the repository.
+    // "repo_subdir": "",
+
+    // List of branches to benchmark. If not provided, defaults to "master"
+    // (for git) or "default" (for mercurial).
+    // "branches": ["master"], // for git
+    // "branches": ["default"],    // for mercurial
+
+    // The DVCS being used.  If not set, it will be automatically
+    // determined from "repo" by looking at the protocol in the URL
+    // (if remote), or by looking for special directories, such as
+    // ".git" (if local).
+    // "dvcs": "git",
+
+    // The tool to use to create environments.  May be "conda",
+    // "virtualenv" or other value depending on the plugins in use.
+    // If missing or the empty string, the tool will be automatically
+    // determined by looking for tools on the PATH environment
+    // variable.
+    "environment_type": "conda",
+
+    // timeout in seconds for installing any dependencies in environment
+    // defaults to 10 min
+    //"install_timeout": 600,
+
+    // the base URL to show a commit for the project.
+    "show_commit_url": "http://gitlab.tiker.net/inducer/sumpy/commits/",
+
+    // The Pythons you'd like to test against.  If not provided, defaults
+    // to the current version of Python used to run `asv`.
+    // "pythons": ["2.7", "3.6"],
+
+    // The list of conda channel names to be searched for benchmark
+    // dependency packages in the specified order
+    "conda_channels": ["conda-forge", "defaults"],
+
+    // The matrix of dependencies to test.  Each key is the name of a
+    // package (in PyPI) and the values are version numbers.  An empty
+    // list or empty string indicates to just test against the default
+    // (latest) version. null indicates that the package is to not be
+    // installed. If the package to be tested is only available from
+    // PyPI, and the 'environment_type' is conda, then you can preface
+    // the package name by 'pip+', and the package will be installed via
+    // pip (with all the conda available packages installed first,
+    // followed by the pip installed packages).
+    //
+    // "matrix": {
+    //     "numpy": ["1.6", "1.7"],
+    //     "six": ["", null],        // test with and without six installed
+    //     "pip+emcee": [""],   // emcee is only available for install with pip.
+    // },
+    "matrix": {
+        "numpy" : [""],
+        "sympy" : ["1.0"],
+        "pyopencl" : [""],
+        "islpy" : [""],
+        "pocl" : [""],
+        "pip+git+https://github.com/inducer/pymbolic": [""],
+        "pip+git+https://gitlab.tiker.net/inducer/boxtree": [""],
+        "pip+git+https://github.com/inducer/loopy": [""],
+    },
+
+    // Combinations of libraries/python versions can be excluded/included
+    // from the set to test. Each entry is a dictionary containing additional
+    // key-value pairs to include/exclude.
+    //
+    // An exclude entry excludes entries where all values match. The
+    // values are regexps that should match the whole string.
+    //
+    // An include entry adds an environment. Only the packages listed
+    // are installed. The 'python' key is required. The exclude rules
+    // do not apply to includes.
+    //
+    // In addition to package names, the following keys are available:
+    //
+    // - python
+    //     Python version, as in the *pythons* variable above.
+    // - environment_type
+    //     Environment type, as above.
+    // - sys_platform
+    //     Platform, as in sys.platform. Possible values for the common
+    //     cases: 'linux2', 'win32', 'cygwin', 'darwin'.
+    //
+    // "exclude": [
+    //     {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows
+    //     {"environment_type": "conda", "six": null}, // don't run without six on conda
+    // ],
+    //
+    // "include": [
+    //     // additional env for python2.7
+    //     {"python": "2.7", "numpy": "1.8"},
+    //     // additional env if run on windows+conda
+    //     {"platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""},
+    // ],
+
+    // The directory (relative to the current directory) that benchmarks are
+    // stored in.  If not provided, defaults to "benchmarks"
+    // "benchmark_dir": "benchmarks",
+
+    // The directory (relative to the current directory) to cache the Python
+    // environments in.  If not provided, defaults to "env"
+    "env_dir": ".asv/env",
+
+    // The directory (relative to the current directory) that raw benchmark
+    // results are stored in.  If not provided, defaults to "results".
+    "results_dir": ".asv/results",
+
+    // The directory (relative to the current directory) that the html tree
+    // should be written to.  If not provided, defaults to "html".
+    "html_dir": ".asv/html",
+
+    // The number of characters to retain in the commit hashes.
+    // "hash_length": 8,
+
+    // `asv` will cache wheels of the recent builds in each
+    // environment, making them faster to install next time.  This is
+    // number of builds to keep, per environment.
+    // "wheel_cache_size": 0
+
+    // The commits after which the regression search in `asv publish`
+    // should start looking for regressions. Dictionary whose keys are
+    // regexps matching to benchmark names, and values corresponding to
+    // the commit (exclusive) after which to start looking for
+    // regressions.  The default is to start from the first commit
+    // with results. If the commit is `null`, regression detection is
+    // skipped for the matching benchmark.
+    //
+    // "regressions_first_commits": {
+    //    "some_benchmark": "352cdf",  // Consider regressions only after this commit
+    //    "another_benchmark": null,   // Skip regression detection altogether
+    // }
+
+    // The thresholds for relative change in results, after which `asv
+    // publish` starts reporting regressions. Dictionary of the same
+    // form as in ``regressions_first_commits``, with values
+    // indicating the thresholds.  If multiple entries match, the
+    // maximum is taken. If no entry matches, the default is 5%.
+    //
+    // "regressions_thresholds": {
+    //    "some_benchmark": 0.01,     // Threshold of 1%
+    //    "another_benchmark": 0.5,   // Threshold of 50%
+    // }
+}
diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/benchmarks/bench_translations.py b/benchmarks/bench_translations.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d6cfdd882be3c8bc91f0078e9c1f82cfc1c841c
--- /dev/null
+++ b/benchmarks/bench_translations.py
@@ -0,0 +1,132 @@
+import numpy as np
+
+from sumpy.expansion.multipole import (
+        VolumeTaylorMultipoleExpansion, H2DMultipoleExpansion,
+        LaplaceConformingVolumeTaylorMultipoleExpansion,
+        HelmholtzConformingVolumeTaylorMultipoleExpansion)
+from sumpy.expansion.local import (
+        VolumeTaylorLocalExpansion, H2DLocalExpansion,
+        LaplaceConformingVolumeTaylorLocalExpansion,
+        HelmholtzConformingVolumeTaylorLocalExpansion)
+
+from sumpy.kernel import LaplaceKernel, HelmholtzKernel
+
+import logging
+logger = logging.getLogger(__name__)
+
+import six
+import pymbolic.mapper.flop_counter
+
+import sumpy.symbolic as sym
+from sumpy.assignment_collection import SymbolicAssignmentCollection
+from sumpy.codegen import to_loopy_insns
+
+
+class Param:
+    def __init__(self, dim, order):
+        self.dim = dim
+        self.order = order
+
+    def __repr__(self):
+        return "{}D_order_{}".format(self.dim, self.order)
+
+
+class TranslationBenchmarkSuite:
+
+    params = [
+        Param(2, 10),
+        Param(2, 15),
+        Param(2, 20),
+        Param(3, 5),
+        Param(3, 10),
+    ]
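+    # Subclasses may override params to keep run times reasonable.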
+
+    # Each Param bundles both dimension and order (see Param.__repr__).
+    param_names = ['param']
+
+    def setup(self, param):
+        logging.basicConfig(level=logging.INFO)
+        np.random.seed(17)
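+        # asv skips any benchmark whose setup() raises NotImplementedError;
+        # this keeps the abstract base suite from being run directly.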
+        if self.__class__ == TranslationBenchmarkSuite:
+            raise NotImplementedError
+        mpole_expn_class = self.mpole_expn_class
+        if param.order == 3 and H2DMultipoleExpansion == mpole_expn_class:
+            raise NotImplementedError
+
+    def track_m2l_op_count(self, param):
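+        # Count the flops in the generated multipole-to-local (M2L)
+        # translation code for this kernel, dimension, and order.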
+        knl = self.knl(param.dim)
+        m_expn = self.mpole_expn_class(knl, order=param.order)
+        l_expn = self.local_expn_class(knl, order=param.order)
+
+        src_coeff_exprs = [sym.Symbol("src_coeff%d" % i)
+                for i in range(len(m_expn))]
+        dvec = sym.make_sym_vector("d", knl.dim)
+        src_rscale = sym.Symbol("src_rscale")
+        tgt_rscale = sym.Symbol("tgt_rscale")
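+        # Symbolically translate the multipole expansion into a local one.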
+        result = l_expn.translate_from(m_expn, src_coeff_exprs, src_rscale,
+                                       dvec, tgt_rscale)
+        sac = SymbolicAssignmentCollection()
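+        # Assign each translated coefficient, run global CSE, and lower to
+        # loopy instructions so that flops can be counted per assignment.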
+        for i, expr in enumerate(result):
+            sac.assign_unique("coeff%d" % i, expr)
+        sac.run_global_cse()
+        insns = to_loopy_insns(six.iteritems(sac.assignments))
+        counter = pymbolic.mapper.flop_counter.CSEAwareFlopCounter()
+
+        # The +1 per instruction counts the assignment (store) itself.
+        return sum(counter.rec(insn.expression) + 1 for insn in insns)
+
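+    # asv conventions: a "track_" benchmark reports the returned number in
+    # the given unit; timeout caps each measurement's run time in seconds.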
+    track_m2l_op_count.unit = "ops"
+    track_m2l_op_count.timeout = 200.0
+
+
+class LaplaceVolumeTaylorTranslation(TranslationBenchmarkSuite):
+    knl = LaplaceKernel
+    local_expn_class = VolumeTaylorLocalExpansion
+    mpole_expn_class = VolumeTaylorMultipoleExpansion
+    params = [
+        Param(2, 10),
+        Param(3, 5),
+    ]
+
+
+class LaplaceConformingVolumeTaylorTranslation(TranslationBenchmarkSuite):
+    knl = LaplaceKernel
+    local_expn_class = LaplaceConformingVolumeTaylorLocalExpansion
+    mpole_expn_class = LaplaceConformingVolumeTaylorMultipoleExpansion
+
+
+class HelmholtzVolumeTaylorTranslation(TranslationBenchmarkSuite):
+    knl = HelmholtzKernel
+    local_expn_class = VolumeTaylorLocalExpansion
+    mpole_expn_class = VolumeTaylorMultipoleExpansion
+    params = [
+        Param(2, 10),
+        Param(3, 5),
+    ]
+
+
+class HelmholtzConformingVolumeTaylorTranslation(TranslationBenchmarkSuite):
+    knl = HelmholtzKernel
+    local_expn_class = HelmholtzConformingVolumeTaylorLocalExpansion
+    mpole_expn_class = HelmholtzConformingVolumeTaylorMultipoleExpansion
+
+
+class Helmholtz2DTranslation(TranslationBenchmarkSuite):
+    knl = HelmholtzKernel
+    local_expn_class = H2DLocalExpansion
+    mpole_expn_class = H2DMultipoleExpansion
+    params = [
+        Param(2, 10),
+        Param(2, 15),
+        Param(2, 20),
+    ]