diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 688892022a2d5cddfaf3306678dca44485c230d0..01e2cbfc5ee45291dbc4054c48ef98aec487e150 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -22,7 +22,7 @@ jobs:
         -   name: "Main Script"
             run: |
                 curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/prepare-and-run-flake8.sh
-                . ./prepare-and-run-flake8.sh ./meshmode ./test
+                . ./prepare-and-run-flake8.sh "$(basename "$GITHUB_REPOSITORY")" ./test
 
     pytest2:
         name: Pytest Conda Py2
@@ -51,4 +51,19 @@ jobs:
                 curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project-within-miniconda.sh
                 . ./build-and-test-py-project-within-miniconda.sh
 
+    examples3:
+        name: Examples Conda Py3
+        runs-on: ubuntu-latest
+        steps:
+        -   uses: actions/checkout@v2
+        -   name: "Main Script"
+            run: |
+                CONDA_ENVIRONMENT=.test-conda-env-py3.yml
+                sudo apt update
+                sudo apt install gfortran-7
+                sudo ln -s /usr/bin/gfortran-7 /usr/bin/gfortran
+                USE_CONDA_BUILD=1
+                curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-py-project-and-run-examples.sh
+                . ./build-py-project-and-run-examples.sh
+
 # vim: sw=4
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index e0331f1b3a86f533ba2cfec432a1a7a58627b102..34ebe33ae273291063fc884cb3bd4d157271a499 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -69,6 +69,22 @@ Python 3 POCL:
     reports:
       junit: test/pytest.xml
 
+Python 3 POCL Examples:
+  script:
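+  # skipped entirely when SKIP_EXAMPLES is set in the project's CI variables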
+  - test -n "$SKIP_EXAMPLES" && exit
+  - export PY_EXE=python3
+  - export PYOPENCL_TEST=portable:pthread
+  # cython is here because pytential (for now, for TS) depends on it
+  - export EXTRA_INSTALL="pybind11 cython numpy mako pyvisfile matplotlib"
+  - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-py-project-and-run-examples.sh
+  - ". ./build-py-project-and-run-examples.sh"
+  tags:
+  - python3
+  - pocl
+  - large-node
+  except:
+  - tags
+
 Documentation:
   script:
   - EXTRA_INSTALL="pybind11 cython numpy"
@@ -82,7 +98,7 @@ Documentation:
 Flake8:
   script:
   - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/prepare-and-run-flake8.sh
-  - ". ./prepare-and-run-flake8.sh meshmode test"
+  - . ./prepare-and-run-flake8.sh "$CI_PROJECT_NAME" test
   tags:
   - python3
   except:
diff --git a/.test-conda-env-py3.yml b/.test-conda-env-py3.yml
index a7d4c6d4cea39193fb02d5b06c3116f58bfd1a19..f277a48523488996c4296fac3c29d0ee4ee5ab0f 100644
--- a/.test-conda-env-py3.yml
+++ b/.test-conda-env-py3.yml
@@ -37,3 +37,6 @@ dependencies:
 
     # requires pymetis for tests for partition_mesh
     - git+https://gitlab.tiker.net/inducer/pymetis.git
+
+    # required for plot-connectivity.py example
+    - git+https://gitlab.tiker.net/inducer/pyvisfile.git
diff --git a/doc/conf.py b/doc/conf.py
index ac63d6cbeaf793d197dbfc821de4ffce568d4587..866653625acb09a79319be6bec7c07872081f38f 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -274,8 +274,11 @@ texinfo_documents = [
 # If true, do not generate a @detailmenu in the "Top" node's menu.
 #texinfo_no_detailmenu = False
 
-
 intersphinx_mapping = {
     'http://docs.python.org/': None,
-    'https://documen.tician.de/pyopencl': None
+    'http://docs.scipy.org/doc/numpy/': None,
+    'https://documen.tician.de/pyopencl': None,
+    'https://documen.tician.de/meshpy': None,
+    'https://documen.tician.de/modepy': None,
+    'https://documen.tician.de/loopy': None
 }
diff --git a/examples/.gitignore b/examples/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..a1363379944a5745ceb49c0e493d80eb9335c79a
--- /dev/null
+++ b/examples/.gitignore
@@ -0,0 +1 @@
+*.pdf
diff --git a/examples/multiple-meshes.py b/examples/multiple-meshes.py
index 84918e1779a34932ed6269a8884e30e63bb846d0..2dab3fabb65cc4b86477c5a7325acaff752e3ad2 100644
--- a/examples/multiple-meshes.py
+++ b/examples/multiple-meshes.py
@@ -1,6 +1,7 @@
 from __future__ import division
 
 import numpy as np  # noqa
+import sys
 
 
 order = 4
@@ -20,7 +21,10 @@ def main():
     draw_2d_mesh(mesh, set_bounding_box=True)
 
     import matplotlib.pyplot as pt
-    pt.show()
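+    # show interactively when attached to a terminal; otherwise (e.g. in CI)
+    # save the figure to a file instead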
+    if sys.stdin.isatty():
+        pt.show()
+    else:
+        pt.savefig("plot.pdf")
 
 if __name__ == "__main__":
     main()
diff --git a/examples/simple-dg.py b/examples/simple-dg.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fe6f70600c3230bca4240b3c0808bf853ea650b
--- /dev/null
+++ b/examples/simple-dg.py
@@ -0,0 +1,525 @@
+from __future__ import division, print_function
+
+__copyright__ = "Copyright (C) 2020 Andreas Kloeckner"
+
+__license__ = """
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+"""
+
+
+import numpy as np
+import numpy.linalg as la  # noqa
+import pyopencl as cl
+import pyopencl.array as cla  # noqa
+import pyopencl.clmath as clmath
+from pytools import memoize_method, memoize_in
+from pytools.obj_array import (
+        join_fields, make_obj_array,
+        with_object_array_or_scalar,
+        is_obj_array)
+import loopy as lp
+from meshmode.mesh import BTAG_ALL, BTAG_NONE  # noqa
+
+
+# Features lost vs. https://github.com/inducer/grudge:
+# - dimension independence / differential geometry
+# - overintegration
+# - operator fusion
+# - distributed-memory
+
+
+def with_queue(queue, ary):
+    return with_object_array_or_scalar(
+            lambda x: x.with_queue(queue), ary)
+
+
+def without_queue(ary):
+    return with_queue(None, ary)
+
+
+# {{{ discretization
+
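+# The parametrization derivative is the Jacobian of the reference-to-global
+# map: entry [iambient, idim] holds d(x_iambient)/d(xi_idim) as a DOF array.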
+def parametrization_derivative(queue, discr):
+    result = np.zeros((discr.ambient_dim, discr.dim), dtype=object)
+    for iambient in range(discr.ambient_dim):
+        for idim in range(discr.dim):
+            result[iambient, idim] = discr.num_reference_derivative(
+                    queue, (idim,), discr.nodes()[iambient])
+
+    return result
+
+
+class DGDiscretization:
+    def __init__(self, cl_ctx, mesh, order):
+        self.order = order
+
+        from meshmode.discretization import Discretization
+        from meshmode.discretization.poly_element import \
+                PolynomialWarpAndBlendGroupFactory
+        self.group_factory = PolynomialWarpAndBlendGroupFactory(order=order)
+        self.volume_discr = Discretization(cl_ctx, mesh, self.group_factory)
+
+        assert self.volume_discr.dim == 2
+
+    @property
+    def cl_context(self):
+        return self.volume_discr.cl_context
+
+    @property
+    def dim(self):
+        return self.volume_discr.dim
+
+    # {{{ discretizations/connections
+
+    @memoize_method
+    def boundary_connection(self, boundary_tag):
+        from meshmode.discretization.connection import make_face_restriction
+        return make_face_restriction(
+                        self.volume_discr,
+                        self.group_factory,
+                        boundary_tag=boundary_tag)
+
+    @memoize_method
+    def interior_faces_connection(self):
+        from meshmode.discretization.connection import (
+                make_face_restriction, FACE_RESTR_INTERIOR)
+        return make_face_restriction(
+                        self.volume_discr,
+                        self.group_factory,
+                        FACE_RESTR_INTERIOR,
+                        per_face_groups=False)
+
+    @memoize_method
+    def opposite_face_connection(self):
+        from meshmode.discretization.connection import \
+                make_opposite_face_connection
+
+        return make_opposite_face_connection(self.interior_faces_connection())
+
+    @memoize_method
+    def all_faces_connection(self):
+        from meshmode.discretization.connection import (
+                make_face_restriction, FACE_RESTR_ALL)
+        return make_face_restriction(
+                        self.volume_discr,
+                        self.group_factory,
+                        FACE_RESTR_ALL,
+                        per_face_groups=False)
+
+    @memoize_method
+    def get_to_all_face_embedding(self, where):
+        from meshmode.discretization.connection import \
+                make_face_to_all_faces_embedding
+
+        faces_conn = self.get_connection("vol", where)
+        return make_face_to_all_faces_embedding(
+                faces_conn, self.get_discr("all_faces"))
+
+    def get_connection(self, src, tgt):
+        src_tgt = (src, tgt)
+
+        if src_tgt == ("vol", "int_faces"):
+            return self.interior_faces_connection()
+        elif src_tgt == ("vol", "all_faces"):
+            return self.all_faces_connection()
+        elif src_tgt == ("vol", BTAG_ALL):
+            return self.boundary_connection(tgt)
+        elif src_tgt == ("int_faces", "all_faces"):
+            return self.get_to_all_face_embedding(src)
+        elif src_tgt == (BTAG_ALL, "all_faces"):
+            return self.get_to_all_face_embedding(src)
+        else:
+            raise ValueError(f"locations '{src}'->'{tgt}' not understood")
+
+    def interp(self, src, tgt, vec):
+        if is_obj_array(vec):
+            return with_object_array_or_scalar(
+                    lambda el: self.interp(src, tgt, el), vec)
+
+        return self.get_connection(src, tgt)(vec.queue, vec)
+
+    def get_discr(self, where):
+        if where == "vol":
+            return self.volume_discr
+        elif where == "all_faces":
+            return self.all_faces_connection().to_discr
+        elif where == "int_faces":
+            return self.interior_faces_connection().to_discr
+        elif where == BTAG_ALL:
+            return self.boundary_connection(where).to_discr
+        else:
+            raise ValueError(f"location '{where}' not understood")
+
+    # }}}
+
+    @memoize_method
+    def parametrization_derivative(self):
+        with cl.CommandQueue(self.cl_context) as queue:
+            return without_queue(
+                    parametrization_derivative(queue, self.volume_discr))
+
+    @memoize_method
+    def vol_jacobian(self):
+        with cl.CommandQueue(self.cl_context) as queue:
+            [a, b], [c, d] = with_queue(queue, self.parametrization_derivative())
+            return (a*d-b*c).with_queue(None)
+
+    @memoize_method
+    def inverse_parametrization_derivative(self):
+        with cl.CommandQueue(self.cl_context) as queue:
+            [a, b], [c, d] = with_queue(queue, self.parametrization_derivative())
+
+            result = np.zeros((2, 2), dtype=object)
+            det = a*d-b*c
+            result[0, 0] = d/det
+            result[0, 1] = -b/det
+            result[1, 0] = -c/det
+            result[1, 1] = a/det
+
+            return without_queue(result)
+
+    def zeros(self, queue):
+        return self.volume_discr.zeros(queue)
+
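+    # Physical gradient by the chain rule: reference-space derivatives
+    # contracted with the inverse parametrization derivative.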
+    def grad(self, vec):
+        ipder = self.inverse_parametrization_derivative()
+
+        queue = vec.queue
+        dref = [
+                self.volume_discr.num_reference_derivative(
+                    queue, (idim,), vec).with_queue(queue)
+                for idim in range(self.volume_discr.dim)]
+
+        return make_obj_array([
+            sum(dref_i*ipder_i for dref_i, ipder_i in zip(dref, ipder[iambient]))
+            for iambient in range(self.volume_discr.ambient_dim)])
+
+    def div(self, vecs):
+        return sum(
+                self.grad(vec_i)[i] for i, vec_i in enumerate(vecs))
+
+    @memoize_method
+    def normal(self, where):
+        bdry_discr = self.get_discr(where)
+
+        with cl.CommandQueue(self.cl_context) as queue:
+            ((a,), (b,)) = with_queue(
+                    queue, parametrization_derivative(queue, bdry_discr))
+
+            nrm = 1/(a**2+b**2)**0.5
+            return without_queue(join_fields(b*nrm, -a*nrm))
+
+    @memoize_method
+    def face_jacobian(self, where):
+        bdry_discr = self.get_discr(where)
+
+        with cl.CommandQueue(self.cl_context) as queue:
+            ((a,), (b,)) = with_queue(queue,
+                    parametrization_derivative(queue, bdry_discr))
+
+            return ((a**2 + b**2)**0.5).with_queue(None)
+
+    @memoize_method
+    def get_inverse_mass_matrix(self, grp, dtype):
+        import modepy as mp
+        matrix = mp.inverse_mass_matrix(
+                grp.basis(),
+                grp.unit_nodes)
+
+        with cl.CommandQueue(self.cl_context) as queue:
+            return (cla.to_device(queue, matrix)
+                    .with_queue(None))
+
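+    # Apply the inverse mass matrix: per element, the reference-element
+    # inverse mass is applied and the result is divided by the volume
+    # Jacobian (exact for affine, constant-Jacobian elements).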
+    def inverse_mass(self, vec):
+        if is_obj_array(vec):
+            return with_object_array_or_scalar(
+                    lambda el: self.inverse_mass(el), vec)
+
+        @memoize_in(self, "elwise_linear_knl")
+        def knl():
+            knl = lp.make_kernel(
+                """{[k,i,j]:
+                    0<=k<nelements and
+                    0<=i<ndiscr_nodes_out and
+                    0<=j<ndiscr_nodes_in}""",
+                "result[k,i] = sum(j, mat[i, j] * vec[k, j])",
+                default_offset=lp.auto, name="diff")
+
+            knl = lp.split_iname(knl, "i", 16, inner_tag="l.0")
+            return lp.tag_inames(knl, dict(k="g.0"))
+
+        discr = self.volume_discr
+
+        result = discr.empty(queue=vec.queue, dtype=vec.dtype)
+
+        for grp in discr.groups:
+            matrix = self.get_inverse_mass_matrix(grp, vec.dtype)
+
+            knl()(vec.queue, mat=matrix, result=grp.view(result),
+                    vec=grp.view(vec))
+
+        return result/self.vol_jacobian()
+
+    @memoize_method
+    def get_local_face_mass_matrix(self, afgrp, volgrp, dtype):
+        nfaces = volgrp.mesh_el_group.nfaces
+        assert afgrp.nelements == nfaces * volgrp.nelements
+
+        matrix = np.empty(
+                (volgrp.nunit_nodes,
+                    nfaces,
+                    afgrp.nunit_nodes),
+                dtype=dtype)
+
+        from modepy.tools import UNIT_VERTICES
+        import modepy as mp
+        for iface, fvi in enumerate(
+                volgrp.mesh_el_group.face_vertex_indices()):
+            face_vertices = UNIT_VERTICES[volgrp.dim][np.array(fvi)].T
+            matrix[:, iface, :] = mp.nodal_face_mass_matrix(
+                    volgrp.basis(), volgrp.unit_nodes, afgrp.unit_nodes,
+                    volgrp.order,
+                    face_vertices)
+
+        with cl.CommandQueue(self.cl_context) as queue:
+            return (cla.to_device(queue, matrix)
+                    .with_queue(None))
+
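+    # Face mass: weight face values by the face Jacobian, then apply the
+    # local face mass matrices, accumulating every face of an element into
+    # its volume DOFs.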
+    def face_mass(self, vec):
+        if is_obj_array(vec):
+            return with_object_array_or_scalar(
+                    lambda el: self.face_mass(el), vec)
+
+        @memoize_in(self, "face_mass_knl")
+        def knl():
+            knl = lp.make_kernel(
+                """{[k,i,f,j]:
+                    0<=k<nelements and
+                    0<=f<nfaces and
+                    0<=i<nvol_nodes and
+                    0<=j<nface_nodes}""",
+                "result[k,i] = sum(f, sum(j, mat[i, f, j] * vec[f, k, j]))",
+                default_offset=lp.auto, name="face_mass")
+
+            knl = lp.split_iname(knl, "i", 16, inner_tag="l.0")
+            return lp.tag_inames(knl, dict(k="g.0"))
+
+        all_faces_conn = self.get_connection("vol", "all_faces")
+        all_faces_discr = all_faces_conn.to_discr
+        vol_discr = all_faces_conn.from_discr
+
+        result = vol_discr.empty(queue=vec.queue, dtype=vec.dtype)
+
+        fj = self.face_jacobian("all_faces")
+        vec = vec*fj
+
+        assert len(all_faces_discr.groups) == len(vol_discr.groups)
+
+        for afgrp, volgrp in zip(all_faces_discr.groups, vol_discr.groups):
+            nfaces = volgrp.mesh_el_group.nfaces
+
+            matrix = self.get_local_face_mass_matrix(afgrp, volgrp, vec.dtype)
+
+            input_view = afgrp.view(vec).reshape(
+                    nfaces, volgrp.nelements, afgrp.nunit_nodes)
+            knl()(vec.queue, mat=matrix, result=volgrp.view(result),
+                    vec=input_view)
+
+        return result
+
+# }}}
+
+
+# {{{ trace pair
+
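+# A trace pair bundles the interior ("int") and exterior ("ext") traces of a
+# field on a face discretization, so numerical fluxes can be written in terms
+# of averages and jumps.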
+class TracePair:
+    def __init__(self, where, interior, exterior):
+        self.where = where
+        self.interior = interior
+        self.exterior = exterior
+
+    def __getitem__(self, index):
+        return TracePair(
+                self.where,
+                self.exterior[index],
+                self.interior[index])
+
+    def __len__(self):
+        assert len(self.exterior) == len(self.interior)
+        return len(self.exterior)
+
+    @property
+    def int(self):
+        return self.interior
+
+    @property
+    def ext(self):
+        return self.exterior
+
+    @property
+    def avg(self):
+        return 0.5*(self.int + self.ext)
+
+
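+# Restrict a volume field to the interior faces and use the opposite-face
+# connection to obtain the neighboring ("exterior") trace.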
+def interior_trace_pair(discr, vec):
+    i = discr.interp("vol", "int_faces", vec)
+    e = with_object_array_or_scalar(
+            lambda el: discr.opposite_face_connection()(el.queue, el),
+            i)
+    return TracePair("int_faces", i, e)
+
+# }}}
+
+
+# {{{ wave equation bits
+
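+# Strong-form numerical flux for the first-order wave system: a central
+# (average) flux plus an upwind penalty on the jumps of u and of the normal
+# component of v.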
+def wave_flux(discr, c, w_tpair):
+    u = w_tpair[0]
+    v = w_tpair[1:]
+
+    normal = with_queue(u.int.queue, discr.normal(w_tpair.where))
+
+    flux_weak = join_fields(
+            np.dot(v.avg, normal),
+            normal[0] * u.avg,
+            normal[1] * u.avg)
+
+    # upwind
+    v_jump = np.dot(normal, v.int-v.ext)
+    flux_weak -= join_fields(
+            0.5*(u.int-u.ext),
+            0.5*normal[0]*v_jump,
+            0.5*normal[1]*v_jump,
+            )
+
+    flux_strong = join_fields(
+            np.dot(v.int, normal),
+            u.int * normal[0],
+            u.int * normal[1],
+            ) - flux_weak
+
+    return discr.interp(w_tpair.where, "all_faces", c*flux_strong)
+
+
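+# Semi-discrete wave operator: volume terms c*div(v) and c*grad(u) plus the
+# lifted surface fluxes from interior faces and from a reflecting (u -> -u,
+# v unchanged) boundary condition on BTAG_ALL.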
+def wave_operator(discr, c, w):
+    u = w[0]
+    v = w[1:]
+
+    dir_u = discr.interp("vol", BTAG_ALL, u)
+    dir_v = discr.interp("vol", BTAG_ALL, v)
+    dir_bval = join_fields(dir_u, dir_v)
+    dir_bc = join_fields(-dir_u, dir_v)
+
+    return (
+            - join_fields(
+                -c*discr.div(v),
+                -c*discr.grad(u)
+                )
+            +  # noqa: W504
+            discr.inverse_mass(
+                discr.face_mass(
+                    wave_flux(discr, c=c, w_tpair=interior_trace_pair(discr, w))
+                    + wave_flux(discr, c=c, w_tpair=TracePair(
+                        BTAG_ALL, dir_bval, dir_bc))
+                    ))
+                )
+
+
+# }}}
+
+
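+# Classical fourth-order Runge-Kutta step for y' = f(t, y).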
+def rk4_step(y, t, h, f):
+    k1 = f(t, y)
+    k2 = f(t+h/2, y + h/2*k1)
+    k3 = f(t+h/2, y + h/2*k2)
+    k4 = f(t+h, y + h*k3)
+    return y + h/6*(k1 + 2*k2 + 2*k3 + k4)
+
+
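+# Gaussian bump centered at source_center, modulated in time by
+# cos(source_omega*t); used as the initial condition for u.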
+def bump(discr, queue, t=0):
+    source_center = np.array([0.0, 0.05])
+    source_width = 0.05
+    source_omega = 3
+
+    nodes = discr.volume_discr.nodes().with_queue(queue)
+    center_dist = join_fields([
+        nodes[0] - source_center[0],
+        nodes[1] - source_center[1],
+        ])
+
+    return (
+        np.cos(source_omega*t)
+        * clmath.exp(
+            -np.dot(center_dist, center_dist)
+            / source_width**2))
+
+
+def main():
+    cl_ctx = cl.create_some_context()
+    queue = cl.CommandQueue(cl_ctx)
+
+    nel_1d = 16
+    from meshmode.mesh.generation import generate_regular_rect_mesh
+    mesh = generate_regular_rect_mesh(
+            a=(-0.5, -0.5),
+            b=(0.5, 0.5),
+            n=(nel_1d, nel_1d))
+
+    order = 3
+
+    # no deep meaning here, just a fudge factor
+    dt = 0.75/(nel_1d*order**2)
+
+    print("%d elements" % mesh.nelements)
+
+    discr = DGDiscretization(cl_ctx, mesh, order=order)
+
+    fields = join_fields(
+            bump(discr, queue),
+            [discr.zeros(queue) for i in range(discr.dim)]
+            )
+
+    from meshmode.discretization.visualization import make_visualizer
+    vis = make_visualizer(queue, discr.volume_discr, discr.order+3)
+
+    def rhs(t, w):
+        return wave_operator(discr, c=1, w=w)
+
+    t = 0
+    t_final = 3
+    istep = 0
+    while t < t_final:
+        fields = rk4_step(fields, t, dt, rhs)
+
+        if istep % 10 == 0:
+            print(istep, t, la.norm(fields[0].get()))
+            vis.write_vtk_file("fld-wave-min-%04d.vtu" % istep,
+                    [
+                        ("u", fields[0]),
+                        ("v", fields[1:]),
+                        ])
+
+        t += dt
+        istep += 1
+
+
+if __name__ == "__main__":
+    main()
+
+# vim: foldmethod=marker
diff --git a/examples/refinement-playground.py b/experiments/refinement-playground.py
similarity index 100%
rename from examples/refinement-playground.py
rename to experiments/refinement-playground.py
diff --git a/meshmode/discretization/connection/face.py b/meshmode/discretization/connection/face.py
index a0d4e71e2fb4dd9443dfe08714cb274973375d92..f45a95c1b72fe742505c90535aa783c6ae5fb971 100644
--- a/meshmode/discretization/connection/face.py
+++ b/meshmode/discretization/connection/face.py
@@ -168,9 +168,9 @@ def make_face_restriction(discr, group_factory, boundary_tag,
 
     :arg boundary_tag: The boundary tag for which to create a face
         restriction. May be
-        :class:`meshmode.discretization.connection.FRESTR_INTERIOR_FACES`
+        :class:`FACE_RESTR_INTERIOR`
         to indicate interior faces, or
-        :class:`meshmode.discretization.connection.FRESTR_ALL_FACES`
+        :class:`FACE_RESTR_ALL`
         to make a discretization consisting of all (interior and
         boundary) faces.
 
@@ -199,7 +199,7 @@ def make_face_restriction(discr, group_factory, boundary_tag,
         boundary_tag = FACE_RESTR_INTERIOR
         from warnings import warn
         warn("passing *None* for boundary_tag is deprecated--pass "
-                "FRESTR_INTERIOR_FACES instead",
+                "FACE_RESTR_INTERIOR instead",
                 DeprecationWarning, stacklevel=2)
 
     logger.info("building face restriction: start")
@@ -223,7 +223,8 @@ def make_face_restriction(discr, group_factory, boundary_tag,
     bdry_mesh_groups = []
     connection_data = {}
 
-    btag_bit = discr.mesh.boundary_tag_bit(boundary_tag)
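+    # FACE_RESTR_ALL and FACE_RESTR_INTERIOR are not actual boundary tags,
+    # so no boundary tag bit exists for them.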
+    if boundary_tag not in [FACE_RESTR_ALL, FACE_RESTR_INTERIOR]:
+        btag_bit = discr.mesh.boundary_tag_bit(boundary_tag)
 
     for igrp, (grp, fagrp_map) in enumerate(
             zip(discr.groups, discr.mesh.facial_adjacency_groups)):
diff --git a/meshmode/discretization/connection/opposite_face.py b/meshmode/discretization/connection/opposite_face.py
index 8458e95982cfb1ea83adacee7f0bf5b990607fd8..97570b338f9938165004a46c59fb3ec1570e0cbe 100644
--- a/meshmode/discretization/connection/opposite_face.py
+++ b/meshmode/discretization/connection/opposite_face.py
@@ -35,20 +35,34 @@ logger = logging.getLogger(__name__)
 
 # {{{ _make_cross_face_batches
 
-def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr,
-                                    i_tgt_grp, i_src_grp,
-                                    tgt_bdry_element_indices,
-                                    src_bdry_element_indices):
+def _make_cross_face_batches(queue,
+        tgt_bdry_discr, src_bdry_discr,
+        i_tgt_grp, i_src_grp,
+        tgt_bdry_element_indices, src_bdry_element_indices):
+    def to_dev(ary):
+        return cl.array.to_device(queue, ary, array_queue=None)
+
+    from meshmode.discretization.connection.direct import InterpolationBatch
+    if tgt_bdry_discr.dim == 0:
+        yield InterpolationBatch(
+            from_group_index=i_src_grp,
+            from_element_indices=to_dev(src_bdry_element_indices),
+            to_element_indices=to_dev(tgt_bdry_element_indices),
+            result_unit_nodes=src_bdry_discr.groups[i_src_grp].unit_nodes,
+            to_element_face=None)
+        return
 
     # FIXME: This should view-then-transfer
     # (but PyOpenCL doesn't do non-contiguous transfers for now).
-    tgt_bdry_nodes = (tgt_bdry_discr.groups[i_tgt_grp].view(tgt_bdry_discr.nodes().
-                        get(queue=queue))[:, tgt_bdry_element_indices])
+    tgt_bdry_nodes = (tgt_bdry_discr.groups[i_tgt_grp]
+            .view(tgt_bdry_discr.nodes().get(queue=queue))
+            [:, tgt_bdry_element_indices])
 
     # FIXME: This should view-then-transfer
     # (but PyOpenCL doesn't do non-contiguous transfers for now).
-    src_bdry_nodes = (src_bdry_discr.groups[i_src_grp].view(src_bdry_discr.nodes().
-                        get(queue=queue))[:, src_bdry_element_indices])
+    src_bdry_nodes = (src_bdry_discr.groups[i_src_grp]
+            .view(src_bdry_discr.nodes().get(queue=queue))
+            [:, src_bdry_element_indices])
 
     tol = 1e4 * np.finfo(tgt_bdry_nodes.dtype).eps
 
@@ -222,9 +236,6 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr,
 
     # {{{ find groups of src_unit_nodes
 
-    def to_dev(ary):
-        return cl.array.to_device(queue, ary, array_queue=None)
-
     done_elements = np.zeros(nelements, dtype=np.bool)
     while True:
         todo_elements, = np.where(~done_elements)
@@ -241,11 +252,6 @@ def _make_cross_face_batches(queue, tgt_bdry_discr, src_bdry_discr,
         close_els = todo_elements[unit_node_dist < tol]
         done_elements[close_els] = True
 
-        unit_node_dist = np.max(np.max(np.abs(
-                src_unit_nodes[:, todo_elements, :]
-                - template_unit_nodes.reshape(dim, 1, -1)),
-                axis=2), axis=0)
-
         from meshmode.discretization.connection.direct import InterpolationBatch
         yield InterpolationBatch(
                 from_group_index=i_src_grp,
@@ -388,11 +394,12 @@ def make_opposite_face_connection(volume_to_bdry_conn):
 
                     # }}}
 
-                    groups[i_tgt_grp].extend(_make_cross_face_batches(queue,
+                    batches = _make_cross_face_batches(queue,
                             bdry_discr, bdry_discr,
                             i_tgt_grp, i_src_grp,
                             tgt_bdry_element_indices,
-                            src_bdry_element_indices))
+                            src_bdry_element_indices)
+                    groups[i_tgt_grp].extend(batches)
 
     from meshmode.discretization.connection import (
             DirectDiscretizationConnection, DiscretizationConnectionElementGroup)
@@ -485,10 +492,10 @@ def make_partition_connection(local_bdry_conn, i_local_part,
                     local_bdry_indices = local_el_lookup[elems, faces]
 
                     batches = _make_cross_face_batches(queue,
-                                                       local_bdry, remote_bdry,
-                                                       i_local_grp, i_remote_grp,
-                                                       local_bdry_indices,
-                                                       remote_bdry_indices)
+                            local_bdry, remote_bdry,
+                            i_local_grp, i_remote_grp,
+                            local_bdry_indices,
+                            remote_bdry_indices)
 
                     part_batches[i_local_grp].extend(batches)
 
diff --git a/meshmode/discretization/connection/projection.py b/meshmode/discretization/connection/projection.py
index 5d274a968d73cf773238a881e42e4c36eab9ac21..fae3f9b7435b526cd7342090bd31012885945ff9 100644
--- a/meshmode/discretization/connection/projection.py
+++ b/meshmode/discretization/connection/projection.py
@@ -54,6 +54,9 @@ class L2ProjectionInverseDiscretizationConnection(DiscretizationConnection):
         if isinstance(connections, DirectDiscretizationConnection):
             return DiscretizationConnection.__new__(cls)
         elif isinstance(connections, ChainedDiscretizationConnection):
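+            # an empty chain is the identity map; there is nothing to invert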
+            if len(connections.connections) == 0:
+                return connections
+
             return cls(connections.connections, is_surjective=is_surjective)
         else:
             conns = []
diff --git a/meshmode/discretization/poly_element.py b/meshmode/discretization/poly_element.py
index 1d25e337d146d57f9d52dccb08e855f99c135664..436b1fb1229e2dc11bc56faa280cf0185763735c 100644
--- a/meshmode/discretization/poly_element.py
+++ b/meshmode/discretization/poly_element.py
@@ -141,7 +141,9 @@ class InterpolatoryQuadratureSimplexElementGroup(PolynomialSimplexElementGroupBa
     @memoize_method
     def _quadrature_rule(self):
         dims = self.mesh_el_group.dim
-        if dims == 1:
+        if dims == 0:
+            return mp.Quadrature(np.empty((0, 1)), np.empty((0, 1)))
+        elif dims == 1:
             return mp.LegendreGaussQuadrature(self.order)
         else:
             return mp.VioreanuRokhlinSimplexQuadrature(self.order, dims)
@@ -175,7 +177,9 @@ class QuadratureSimplexElementGroup(SimplexElementGroupBase):
     @memoize_method
     def _quadrature_rule(self):
         dims = self.mesh_el_group.dim
-        if dims == 1:
+        if dims == 0:
+            return mp.Quadrature(np.empty((0, 1)), np.empty((0, 1)))
+        elif dims == 1:
             return mp.LegendreGaussQuadrature(self.order)
         else:
             return mp.XiaoGimbutasSimplexQuadrature(self.order, dims)
diff --git a/meshmode/distributed.py b/meshmode/distributed.py
index 7ddaa8041725e7dede750955c3a5025ec0b835c4..54135fb061097ed8064b696b1b4b778e70dd02c4 100644
--- a/meshmode/distributed.py
+++ b/meshmode/distributed.py
@@ -239,10 +239,12 @@ def get_connected_partitions(mesh):
     connected_parts = set()
     from meshmode.mesh import InterPartitionAdjacencyGroup
     for adj in mesh.facial_adjacency_groups:
-        if isinstance(adj[None], InterPartitionAdjacencyGroup):
-            indices = (adj[None].neighbor_partitions >= 0)
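+        # boundary faces, if any, live under the *None* neighbor-group key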
+        grp = adj.get(None, None)
+        if isinstance(grp, InterPartitionAdjacencyGroup):
+            indices = grp.neighbor_partitions >= 0
             connected_parts = connected_parts.union(
-                                        adj[None].neighbor_partitions[indices])
+                    grp.neighbor_partitions[indices])
+
     return connected_parts
 
 
diff --git a/meshmode/mesh/__init__.py b/meshmode/mesh/__init__.py
index 6d6191aa394544b47f8b994ddc6ea7c8691b88a0..19ef8e5ddaf46b283ea9ca3431d672f57deca4fb 100644
--- a/meshmode/mesh/__init__.py
+++ b/meshmode/mesh/__init__.py
@@ -42,6 +42,7 @@ __doc__ = """
 
 .. autoclass:: NodalAdjacency
 .. autoclass:: FacialAdjacencyGroup
+.. autoclass:: InterPartitionAdjacencyGroup
 
 .. autofunction:: as_python
 .. autofunction:: check_bc_coverage
@@ -68,22 +69,22 @@ class BTAG_NONE(object):  # noqa
 class BTAG_ALL(object):  # noqa
     """A boundary tag representing the entire boundary or volume.
 
-    In the case of the boundary, TAG_ALL does not include rank boundaries,
-    or, more generally, anything tagged with TAG_NO_BOUNDARY."""
+    In the case of the boundary, :class:`BTAG_ALL` does not include rank boundaries,
+    or, more generally, anything tagged with :class:`BTAG_NO_BOUNDARY`."""
     pass
 
 
 class BTAG_REALLY_ALL(object):  # noqa
     """A boundary tag representing the entire boundary.
 
-    Unlike :class:`TAG_ALL`, this includes rank boundaries,
-    or, more generally, everything tagged with :class:`TAG_NO_BOUNDARY`."""
+    Unlike :class:`BTAG_ALL`, this includes rank boundaries,
+    or, more generally, everything tagged with :class:`BTAG_NO_BOUNDARY`."""
     pass
 
 
 class BTAG_NO_BOUNDARY(object):  # noqa
     """A boundary tag indicating that this edge should not fall under
-    :class:`TAG_ALL`. Among other things, this is used to keep rank boundaries
+    :class:`BTAG_ALL`. Among other things, this is used to keep rank boundaries
     out of :class:`BTAG_ALL`.
     """
     pass
@@ -114,6 +115,9 @@ class BTAG_PARTITION(object):  # noqa
     def __ne__(self, other):
         return not self.__eq__(other)
 
+    def __repr__(self):
+        return "<%s(%s)>" % (type(self).__name__, repr(self.part_nr))
+
 
 SYSTEM_TAGS = set([BTAG_NONE, BTAG_ALL, BTAG_REALLY_ALL, BTAG_NO_BOUNDARY,
                    BTAG_PARTITION])
@@ -171,7 +175,7 @@ class MeshElementGroup(Record):
             element_nr_base=None, node_nr_base=None,
             unit_nodes=None, dim=None):
         """
-        :arg order: the mamximum total degree used for interpolation.
+        :arg order: the maximum total degree used for interpolation.
         :arg nodes: ``[ambient_dim, nelements, nunit_nodes]``
             The nodes are assumed to be mapped versions of *unit_nodes*.
         :arg unit_nodes: ``[dim, nunit_nodes]``
@@ -213,7 +217,7 @@ class MeshElementGroup(Record):
 
     @property
     def nelements(self):
-        return self.vertex_indices.shape[0]
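+        # use nodes rather than vertex_indices: the latter may be *None*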
+        return self.nodes.shape[1]
 
     @property
     def nnodes(self):
@@ -263,7 +267,7 @@ class SimplexElementGroup(MeshElementGroup):
             element_nr_base=None, node_nr_base=None,
             unit_nodes=None, dim=None):
         """
-        :arg order: the mamximum total degree used for interpolation.
+        :arg order: the maximum total degree used for interpolation.
         :arg nodes: ``[ambient_dim, nelements, nunit_nodes]``
             The nodes are assumed to be mapped versions of *unit_nodes*.
         :arg unit_nodes: ``[dim, nunit_nodes]``
@@ -279,9 +283,6 @@ class SimplexElementGroup(MeshElementGroup):
         automatically assigned.
         """
 
-        if not issubclass(vertex_indices.dtype.type, np.integer):
-            raise TypeError("vertex_indices must be integral")
-
         if unit_nodes is None:
             if dim is None:
                 raise TypeError("'dim' must be passed "
@@ -294,10 +295,14 @@ class SimplexElementGroup(MeshElementGroup):
 
         dims = unit_nodes.shape[0]
 
-        if vertex_indices.shape[-1] != dims+1:
-            raise ValueError("vertex_indices has wrong number of vertices per "
-                    "element. expected: %d, got: %d" % (dims+1,
-                        vertex_indices.shape[-1]))
+        if vertex_indices is not None:
+            if not issubclass(vertex_indices.dtype.type, np.integer):
+                raise TypeError("vertex_indices must be integral")
+
+            if vertex_indices.shape[-1] != dims+1:
+                raise ValueError("vertex_indices has wrong number of vertices per "
+                        "element. expected: %d, got: %d" % (dims+1,
+                            vertex_indices.shape[-1]))
 
         super(SimplexElementGroup, self).__init__(order, vertex_indices, nodes,
                 element_nr_base, node_nr_base, unit_nodes, dim)
@@ -338,7 +343,7 @@ class TensorProductElementGroup(MeshElementGroup):
             element_nr_base=None, node_nr_base=None,
             unit_nodes=None):
         """
-        :arg order: the mamximum total degree used for interpolation.
+        :arg order: the maximum total degree used for interpolation.
         :arg nodes: ``[ambient_dim, nelements, nunit_nodes]``
             The nodes are assumed to be mapped versions of *unit_nodes*.
         :arg unit_nodes: ``[dim, nunit_nodes]``
@@ -349,15 +354,16 @@ class TensorProductElementGroup(MeshElementGroup):
         automatically assigned.
         """
 
-        if not issubclass(vertex_indices.dtype.type, np.integer):
-            raise TypeError("vertex_indices must be integral")
-
         dims = unit_nodes.shape[0]
 
-        if vertex_indices.shape[-1] != 2**dims:
-            raise ValueError("vertex_indices has wrong number of vertices per "
-                    "element. expected: %d, got: %d" % (2**dims,
-                        vertex_indices.shape[-1]))
+        if vertex_indices is not None:
+            if not issubclass(vertex_indices.dtype.type, np.integer):
+                raise TypeError("vertex_indices must be integral")
+
+            if vertex_indices.shape[-1] != 2**dims:
+                raise ValueError("vertex_indices has wrong number of vertices per "
+                        "element. expected: %d, got: %d" % (2**dims,
+                            vertex_indices.shape[-1]))
 
         super(TensorProductElementGroup, self).__init__(order, vertex_indices, nodes,
                 element_nr_base, node_nr_base, unit_nodes)
@@ -568,8 +574,9 @@ class Mesh(Record):
     """
     .. attribute:: vertices
 
-        An array of vertex coordinates with shape
-        *(ambient_dim, nvertices)*
+        *None* or an array of vertex coordinates with shape
+        *(ambient_dim, nvertices)*. If *None*, vertices are not
+        known for this mesh.
 
     .. attribute:: groups
 
@@ -591,7 +598,7 @@ class Mesh(Record):
         the set of facial adjacency relations between group *igrp*
         and *ineighbor_group*. *ineighbor_group* and *igrp* may be
         identical, or *ineighbor_group* may be *None*, in which case
-        an :class:``InterPartitionAdjacency`` group containing boundary
+        an :class:`InterPartitionAdjacencyGroup` containing boundary
         faces is returned.
 
         Referencing this attribute may raise
@@ -628,6 +635,11 @@ class Mesh(Record):
         A mapping that maps boundary tag identifiers to their
         corresponding index.
 
+        .. note::
+
+            Elements of :attr:`boundary_tags` that do not cover any
+            part of the boundary will not be keys in this dictionary.
+
     .. attribute:: vertex_id_dtype
 
     .. attribute:: element_id_dtype
@@ -720,6 +732,9 @@ class Mesh(Record):
 
         # }}}
 
+        if vertices is None:
+            is_conforming = None
+
         if not is_conforming:
             if nodal_adjacency is None:
                 nodal_adjacency = False
@@ -753,7 +768,8 @@ class Mesh(Record):
                         self, node_vertex_consistency_tolerance)
 
             for g in self.groups:
-                assert g.vertex_indices.dtype == self.vertex_id_dtype
+                if g.vertex_indices is not None:
+                    assert g.vertex_indices.dtype == self.vertex_id_dtype
 
             if nodal_adjacency:
                 assert nodal_adjacency.neighbors_starts.shape == (self.nelements+1,)
@@ -812,7 +828,8 @@ class Mesh(Record):
 
     @property
     def ambient_dim(self):
-        return self.vertices.shape[0]
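+        # vertices may be *None*, so infer the ambient dimension from nodes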
+        from pytools import single_valued
+        return single_valued(grp.nodes.shape[0] for grp in self.groups)
 
     @property
     def dim(self):
@@ -821,6 +838,10 @@ class Mesh(Record):
 
     @property
     def nvertices(self):
+        if self.vertices is None:
+            from meshmode import DataUnavailable
+            raise DataUnavailable("vertices")
+
         return self.vertices.shape[-1]
 
     @property
@@ -829,13 +850,12 @@ class Mesh(Record):
 
     @property
     def nodal_adjacency(self):
+        from meshmode import DataUnavailable
         if self._nodal_adjacency is False:
-            from meshmode import DataUnavailable
             raise DataUnavailable("nodal_adjacency")
 
         elif self._nodal_adjacency is None:
             if not self.is_conforming:
-                from meshmode import DataUnavailable
                 raise DataUnavailable("nodal_adjacency can only "
                         "be computed for known-conforming meshes")
 
@@ -844,21 +864,20 @@ class Mesh(Record):
         return self._nodal_adjacency
 
     def nodal_adjacency_init_arg(self):
-        """Returns an 'nodal_adjacency' argument that can be
-        passed to a Mesh constructor.
+        """Returns a *nodal_adjacency* argument that can be
+        passed to a :class:`Mesh` constructor.
         """
 
         return self._nodal_adjacency
 
     @property
     def facial_adjacency_groups(self):
+        from meshmode import DataUnavailable
         if self._facial_adjacency_groups is False:
-            from meshmode import DataUnavailable
             raise DataUnavailable("facial_adjacency_groups")
 
         elif self._facial_adjacency_groups is None:
             if not self.is_conforming:
-                from meshmode import DataUnavailable
                 raise DataUnavailable("facial_adjacency_groups can only "
                         "be computed for known-conforming meshes")
 
@@ -871,6 +890,12 @@ class Mesh(Record):
         return self._facial_adjacency_groups
 
     def boundary_tag_bit(self, boundary_tag):
+        if boundary_tag is BTAG_NONE:
+            return 0
+
+        if boundary_tag not in self.boundary_tags:
+            raise ValueError("boundary tag '%s' is not known" % boundary_tag)
+
         try:
             return 1 << self.btag_to_index[boundary_tag]
         except KeyError:
@@ -904,6 +929,9 @@ class Mesh(Record):
 # {{{ node-vertex consistency test
 
 def _test_node_vertex_consistency_simplex(mesh, mgrp, tol):
+    if mesh.vertices is None:
+        return True
+
     if mgrp.nelements == 0:
         return True
 
diff --git a/meshmode/mesh/generation.py b/meshmode/mesh/generation.py
index d5153ec66b07358263e1b361db8801370b58987e..3955fbdd814baf5706f6c04dbe109dbc6f744cf5 100644
--- a/meshmode/mesh/generation.py
+++ b/meshmode/mesh/generation.py
@@ -237,6 +237,7 @@ starfish = NArmedStarfish(5, 0.25)
 def make_curve_mesh(curve_f, element_boundaries, order,
         unit_nodes=None,
         node_vertex_consistency_tolerance=None,
+        closed=True,
         return_parametrization_points=False):
     """
     :arg curve_f: A callable representing a parametrization for a curve,
@@ -245,10 +246,12 @@ def make_curve_mesh(curve_f, element_boundaries, order,
     :arg element_boundaries: a vector of element boundary locations in
         :math:`[0,1]`, in order. 0 must be the first entry, 1 the
         last one.
+    :arg closed: if *True*, the curve is assumed closed, i.e. *curve_f* must
+        map the first and last entries of *element_boundaries* to the same
+        point.
     :arg unit_nodes: if given, the unit nodes to use. Must have shape
-        ``(dim, nnoodes)``.
+        ``(dim, nnodes)``.
     :returns: a :class:`meshmode.mesh.Mesh`, or if *return_parametrization_points*
-        is True, a tuple ``(mesh, par_points)``, where *par_points* is an array of
+        is *True*, a tuple ``(mesh, par_points)``, where *par_points* is an array of
         parametrization points.
     """
 
@@ -260,7 +263,22 @@ def make_curve_mesh(curve_f, element_boundaries, order,
         unit_nodes = mp.warp_and_blend_nodes(1, order)
     nodes_01 = 0.5*(unit_nodes+1)
 
-    vertices = curve_f(element_boundaries)
+    wrap = nelements
+    if not closed:
+        wrap += 1
+
+    vertices = curve_f(element_boundaries)[:, :wrap]
+    vertex_indices = np.vstack([
+        np.arange(0, nelements, dtype=np.int32),
+        np.arange(1, nelements + 1, dtype=np.int32) % wrap
+        ]).T
+
+    assert vertices.shape[1] == np.max(vertex_indices) + 1
+    if closed:
+        start_end_par = np.array([0, 1], dtype=np.float64)
+        start_end_curve = curve_f(start_end_par)
+
+        assert la.norm(start_end_curve[:, 0] - start_end_curve[:, 1]) < 1.0e-12
 
     el_lengths = np.diff(element_boundaries)
     el_starts = element_boundaries[:-1]
@@ -273,10 +291,7 @@ def make_curve_mesh(curve_f, element_boundaries, order,
     from meshmode.mesh import Mesh, SimplexElementGroup
     egroup = SimplexElementGroup(
             order,
-            vertex_indices=np.vstack([
-                np.arange(nelements, dtype=np.int32),
-                np.arange(1, nelements+1, dtype=np.int32) % nelements,
-                ]).T,
+            vertex_indices=vertex_indices,
             nodes=nodes,
             unit_nodes=unit_nodes)
 
@@ -563,12 +578,12 @@ def refine_mesh_and_get_urchin_warper(order, m, n, est_rel_interp_tolerance,
         min_rad=0.2, uniform_refinement_rounds=0):
     """
     :returns: a tuple ``(refiner, warp_mesh)``, where *refiner* is
-        a :class:`meshmode.refinement.Refiner` (from which the unwarped mesh
+        a :class:`~meshmode.mesh.refinement.Refiner` (from which the unwarped mesh
         may be obtained), and whose
-        :meth:`meshmode.refinement.Refiner.get_current_mesh` returns a
-        locally-refined :class:`meshmode.mesh.Mesh` of a sphere and *warp_mesh*
+        :meth:`~meshmode.mesh.refinement.Refiner.get_current_mesh` returns a
+        locally-refined :class:`~meshmode.mesh.Mesh` of a sphere and *warp_mesh*
         is a callable taking and returning a mesh that warps the unwarped mesh
-        into a smooth shape govered by a spherical harmonic of order *(m, n)*.
+        into a smooth shape covered by a spherical harmonic of order *(m, n)*.
     :arg order: the polynomial order of the returned mesh
     :arg est_rel_interp_tolerance: a tolerance for the relative
         interpolation error estimates on the warped version of the mesh.
@@ -640,7 +655,7 @@ def refine_mesh_and_get_urchin_warper(order, m, n, est_rel_interp_tolerance,
 
 def generate_urchin(order, m, n, est_rel_interp_tolerance, min_rad=0.2):
     """
-    :returns: a refined :class:`meshmode.mesh.Mesh` of a smooth shape govered
+    :returns: a refined :class:`~meshmode.mesh.Mesh` of a smooth shape covered
         by a spherical harmonic of order *(m, n)*.
     :arg order: the polynomial order of the returned mesh
     :arg est_rel_interp_tolerance: a tolerance for the relative
@@ -892,14 +907,14 @@ def generate_warped_rect_mesh(dim, order, n):
 @log_process(logger)
 def warp_and_refine_until_resolved(
         unwarped_mesh_or_refiner, warp_callable, est_rel_interp_tolerance):
-    """Given an original ("un-warped") :class:`meshmode.mesh.Mesh` and a
+    """Given an original ("unwarped") :class:`meshmode.mesh.Mesh` and a
     warping function *warp_callable* that takes and returns a mesh and a
     tolerance to which the mesh should be resolved by the mapping polynomials,
     this function will iteratively refine the *unwarped_mesh* until relative
     interpolation error estimates on the warped version are smaller than
     *est_rel_interp_tolerance* on each element.
 
-    :returns: The refined, un-warped mesh.
+    :returns: The refined, unwarped mesh.
 
     .. versionadded:: 2018.1
     """
diff --git a/meshmode/mesh/io.py b/meshmode/mesh/io.py
index de21ddfbfe08c964b6dbc242fad7386694493ae8..30c3fcbfd45037c11684a9cb3329108f1c44be9b 100644
--- a/meshmode/mesh/io.py
+++ b/meshmode/mesh/io.py
@@ -298,7 +298,7 @@ def generate_gmsh(source, dimensions=None, order=None, other_options=[],
 
     :arg source: an instance of either :class:`FileSource` or
         :class:`LiteralSource`
-    :arg force_ambient_dim: if not None, truncate point coordinates to
+    :arg force_ambient_dim: if not *None*, truncate point coordinates to
         this many dimensions.
     :arg mesh_construction_kwargs: *None* or a dictionary of keyword
         arguments passed to the :class:`meshmode.mesh.Mesh` constructor.
@@ -349,7 +349,7 @@ def generate_gmsh(source, dimensions=None, order=None, other_options=[],
 def from_meshpy(mesh_info, order=1):
     """Imports a mesh from a :mod:`meshpy` *mesh_info* data structure,
     which may be generated by either :mod:`meshpy.triangle` or
-    :mod:`meshpy_tet`.
+    :mod:`meshpy.tet`.
     """
     from meshmode.mesh import Mesh
     from meshmode.mesh.generation import make_group_from_vertices
@@ -407,7 +407,7 @@ def from_vertices_and_simplices(vertices, simplices, order=1, fix_orientation=Fa
 
 def to_json(mesh):
     """Return a JSON-able Python data structure for *mesh*. The structure directly
-    reflects the :class:`Mesh` data structure."""
+    reflects the :class:`meshmode.mesh.Mesh` data structure."""
 
     def btag_to_json(btag):
         if isinstance(btag, str):
diff --git a/meshmode/mesh/processing.py b/meshmode/mesh/processing.py
index 9cb71a1a501f2e79d94e9c09a7ef6847b5b621f7..97b9bbda9c99fd0dc1f537e6cbbbaf043bc6140c 100644
--- a/meshmode/mesh/processing.py
+++ b/meshmode/mesh/processing.py
@@ -44,11 +44,11 @@ __doc__ = """
 
 def find_group_indices(groups, meshwide_elems):
     """
-    :arg groups: A list of :class:``MeshElementGroup`` instances that contain
-        ``meshwide_elems``.
-    :arg meshwide_elems: A :class:``numpy.ndarray`` of mesh-wide element numbers
+    :arg groups: A list of :class:`~meshmode.mesh.MeshElementGroup` instances
+        that contain *meshwide_elems*.
+    :arg meshwide_elems: A :class:`numpy.ndarray` of mesh-wide element numbers.
         Usually computed by ``elem + element_nr_base``.
-    :returns: A :class:``numpy.ndarray`` of group numbers that ``meshwide_elem``
+    :returns: A :class:`numpy.ndarray` of group numbers that *meshwide_elem*
         belongs to.
     """
     grps = np.zeros_like(meshwide_elems)
@@ -63,14 +63,14 @@ def find_group_indices(groups, meshwide_elems):
 
 def partition_mesh(mesh, part_per_element, part_num):
     """
-    :arg mesh: A :class:`meshmode.mesh.Mesh` to be partitioned.
+    :arg mesh: A :class:`~meshmode.mesh.Mesh` to be partitioned.
     :arg part_per_element: A :class:`numpy.ndarray` containing one
         integer per element of *mesh* indicating which part of the
         partitioned mesh the element is to become a part of.
     :arg part_num: The part number of the mesh to return.
 
     :returns: A tuple ``(part_mesh, part_to_global)``, where *part_mesh*
-        is a :class:`meshmode.mesh.Mesh` that is a partition of mesh, and
+        is a :class:`~meshmode.mesh.Mesh` that is a partition of mesh, and
         *part_to_global* is a :class:`numpy.ndarray` mapping element
         numbers on *part_mesh* to ones in *mesh*.
 
@@ -321,7 +321,7 @@ def find_volume_mesh_element_orientations(mesh, tolerate_unimplemented_checks=Fa
     each negatively oriented element.
 
     :arg tolerate_unimplemented_checks: If *True*, elements for which no
-        check is available will return NaN.
+        check is available will return *NaN*.
     """
 
     result = np.empty(mesh.nelements, dtype=np.float64)
@@ -414,7 +414,8 @@ def flip_simplex_element_group(vertices, grp, grp_flip_flags):
 
 def perform_flips(mesh, flip_flags, skip_tests=False):
     """
-    :arg flip_flags: A :class:`numpy.ndarray` with *mesh.nelements* entries
+    :arg flip_flags: A :class:`numpy.ndarray` with
+        :attr:`meshmode.mesh.Mesh.nelements` entries
         indicating by their Boolean value whether the element is to be
         flipped.
     """
diff --git a/meshmode/mesh/refinement/no_adjacency.py b/meshmode/mesh/refinement/no_adjacency.py
index 70279bd948163d501e3b5294f6afb2397f57ba9b..f37f66215ded63f530db8d5a2cb465d38771b5db 100644
--- a/meshmode/mesh/refinement/no_adjacency.py
+++ b/meshmode/mesh/refinement/no_adjacency.py
@@ -146,17 +146,19 @@ class RefinerWithoutAdjacency(object):
         """
 
         mesh = self._current_mesh
-
         refine_flags = np.asarray(refine_flags, dtype=np.bool)
 
         if len(refine_flags) != mesh.nelements:
             raise ValueError("length of refine_flags does not match "
                     "element count of last generated mesh")
 
+        perform_vertex_updates = mesh.vertices is not None
+
         new_el_groups = []
         group_refinement_records = []
         additional_vertices = []
-        inew_vertex = mesh.nvertices
+        if perform_vertex_updates:
+            inew_vertex = mesh.nvertices
 
         for igrp, group in enumerate(mesh.groups):
             bisection_info = self._get_bisection_tesselation_info(
@@ -195,55 +197,59 @@ class RefinerWithoutAdjacency(object):
 
             # {{{ get new vertices together
 
-            midpoints = bisection_info.resampler.get_midpoints(
-                    group, bisection_info, refining_el_old_indices)
+            if perform_vertex_updates:
+                midpoints = bisection_info.resampler.get_midpoints(
+                        group, bisection_info, refining_el_old_indices)
 
-            new_vertex_indices = np.empty(
-                (new_nelements, group.vertex_indices.shape[1]),
-                dtype=mesh.vertex_id_dtype)
-            new_vertex_indices.fill(-17)
+                new_vertex_indices = np.empty(
+                    (new_nelements, group.vertex_indices.shape[1]),
+                    dtype=mesh.vertex_id_dtype)
+                new_vertex_indices.fill(-17)
 
-            # copy over unchanged vertices
-            new_vertex_indices[unrefined_el_new_indices] = \
-                    group.vertex_indices[~grp_flags]
+                # copy over unchanged vertices
+                new_vertex_indices[unrefined_el_new_indices] = \
+                        group.vertex_indices[~grp_flags]
 
-            for old_iel in refining_el_old_indices:
-                new_iel_base = child_el_indices[old_iel]
+                for old_iel in refining_el_old_indices:
+                    new_iel_base = child_el_indices[old_iel]
 
-                refining_vertices = np.empty(len(bisection_info.ref_vertices),
+                    refining_vertices = np.empty(len(bisection_info.ref_vertices),
                         dtype=mesh.vertex_id_dtype)
-                refining_vertices.fill(-17)
+                    refining_vertices.fill(-17)
 
-                # carry over old vertices
-                refining_vertices[bisection_info.orig_vertex_indices] = \
-                        group.vertex_indices[old_iel]
+                    # carry over old vertices
+                    refining_vertices[bisection_info.orig_vertex_indices] = \
+                            group.vertex_indices[old_iel]
 
-                for imidpoint, (iref_midpoint, (v1, v2)) in enumerate(zip(
-                        bisection_info.midpoint_indices,
-                        bisection_info.midpoint_vertex_pairs)):
+                    for imidpoint, (iref_midpoint, (v1, v2)) in enumerate(zip(
+                            bisection_info.midpoint_indices,
+                            bisection_info.midpoint_vertex_pairs)):
 
-                    global_v1 = group.vertex_indices[old_iel, v1]
-                    global_v2 = group.vertex_indices[old_iel, v2]
+                        global_v1 = group.vertex_indices[old_iel, v1]
+                        global_v2 = group.vertex_indices[old_iel, v2]
 
-                    if global_v1 > global_v2:
-                        global_v1, global_v2 = global_v2, global_v1
+                        if global_v1 > global_v2:
+                            global_v1, global_v2 = global_v2, global_v1
 
-                    try:
-                        global_midpoint = self.global_vertex_pair_to_midpoint[
-                                global_v1, global_v2]
-                    except KeyError:
-                        global_midpoint = inew_vertex
-                        additional_vertices.append(midpoints[old_iel][:, imidpoint])
-                        inew_vertex += 1
+                        try:
+                            global_midpoint = self.global_vertex_pair_to_midpoint[
+                                    global_v1, global_v2]
+                        except KeyError:
+                            global_midpoint = inew_vertex
+                            additional_vertices.append(
+                                    midpoints[old_iel][:, imidpoint])
+                            inew_vertex += 1
 
-                    refining_vertices[iref_midpoint] = global_midpoint
+                        refining_vertices[iref_midpoint] = global_midpoint
 
-                assert (refining_vertices >= 0).all()
+                    assert (refining_vertices >= 0).all()
 
-                new_vertex_indices[new_iel_base:new_iel_base+nchildren] = \
-                        refining_vertices[bisection_info.children]
+                    new_vertex_indices[new_iel_base:new_iel_base+nchildren] = \
+                            refining_vertices[bisection_info.children]
 
-            assert (new_vertex_indices >= 0).all()
+                assert (new_vertex_indices >= 0).all()
+            else:
+                new_vertex_indices = None
 
             # }}}
 
@@ -277,11 +283,14 @@ class RefinerWithoutAdjacency(object):
                     nodes=new_nodes,
                     unit_nodes=group.unit_nodes))
 
-        new_vertices = np.empty(
-                (mesh.ambient_dim, mesh.nvertices + len(additional_vertices)),
-                mesh.vertices.dtype)
-        new_vertices[:, :mesh.nvertices] = mesh.vertices
-        new_vertices[:, mesh.nvertices:] = np.array(additional_vertices).T
+        if perform_vertex_updates:
+            new_vertices = np.empty(
+                    (mesh.ambient_dim, mesh.nvertices + len(additional_vertices)),
+                    mesh.vertices.dtype)
+            new_vertices[:, :mesh.nvertices] = mesh.vertices
+            new_vertices[:, mesh.nvertices:] = np.array(additional_vertices).T
+        else:
+            new_vertices = None
 
         from meshmode.mesh import Mesh
         new_mesh = Mesh(new_vertices, new_el_groups, is_conforming=(
diff --git a/meshmode/mesh/refinement/resampler.py b/meshmode/mesh/refinement/resampler.py
index 794c1c5a2362d1255f5e3d329c48fb766c6229a9..4631c9caa7220ee7a742670d10b0a7a5998003bd 100644
--- a/meshmode/mesh/refinement/resampler.py
+++ b/meshmode/mesh/refinement/resampler.py
@@ -74,7 +74,8 @@ class SimplexResampler(object):
             follows their ordering in the tesselation (see also
             :meth:`SimplexResampler.get_vertex_pair_to_midpoint_order`)
         """
-        assert len(group.vertex_indices[0]) == group.dim + 1
+        if group.vertex_indices is not None:
+            assert len(group.vertex_indices[0]) == group.dim + 1
 
         # Get midpoints, converted to unit coordinates.
         midpoints = -1 + np.array([vertex for vertex in
@@ -106,7 +107,8 @@ class SimplexResampler(object):
             The ordering of the child nodes follows the ordering
             of ``tesselation.children.``
         """
-        assert len(group.vertex_indices[0]) == group.dim + 1
+        if group.vertex_indices is not None:
+            assert len(group.vertex_indices[0]) == group.dim + 1
 
         from meshmode.mesh.refinement.utils import map_unit_nodes_to_children
 
diff --git a/test/test_meshmode.py b/test/test_meshmode.py
index 697e0bbd134bf48babd23b5a5db22e85e7e6ca28..fe6a1ea235841f1293a858d80bded22bc191fdeb 100644
--- a/test/test_meshmode.py
+++ b/test/test_meshmode.py
@@ -391,6 +391,7 @@ def test_all_faces_interpolation(ctx_factory, mesh_name, dim, mesh_pars,
     PolynomialWarpAndBlendGroupFactory
     ])
 @pytest.mark.parametrize(("mesh_name", "dim", "mesh_pars"), [
+    ("segment", 1, [8, 16, 32]),
     ("blob", 2, [1e-1, 8e-2, 5e-2]),
     ("warp", 2, [3, 5, 7]),
     ("warp", 3, [3, 5]),
@@ -418,7 +419,15 @@ def test_opposite_face_interpolation(ctx_factory, group_factory,
     for mesh_par in mesh_pars:
         # {{{ get mesh
 
-        if mesh_name == "blob":
+        if mesh_name == "segment":
+            assert dim == 1
+
+            from meshmode.mesh.generation import generate_box_mesh
+            mesh = generate_box_mesh(
+                    [np.linspace(-0.5, 0.5, mesh_par)],
+                    order=order)
+            h = 1.0 / mesh_par
+        elif mesh_name == "blob":
             assert dim == 2
 
             h = mesh_par
@@ -457,7 +466,6 @@ def test_opposite_face_interpolation(ctx_factory, group_factory,
 
         bdry_x = bdry_discr.nodes()[0].with_queue(queue)
         bdry_f = f(bdry_x)
-
         bdry_f_2 = opp_face(queue, bdry_f)
 
         err = la.norm((bdry_f-bdry_f_2).get(), np.inf)
@@ -1152,6 +1160,7 @@ def test_vtk_overwrite(ctx_getter):
 
 
 # {{{ test_mesh_to_tikz
+
 def test_mesh_to_tikz():
     from meshmode.mesh.io import generate_gmsh, FileSource
 
@@ -1189,6 +1198,63 @@ def test_affine_map():
             assert la.norm(x-m_inv(m(x))) < 1e-10
 
 
+def test_mesh_without_vertices(ctx_factory):
+    ctx = ctx_factory()
+    queue = cl.CommandQueue(ctx)
+
+    # create a mesh
+    from meshmode.mesh.generation import generate_icosphere
+    mesh = generate_icosphere(r=1.0, order=4)
+
+    # create one without the vertices
+    from meshmode.mesh import Mesh
+    grp, = mesh.groups
+    groups = [grp.copy(nodes=grp.nodes, vertex_indices=None) for grp in mesh.groups]
+    mesh = Mesh(None, groups, is_conforming=False)
+
+    # try refining it
+    from meshmode.mesh.refinement import refine_uniformly
+    mesh = refine_uniformly(mesh, 1)
+
+    # make sure the world doesn't end
+    from meshmode.discretization import Discretization
+    from meshmode.discretization.poly_element import \
+            InterpolatoryQuadratureSimplexGroupFactory as GroupFactory
+    discr = Discretization(ctx, mesh, GroupFactory(4))
+    discr.nodes().with_queue(queue)
+
+    from meshmode.discretization.visualization import make_visualizer
+    make_visualizer(queue, discr, 4)
+
+
+@pytest.mark.parametrize("curve_name", ["ellipse", "arc"])
+def test_open_curved_mesh(curve_name):
+    def arc_curve(t, start=0, end=np.pi):
+        return np.vstack([
+            np.cos((end - start) * t + start),
+            np.sin((end - start) * t + start)
+            ])
+
+    if curve_name == "ellipse":
+        from functools import partial
+        from meshmode.mesh.generation import ellipse
+        curve_f = partial(ellipse, 2.0)
+        closed = True
+    elif curve_name == "arc":
+        curve_f = arc_curve
+        closed = False
+    else:
+        raise ValueError("unknown curve")
+
+    from meshmode.mesh.generation import make_curve_mesh
+    nelements = 32
+    order = 4
+    make_curve_mesh(curve_f,
+            np.linspace(0.0, 1.0, nelements + 1),
+            order=order,
+            closed=closed)
+
+
 if __name__ == "__main__":
     import sys
     if len(sys.argv) > 1: