diff --git a/benchmarks/bench_translations.py b/benchmarks/bench_translations.py
index 8c31eb8b15827792f6fe559b6d773f93302d7c13..0601889c8aafd24e0fa5184cd04cd680e60c337a 100644
--- a/benchmarks/bench_translations.py
+++ b/benchmarks/bench_translations.py
@@ -33,15 +33,15 @@ class Param:
 
 class TranslationBenchmarkSuite:
 
-    params = [
+    params = (
         Param(2, 10),
         Param(2, 15),
         Param(2, 20),
         Param(3, 5),
         Param(3, 10),
-    ]
+    )
 
-    param_names = ["order"]
+    param_names = ("order",)
 
     def setup(self, param):
         logging.basicConfig(level=logging.INFO)
@@ -88,10 +88,10 @@ class LaplaceVolumeTaylorTranslation(TranslationBenchmarkSuite):
     knl = LaplaceKernel
     local_expn_class = VolumeTaylorLocalExpansion
     mpole_expn_class = VolumeTaylorMultipoleExpansion
-    params = [
+    params = (
         Param(2, 10),
         Param(3, 5),
-    ]
+    )
 
 
 class LaplaceConformingVolumeTaylorTranslation(TranslationBenchmarkSuite):
@@ -104,10 +104,10 @@ class HelmholtzVolumeTaylorTranslation(TranslationBenchmarkSuite):
     knl = HelmholtzKernel
     local_expn_class = VolumeTaylorLocalExpansion
     mpole_expn_class = VolumeTaylorMultipoleExpansion
-    params = [
+    params = (
         Param(2, 10),
         Param(3, 5),
-    ]
+    )
 
 
 class HelmholtzConformingVolumeTaylorTranslation(TranslationBenchmarkSuite):
@@ -120,8 +120,8 @@ class Helmholtz2DTranslation(TranslationBenchmarkSuite):
     knl = HelmholtzKernel
     local_expn_class = H2DLocalExpansion
     mpole_expn_class = H2DMultipoleExpansion
-    params = [
+    params = (
         Param(2, 10),
         Param(2, 15),
         Param(2, 20),
-    ]
+    )
diff --git a/examples/expansion-toys.py b/examples/expansion-toys.py
index 6e7d66b38528eba8ce3c65e4f33965a1424b87b7..e76e958d0a5657721cd9146da6b85ebfd715cca5 100644
--- a/examples/expansion-toys.py
+++ b/examples/expansion-toys.py
@@ -43,7 +43,7 @@ def main():
         plt.show()
 
     mexp = t.multipole_expand(pt_src, [0, 0], 5)
-    mexp2 = t.multipole_expand(mexp, [0, 0.25])  # noqa: F841
+    mexp2 = t.multipole_expand(mexp, [0, 0.25])
     lexp = t.local_expand(mexp, [3, 0])
     lexp2 = t.local_expand(lexp, [3, 1], 3)
 
diff --git a/pyproject.toml b/pyproject.toml
index 11cf370cfd19c9865f9d1b80db71357390896f12..19db2276adfd94e21b5d7c77aa83bf90a488cc73 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,8 +13,8 @@ extend-select = [
     "N",   # pep8-naming
     "NPY", # numpy
     "Q",   # flake8-quotes
-    # "UP",  # pyupgrade
-    # "RUF", # ruff
+    "UP",  # pyupgrade
+    "RUF", # ruff
     # "W",   # pycodestyle
 ]
 extend-ignore = [
diff --git a/sumpy/__init__.py b/sumpy/__init__.py
index d03122e14e5301266d25c630b880ec921af1a15c..e4daaaae77d7811060b9999c75fcf8335c22c2b2 100644
--- a/sumpy/__init__.py
+++ b/sumpy/__init__.py
@@ -32,13 +32,20 @@ from sumpy.version import VERSION_TEXT
 from pytools.persistent_dict import WriteOncePersistentDict
 
 __all__ = [
-    "P2P", "P2PFromCSR",
-    "P2EFromSingleBox", "P2EFromCSR",
-    "E2PFromSingleBox", "E2PFromCSR",
-    "E2EFromCSR", "E2EFromChildren", "E2EFromParent",
-    "M2LUsingTranslationClassesDependentData",
+    "P2P",
+    "E2EFromCSR",
+    "E2EFromChildren",
+    "E2EFromParent",
+    "E2PFromCSR",
+    "E2PFromSingleBox",
     "M2LGenerateTranslationClassesDependentData",
-    "M2LPreprocessMultipole", "M2LPostprocessLocal"]
+    "M2LPostprocessLocal",
+    "M2LPreprocessMultipole",
+    "M2LUsingTranslationClassesDependentData",
+    "P2EFromCSR",
+    "P2EFromSingleBox",
+    "P2PFromCSR",
+]
 
 
 code_cache = WriteOncePersistentDict("sumpy-code-cache-v6-"+VERSION_TEXT,
diff --git a/sumpy/cse.py b/sumpy/cse.py
index 635c26f12de2be095e3a43899b17063f7b045d35..c401e84fb6c5d23c7f0ef6c253384f3abba50d8d 100644
--- a/sumpy/cse.py
+++ b/sumpy/cse.py
@@ -304,7 +304,7 @@ def match_common_args(func_class, funcs, opt_subs):
         # This makes us try combining smaller matches first.
         common_arg_candidates = OrderedSet(sorted(
                 common_arg_candidates_counts.keys(),
-                key=lambda k: (common_arg_candidates_counts[k], k)))  # noqa: B023
+                key=lambda k: (common_arg_candidates_counts[k], k)))
 
         while common_arg_candidates:
             j = common_arg_candidates.pop(last=False)
diff --git a/sumpy/e2e.py b/sumpy/e2e.py
index c66f9c02254e70de6c8f60aec52882a83125e5eb..4167d993aecacd729cb597fc0af6b429b82de3e0 100644
--- a/sumpy/e2e.py
+++ b/sumpy/e2e.py
@@ -257,9 +257,10 @@ class E2EFromCSR(E2EBase):
                         shape=("nsrc_level_boxes", ncoeff_src), offset=lp.auto),
                     lp.GlobalArg("tgt_expansions", None,
                         shape=("ntgt_level_boxes", ncoeff_tgt), offset=lp.auto),
-                    "..."
-                ] + gather_loopy_arguments([self.src_expansion,
-                                            self.tgt_expansion]),
+                    "...",
+                    *gather_loopy_arguments([self.src_expansion,
+                                             self.tgt_expansion])
+                ],
                 name=self.name,
                 assumptions="ntgt_boxes>=1",
                 silenced_warnings="write_race(write_expn*)",
@@ -489,9 +490,10 @@ class M2LUsingTranslationClassesDependentData(E2EFromCSR):
                         offset=lp.auto),
                     lp.ValueArg("ntranslation_classes, ntranslation_classes_lists",
                         np.int32),
-                    ...
-                ] + gather_loopy_arguments([self.src_expansion,
-                                            self.tgt_expansion]),
+                    ...,
+                    *gather_loopy_arguments([self.src_expansion,
+                                             self.tgt_expansion])
+                ],
                 name=self.name,
                 assumptions="ntgt_boxes>=1",
                 default_offset=lp.auto,
@@ -600,8 +602,9 @@ class M2LGenerateTranslationClassesDependentData(E2EBase):
                     lp.ValueArg("ntranslation_classes", np.int32),
                     lp.ValueArg("ntranslation_vectors", np.int32),
                     lp.ValueArg("translation_classes_level_start", np.int32),
-                    "..."
-                ] + gather_loopy_arguments([self.src_expansion, self.tgt_expansion]),
+                    "...",
+                    *gather_loopy_arguments([self.src_expansion, self.tgt_expansion])
+                ],
                 name=self.name,
                 assumptions="ntranslation_classes>=1",
                 default_offset=lp.auto,
@@ -712,8 +715,9 @@ class M2LPreprocessMultipole(E2EBase):
                     lp.GlobalArg("preprocessed_src_expansions", None,
                         shape=("nsrc_boxes", npreprocessed_src_coeffs),
                         offset=lp.auto),
-                    "..."
-                ] + gather_loopy_arguments([self.src_expansion, self.tgt_expansion]),
+                    "...",
+                    *gather_loopy_arguments([self.src_expansion, self.tgt_expansion])
+                ],
                 name=self.name,
                 assumptions="nsrc_boxes>=1",
                 fixed_parameters={
@@ -807,8 +811,9 @@ class M2LPostprocessLocal(E2EBase):
                     lp.GlobalArg("tgt_expansions_before_postprocessing", None,
                         shape=("ntgt_boxes", ntgt_coeffs_before_postprocessing),
                         offset=lp.auto),
-                    "..."
-                ] + gather_loopy_arguments([self.src_expansion, self.tgt_expansion]),
+                    "...",
+                    *gather_loopy_arguments([self.src_expansion, self.tgt_expansion])
+                ],
                 name=self.name,
                 assumptions="ntgt_boxes>=1",
                 default_offset=lp.auto,
@@ -931,8 +936,9 @@ class E2EFromChildren(E2EBase):
                     lp.ValueArg("src_base_ibox,tgt_base_ibox", np.int32),
                     lp.ValueArg("ntgt_level_boxes,nsrc_level_boxes", np.int32),
                     lp.ValueArg("aligned_nboxes", np.int32),
-                    "..."
-                ] + gather_loopy_arguments([self.src_expansion, self.tgt_expansion]),
+                    "...",
+                    *gather_loopy_arguments([self.src_expansion, self.tgt_expansion])
+                ],
                 name=self.name,
                 assumptions="ntgt_boxes>=1",
                 silenced_warnings="write_race(write_expn*)",
@@ -1036,8 +1042,9 @@ class E2EFromParent(E2EBase):
                         shape=("ntgt_level_boxes", ncoeffs_tgt), offset=lp.auto),
                     lp.GlobalArg("src_expansions", None,
                         shape=("nsrc_level_boxes", ncoeffs_src), offset=lp.auto),
-                    "..."
-                ] + gather_loopy_arguments([self.src_expansion, self.tgt_expansion]),
+                    "...",
+                    *gather_loopy_arguments([self.src_expansion, self.tgt_expansion])
+                ],
                 name=self.name, assumptions="ntgt_boxes>=1",
                 silenced_warnings="write_race(write_expn*)",
                 fixed_parameters={"dim": self.dim, "nchildren": 2**self.dim},
diff --git a/sumpy/e2p.py b/sumpy/e2p.py
index 55af69b3a9c75a557bdd42ea40a4cae121da1203..f19ca7d74a16ada6135558ecb7761741d7a17b81 100644
--- a/sumpy/e2p.py
+++ b/sumpy/e2p.py
@@ -97,7 +97,7 @@ class E2PBase(KernelCacheMixin, ABC):
         return loopy_knl
 
     def get_loopy_args(self):
-        return gather_loopy_arguments((self.expansion,) + tuple(self.kernels))
+        return gather_loopy_arguments((self.expansion, *tuple(self.kernels)))
 
     def get_kernel_scaling_assignment(self):
         from sumpy.symbolic import SympyToPymbolicMapper
@@ -128,9 +128,9 @@ class E2PFromSingleBox(E2PBase):
                     "{[itgt,idim]: itgt_start<=itgt<itgt_end and 0<=idim<dim}",
                     "{[icoeff]: 0<=icoeff<ncoeffs}",
                     "{[iknl]: 0<=iknl<nresults}",
-                ],
-                self.get_kernel_scaling_assignment()
-                + ["""
+                ], [
+                *self.get_kernel_scaling_assignment(),
+                """
                 for itgt_box
                     <> tgt_ibox = target_boxes[itgt_box]
                     <> itgt_start = box_target_starts[tgt_ibox]
@@ -243,9 +243,9 @@ class E2PFromCSR(E2PBase):
                     "{[idim]: 0<=idim<dim}",
                     "{[icoeff]: 0<=icoeff<ncoeffs}",
                     "{[iknl]: 0<=iknl<nresults}",
-                ],
-                self.get_kernel_scaling_assignment()
-                + ["""
+                ], [
+                *self.get_kernel_scaling_assignment(),
+                """
                 for itgt_box
                     <> tgt_ibox = target_boxes[itgt_box]
                     <> itgt_start = box_target_starts[tgt_ibox]
diff --git a/sumpy/expansion/__init__.py b/sumpy/expansion/__init__.py
index 493ff71087258cd99be5ea109bcb5de9715d3cad..294337dc482e3cb73f6e6e2d9dcae9f44ca1c6fa 100644
--- a/sumpy/expansion/__init__.py
+++ b/sumpy/expansion/__init__.py
@@ -419,7 +419,7 @@ class FullExpansionTermsWrangler(ExpansionTermsWrangler):
                 mi = ident.mi
             else:
                 mi = ident
-            return tuple([sum(mi)] + list(reversed(mi)))
+            return (sum(mi), *list(reversed(mi)))
 
         return mi_key, axis_permutation
 # }}}
diff --git a/sumpy/expansion/diff_op.py b/sumpy/expansion/diff_op.py
index 8d8700baca9376b8fef3389a6ea48dd79aedf0ad..bca672c7d0f13319bce1fdbecdcaa8efc6a6434d 100644
--- a/sumpy/expansion/diff_op.py
+++ b/sumpy/expansion/diff_op.py
@@ -122,7 +122,7 @@ class LinearPDESystemOperator:
         return self + (-1)*other_diff_op
 
     def __repr__(self):
-        return f"LinearPDESystemOperator({self.dim}, {repr(self.eqs)})"
+        return f"LinearPDESystemOperator({self.dim}, {self.eqs!r})"
 
     def __getitem__(self, idx):
         item = self.eqs.__getitem__(idx)
@@ -313,7 +313,7 @@ def as_scalar_pde(pde: LinearPDESystemOperator, comp_idx: int) \
             indices.add(deriv_ident.vec_idx)
 
     # this is already a scalar pde
-    if len(indices) == 1 and list(indices)[0] == comp_idx:
+    if len(indices) == 1 and next(iter(indices)) == comp_idx:
         return pde
 
     return _get_all_scalar_pdes(pde)[comp_idx]
diff --git a/sumpy/expansion/level_to_order.py b/sumpy/expansion/level_to_order.py
index 82db1aef2ed1f5253a8e39342ce6249fbb7f313e..9c9f13103ffd1397f217ac38487af63cd9b9bec7 100644
--- a/sumpy/expansion/level_to_order.py
+++ b/sumpy/expansion/level_to_order.py
@@ -133,7 +133,7 @@ class SimpleExpansionOrderFinder:
 
         laplace_order = int(np.ceil(
                 (np.log(self.tol) - np.log(self.err_const_laplace))
-                /  # noqa: W504
+                /
                 np.log(
                     np.sqrt(tree.dimensions)/3
                     ) - 1))
diff --git a/sumpy/expansion/local.py b/sumpy/expansion/local.py
index 7f0607f0d9194bdbad5df9828f8e562196a4e862..13933d736a86ae2d5dc0c33a3fa4dc92cce51a3e 100644
--- a/sumpy/expansion/local.py
+++ b/sumpy/expansion/local.py
@@ -340,8 +340,7 @@ class VolumeTaylorLocalExpansionBase(VolumeTaylorExpansionMixin, LocalExpansionB
         for axis in {d for d, _ in tgt_split}:
             # Use the axis as the first dimension to vary so that the below
             # algorithm is O(p^{d+1}) for full and O(p^{d}) for compressed
-            dims = [axis] + list(range(axis)) + \
-                    list(range(axis+1, self.dim))
+            dims = [axis, *list(range(axis)), *list(range(axis + 1, self.dim))]
             # Start with source coefficients. Gets updated after each axis.
             cur_dim_input_coeffs = src_coeffs
             # O(1) iterations
diff --git a/sumpy/expansion/loopy.py b/sumpy/expansion/loopy.py
index 00bbd3dbe253c370e8e751f3a7915c4600449975..20be150a90ab24517d71637530c3c607b0867da7 100644
--- a/sumpy/expansion/loopy.py
+++ b/sumpy/expansion/loopy.py
@@ -64,7 +64,7 @@ def make_e2p_loopy_kernel(
             expression="target[idim]-center[idim]",
             temp_var_type=lp.Optional(None),
         ))
-    target_args = gather_loopy_arguments((expansion,) + tuple(kernels))
+    target_args = gather_loopy_arguments((expansion, *tuple(kernels)))
 
     coeff_exprs = sym.make_sym_vector("coeffs", ncoeffs)
     coeff_names = [
@@ -159,7 +159,7 @@ def make_p2e_loopy_kernel(
             expression="center[idim]-source[idim]",
             temp_var_type=lp.Optional(None),
         ))
-    source_args = gather_loopy_source_arguments((expansion,) + tuple(kernels))
+    source_args = gather_loopy_source_arguments((expansion, *tuple(kernels)))
 
     all_strengths = sym.make_sym_vector("strength", nstrengths)
     strengths = [all_strengths[i] for i in strength_usage]
diff --git a/sumpy/fmm.py b/sumpy/fmm.py
index de7ac6aec4d1cd22cee9f044c8dd038f2fa5dcb8..d956c100be8c159213970501a1eba41827712b00 100644
--- a/sumpy/fmm.py
+++ b/sumpy/fmm.py
@@ -28,7 +28,7 @@ __doc__ = """Integrates :mod:`boxtree` with :mod:`sumpy`.
 
 
 import pyopencl as cl
-import pyopencl.array  # noqa
+import pyopencl.array
 
 from pytools import memoize_method
 from boxtree.fmm import TreeIndependentDataForWrangler, ExpansionWranglerInterface
diff --git a/sumpy/kernel.py b/sumpy/kernel.py
index d3edefa09db40298262ce067a61002a9a0e0e3f6..0c68aa15d6947f3da0b04f7768b9a60b20d585b2 100644
--- a/sumpy/kernel.py
+++ b/sumpy/kernel.py
@@ -160,7 +160,7 @@ class Kernel:
         try:
             return self.hash_value
         except AttributeError:
-            self.hash_value = hash((type(self),) + self.__getinitargs__())
+            self.hash_value = hash((type(self), *self.__getinitargs__()))
             return self.hash_value
 
     def update_persistent_hash(self, key_hash, key_builder):
@@ -692,7 +692,7 @@ class ElasticityKernel(ExpressionKernel):
             # See (Berger and Karageorghis 2001)
             expr = (
                 -var("log")(r)*((3 - 4 * nu) if icomp == jcomp else 0)
-                +  # noqa: W504
+                +
                 d[icomp]*d[jcomp]/r**2
                 )
             scaling = -1/(8*var("pi")*(1 - nu)*mu)
@@ -703,7 +703,7 @@ class ElasticityKernel(ExpressionKernel):
             # Kelvin solution
             expr = (
                 (1/r)*((3 - 4*nu) if icomp == jcomp else 0)
-                +  # noqa: W504
+                +
                 d[icomp]*d[jcomp]/r**3
                 )
             scaling = -1/(16*var("pi")*(1 - nu)*mu)
@@ -1081,7 +1081,7 @@ class _VectorIndexAdder(CSECachingMapperMixin, IdentityMapper):
         if expr.aggregate.name == self.vec_name \
                 and isinstance(expr.index, int):
             return CommonSubexpression(
-                    expr.aggregate.index((expr.index,) + self.additional_indices),
+                    expr.aggregate.index((expr.index, *self.additional_indices)),
                     prefix=None, scope=cse_scope.EVALUATION)
         else:
             return IdentityMapper.map_subscript(self, expr)
@@ -1231,8 +1231,8 @@ class DirectionalSourceDerivative(DirectionalDerivative):
                         None,
                         shape=(self.dim, "nsources"),
                         offset=lp.auto),
-                    )
-                    ] + self.inner_kernel.get_source_args()
+                    ),
+                    *self.inner_kernel.get_source_args()]
 
     def prepare_loopy_kernel(self, loopy_knl):
         loopy_knl = self.inner_kernel.prepare_loopy_kernel(loopy_knl)
diff --git a/sumpy/p2e.py b/sumpy/p2e.py
index acc7e7269a74393a54434d23e2c13900ca822bef..7268275c26fbf6c0c18bb340cd7ef3304fea8b82 100644
--- a/sumpy/p2e.py
+++ b/sumpy/p2e.py
@@ -102,7 +102,7 @@ class P2EBase(KernelCacheMixin, KernelComputation):
     def get_loopy_args(self):
         from sumpy.tools import gather_loopy_source_arguments
         return gather_loopy_source_arguments(
-                (self.expansion,) + tuple(self.source_kernels))
+                (self.expansion, *tuple(self.source_kernels)))
 
     def get_cache_key(self):
         return (type(self).__name__, self.name, self.expansion,
diff --git a/sumpy/p2p.py b/sumpy/p2p.py
index 6babc2372caff30f66268e59eadebd2d7db4947b..ed3e801295510b64939ddb425ca01f1c5ce3c491 100644
--- a/sumpy/p2p.py
+++ b/sumpy/p2p.py
@@ -210,14 +210,13 @@ class P2P(P2PBase):
 
     def get_kernel(self):
         loopy_insns, result_names = self.get_loopy_insns_and_result_names()
-        arguments = (
-            self.get_default_src_tgt_arguments()
-            + [
+        arguments = [
+                *self.get_default_src_tgt_arguments(),
                 lp.GlobalArg("strength", None,
                     shape="nstrengths, nsources", dim_tags="sep,C"),
                 lp.GlobalArg("result", None,
                     shape="nresults, ntargets", dim_tags="sep,C")
-            ])
+            ]
 
         loopy_knl = lp.make_kernel(["""
             {[itgt, isrc, idim]: \
@@ -454,8 +453,8 @@ class P2PFromCSR(P2PBase):
     def get_kernel(self, max_nsources_in_one_box, max_ntargets_in_one_box,
             work_items_per_group=32):
         loopy_insns, result_names = self.get_loopy_insns_and_result_names()
-        arguments = self.get_default_src_tgt_arguments() \
-            + [
+        arguments = [
+                *self.get_default_src_tgt_arguments(),
                 lp.GlobalArg("box_target_starts",
                     None, shape=None),
                 lp.GlobalArg("box_target_counts_nonchild",
diff --git a/sumpy/qbx.py b/sumpy/qbx.py
index 19ff0f7fd8f3c83f4bed32c89d75730273c6ab43..345d251aad4c85aa92b1b4b3e76553c3015cfe6e 100644
--- a/sumpy/qbx.py
+++ b/sumpy/qbx.py
@@ -168,7 +168,7 @@ class LayerPotentialBase(KernelCacheMixin, KernelComputation):
 
     def get_default_src_tgt_arguments(self):
         from sumpy.tools import gather_loopy_source_arguments
-        return ([
+        return [
                 lp.GlobalArg("sources", None,
                     shape=(self.dim, "nsources"), order="C"),
                 lp.GlobalArg("targets", None,
@@ -178,8 +178,9 @@ class LayerPotentialBase(KernelCacheMixin, KernelComputation):
                 lp.GlobalArg("expansion_radii",
                     None, shape="ntargets"),
                 lp.ValueArg("nsources", None),
-                lp.ValueArg("ntargets", None)]
-                + gather_loopy_source_arguments(self.source_kernels))
+                lp.ValueArg("ntargets", None),
+                *gather_loopy_source_arguments(self.source_kernels)
+            ]
 
     def get_kernel(self):
         raise NotImplementedError
diff --git a/sumpy/symbolic.py b/sumpy/symbolic.py
index 48d43a5efbc813d111f2003cc293b6bf82ec3e39..089b1507ca591aa738420128d077654942da0387 100644
--- a/sumpy/symbolic.py
+++ b/sumpy/symbolic.py
@@ -129,13 +129,13 @@ def _coeff_isneg(a):
 
 
 if USE_SYMENGINE:
-    def UnevaluatedExpr(x):  # noqa: F811, N802
+    def UnevaluatedExpr(x):  # noqa: N802
         return x
 else:
     try:
         from sympy import UnevaluatedExpr
     except ImportError:
-        def UnevaluatedExpr(x):  # noqa: F811, N802
+        def UnevaluatedExpr(x):  # noqa: N802
             return x
 
 
diff --git a/sumpy/tools.py b/sumpy/tools.py
index 8c666b6132ab1c14d2154afd5138ebad57bcd4b0..6b8b68825a18ac5755cbb9f107a8f046d27ef3e6 100644
--- a/sumpy/tools.py
+++ b/sumpy/tools.py
@@ -955,7 +955,8 @@ def run_opencl_fft(
         queue: "pyopencl.CommandQueue",
         input_vec: Any,
         inverse: bool = False,
-        wait_for: List["pyopencl.Event"] = None) -> Tuple["pyopencl.Event", Any]:
+        wait_for: Optional[List["pyopencl.Event"]] = None
+    ) -> Tuple["pyopencl.Event", Any]:
     """Runs an FFT on input_vec and returns a :class:`MarkerBasedProfilingEvent`
     that indicate the end and start of the operations carried out and the output
     vector.
diff --git a/sumpy/toys.py b/sumpy/toys.py
index 41117eedb609621a79f65db40dab3d501e66a29f..45d71425efd50a58f73f3b3913724e50957e32ff 100644
--- a/sumpy/toys.py
+++ b/sumpy/toys.py
@@ -35,9 +35,9 @@ from sumpy.kernel import TargetTransformationRemover
 if TYPE_CHECKING:
     from sumpy.kernel import Kernel
     from sumpy.visualization import FieldPlotter
-    import pyopencl  # noqa: F401
+    import pyopencl
 
-import numpy as np  # noqa: F401
+import numpy as np
 import loopy as lp  # noqa: F401
 import pyopencl as cl
 import pyopencl.array
diff --git a/test/test_cse.py b/test/test_cse.py
index e84548c7c48a5eef731ef077192c16b3c06b4021..e884078800660b802ed5b4141c9f137e85267848 100644
--- a/test/test_cse.py
+++ b/test/test_cse.py
@@ -142,8 +142,8 @@ def test_cse_not_possible():
     assert substs == []
     assert reduced == [x + y]
     # issue 6329
-    eq = (meijerg((1, 2), (y, 4), (5,), [], x)  # pylint: disable=possibly-used-before-assignment  # noqa: E501
-          + meijerg((1, 3), (y, 4), (5,), [], x))  # pylint: disable=possibly-used-before-assignment  # noqa: E501
+    eq = (meijerg((1, 2), (y, 4), (5,), [], x)  # pylint: disable=possibly-used-before-assignment
+          + meijerg((1, 3), (y, 4), (5,), [], x))  # pylint: disable=possibly-used-before-assignment
     assert cse(eq) == ([], [eq])
 
 # }}}
@@ -168,7 +168,7 @@ def test_subtraction_opt():
     # Make sure subtraction is optimized.
     e = (x - y)*(z - y) + sym.exp((x - y)*(z - y))
     substs, reduced = cse(
-        [e], optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)])  # pylint: disable=possibly-used-before-assignment  # noqa: E501
+        [e], optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)])  # pylint: disable=possibly-used-before-assignment
     assert substs == [(x0, (x - y)*(y - z))]
     assert reduced == [-x0 + sym.exp(-x0)]
     e = -(x - y)*(z - y) + sym.exp(-(x - y)*(z - y))
@@ -180,7 +180,7 @@ def test_subtraction_opt():
     n = -1 + 1/x
     e = n/x/(-n)**2 - 1/n/x
     assert cse(e, optimizations=[
-               (cse_opts.sub_pre, cse_opts.sub_post)]  # pylint: disable=possibly-used-before-assignment  # noqa: E501
+               (cse_opts.sub_pre, cse_opts.sub_post)]  # pylint: disable=possibly-used-before-assignment
                ) == ([], [0])
 
 # }}}
@@ -332,7 +332,7 @@ def test_issue_6169():
     assert cse(r) == ([], [r])
     # and a check that the right thing is done with the new
     # mechanism
-    assert sub_post(sub_pre((-x - y)*z - x - y)) == -z*(x + y) - x - y  # pylint: disable=possibly-used-before-assignment  # noqa: E501
+    assert sub_post(sub_pre((-x - y)*z - x - y)) == -z*(x + y) - x - y  # pylint: disable=possibly-used-before-assignment
 
 # }}}