#ifndef __NVKM_DEVICE_H__
 #define __NVKM_DEVICE_H__
 #include <core/oclass.h>
+#include <core/intr.h>
 enum nvkm_subdev_type;
 
 enum nvkm_device_type {
 #undef NVKM_LAYOUT_INST
 #undef NVKM_LAYOUT_ONCE
        struct list_head subdev;
+
+       struct {
+               spinlock_t lock;
+               int irq;
+               bool alloc;
+               bool armed;
+       } intr;
 };
 
 struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int type, int inst);
        int (*preinit)(struct nvkm_device *);
        int (*init)(struct nvkm_device *);
        void (*fini)(struct nvkm_device *, bool suspend);
+       int (*irq)(struct nvkm_device *);
        resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
        resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
        bool cpu_coherent;
 
--- /dev/null
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_INTR_H__
+#define __NVKM_INTR_H__
+#include <core/os.h>
+struct nvkm_device;
+
+/* Common device interrupt handling shared by the PCI and Tegra backends:
+ * ctor/dtor manage the intr state lifetime, install requests the IRQ line
+ * from the backend, and unarm/rearm gate interrupt delivery around
+ * init/fini/suspend.
+ */
+void nvkm_intr_ctor(struct nvkm_device *);
+void nvkm_intr_dtor(struct nvkm_device *);
+int nvkm_intr_install(struct nvkm_device *);
+void nvkm_intr_unarm(struct nvkm_device *);
+void nvkm_intr_rearm(struct nvkm_device *);
+#endif
 
        const struct nvkm_device_tegra_func *func;
        struct nvkm_device device;
        struct platform_device *pdev;
-       int irq;
 
        struct reset_control *rst;
        struct clk *clk;
 
        const struct nvkm_pci_func *func;
        struct nvkm_subdev subdev;
        struct pci_dev *pdev;
-       int irq;
 
        struct {
                struct agp_bridge_data *bridge;
 void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data);
 u32 nvkm_pci_mask(struct nvkm_pci *, u16 addr, u32 mask, u32 value);
 void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow);
+void nvkm_pci_msi_rearm(struct nvkm_device *);
 
 int nv04_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
 int nv40_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
 
 nvkm-y += nvkm/core/event.o
 nvkm-y += nvkm/core/firmware.o
 nvkm-y += nvkm/core/gpuobj.o
+nvkm-y += nvkm/core/intr.o
 nvkm-y += nvkm/core/ioctl.o
 nvkm-y += nvkm/core/memory.o
 nvkm-y += nvkm/core/mm.o
 
--- /dev/null
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/intr.h>
+
+#include <subdev/pci.h>
+#include <subdev/mc.h>
+
+/* Re-enable interrupt generation via MC; caller must hold intr.lock. */
+static void
+nvkm_intr_rearm_locked(struct nvkm_device *device)
+{
+       nvkm_mc_intr_rearm(device);
+}
+
+/* Disable interrupt generation via MC; caller must hold intr.lock. */
+static void
+nvkm_intr_unarm_locked(struct nvkm_device *device)
+{
+       nvkm_mc_intr_unarm(device);
+}
+
+/* Top-level (shared) interrupt handler.  Masks interrupt generation while
+ * dispatching to MC, then unmasks again before returning.  Returns IRQ_NONE
+ * unless MC reports something was actually handled, so a shared line is not
+ * starved.
+ */
+static irqreturn_t
+nvkm_intr(int irq, void *arg)
+{
+       struct nvkm_device *device = arg;
+       irqreturn_t ret = IRQ_NONE;
+       bool handled;
+
+       spin_lock(&device->intr.lock);
+       /* Ignore spurious/shared-line interrupts while delivery is disarmed
+        * (e.g. during preinit/fini/suspend).
+        */
+       if (!device->intr.armed)
+               goto done_unlock;
+
+       /* Mask further interrupt generation while we dispatch. */
+       nvkm_intr_unarm_locked(device);
+       /* Re-trigger MSI delivery if enabled (no-op on non-MSI/Tegra). */
+       nvkm_pci_msi_rearm(device);
+
+       nvkm_mc_intr(device, &handled);
+       if (handled)
+               ret = IRQ_HANDLED;
+
+       nvkm_intr_rearm_locked(device);
+done_unlock:
+       spin_unlock(&device->intr.lock);
+       return ret;
+}
+
+/* Enable interrupt delivery (called from device init).  Sets intr.armed so
+ * the ISR will start dispatching interrupts.
+ */
+void
+nvkm_intr_rearm(struct nvkm_device *device)
+{
+       spin_lock_irq(&device->intr.lock);
+       nvkm_intr_rearm_locked(device);
+       device->intr.armed = true;
+       spin_unlock_irq(&device->intr.lock);
+}
+
+/* Disable interrupt delivery (called from preinit/fini).  Clears intr.armed
+ * so the ISR treats any further interrupts on the line as not-ours.
+ */
+void
+nvkm_intr_unarm(struct nvkm_device *device)
+{
+       spin_lock_irq(&device->intr.lock);
+       nvkm_intr_unarm_locked(device);
+       device->intr.armed = false;
+       spin_unlock_irq(&device->intr.lock);
+}
+
+/* Query the IRQ line from the bus backend and install the shared handler.
+ * Returns 0 on success, or a negative error from the backend's irq() hook
+ * or request_irq().  intr.alloc records that free_irq() is needed in dtor.
+ */
+int
+nvkm_intr_install(struct nvkm_device *device)
+{
+       int ret;
+
+       device->intr.irq = device->func->irq(device);
+       if (device->intr.irq < 0)
+               return device->intr.irq;
+
+       ret = request_irq(device->intr.irq, nvkm_intr, IRQF_SHARED, "nvkm", device);
+       if (ret)
+               return ret;
+
+       device->intr.alloc = true;
+       return 0;
+}
+
+/* Release the IRQ line, if nvkm_intr_install() succeeded. */
+void
+nvkm_intr_dtor(struct nvkm_device *device)
+{
+       if (device->intr.alloc)
+               free_irq(device->intr.irq, device);
+}
+
+/* One-time init of interrupt state; must run before install/unarm/rearm. */
+void
+nvkm_intr_ctor(struct nvkm_device *device)
+{
+       spin_lock_init(&device->intr.lock);
+}
 
        if (device->func->fini)
                device->func->fini(device, suspend);
 
+       nvkm_intr_unarm(device);
+
        time = ktime_to_us(ktime_get()) - time;
        nvdev_trace(device, "%s completed in %lldus...\n", action, time);
        return 0;
        nvdev_trace(device, "preinit running...\n");
        time = ktime_to_us(ktime_get());
 
+       nvkm_intr_unarm(device);
+
        if (device->func->preinit) {
                ret = device->func->preinit(device);
                if (ret)
        nvdev_trace(device, "init running...\n");
        time = ktime_to_us(ktime_get());
 
+       nvkm_intr_rearm(device);
+
        if (device->func->init) {
                ret = device->func->init(device);
                if (ret)
        if (device) {
                mutex_lock(&nv_devices_mutex);
 
+               nvkm_intr_dtor(device);
+
                list_for_each_entry_safe_reverse(subdev, subtmp, &device->subdev, head)
                        nvkm_subdev_del(&subdev);
 
                device->name = device->chip->name;
 
        mutex_init(&device->mutex);
+       nvkm_intr_ctor(device);
 
 #define NVKM_LAYOUT_ONCE(type,data,ptr)                                                      \
        if (device->chip->ptr.inst && (subdev_mask & (BIT_ULL(type)))) {                     \
 #undef NVKM_LAYOUT_INST
 #undef NVKM_LAYOUT_ONCE
 
-       ret = 0;
+       ret = nvkm_intr_install(device);
 done:
        if (device->pri && (!mmio || ret)) {
                iounmap(device->pri);
 
        return pci_resource_len(pdev->pdev, bar);
 }
 
+/* Backend irq() hook: report the PCI device's assigned IRQ line. */
+static int
+nvkm_device_pci_irq(struct nvkm_device *device)
+{
+       return nvkm_device_pci(device)->pdev->irq;
+}
+
 static void
 nvkm_device_pci_fini(struct nvkm_device *device, bool suspend)
 {
        .dtor = nvkm_device_pci_dtor,
        .preinit = nvkm_device_pci_preinit,
        .fini = nvkm_device_pci_fini,
+       .irq = nvkm_device_pci_irq,
        .resource_addr = nvkm_device_pci_resource_addr,
        .resource_size = nvkm_device_pci_resource_size,
        .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
 
        return res ? resource_size(res) : 0;
 }
 
-static irqreturn_t
-nvkm_device_tegra_intr(int irq, void *arg)
-{
-       struct nvkm_device_tegra *tdev = arg;
-       struct nvkm_device *device = &tdev->device;
-       bool handled = false;
-       nvkm_mc_intr_unarm(device);
-       nvkm_mc_intr(device, &handled);
-       nvkm_mc_intr_rearm(device);
-       return handled ? IRQ_HANDLED : IRQ_NONE;
-}
-
-static void
-nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
-{
-       struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
-       if (tdev->irq) {
-               free_irq(tdev->irq, tdev);
-               tdev->irq = 0;
-       }
-}
-
+/* Backend irq() hook: look up the "stall" interrupt from the platform
+ * device (returns a negative errno if absent), replacing the old
+ * init/fini pair that requested/freed the IRQ here directly.
+ */
 static int
-nvkm_device_tegra_init(struct nvkm_device *device)
+nvkm_device_tegra_irq(struct nvkm_device *device)
 {
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
-       int irq, ret;
-
-       irq = platform_get_irq_byname(tdev->pdev, "stall");
-       if (irq < 0)
-               return irq;
 
-       ret = request_irq(irq, nvkm_device_tegra_intr,
-                         IRQF_SHARED, "nvkm", tdev);
-       if (ret)
-               return ret;
-
-       tdev->irq = irq;
-       return 0;
+       return platform_get_irq_byname(tdev->pdev, "stall");
 }
 
 static void *
 nvkm_device_tegra_func = {
        .tegra = nvkm_device_tegra,
        .dtor = nvkm_device_tegra_dtor,
-       .init = nvkm_device_tegra_init,
-       .fini = nvkm_device_tegra_fini,
+       .irq = nvkm_device_tegra_irq,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
        .cpu_coherent = false,
 
 
 #include <core/option.h>
 #include <core/pci.h>
-#include <subdev/mc.h>
+
+/* Re-trigger MSI delivery after an interrupt, when MSI is in use.  Safe to
+ * call before the PCI subdev exists (device->pci NULL) or with MSI off —
+ * both cases are a no-op.
+ */
+void
+nvkm_pci_msi_rearm(struct nvkm_device *device)
+{
+       struct nvkm_pci *pci = device->pci;
+
+       if (pci && pci->msi)
+               pci->func->msi_rearm(pci);
+}
 
 u32
 nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
        nvkm_pci_wr32(pci, 0x0050, data);
 }
 
-static irqreturn_t
-nvkm_pci_intr(int irq, void *arg)
-{
-       struct nvkm_pci *pci = arg;
-       struct nvkm_device *device = pci->subdev.device;
-       bool handled = false;
-
-       if (pci->irq < 0)
-               return IRQ_HANDLED;
-
-       nvkm_mc_intr_unarm(device);
-       if (pci->msi)
-               pci->func->msi_rearm(pci);
-       nvkm_mc_intr(device, &handled);
-       nvkm_mc_intr_rearm(device);
-       return handled ? IRQ_HANDLED : IRQ_NONE;
-}
-
 static int
 nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
 {
 nvkm_pci_oneinit(struct nvkm_subdev *subdev)
 {
        struct nvkm_pci *pci = nvkm_pci(subdev);
-       struct pci_dev *pdev = pci->pdev;
        int ret;
 
        if (pci_is_pcie(pci->pdev)) {
                        return ret;
        }
 
-       ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
-       if (ret)
-               return ret;
-
-       pci->irq = pdev->irq;
        return 0;
 }
 
 
        nvkm_agp_dtor(pci);
 
-       if (pci->irq >= 0) {
-               /* freq_irq() will call the handler, we use pci->irq == -1
-                * to signal that it's been torn down and should be a noop.
-                */
-               int irq = pci->irq;
-               pci->irq = -1;
-               free_irq(irq, pci);
-       }
-
        if (pci->msi)
                pci_disable_msi(pci->pdev);
 
        nvkm_subdev_ctor(&nvkm_pci_func, device, type, inst, &pci->subdev);
        pci->func = func;
        pci->pdev = device->func->pci(device)->pdev;
-       pci->irq = -1;
        pci->pcie.speed = -1;
        pci->pcie.width = -1;