#define DMAR_PERFCFGOFF_REG    0x310
 #define DMAR_PERFOVFOFF_REG    0x318
 #define DMAR_PERFCNTROFF_REG   0x31c
+#define DMAR_PERFINTRSTS_REG   0x324
+#define DMAR_PERFINTRCTL_REG   0x328
 #define DMAR_PERFEVNTCAP_REG   0x380
 #define DMAR_ECMD_REG          0x400
 #define DMAR_ECEO_REG          0x408
 
 #define DMA_VCS_PAS    ((u64)1)
 
+/* PERFINTRSTS_REG */
+#define DMA_PERFINTRSTS_PIS    ((u32)1)
+
 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)                    \
 do {                                                                   \
        cycles_t start_time = get_cycles();                             \
        struct pmu              pmu;
        DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX);
        struct perf_event       *event_list[IOMMU_PMU_IDX_MAX];
+       unsigned char           irq_name[16];
 };
 
+#define IOMMU_IRQ_ID_OFFSET_PRQ                (DMAR_UNITS_SUPPORTED)
+#define IOMMU_IRQ_ID_OFFSET_PERF       (2 * DMAR_UNITS_SUPPORTED)
+
 struct intel_iommu {
        void __iomem    *reg; /* Pointer to hardware regs, virtual addr */
        u64             reg_phys; /* physical address of hw register set */
        int             seq_id; /* sequence id of the iommu */
        int             agaw; /* agaw of this iommu */
        int             msagaw; /* max sagaw of this iommu */
-       unsigned int    irq, pr_irq;
+       unsigned int    irq, pr_irq, perf_irq;
        u16             segment;     /* PCI segment# */
        unsigned char   name[13];    /* Device Name */
 
 
        ecmd_submit_sync(iommu, DMA_ECMD_FREEZE, 0, 0);
 }
 
+/*
+ * Drain all pending counter overflows for this IOMMU PMU.
+ * For each bit set in the overflow register, accumulate the counter
+ * value into the owning perf_event, then write the handled bits back
+ * to acknowledge them (presumably write-1-to-clear semantics -- confirm
+ * against the VT-d spec's PERFOVFOFF register definition).
+ */
+static void iommu_pmu_counter_overflow(struct iommu_pmu *iommu_pmu)
+{
+       struct perf_event *event;
+       u64 status;
+       int i;
+
+       /*
+        * Two counters may be overflowed very close. Always check
+        * whether there are more to handle.
+        */
+       while ((status = dmar_readq(iommu_pmu->overflow))) {
+               /* NOTE(review): u64 punned to unsigned long * -- fine on
+                * 64-bit; verify num_cntr <= 32 if 32-bit builds matter. */
+               for_each_set_bit(i, (unsigned long *)&status, iommu_pmu->num_cntr) {
+                       /*
+                        * Find the assigned event of the counter.
+                        * Accumulate the value into the event->count.
+                        */
+                       event = iommu_pmu->event_list[i];
+                       if (!event) {
+                               pr_warn_once("Cannot find the assigned event for counter %d\n", i);
+                               continue;
+                       }
+                       iommu_pmu_event_update(event);
+               }
+
+               /* Acknowledge exactly the overflow bits handled above;
+                * any new overflow re-sets a bit and is caught on reloop. */
+               dmar_writeq(iommu_pmu->overflow, status);
+       }
+}
+
+/*
+ * Threaded handler for the perfmon overflow interrupt (registered with
+ * a NULL hard handler and IRQF_ONESHOT in iommu_pmu_set_interrupt()).
+ * Returns IRQ_NONE when the pending-interrupt status bit is clear
+ * (spurious wakeup); otherwise drains the overflowed counters and
+ * clears the sticky status bit.
+ */
+static irqreturn_t iommu_pmu_irq_handler(int irq, void *dev_id)
+{
+       struct intel_iommu *iommu = dev_id;
+
+       /* Nothing pending on this unit -- not our interrupt. */
+       if (!dmar_readl(iommu->reg + DMAR_PERFINTRSTS_REG))
+               return IRQ_NONE;
+
+       iommu_pmu_counter_overflow(iommu->pmu);
+
+       /* Clear the status bit */
+       dmar_writel(iommu->reg + DMAR_PERFINTRSTS_REG, DMA_PERFINTRSTS_PIS);
+
+       return IRQ_HANDLED;
+}
+
 static int __iommu_pmu_register(struct intel_iommu *iommu)
 {
        struct iommu_pmu *iommu_pmu = iommu->pmu;
        iommu->pmu = NULL;
 }
 
+/*
+ * Allocate a DMAR hwirq for counter-overflow notification and attach
+ * the threaded handler. The hwirq ID is offset by
+ * IOMMU_IRQ_ID_OFFSET_PERF so it cannot collide with the fault and
+ * page-request interrupt IDs of the same unit.
+ * Returns 0 on success or a negative errno.
+ */
+static int iommu_pmu_set_interrupt(struct intel_iommu *iommu)
+{
+       struct iommu_pmu *iommu_pmu = iommu->pmu;
+       int irq, ret;
+
+       irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PERF + iommu->seq_id, iommu->node, iommu);
+       if (irq <= 0)
+               return -EINVAL;
+
+       /* e.g. "dmar0-perf"; lives in iommu_pmu so it outlives request_irq. */
+       snprintf(iommu_pmu->irq_name, sizeof(iommu_pmu->irq_name), "dmar%d-perf", iommu->seq_id);
+
+       iommu->perf_irq = irq;
+       /* NULL hard handler: all overflow draining runs in the irq thread. */
+       ret = request_threaded_irq(irq, NULL, iommu_pmu_irq_handler,
+                                  IRQF_ONESHOT, iommu_pmu->irq_name, iommu);
+       if (ret) {
+               /* Roll back the hwirq allocation and the cached irq number. */
+               dmar_free_hwirq(irq);
+               iommu->perf_irq = 0;
+               return ret;
+       }
+       return 0;
+}
+
+/*
+ * Tear down the perfmon overflow interrupt set up by
+ * iommu_pmu_set_interrupt(). Idempotent: a zero perf_irq means the
+ * interrupt was never allocated (or was already released).
+ */
+static void iommu_pmu_unset_interrupt(struct intel_iommu *iommu)
+{
+       if (!iommu->perf_irq)
+               return;
+
+       free_irq(iommu->perf_irq, iommu);
+       dmar_free_hwirq(iommu->perf_irq);
+       iommu->perf_irq = 0;
+}
+
 static int iommu_pmu_cpu_online(unsigned int cpu)
 {
        if (cpumask_empty(&iommu_pmu_cpu_mask))
        if (iommu_pmu_cpuhp_setup(iommu_pmu))
                goto unregister;
 
+       /* Set interrupt for overflow */
+       if (iommu_pmu_set_interrupt(iommu))
+               goto cpuhp_free;
+
        return;
 
+cpuhp_free:
+       iommu_pmu_cpuhp_free(iommu_pmu);
 unregister:
        perf_pmu_unregister(&iommu_pmu->pmu);
 err:
        if (!iommu_pmu)
                return;
 
+       iommu_pmu_unset_interrupt(iommu);
        iommu_pmu_cpuhp_free(iommu_pmu);
        perf_pmu_unregister(&iommu_pmu->pmu);
 }