pr_debug("irq_idx=%d\n", irq_idx);
 
-       if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
-               DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
-                       atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
-       }
+       if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
+               DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
 
        atomic_inc(&irq_obj->irq_counts[irq_idx]);
 
        spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
 }
 
-/**
- * _dpu_core_irq_enable - enable core interrupt given by the index
- * @dpu_kms:           Pointer to dpu kms context
- * @irq_idx:           interrupt index
- */
-static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
-{
-       unsigned long irq_flags;
-       int ret = 0, enable_count;
-
-       if (!dpu_kms->hw_intr ||
-                       !dpu_kms->irq_obj.enable_counts ||
-                       !dpu_kms->irq_obj.irq_counts) {
-               DPU_ERROR("invalid params\n");
-               return -EINVAL;
-       }
-
-       if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
-               DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
-               return -EINVAL;
-       }
-
-       enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
-       DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
-       trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
-
-       if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
-               ret = dpu_kms->hw_intr->ops.enable_irq(
-                               dpu_kms->hw_intr,
-                               irq_idx);
-               if (ret)
-                       DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
-                                       irq_idx);
-
-               DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
-
-               spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
-               /* empty callback list but interrupt is enabled */
-               if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
-                       DPU_ERROR("irq_idx=%d enabled with no callback\n",
-                                       irq_idx);
-               spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
-       }
-
-       return ret;
-}
-
-int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
-{
-       int i, ret = 0, counts;
-
-       if (!irq_idxs || !irq_count) {
-               DPU_ERROR("invalid params\n");
-               return -EINVAL;
-       }
-
-       counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
-       if (counts)
-               DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
-
-       for (i = 0; (i < irq_count) && !ret; i++)
-               ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
-
-       return ret;
-}
-
-/**
- * _dpu_core_irq_disable - disable core interrupt given by the index
- * @dpu_kms:           Pointer to dpu kms context
- * @irq_idx:           interrupt index
- */
-static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
-{
-       int ret = 0, enable_count;
-
-       if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
-               DPU_ERROR("invalid params\n");
-               return -EINVAL;
-       }
-
-       if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
-               DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
-               return -EINVAL;
-       }
-
-       enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
-       DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
-       trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
-
-       if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
-               ret = dpu_kms->hw_intr->ops.disable_irq(
-                               dpu_kms->hw_intr,
-                               irq_idx);
-               if (ret)
-                       DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
-                                       irq_idx);
-               DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
-       }
-
-       return ret;
-}
-
-int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
-{
-       int i, ret = 0, counts;
-
-       if (!irq_idxs || !irq_count) {
-               DPU_ERROR("invalid params\n");
-               return -EINVAL;
-       }
-
-       counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
-       if (counts == 2)
-               DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
-
-       for (i = 0; (i < irq_count) && !ret; i++)
-               ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
-
-       return ret;
-}
-
 u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
 {
        if (!dpu_kms->hw_intr ||
        list_del_init(&register_irq_cb->list);
        list_add_tail(&register_irq_cb->list,
                        &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+       if (list_is_first(&register_irq_cb->list,
+                       &dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
+               int ret = dpu_kms->hw_intr->ops.enable_irq(
+                               dpu_kms->hw_intr,
+                               irq_idx);
+               if (ret)
+                       DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+                                       irq_idx);
+       }
+
        spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
 
        return 0;
        trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
        list_del_init(&register_irq_cb->list);
        /* empty callback list but interrupt is still enabled */
-       if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
-                       atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
-               DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+       if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
+               int ret = dpu_kms->hw_intr->ops.disable_irq(
+                               dpu_kms->hw_intr,
+                               irq_idx);
+               if (ret)
+                       DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+                                       irq_idx);
+               DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+       }
        spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
 
        return 0;
        struct dpu_irq *irq_obj = s->private;
        struct dpu_irq_callback *cb;
        unsigned long irq_flags;
-       int i, irq_count, enable_count, cb_count;
+       int i, irq_count, cb_count;
 
-       if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl))
+       if (WARN_ON(!irq_obj->irq_cb_tbl))
                return 0;
 
        for (i = 0; i < irq_obj->total_irqs; i++) {
                spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
                cb_count = 0;
                irq_count = atomic_read(&irq_obj->irq_counts[i]);
-               enable_count = atomic_read(&irq_obj->enable_counts[i]);
                list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
                        cb_count++;
                spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
 
-               if (irq_count || enable_count || cb_count)
-                       seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
-                                       i, irq_count, enable_count, cb_count);
+               if (irq_count || cb_count)
+                       seq_printf(s, "idx:%d irq:%d cb:%d\n",
+                                       i, irq_count, cb_count);
        }
 
        return 0;
        dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->total_irqs;
        dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
                        sizeof(struct list_head), GFP_KERNEL);
-       dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
-                       sizeof(atomic_t), GFP_KERNEL);
        dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
                        sizeof(atomic_t), GFP_KERNEL);
        for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
                INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
-               atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
                atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
        }
 }
 
        pm_runtime_get_sync(&dpu_kms->pdev->dev);
        for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
-               if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
-                               !list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+               if (!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
                        DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
 
        dpu_clear_all_irqs(dpu_kms);
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 
        kfree(dpu_kms->irq_obj.irq_cb_tbl);
-       kfree(dpu_kms->irq_obj.enable_counts);
        kfree(dpu_kms->irq_obj.irq_counts);
        dpu_kms->irq_obj.irq_cb_tbl = NULL;
-       dpu_kms->irq_obj.enable_counts = NULL;
        dpu_kms->irq_obj.irq_counts = NULL;
        dpu_kms->irq_obj.total_irqs = 0;
 }