*/
 static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
 {
-       struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
        struct dpu_irq_callback *cb;
 
        VERB("irq_idx=%d\n", irq_idx);
 
-       if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
+       if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
                DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
 
-       atomic_inc(&irq_obj->irq_counts[irq_idx]);
+       atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);
 
        /*
         * Perform registered function callback
         */
-       list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+       list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
                if (cb->func)
                        cb->func(cb->arg, irq_idx);
 }
 {
        if (intr) {
                kfree(intr->cache_irq_mask);
+
+               kfree(intr->irq_cb_tbl);
+               kfree(intr->irq_counts);
+
                kfree(intr);
        }
 }
 {
        unsigned long irq_flags;
 
-       if (!dpu_kms->irq_obj.irq_cb_tbl) {
+       if (!dpu_kms->hw_intr->irq_cb_tbl) {
                DPU_ERROR("invalid params\n");
                return -EINVAL;
        }
        trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
        list_del_init(&register_irq_cb->list);
        list_add_tail(&register_irq_cb->list,
-                       &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+                       &dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
        if (list_is_first(&register_irq_cb->list,
-                       &dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
+                       &dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
                int ret = dpu_hw_intr_enable_irq_locked(
                                dpu_kms->hw_intr,
                                irq_idx);
 {
        unsigned long irq_flags;
 
-       if (!dpu_kms->irq_obj.irq_cb_tbl) {
+       if (!dpu_kms->hw_intr->irq_cb_tbl) {
                DPU_ERROR("invalid params\n");
                return -EINVAL;
        }
        trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
        list_del_init(&register_irq_cb->list);
        /* empty callback list but interrupt is still enabled */
-       if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
+       if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
                int ret = dpu_hw_intr_disable_irq_locked(
                                dpu_kms->hw_intr,
                                irq_idx);
 static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
 {
        struct dpu_kms *dpu_kms = s->private;
-       struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
        struct dpu_irq_callback *cb;
        unsigned long irq_flags;
        int i, irq_count, cb_count;
 
-       if (WARN_ON(!irq_obj->irq_cb_tbl))
+       if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
                return 0;
 
-       for (i = 0; i < irq_obj->total_irqs; i++) {
+       for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
                spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
                cb_count = 0;
-               irq_count = atomic_read(&irq_obj->irq_counts[i]);
-               list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+               irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
+               list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
                        cb_count++;
                spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
 
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 
        /* Create irq callbacks for all possible irq_idx */
-       dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->total_irqs;
-       dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
+       dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
                        sizeof(struct list_head), GFP_KERNEL);
-       dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+       dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
                        sizeof(atomic_t), GFP_KERNEL);
-       for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
-               INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
-               atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
+       for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
+               INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
+               atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
        }
 }
 
        int i;
 
        pm_runtime_get_sync(&dpu_kms->pdev->dev);
-       for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
-               if (!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+       for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+               if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
                        DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
 
        dpu_clear_irqs(dpu_kms);
        dpu_disable_all_irqs(dpu_kms);
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
-       kfree(dpu_kms->irq_obj.irq_cb_tbl);
-       kfree(dpu_kms->irq_obj.irq_counts);
-       dpu_kms->irq_obj.irq_cb_tbl = NULL;
-       dpu_kms->irq_obj.irq_counts = NULL;
-       dpu_kms->irq_obj.total_irqs = 0;
 }