struct amdgpu_display_manager *dm;
        /* DAL irq source which registered for this interrupt. */
        enum dc_irq_source irq_source;
+       struct work_struct work;
 };
 
 #define DM_IRQ_TABLE_LOCK(adev, flags) \
  */
 static void dm_irq_work_func(struct work_struct *work)
 {
-       struct irq_list_head *irq_list_head =
-               container_of(work, struct irq_list_head, work);
-       struct list_head *handler_list = &irq_list_head->head;
-       struct amdgpu_dm_irq_handler_data *handler_data;
-
-       list_for_each_entry(handler_data, handler_list, list) {
-               DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
-                               handler_data->irq_source);
+       struct amdgpu_dm_irq_handler_data *handler_data =
+               container_of(work, struct amdgpu_dm_irq_handler_data, work);
 
-               DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
-                       handler_data->irq_source);
-
-               handler_data->handler(handler_data->handler_arg);
-       }
+       handler_data->handler(handler_data->handler_arg);
 
        /* Call a DAL subcomponent which registered for interrupt notification
         * at INTERRUPT_LOW_IRQ_CONTEXT.
                break;
        case INTERRUPT_LOW_IRQ_CONTEXT:
        default:
-               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
                break;
        }
 
                break;
        case INTERRUPT_LOW_IRQ_CONTEXT:
        default:
-               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
+               INIT_WORK(&handler_data->work, dm_irq_work_func);
                break;
        }
 
 int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 {
        int src;
-       struct irq_list_head *lh;
+       struct list_head *lh;
 
        DRM_DEBUG_KMS("DM_IRQ\n");
 
        for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
                /* low context handler list init */
                lh = &adev->dm.irq_handler_list_low_tab[src];
-               INIT_LIST_HEAD(&lh->head);
-               INIT_WORK(&lh->work, dm_irq_work_func);
-
+               INIT_LIST_HEAD(lh);
                /* high context handler init */
                INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
        }
 void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
 {
        int src;
-       struct irq_list_head *lh;
+       struct list_head *lh;
+       struct list_head *entry, *tmp;
+       struct amdgpu_dm_irq_handler_data *handler;
        unsigned long irq_table_flags;
+
        DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
        for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
                DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
                 * (because no code can schedule a new one). */
                lh = &adev->dm.irq_handler_list_low_tab[src];
                DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-               flush_work(&lh->work);
+
+               if (!list_empty(lh)) {
+                       list_for_each_safe(entry, tmp, lh) {
+                               handler = list_entry(
+                                       entry,
+                                       struct amdgpu_dm_irq_handler_data,
+                                       list);
+                               flush_work(&handler->work);
+                       }
+               }
        }
 }
 
        struct list_head *hnd_list_h;
        struct list_head *hnd_list_l;
        unsigned long irq_table_flags;
+       struct list_head *entry, *tmp;
+       struct amdgpu_dm_irq_handler_data *handler;
 
        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
 
         * will be disabled from manage_dm_interrupts on disable CRTC.
         */
        for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
-               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
                hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
                if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
                        dc_interrupt_set(adev->dm.dc, src, false);
 
                DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-               flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
 
+               if (!list_empty(hnd_list_l)) {
+               list_for_each_safe(entry, tmp, hnd_list_l) {
+                               handler = list_entry(
+                                       entry,
+                                       struct amdgpu_dm_irq_handler_data,
+                                       list);
+                               flush_work(&handler->work);
+                       }
+               }
                DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
        }
 
 
        /* re-enable short pulse interrupts HW interrupt */
        for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
-               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
                hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
                if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
                        dc_interrupt_set(adev->dm.dc, src, true);
         * will be enabled from manage_dm_interrupts on enable CRTC.
         */
        for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
-               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
                hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
                if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
                        dc_interrupt_set(adev->dm.dc, src, true);
 static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
                                        enum dc_irq_source irq_source)
 {
-       unsigned long irq_table_flags;
-       struct work_struct *work = NULL;
+       struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
+       struct amdgpu_dm_irq_handler_data *handler_data;
+       bool work_queued = false;
 
-       DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+       if (list_empty(handler_list))
+               return;
+
+       list_for_each_entry(handler_data, handler_list, list) {
+               if (!queue_work(system_highpri_wq, &handler_data->work)) {
+                       continue;
+               } else {
+                       work_queued = true;
+                       break;
+               }
+       }
 
-       if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
-               work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
+       if (!work_queued) {
+               struct amdgpu_dm_irq_handler_data *handler_data_add;
+               /* Get the amdgpu_dm_irq_handler_data of the first entry in handler_list. */
+               handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
 
-       DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+               /* Allocate a new amdgpu_dm_irq_handler_data. */
+               handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+               if (!handler_data_add) {
+                       DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
+                       return;
+               }
 
-       if (work) {
-               if (!schedule_work(work))
-                       DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
-                                               irq_source);
-       }
+               /* Copy members from handler_data into the newly allocated entry. */
+               handler_data_add->handler       = handler_data->handler;
+               handler_data_add->handler_arg   = handler_data->handler_arg;
+               handler_data_add->dm            = handler_data->dm;
+               handler_data_add->irq_source    = irq_source;
 
+               list_add_tail(&handler_data_add->list, handler_list);
+
+               INIT_WORK(&handler_data_add->work, dm_irq_work_func);
+
+               if (queue_work(system_highpri_wq, &handler_data_add->work))
+                       DRM_DEBUG("Queued work for handling interrupt from "
+                                 "display for IRQ source %d\n",
+                                 irq_source);
+               else
+                       DRM_ERROR("Failed to queue work for handling interrupt "
+                                 "from display for IRQ source %d\n",
+                                 irq_source);
+       }
 }
 
 /*