}
 
 #endif
+
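+/*
+ * Work handler for the per-link HPD RX IRQ offload queue: re-detect the sink
+ * under hpd_lock, and if the link is still connected and no GPU reset is in
+ * progress, take dc_lock to service either an automated test request or a DP
+ * link loss.  The link-loss guard (is_handling_link_loss) is cleared once
+ * dc_link_dp_handle_link_loss() has run.
+ */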
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+       struct hpd_rx_irq_offload_work *offload_work;
+       struct amdgpu_dm_connector *aconnector;
+       struct dc_link *dc_link;
+       struct amdgpu_device *adev;
+       enum dc_connection_type new_connection_type = dc_connection_none;
+       unsigned long flags;
+
+       offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+       aconnector = offload_work->offload_wq->aconnector;
+
+       if (!aconnector) {
+               DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work\n");
+               goto skip;
+       }
+
+       adev = drm_to_adev(aconnector->base.dev);
+       dc_link = aconnector->dc_link;
+
+       mutex_lock(&aconnector->hpd_lock);
+       if (!dc_link_detect_sink(dc_link, &new_connection_type))
+               DRM_ERROR("KMS: Failed to detect connector\n");
+       mutex_unlock(&aconnector->hpd_lock);
+
+       if (new_connection_type == dc_connection_none)
+               goto skip;
+
+       if (amdgpu_in_reset(adev))
+               goto skip;
+
+       mutex_lock(&adev->dm.dc_lock);
+       if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
+               dc_link_dp_handle_automated_test(dc_link);
+       else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+                       hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
+                       dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+               dc_link_dp_handle_link_loss(dc_link);
+               spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+               offload_work->offload_wq->is_handling_link_loss = false;
+               spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+       }
+       mutex_unlock(&adev->dm.dc_lock);
+
+skip:
+       kfree(offload_work);
+}
+
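+/*
+ * Allocate one hpd_rx_irq_offload_work_queue per link (dc->caps.max_links):
+ * each entry owns a single-threaded workqueue and a spinlock guarding its
+ * is_handling_link_loss flag.
+ */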
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+       int max_caps = dc->caps.max_links;
+       int i = 0;
+       struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+       hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
+
+       if (!hpd_rx_offload_wq)
+               return NULL;
+
+       for (i = 0; i < max_caps; i++) {
+               hpd_rx_offload_wq[i].wq =
+                                   create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+               if (hpd_rx_offload_wq[i].wq == NULL) {
+                       DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq\n");
+                       goto out_err;
+               }
+
+               spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
+       }
+
+       return hpd_rx_offload_wq;
+
+out_err:
+       /* Tear down any workqueues created before the failure. */
+       for (i = 0; i < max_caps; i++) {
+               if (hpd_rx_offload_wq[i].wq)
+                       destroy_workqueue(hpd_rx_offload_wq[i].wq);
+       }
+       kfree(hpd_rx_offload_wq);
+       return NULL;
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
        struct dc_init_data init_data;
 
        dc_hardware_init(adev->dm.dc);
 
+       adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+       if (!adev->dm.hpd_rx_offload_wq) {
+               DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+               goto error;
+       }
+
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
                struct dc_phy_addr_space_config pa_config;
                adev->dm.freesync_module = NULL;
        }
 
+       if (adev->dm.hpd_rx_offload_wq) {
+               for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+                       if (adev->dm.hpd_rx_offload_wq[i].wq) {
+                               destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
+                               adev->dm.hpd_rx_offload_wq[i].wq = NULL;
+                       }
+               }
+
+               kfree(adev->dm.hpd_rx_offload_wq);
+               adev->dm.hpd_rx_offload_wq = NULL;
+       }
+
        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
 
        return res;
 }
 
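+/*
+ * Flush every per-link HPD RX offload workqueue so that no offload work is
+ * still touching DC state when the device suspends.
+ */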
+static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
+{
+       int i;
+
+       if (dm->hpd_rx_offload_wq) {
+               for (i = 0; i < dm->dc->caps.max_links; i++)
+                       flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
+       }
+}
+
 static int dm_suspend(void *handle)
 {
        struct amdgpu_device *adev = handle;
 
                amdgpu_dm_irq_suspend(adev);
 
+               hpd_rx_irq_work_suspend(dm);
+
                return ret;
        }
 
 
        amdgpu_dm_irq_suspend(adev);
 
+       hpd_rx_irq_work_suspend(dm);
+
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
        return 0;
 
 }
 
-
-static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
 {
        uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
        uint8_t dret;
                DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
 }
 
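+/*
+ * Allocate an offload work item carrying a copy of the HPD IRQ data and queue
+ * it on the link's offload workqueue; dm_handle_hpd_rx_offload_work() frees it.
+ */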
+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+                                                       union hpd_irq_data hpd_irq_data)
+{
+       struct hpd_rx_irq_offload_work *offload_work =
+                               kzalloc(sizeof(*offload_work), GFP_KERNEL);
+
+       if (!offload_work) {
+               DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+               return;
+       }
+
+       INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
+       offload_work->data = hpd_irq_data;
+       offload_work->offload_wq = offload_wq;
+
+       queue_work(offload_wq->wq, &offload_work->work);
+       DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
+}
+
 static void handle_hpd_rx_irq(void *param)
 {
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct amdgpu_device *adev = drm_to_adev(dev);
        union hpd_irq_data hpd_irq_data;
-       bool lock_flag = 0;
+       bool link_loss = false;
+       bool has_left_work = false;
+       int idx = aconnector->base.index;
+       struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
 
        memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
 
        if (adev->dm.disable_hpd_irq)
                return;
 
-
        /*
         * TODO: Temporary mutex to keep the hpd interrupt from conflicting with
         * gpio access; once an i2c helper is implemented, this mutex should be
         * retired.
         */
        mutex_lock(&aconnector->hpd_lock);
 
-       read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
+       result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
+                                               &link_loss, true, &has_left_work);
 
-       if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
-               (dc_link->type == dc_connection_mst_branch)) {
-               if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
-                       result = true;
-                       dm_handle_hpd_rx_irq(aconnector);
-                       goto out;
-               } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
-                       result = false;
-                       dm_handle_hpd_rx_irq(aconnector);
+       if (!has_left_work)
+               goto out;
+
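+       /*
+        * Defer automated test handling to the per-link offload workqueue; the
+        * work handler takes dc_lock before touching DC state.
+        */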
+       if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+               schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+               goto out;
+       }
+
+       if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+               if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+                       hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+                       dm_handle_mst_sideband_msg(aconnector);
                        goto out;
                }
-       }
 
-       /*
-        * TODO: We need the lock to avoid touching DC state while it's being
-        * modified during automated compliance testing, or when link loss
-        * happens. While this should be split into subhandlers and proper
-        * interfaces to avoid having to conditionally lock like this in the
-        * outer layer, we need this workaround temporarily to allow MST
-        * lightup in some scenarios to avoid timeout.
-        */
-       if (!amdgpu_in_reset(adev) &&
-           (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
-            hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
-               mutex_lock(&adev->dm.dc_lock);
-               lock_flag = 1;
-       }
+               if (link_loss) {
+                       bool skip = false;
 
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-       result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
-#else
-       result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
-#endif
-       if (!amdgpu_in_reset(adev) && lock_flag)
-               mutex_unlock(&adev->dm.dc_lock);
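+                       /*
+                        * Allow only one link-loss recovery in flight per
+                        * link: mark is_handling_link_loss under the offload
+                        * lock; the offload work clears it once
+                        * dc_link_dp_handle_link_loss() completes.
+                        */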
+                       spin_lock(&offload_wq->offload_lock);
+                       skip = offload_wq->is_handling_link_loss;
+
+                       if (!skip)
+                               offload_wq->is_handling_link_loss = true;
+
+                       spin_unlock(&offload_wq->offload_lock);
+
+                       if (!skip)
+                               schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+                       goto out;
+               }
+       }
 
 out:
        if (result && !is_mst_root_connector) {
                        amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                        handle_hpd_rx_irq,
                                        (void *) aconnector);
+
+                       if (adev->dm.hpd_rx_offload_wq)
+                               adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+                                       aconnector;
                }
        }
 }