 #include <linux/of.h>
 #include <linux/bitfield.h>
 #include <linux/blk-pm.h>
+#include <linux/blkdev.h>
 #include "ufshcd.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
                             struct ufs_pa_layer_attr *pwr_mode);
 static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
+static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
+static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+                                        struct ufs_vreg *vreg);
 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
        }
 }
 
+static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
+{
+       pm_runtime_get_sync(hba->dev);
+       if (pm_runtime_suspended(hba->dev)) {
+               /*
+                * Don't assume anything about pm_runtime_get_sync(): if
+                * the resume failed, the IRQ and clocks may be OFF, and
+                * the power supplies may be OFF or in LPM.
+                */
+               ufshcd_setup_hba_vreg(hba, true);
+               ufshcd_enable_irq(hba);
+               ufshcd_setup_vreg(hba, true);
+               ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+               ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
+               ufshcd_hold(hba, false);
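+               /*
+                * ufshcd_hold() above only guarantees that the clocks are
+                * on when clock gating is allowed; otherwise turn them on
+                * explicitly.
+                */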
+               if (!ufshcd_is_clkgating_allowed(hba))
+                       ufshcd_setup_clocks(hba, true);
+               ufshcd_release(hba);
+               ufshcd_vops_resume(hba, UFS_RUNTIME_PM);
+       } else {
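+               /*
+                * The device is already runtime-active, so clocks and
+                * power are on; just block clock gating and pause clock
+                * scaling while error handling runs.
+                */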
+               ufshcd_hold(hba, false);
+               if (hba->clk_scaling.is_allowed) {
+                       cancel_work_sync(&hba->clk_scaling.suspend_work);
+                       cancel_work_sync(&hba->clk_scaling.resume_work);
+                       ufshcd_suspend_clkscaling(hba);
+               }
+       }
+}
+
+static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
+{
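+       /*
+        * Undo ufshcd_err_handling_prepare(): drop the clk-gating hold,
+        * restart clock scaling and release the runtime PM reference.
+        */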
+       ufshcd_release(hba);
+       if (hba->clk_scaling.is_allowed)
+               ufshcd_resume_clkscaling(hba);
+       pm_runtime_put(hba->dev);
+}
+
+static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
+{
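+       /*
+        * Recovery can stop when the HBA is in an unrecoverable ERROR
+        * state, or when there is nothing left to recover: no saved
+        * errors, no pending host reset and the link is not broken.
+        */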
+       return (hba->ufshcd_state == UFSHCD_STATE_ERROR ||
+               (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
+                       ufshcd_is_link_broken(hba))));
+}
+
+#ifdef CONFIG_PM
+static void ufshcd_recover_pm_error(struct ufs_hba *hba)
+{
+       struct Scsi_Host *shost = hba->host;
+       struct scsi_device *sdev;
+       struct request_queue *q;
+       int ret;
+
+       /*
+        * Set the RPM status of the hba device to RPM_ACTIVE; this
+        * also clears its runtime PM error status.
+        */
+       ret = pm_runtime_set_active(hba->dev);
+       /*
+        * If the hba device had a runtime PM error, also resume the
+        * SCSI devices under it, in case any of them failed to resume
+        * because the hba's runtime resume failed. This unblocks
+        * blk_queue_enter() for any bios waiting inside it.
+        */
+       if (!ret) {
+               shost_for_each_device(sdev, shost) {
+                       q = sdev->request_queue;
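+                       /*
+                        * q->dev is only set for queues that have runtime
+                        * PM initialized via blk_pm_runtime_init().
+                        */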
+                       if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+                                      q->rpm_status == RPM_SUSPENDING))
+                               pm_request_resume(q->dev);
+               }
+       }
+}
+#else
+static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
+{
+}
+#endif
+
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
  * @work: pointer to work structure
        hba = container_of(work, struct ufs_hba, eh_work);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->ufshcd_state == UFSHCD_STATE_ERROR ||
-           (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
-               ufshcd_is_link_broken(hba)))) {
+       if (ufshcd_err_handling_should_stop(hba)) {
                if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
                        hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
                return;
        }
        ufshcd_set_eh_in_progress(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
-       pm_runtime_get_sync(hba->dev);
-       ufshcd_hold(hba, false);
-
+       ufshcd_err_handling_prepare(hba);
        spin_lock_irqsave(hba->host->host_lock, flags);
+       /*
+        * A full reset and restore might have happened after preparation
+        * finished; double-check whether we should stop.
+        */
+       if (ufshcd_err_handling_should_stop(hba)) {
+               if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
+                       hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+               goto out;
+       }
        hba->ufshcd_state = UFSHCD_STATE_RESET;
 
        /* Complete requests that have door-bell cleared by h/w */
                hba->force_reset = false;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
                err = ufshcd_reset_and_restore(hba);
-               spin_lock_irqsave(hba->host->host_lock, flags);
                if (err)
                        dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
                                        __func__, err);
+               else
+                       ufshcd_recover_pm_error(hba);
+               spin_lock_irqsave(hba->host->host_lock, flags);
        }
 
 skip_err_handling:
                            __func__, hba->saved_err, hba->saved_uic_err);
        }
 
+out:
        ufshcd_clear_eh_in_progress(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        ufshcd_scsi_unblock_requests(hba);
-       ufshcd_release(hba);
-       pm_runtime_put_sync(hba->dev);
+       ufshcd_err_handling_unprepare(hba);
 }
 
 /**