#include <linux/blk-pm.h>
 #include <linux/blkdev.h>
 #include <scsi/scsi_driver.h>
-#include <scsi/scsi_transport.h>
-#include "../scsi_transport_api.h"
 #include "ufshcd.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
                             struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
 out:
        up_read(&hba->clk_scaling_lock);
 
-       if (ufs_trigger_eh())
-               scsi_schedule_eh(hba->host);
+       if (ufs_trigger_eh()) {
+               unsigned long flags;
+
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               ufshcd_schedule_eh_work(hba);
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       }
 
        return err;
 }
 }
 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
 
-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
-{
-       lockdep_assert_held(hba->host->host_lock);
-
-       return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
-              (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
-}
-
-static void ufshcd_schedule_eh(struct ufs_hba *hba)
-{
-       bool schedule_eh = false;
-       unsigned long flags;
-
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       /* handle fatal errors only when link is not in error state */
-       if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
-               if (hba->force_reset || ufshcd_is_link_broken(hba) ||
-                   ufshcd_is_saved_err_fatal(hba))
-                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
-               else
-                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
-               schedule_eh = true;
-       }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-       if (schedule_eh)
-               scsi_schedule_eh(hba->host);
-}
-
 /**
  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
  * state) and waits for it to take effect.
 {
        DECLARE_COMPLETION_ONSTACK(uic_async_done);
        unsigned long flags;
-       bool schedule_eh = false;
        u8 status;
        int ret;
        bool reenable_intr = false;
                ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
        if (ret) {
                ufshcd_set_link_broken(hba);
-               schedule_eh = true;
+               ufshcd_schedule_eh_work(hba);
        }
-
 out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-       if (schedule_eh)
-               ufshcd_schedule_eh(hba);
        mutex_unlock(&hba->uic_cmd_mutex);
 
        return ret;
        return err_handling;
 }
 
+/*
+ * ufshcd_is_saved_err_fatal - check whether the saved error state is fatal
+ * @hba: per-adapter instance
+ *
+ * A saved DL PA_INIT UIC error, any fatal-interrupt bit, or a hibern8
+ * enter/exit error in the saved error mask counts as fatal.
+ *
+ * Caller must hold hba->host->host_lock.
+ */
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+       return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+              (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+/*
+ * ufshcd_schedule_eh_work - queue the error-handler work item
+ * @hba: per-adapter instance
+ *
+ * No-op when the link is already in the unrecoverable UFSHCD_STATE_ERROR
+ * state. Otherwise record in hba->ufshcd_state whether the pending error
+ * is fatal (forced reset, broken link, or a saved fatal error) or
+ * non-fatal before queueing eh_work on the dedicated eh_wq.
+ *
+ * Caller must hold hba->host->host_lock.
+ */
+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+{
+       /* handle fatal errors only when link is not in error state */
+       if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+               if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+                   ufshcd_is_saved_err_fatal(hba))
+                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+               else
+                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
+               queue_work(hba->eh_wq, &hba->eh_work);
+       }
+}
+
 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
 {
        down_write(&hba->clk_scaling_lock);
 
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
- * @host: SCSI host pointer
+ * @work: pointer to work structure
  */
-static void ufshcd_err_handler(struct Scsi_Host *host)
+static void ufshcd_err_handler(struct work_struct *work)
 {
-       struct ufs_hba *hba = shost_priv(host);
+       struct ufs_hba *hba;
        unsigned long flags;
        bool err_xfer = false;
        bool err_tm = false;
        int tag;
        bool needs_reset = false, needs_restore = false;
 
+       hba = container_of(work, struct ufs_hba, eh_work);
+
        down(&hba->host_sem);
        spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->host->host_eh_scheduled = 0;
        if (ufshcd_err_handling_should_stop(hba)) {
                if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
                        hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
                                         "host_regs: ");
                        ufshcd_print_pwr_info(hba);
                }
+               ufshcd_schedule_eh_work(hba);
                retval |= IRQ_HANDLED;
        }
        /*
        hba->errors = 0;
        hba->uic_error = 0;
        spin_unlock(hba->host->host_lock);
-
-       if (queue_eh_work)
-               ufshcd_schedule_eh(hba);
-
        return retval;
 }
 
         * will be to send LU reset which, again, is a spec violation.
         * To avoid these unnecessary/illegal steps, first we clean up
         * the lrb taken by this cmd and re-set it in outstanding_reqs,
-        * then queue the error handler and bail.
+        * then queue the eh_work and bail.
         */
        if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
                ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
 
                spin_lock_irqsave(host->host_lock, flags);
                hba->force_reset = true;
+               ufshcd_schedule_eh_work(hba);
                spin_unlock_irqrestore(host->host_lock, flags);
-
-               ufshcd_schedule_eh(hba);
-
                goto release;
        }
 
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->force_reset = true;
+       ufshcd_schedule_eh_work(hba);
        dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-       ufshcd_err_handler(hba->host);
+       flush_work(&hba->eh_work);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
        if (hba->is_powered) {
                ufshcd_exit_clk_scaling(hba);
                ufshcd_exit_clk_gating(hba);
+               if (hba->eh_wq)
+                       destroy_workqueue(hba->eh_wq);
                ufs_debugfs_hba_exit(hba);
                ufshcd_variant_hba_exit(hba);
                ufshcd_setup_vreg(hba, false);
        return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
 }
 
-static struct scsi_transport_template ufshcd_transport_template = {
-       .eh_strategy_handler = ufshcd_err_handler,
-};
-
 /**
  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
  * @dev: pointer to device handle
                err = -ENOMEM;
                goto out_error;
        }
-       host->transportt = &ufshcd_transport_template;
        hba = shost_priv(host);
        hba->host = host;
        hba->dev = dev;
        int err;
        struct Scsi_Host *host = hba->host;
        struct device *dev = hba->dev;
+       char eh_wq_name[sizeof("ufs_eh_wq_00")];
 
        if (!mmio_base) {
                dev_err(hba->dev,
 
        hba->max_pwr_info.is_valid = false;
 
+       /* Initialize work queues */
+       snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
+                hba->host->host_no);
+       hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+       if (!hba->eh_wq) {
+               dev_err(hba->dev, "%s: failed to create eh workqueue\n",
+                       __func__);
+               err = -ENOMEM;
+               goto out_disable;
+       }
+       INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 
        sema_init(&hba->host_sem, 1);