* __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
  * @hba: per adapter instance
  * @uic_cmd: UIC command
+ * @completion: initialize the completion only if this is set to true
  *
  * Identical to ufshcd_send_uic_cmd() except mutex. Must be called
  * with mutex held and host_lock locked.
  * Returns 0 only if success.
  */
 static int
-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+                     bool completion)
 {
        if (!ufshcd_ready_for_uic_cmd(hba)) {
                dev_err(hba->dev,
                return -EIO;
        }
 
-       init_completion(&uic_cmd->done);
+       if (completion)
+               init_completion(&uic_cmd->done);
 
        ufshcd_dispatch_uic_cmd(hba, uic_cmd);
 
        ufshcd_add_delay_before_dme_cmd(hba);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+       ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        if (!ret)
                ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
        unsigned long flags;
        u8 status;
        int ret;
+       bool reenable_intr = false;
 
        mutex_lock(&hba->uic_cmd_mutex);
        init_completion(&uic_async_done);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->uic_async_done = &uic_async_done;
-       ret = __ufshcd_send_uic_cmd(hba, cmd);
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-       if (ret) {
-               dev_err(hba->dev,
-                       "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
-                       cmd->command, cmd->argument3, ret);
-               goto out;
+       if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
+               ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
+               /*
+                * Make sure UIC command completion interrupt is disabled before
+                * issuing UIC command.
+                */
+               wmb();
+               reenable_intr = true;
        }
-       ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+       ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
        if (ret) {
                dev_err(hba->dev,
                        "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
        }
 out:
        spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->active_uic_cmd = NULL;
        hba->uic_async_done = NULL;
+       if (reenable_intr)
+               ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        mutex_unlock(&hba->uic_cmd_mutex);
 
  */
 static irqreturn_t ufshcd_intr(int irq, void *__hba)
 {
-       u32 intr_status;
+       u32 intr_status, enabled_intr_status;
        irqreturn_t retval = IRQ_NONE;
        struct ufs_hba *hba = __hba;
 
        spin_lock(hba->host->host_lock);
        intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+       enabled_intr_status =
+               intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 
-       if (intr_status) {
+       if (intr_status)
                ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
-               ufshcd_sl_intr(hba, intr_status);
+
+       if (enabled_intr_status) {
+               ufshcd_sl_intr(hba, enabled_intr_status);
                retval = IRQ_HANDLED;
        }
        spin_unlock(hba->host->host_lock);