}
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
 
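+/*
+ * Release the freeze on every namespace queue so that new requests may
+ * enter again. Pairs with nvme_start_freeze().
+ */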
+void nvme_unfreeze(struct nvme_ctrl *ctrl)
+{
+       struct nvme_ns *ns;
+
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list)
+               blk_mq_unfreeze_queue(ns->queue);
+       mutex_unlock(&ctrl->namespaces_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_unfreeze);
+
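+/*
+ * Wait for all namespace queues to finish freezing, giving up once the
+ * timeout (in jiffies) expires; the time left over from one queue is
+ * carried into the wait on the next.
+ */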
+void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
+{
+       struct nvme_ns *ns;
+
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
+               timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
+               if (timeout <= 0)
+                       break;
+       }
+       mutex_unlock(&ctrl->namespaces_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
+
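+/*
+ * Block until every namespace queue is fully frozen, i.e. until all
+ * previously entered requests have completed.
+ */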
+void nvme_wait_freeze(struct nvme_ctrl *ctrl)
+{
+       struct nvme_ns *ns;
+
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list)
+               blk_mq_freeze_queue_wait(ns->queue);
+       mutex_unlock(&ctrl->namespaces_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_wait_freeze);
+
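+/*
+ * Start freezing all namespace queues: new requests are held off while
+ * requests already entered are allowed to complete.
+ */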
+void nvme_start_freeze(struct nvme_ctrl *ctrl)
+{
+       struct nvme_ns *ns;
+
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list)
+               blk_mq_freeze_queue_start(ns->queue);
+       mutex_unlock(&ctrl->namespaces_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_start_freeze);
+
 void nvme_stop_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
 void nvme_stop_queues(struct nvme_ctrl *ctrl);
 void nvme_start_queues(struct nvme_ctrl *ctrl);
 void nvme_kill_queues(struct nvme_ctrl *ctrl);
+void nvme_unfreeze(struct nvme_ctrl *ctrl);
+void nvme_wait_freeze(struct nvme_ctrl *ctrl);
+void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
+void nvme_start_freeze(struct nvme_ctrl *ctrl);
 
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
        int i, queues;
-       u32 csts = -1;
+       bool dead = true;
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
 
        del_timer_sync(&dev->watchdog_timer);
 
        mutex_lock(&dev->shutdown_lock);
-       if (pci_is_enabled(to_pci_dev(dev->dev))) {
-               nvme_stop_queues(&dev->ctrl);
-               csts = readl(dev->bar + NVME_REG_CSTS);
+       if (pci_is_enabled(pdev)) {
+               u32 csts = readl(dev->bar + NVME_REG_CSTS);
+
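+               /*
+                * Freeze the namespace queues of a live controller so that
+                * no new requests slip in while the device is disabled.
+                */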
+               if (dev->ctrl.state == NVME_CTRL_LIVE)
+                       nvme_start_freeze(&dev->ctrl);
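+               /*
+                * The controller is dead if it reports a fatal status, is
+                * not ready, or the PCI channel is in an error state; a dead
+                * controller cannot complete outstanding requests.
+                */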
+               dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
+                       pdev->error_state != pci_channel_io_normal);
        }
 
+       /*
+        * Give the controller a chance to complete all entered requests if
+        * doing a safe shutdown.
+        */
+       if (!dead && shutdown)
+               nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
+       nvme_stop_queues(&dev->ctrl);
+
        queues = dev->online_queues - 1;
        for (i = dev->queue_count - 1; i > 0; i--)
                nvme_suspend_queue(dev->queues[i]);
 
-       if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
+       if (dead) {
                /* A device might become IO incapable very soon during
                 * probe, before the admin queue is configured. Thus,
                 * queue_count can be 0 here.
 
        blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
        blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
+
+       /*
+        * The driver will not be starting up queues again if shutting down,
+        * so we must flush all entered requests to their failed completion
+        * to avoid deadlocking the blk-mq hot-cpu notifier.
+        */
+       if (shutdown)
+               nvme_start_queues(&dev->ctrl);
        mutex_unlock(&dev->shutdown_lock);
 }
 
                nvme_remove_namespaces(&dev->ctrl);
        } else {
                nvme_start_queues(&dev->ctrl);
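+               /*
+                * Finish the queue freeze started in nvme_dev_disable()
+                * before nvme_dev_add() is called, then release the freeze
+                * so new I/O can flow again.
+                */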
+               nvme_wait_freeze(&dev->ctrl);
                nvme_dev_add(dev);
+               nvme_unfreeze(&dev->ctrl);
        }
 
        if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {