        /* Forcibly unquiesce queues to avoid blocking dispatch */
        if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
-               blk_mq_unquiesce_queue(ctrl->admin_q);
+               nvme_start_admin_queue(ctrl);
 
        list_for_each_entry(ns, &ctrl->namespaces, list)
                nvme_set_queue_dying(ns);
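
The hunks above and below swap the direct blk-mq quiesce/unquiesce calls on the
admin queue for the new nvme_stop_admin_queue()/nvme_start_admin_queue() helpers.
The helpers themselves are not part of this excerpt; presumably (in
drivers/nvme/host/core.c) they are thin wrappers that centralize the
ctrl->admin_q reference, roughly:

	void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
	{
		blk_mq_quiesce_queue(ctrl->admin_q);
	}
	EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);

	void nvme_start_admin_queue(struct nvme_ctrl *ctrl)
	{
		blk_mq_unquiesce_queue(ctrl->admin_q);
	}
	EXPORT_SYMBOL_GPL(nvme_start_admin_queue);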
 
        list_del(&ctrl->ctrl_list);
        spin_unlock_irqrestore(&ctrl->rport->lock, flags);
 
-       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+       nvme_start_admin_queue(&ctrl->ctrl);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
        /*
         * clean up the admin queue. Same thing as above.
         */
-       blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_stop_admin_queue(&ctrl->ctrl);
        blk_sync_queue(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_fc_terminate_exchange, &ctrl->ctrl);
        ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
                                                (ilog2(SZ_4K) - 9);
 
-       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+       nvme_start_admin_queue(&ctrl->ctrl);
 
        ret = nvme_init_ctrl_finish(&ctrl->ctrl);
        if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
        nvme_fc_free_queue(&ctrl->queues[0]);
 
        /* re-enable the admin_q so anything new can fast fail */
-       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+       nvme_start_admin_queue(&ctrl->ctrl);
 
        /* resume the io queues so that things will fast fail */
        nvme_start_queues(&ctrl->ctrl);
 
 
        nvmeq->dev->online_queues--;
        if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
-               blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
+               nvme_stop_admin_queue(&nvmeq->dev->ctrl);
        if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
                pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
        return 0;
                 * user requests may be waiting on a stopped queue. Start the
                 * queue to flush these to completion.
                 */
-               blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+               nvme_start_admin_queue(&dev->ctrl);
                blk_cleanup_queue(dev->ctrl.admin_q);
                blk_mq_free_tag_set(&dev->admin_tagset);
        }
                        return -ENODEV;
                }
        } else
-               blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+               nvme_start_admin_queue(&dev->ctrl);
 
        return 0;
 }
        if (shutdown) {
                nvme_start_queues(&dev->ctrl);
                if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
-                       blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+                       nvme_start_admin_queue(&dev->ctrl);
        }
        mutex_unlock(&dev->shutdown_lock);
 }
 
        else
                ctrl->ctrl.max_integrity_segments = 0;
 
-       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+       nvme_start_admin_queue(&ctrl->ctrl);
 
        error = nvme_init_ctrl_finish(&ctrl->ctrl);
        if (error)
        return 0;
 
 out_quiesce_queue:
-       blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_stop_admin_queue(&ctrl->ctrl);
        blk_sync_queue(ctrl->ctrl.admin_q);
 out_stop_queue:
        nvme_rdma_stop_queue(&ctrl->queues[0]);
 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_stop_admin_queue(&ctrl->ctrl);
        blk_sync_queue(ctrl->ctrl.admin_q);
        nvme_rdma_stop_queue(&ctrl->queues[0]);
        nvme_cancel_admin_tagset(&ctrl->ctrl);
        if (remove)
-               blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+               nvme_start_admin_queue(&ctrl->ctrl);
        nvme_rdma_destroy_admin_queue(ctrl, remove);
 }
 
                nvme_rdma_destroy_io_queues(ctrl, new);
        }
 destroy_admin:
-       blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_stop_admin_queue(&ctrl->ctrl);
        blk_sync_queue(ctrl->ctrl.admin_q);
        nvme_rdma_stop_queue(&ctrl->queues[0]);
        nvme_cancel_admin_tagset(&ctrl->ctrl);
        nvme_rdma_teardown_io_queues(ctrl, false);
        nvme_start_queues(&ctrl->ctrl);
        nvme_rdma_teardown_admin_queue(ctrl, false);
-       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+       nvme_start_admin_queue(&ctrl->ctrl);
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we started ctrl delete */
        cancel_delayed_work_sync(&ctrl->reconnect_work);
 
        nvme_rdma_teardown_io_queues(ctrl, shutdown);
-       blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_stop_admin_queue(&ctrl->ctrl);
        if (shutdown)
                nvme_shutdown_ctrl(&ctrl->ctrl);
        else
 
        if (error)
                goto out_stop_queue;
 
-       blk_mq_unquiesce_queue(ctrl->admin_q);
+       nvme_start_admin_queue(ctrl);
 
        error = nvme_init_ctrl_finish(ctrl);
        if (error)
        return 0;
 
 out_quiesce_queue:
-       blk_mq_quiesce_queue(ctrl->admin_q);
+       nvme_stop_admin_queue(ctrl);
        blk_sync_queue(ctrl->admin_q);
 out_stop_queue:
        nvme_tcp_stop_queue(ctrl, 0);
 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
                bool remove)
 {
-       blk_mq_quiesce_queue(ctrl->admin_q);
+       nvme_stop_admin_queue(ctrl);
        blk_sync_queue(ctrl->admin_q);
        nvme_tcp_stop_queue(ctrl, 0);
        nvme_cancel_admin_tagset(ctrl);
        if (remove)
-               blk_mq_unquiesce_queue(ctrl->admin_q);
+               nvme_start_admin_queue(ctrl);
        nvme_tcp_destroy_admin_queue(ctrl, remove);
 }
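
The teardown and error-recovery paths in this series all follow the same
sequence, differing only in the transport-specific queue-stop call: quiesce the
admin queue, drain it, cancel in-flight admin commands, then unquiesce so
anything still queued (or newly submitted) fails fast instead of hanging. A
condensed, illustrative sketch using the names assumed above
(example_admin_queue_recovery is hypothetical, not a function in the tree):

	static void example_admin_queue_recovery(struct nvme_ctrl *ctrl)
	{
		nvme_stop_admin_queue(ctrl);	/* quiesce: block new dispatch */
		blk_sync_queue(ctrl->admin_q);	/* wait for timeout/requeue work to settle */
		nvme_cancel_admin_tagset(ctrl);	/* complete in-flight admin commands with an error */
		nvme_start_admin_queue(ctrl);	/* unquiesce so pending requests fail fast */
	}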
 
 {
        if (ctrl->queue_count <= 1)
                return;
-       blk_mq_quiesce_queue(ctrl->admin_q);
+       nvme_stop_admin_queue(ctrl);
        nvme_start_freeze(ctrl);
        nvme_stop_queues(ctrl);
        nvme_sync_io_queues(ctrl);
                nvme_tcp_destroy_io_queues(ctrl, new);
        }
 destroy_admin:
-       blk_mq_quiesce_queue(ctrl->admin_q);
+       nvme_stop_admin_queue(ctrl);
        blk_sync_queue(ctrl->admin_q);
        nvme_tcp_stop_queue(ctrl, 0);
        nvme_cancel_admin_tagset(ctrl);
        /* unquiesce to fail fast pending requests */
        nvme_start_queues(ctrl);
        nvme_tcp_teardown_admin_queue(ctrl, false);
-       blk_mq_unquiesce_queue(ctrl->admin_q);
+       nvme_start_admin_queue(ctrl);
 
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we started ctrl delete */
        cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
 
        nvme_tcp_teardown_io_queues(ctrl, shutdown);
-       blk_mq_quiesce_queue(ctrl->admin_q);
+       nvme_stop_admin_queue(ctrl);
        if (shutdown)
                nvme_shutdown_ctrl(ctrl);
        else
 
        ctrl->ctrl.max_hw_sectors =
                (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
 
-       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+       nvme_start_admin_queue(&ctrl->ctrl);
 
        error = nvme_init_ctrl_finish(&ctrl->ctrl);
        if (error)
                nvme_loop_destroy_io_queues(ctrl);
        }
 
-       blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_stop_admin_queue(&ctrl->ctrl);
        if (ctrl->ctrl.state == NVME_CTRL_LIVE)
                nvme_shutdown_ctrl(&ctrl->ctrl);