static void nvme_start_ns_queue(struct nvme_ns *ns)
 {
-       blk_mq_unquiesce_queue(ns->queue);
+       if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags))
+               blk_mq_unquiesce_queue(ns->queue);
 }
 
 static void nvme_stop_ns_queue(struct nvme_ns *ns)
 {
-       blk_mq_quiesce_queue(ns->queue);
+       if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
+               blk_mq_quiesce_queue(ns->queue);
 }
 
 /*
 
 void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
 {
-       blk_mq_quiesce_queue(ctrl->admin_q);
+       if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
+               blk_mq_quiesce_queue(ctrl->admin_q);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);
 
 void nvme_start_admin_queue(struct nvme_ctrl *ctrl)
 {
-       blk_mq_unquiesce_queue(ctrl->admin_q);
+       if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
+               blk_mq_unquiesce_queue(ctrl->admin_q);
 }
 EXPORT_SYMBOL_GPL(nvme_start_admin_queue);
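
A minimal userspace sketch of the same pairing idiom, using C11 atomics in
place of the kernel's test_and_set_bit()/test_and_clear_bit(); the demo_queue
type and the demo_* functions are hypothetical stand-ins for the request queue
and blk_mq_quiesce_queue()/blk_mq_unquiesce_queue(), not part of the driver:

#include <stdatomic.h>
#include <stdio.h>

#define DEMO_STOPPED    0x1UL   /* plays the role of NVME_NS_STOPPED */

struct demo_queue {
        atomic_ulong flags;
};

static void demo_quiesce(struct demo_queue *q)   { (void)q; puts("quiesce"); }
static void demo_unquiesce(struct demo_queue *q) { (void)q; puts("unquiesce"); }

static void demo_stop_queue(struct demo_queue *q)
{
        /* Only the caller that actually sets the bit quiesces the queue. */
        if (!(atomic_fetch_or(&q->flags, DEMO_STOPPED) & DEMO_STOPPED))
                demo_quiesce(q);
}

static void demo_start_queue(struct demo_queue *q)
{
        /* Only undo a quiesce that a previous stop actually performed. */
        if (atomic_fetch_and(&q->flags, ~DEMO_STOPPED) & DEMO_STOPPED)
                demo_unquiesce(q);
}

int main(void)
{
        struct demo_queue q = { .flags = 0 };

        demo_stop_queue(&q);    /* quiesces                */
        demo_stop_queue(&q);    /* no-op, already stopped  */
        demo_start_queue(&q);   /* unquiesces exactly once */
        demo_start_queue(&q);   /* no-op, already running  */
        return 0;
}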
 
 
        int nr_reconnects;
        unsigned long flags;
 #define NVME_CTRL_FAILFAST_EXPIRED     0
+#define NVME_CTRL_ADMIN_Q_STOPPED      1
        struct nvmf_ctrl_options *opts;
 
        struct page *discard_page;
 #define NVME_NS_ANA_PENDING    2
 #define NVME_NS_FORCE_RO       3
 #define NVME_NS_READY          4
+#define NVME_NS_STOPPED        5
 
        struct cdev             cdev;
        struct device           cdev_device;
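
For reference, the two new flags are plain bit indices into the existing
unsigned long flags words: NVME_CTRL_ADMIN_Q_STOPPED is bit 1 of ctrl->flags
and NVME_NS_STOPPED is bit 5 of ns->flags, so no new struct fields are needed.
The test_and_set_bit()/test_and_clear_bit() pairing above is what guarantees
that each stop/start cycle quiesces and unquiesces a queue at most once,
presumably so that callers which may stop or start the same queue more than
once (e.g. on error-recovery and reset paths) leave the quiesce state balanced.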