@@ ... @@ void nvme_complete_rq(struct request *req)
{
        if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
                nvme_req(req)->retries++;
-               blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
+               blk_mq_requeue_request(req, true);
                return;
        }
 
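Requeued commands are now kicked back to dispatch unconditionally. Quiescing,
unlike the old stopped state tested by blk_mq_queue_stopped(), is honored at
dispatch time, so the immediate kick is safe, and the manual requeue-list kicks
removed below become unnecessary. A minimal sketch of the two call patterns,
using hypothetical helper names (both blk-mq functions are real exports):

        #include <linux/blkdev.h>
        #include <linux/blk-mq.h>

        /* Hypothetical illustration, not part of this patch. */
        static void requeue_and_kick(struct request *req)
        {
                /* one step: park req on q->requeue_list, kick the work now */
                blk_mq_requeue_request(req, true);
        }

        static void requeue_deferred(struct request *req)
        {
                /* two steps: park req, kick the requeue list separately */
                blk_mq_requeue_request(req, false);
                blk_mq_kick_requeue_list(req->q);
        }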

@@ ... @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
        /* Forcibly unquiesce queues to avoid blocking dispatch */
        blk_mq_unquiesce_queue(ctrl->admin_q);
 
-       /* Forcibly start all queues to avoid having stuck requests */
-       blk_mq_start_hw_queues(ctrl->admin_q);
-
        list_for_each_entry(ns, &ctrl->namespaces, list) {
                /*
                 * Revalidating a dead namespace sets capacity to 0. This will
                 * end buffered writers dirtying pages that can't be synced.
                 */
                if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
                        continue;
                revalidate_disk(ns->disk);
                blk_set_queue_dying(ns->queue);

                /* Forcibly unquiesce queues to avoid blocking dispatch */
                blk_mq_unquiesce_queue(ns->queue);
-
-               /*
-                * Forcibly start all queues to avoid having stuck requests.
-                * Note that we must ensure the queues are not stopped
-                * when the final removal happens.
-                */
-               blk_mq_start_hw_queues(ns->queue);
-
-               /* draining requests in requeue list */
-               blk_mq_kick_requeue_list(ns->queue);
        }
        mutex_unlock(&ctrl->namespaces_mutex);
 }
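With requeues kicked at requeue time, unquiescing alone is enough here:
blk_mq_unquiesce_queue() clears the quiesced state and reruns the hardware
queues, which also drains anything parked on the requeue list, so the forcible
blk_mq_start_hw_queues() and the explicit kick are redundant. A minimal sketch
of the per-namespace teardown that remains, using a hypothetical helper name:

        #include <linux/blkdev.h>
        #include <linux/blk-mq.h>

        /* Hypothetical illustration, not part of this patch. */
        static void kill_ns_queue_sketch(struct request_queue *q)
        {
                blk_set_queue_dying(q);    /* fail new and dispatched requests */
                blk_mq_unquiesce_queue(q); /* rerun hw queues so parked
                                            * requests reach ->queue_rq() and
                                            * can fail fast */
        }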

@@ ... @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
        struct nvme_ns *ns;
 
        mutex_lock(&ctrl->namespaces_mutex);
-       list_for_each_entry(ns, &ctrl->namespaces, list) {
+       list_for_each_entry(ns, &ctrl->namespaces, list)
                blk_mq_unquiesce_queue(ns->queue);
-               blk_mq_kick_requeue_list(ns->queue);
-       }
        mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
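nvme_start_queues() is now a plain unquiesce of every namespace queue; the
rerun performed by blk_mq_unquiesce_queue() dispatches whatever was requeued
while the queue was quiesced. A hedged usage sketch of how it pairs with the
existing nvme_stop_queues() in a reset path (hypothetical caller, not part of
this patch; "nvme.h" is the driver-local header):

        #include "nvme.h"

        static void reset_path_sketch(struct nvme_ctrl *ctrl)
        {
                nvme_stop_queues(ctrl);    /* blk_mq_quiesce_queue() per ns */

                /* ... tear down and reinitialize the controller ... */

                nvme_start_queues(ctrl);   /* blk_mq_unquiesce_queue() per ns */
        }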