  * blk_poll - poll for IO completions
  * @q:  the queue
  * @cookie: cookie passed back at IO submission time
+ * @spin: whether to spin for completions
  *
  * Description:
  *    Poll for completions on the passed in queue. Returns number of
- *    completed entries found.
+ *    completed entries found. If @spin is true, then blk_poll() will continue
+ *    looping until at least one completion is found, unless the task is
+ *    marked running in the meantime (or we need to reschedule).
  */
-int blk_poll(struct request_queue *q, blk_qc_t cookie)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
        if (!q->poll_fn || !blk_qc_t_valid(cookie))
                return 0;
 
        if (current->plug)
                blk_flush_plug_list(current->plug, false);
-       return q->poll_fn(q, cookie);
+       return q->poll_fn(q, cookie, spin);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
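
With the new argument, callers now choose between the two behaviours
directly. A minimal sketch of both modes (the bdev, bio and found
variables are illustrative, not taken from this patch):

	blk_qc_t qc = submit_bio(bio);	/* cookie identifying where the IO was queued */
	int found;

	/* sync waiter: keep polling until at least one completion is reaped */
	found = blk_poll(bdev_get_queue(bdev), qc, true);

	/* opportunistic check: make a single pass and return immediately */
	found = blk_poll(bdev_get_queue(bdev), qc, false);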
 
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 
        return blk_mq_poll_hybrid_sleep(q, hctx, rq);
 }
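
For context, the hybrid path above tries to sleep for roughly the expected
completion time before falling back to busy polling; the poll statistics
hooked up by the callbacks above feed that estimate. A compressed sketch of
blk_mq_poll_hybrid_sleep() from this era (the -1 "never sleep" case and the
hrtimer plumbing are elided):

	if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;			/* fixed value set via sysfs */
	else
		nsecs = blk_mq_poll_nsecs(q, hctx, rq);	/* ~half the mean completion time */
	if (!nsecs)
		return false;
	/* ... sleep on an hrtimer for 'nsecs', woken early if the IO
	 * completes, then return true so the caller re-checks for
	 * completion before entering the busy-poll loop ... */
	return true;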
 
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
        struct blk_mq_hw_ctx *hctx;
        long state;
 
                if (current->state == TASK_RUNNING)
                        return 1;
-               if (ret < 0)
+               if (ret < 0 || !spin)
                        break;
                cpu_relax();
        }
 
        return ret;
 }
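
Pieced together, the busy-poll loop that the changed line sits in looks
roughly like this (a sketch reconstructed from the blk-mq code of this era,
abbreviated; q->mq_ops->poll() is what actually reaps completions from the
hardware queue):

	state = current->state;
	do {
		int ret;

		ret = q->mq_ops->poll(hctx);
		if (ret > 0) {
			__set_current_state(TASK_RUNNING);
			return ret;		/* found @ret completions */
		}

		if (signal_pending_state(state, current))
			__set_current_state(TASK_RUNNING);
		if (current->state == TASK_RUNNING)
			return 1;		/* woken up, stop polling */

		/* the new check: a single pass is enough when !@spin */
		if (ret < 0 || !spin)
			break;
		cpu_relax();
	} while (!need_resched());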
 
-static int nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
+static int nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc, bool spin)
 {
        struct nvme_ns_head *head = q->queuedata;
        struct nvme_ns *ns;
        srcu_idx = srcu_read_lock(&head->srcu);
        ns = srcu_dereference(head->current_path[numa_node_id()], &head->srcu);
        if (likely(ns && nvme_path_is_optimized(ns)))
-               found = ns->queue->poll_fn(q, qc);
+               found = ns->queue->poll_fn(q, qc, spin);
        srcu_read_unlock(&head->srcu, srcu_idx);
        return found;
 }
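
The multipath head queue never polls hardware itself; it only forwards the
cookie (and now the spin flag) to the currently optimized path. For
reference, the hook is installed when the head's queue is created, roughly
as in the multipath setup code of this era:

	q->queuedata = head;
	blk_queue_make_request(q, nvme_ns_head_make_request);
	q->poll_fn = nvme_ns_head_poll;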
 
                        break;
 
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_poll(bdev_get_queue(bdev), qc))
+                   !blk_poll(bdev_get_queue(bdev), qc, true))
                        io_schedule();
        }
        __set_current_state(TASK_RUNNING);

                        break;
 
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_poll(bdev_get_queue(bdev), qc))
+                   !blk_poll(bdev_get_queue(bdev), qc, true))
                        io_schedule();
        }
        __set_current_state(TASK_RUNNING);
 
                dio->waiter = current;
                spin_unlock_irqrestore(&dio->bio_lock, flags);
                if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_poll(dio->bio_disk->queue, dio->bio_cookie))
+                   !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true))
                        io_schedule();
                /* wake up sets us TASK_RUNNING */
                spin_lock_irqsave(&dio->bio_lock, flags);
 
                        if (!(iocb->ki_flags & IOCB_HIPRI) ||
                            !dio->submit.last_queue ||
                            !blk_poll(dio->submit.last_queue,
-                                        dio->submit.cookie))
+                                        dio->submit.cookie, true))
                                io_schedule();
                }
                __set_current_state(TASK_RUNNING);
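
The synchronous waiters above (two call sites in fs/block_dev.c, plus
fs/direct-io.c and fs/iomap.c), like the swap-in path further down, all
share the same shape, which the hunks only show in part. Distilled, with
io_done standing in for each caller's own completion flag:

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (READ_ONCE(io_done))
			break;

		/*
		 * blk_poll() either reaps our completion (the next loop
		 * iteration sees the flag) or returns 0, in which case we
		 * sleep until the interrupt path wakes us. Spinning (true)
		 * is wanted here: there is exactly one IO to wait for.
		 */
		if (!(iocb->ki_flags & IOCB_HIPRI) || !blk_poll(q, qc, true))
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);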
 
 struct blk_queue_ctx;
 
 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-typedef int (poll_q_fn) (struct request_queue *q, blk_qc_t);
+typedef int (poll_q_fn) (struct request_queue *q, blk_qc_t, bool spin);
 
 struct bio_vec;
 typedef int (dma_drain_needed_fn)(struct request *);
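
For reference, these typedefs back per-queue hooks stored on the queue
itself; in the blkdev.h of this era the relevant members look like
(unrelated fields elided):

	struct request_queue {
		...
		make_request_fn		*make_request_fn;
		poll_q_fn		*poll_fn;
		...
	};
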
 int blk_status_to_errno(blk_status_t status);
 blk_status_t errno_to_blk_status(int errno);
 
-int blk_poll(struct request_queue *q, blk_qc_t cookie);
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
 
                if (!READ_ONCE(bio->bi_private))
                        break;
 
-               if (!blk_poll(disk->queue, qc))
+               if (!blk_poll(disk->queue, qc, true))
                        break;
        }
        __set_current_state(TASK_RUNNING);
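
Every call site converted in this patch is a synchronous waiter, so they
all pass spin == true. The new argument exists for the opposite case: an
asynchronous completion reaper that checks many cookies must not burn CPU
spinning on any single one. A hypothetical sketch (reap_completions() is
illustrative, not part of this patch):

	static int reap_completions(struct request_queue *q, blk_qc_t *cookies,
				    int nr)
	{
		int i, found = 0;

		/* one non-spinning pass per pending cookie */
		for (i = 0; i < nr; i++)
			found += blk_poll(q, cookies[i], false);

		return found;
	}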