static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
        do {
-               blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true);
+               blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), 0);
                cond_resched();
        } while (!completion_done(wait));
 }
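As an aside (not part of the patch): a minimal standalone sketch of the convention blk_rq_poll_completion now relies on. Passing 0 asks the poll routine to keep going until it finds a completion or has to reschedule, so the caller only loops to pick up reschedules and re-check the completion. The names fake_poll, wait_done and the no-op cond_resched below are invented scaffolding, not kernel code.

/* standalone sketch of the synchronous completion-polling loop */
#include <stdbool.h>
#include <stdio.h>

#define BLK_POLL_ONESHOT (1 << 0)

static int hw_pending = 2;	/* pretend outstanding completions */
static bool wait_done;		/* stands in for the struct completion */

static void cond_resched(void) { /* no-op in userspace */ }

/* stand-in for blk_poll(): reaps at most one completion per call */
static int fake_poll(unsigned int flags)
{
	(void)flags;			/* 0 here: the poll side may keep spinning */
	if (!hw_pending)
		return 0;
	if (--hw_pending == 0)
		wait_done = true;	/* last completion fires the waiter */
	return 1;			/* one completed entry found */
}

int main(void)
{
	/* mirrors blk_rq_poll_completion(): poll with flags == 0 until done */
	do {
		fake_poll(0);
		cond_resched();
	} while (!wait_done);
	printf("request completed after polling\n");
	return 0;
}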
 
 }
 
 static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
-               bool spin)
+               unsigned int flags)
 {
        struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
        long state = get_current_state();
                if (task_is_running(current))
                        return 1;
 
-               if (ret < 0 || !spin)
+               if (ret < 0 || (flags & BLK_POLL_ONESHOT))
                        break;
                cpu_relax();
        } while (!need_resched());
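A rough standalone illustration of the exit conditions the classic poll loop now has, with fake_driver_poll and reschedule_requested invented for the sketch: a positive driver return ends the poll, an error or BLK_POLL_ONESHOT stops after a single pass, and otherwise the loop keeps spinning until a reschedule is needed.

/* standalone sketch of the blk_mq_poll_classic() control flow */
#include <stdbool.h>
#include <stdio.h>

#define BLK_POLL_ONESHOT (1 << 0)

static int passes;

/* invented driver callback: finds a completion on the third pass */
static int fake_driver_poll(void)
{
	return ++passes >= 3 ? 1 : 0;
}

static bool reschedule_requested(void)
{
	return false;	/* never, in this sketch */
}

static int classic_poll(unsigned int flags)
{
	do {
		int ret = fake_driver_poll();

		if (ret > 0)
			return ret;	/* found completions, stop polling */
		if (ret < 0 || (flags & BLK_POLL_ONESHOT))
			break;		/* error, or the caller wants one pass */
	} while (!reschedule_requested());
	return 0;
}

int main(void)
{
	passes = 0;
	printf("oneshot: %d\n", classic_poll(BLK_POLL_ONESHOT));	/* 0: gave up after one pass */
	passes = 0;
	printf("keep polling: %d\n", classic_poll(0));			/* 1: spun until a completion */
	return 0;
}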
  * blk_poll - poll for IO completions
  * @q:  the queue
  * @cookie: cookie passed back at IO submission time
- * @spin: whether to spin for completions
+ * @flags: BLK_POLL_* flags that control the behavior
  *
  * Description:
  *    Poll for completions on the passed in queue. Returns number of
- *    completed entries found. If @spin is true, then blk_poll will continue
- *    looping until at least one completion is found, unless the task is
- *    otherwise marked running (or we need to reschedule).
+ *    completed entries found.
  */
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
 {
        if (cookie == BLK_QC_T_NONE ||
            !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return 0;

        if (current->plug)
                blk_flush_plug_list(current->plug, false);
 
-       /* If specified not to spin, we also should not sleep. */
-       if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+       if (q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
                if (blk_mq_poll_hybrid(q, cookie))
                        return 1;
        }
-       return blk_mq_poll_classic(q, cookie, spin);
+       return blk_mq_poll_classic(q, cookie, flags);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
 
 
                if (!READ_ONCE(bio.bi_private))
                        break;
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_poll(bdev_get_queue(bdev), qc, true))
+                   !blk_poll(bdev_get_queue(bdev), qc, 0))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);
 
 static struct bio_set blkdev_dio_pool;
 
-static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
+static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags)
 {
        struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
        struct request_queue *q = bdev_get_queue(bdev);
 
-       return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
+       return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
 }
 
 static void blkdev_bio_end_io(struct bio *bio)
                if (!READ_ONCE(dio->waiter))
                        break;
 
-               if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, true))
+               if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);
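The same wait-or-poll shape recurs below in the iomap and swap_readpage hunks: a high-priority (IOCB_HIPRI) submitter polls for its own completion, everything else sleeps in blk_io_schedule(). A standalone rendering of that decision, where waiter_present, fake_poll and io_sleep are invented and only the polled path completes the I/O:

/* standalone sketch of the "poll if HIPRI, otherwise sleep" wait loop */
#include <stdbool.h>
#include <stdio.h>

static bool waiter_present = true;	/* stands in for dio->waiter / bio.bi_private */
static int polls_until_done = 2;

/* invented stand-in for blk_poll(); returns 1 once our completion is found */
static int fake_poll(unsigned int flags)
{
	(void)flags;
	if (polls_until_done > 0 && --polls_until_done == 0) {
		waiter_present = false;
		return 1;
	}
	return 0;
}

/* stand-in for blk_io_schedule(): give up the CPU, then re-check */
static void io_sleep(void)
{
	printf("sleeping\n");
}

static void wait_for_io(bool hipri)
{
	for (;;) {
		if (!waiter_present)
			break;
		/* poll only for high-priority I/O; otherwise fall back to sleeping */
		if (!hipri || !fake_poll(0))
			io_sleep();
	}
}

int main(void)
{
	wait_for_io(true);
	printf("io completed\n");
	return 0;
}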
 
                        long min)
 {
        struct io_kiocb *req, *tmp;
+       unsigned int poll_flags = 0;
        LIST_HEAD(done);
-       bool spin;
 
        /*
         * Only spin for completions if we don't have multiple devices hanging
         * off our complete list, and we're under the requested amount.
         */
-       spin = !ctx->poll_multi_queue && *nr_events < min;
+       if (ctx->poll_multi_queue || *nr_events >= min)
+               poll_flags |= BLK_POLL_ONESHOT;
 
        list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
                struct kiocb *kiocb = &req->rw.kiocb;
                if (!list_empty(&done))
                        break;
 
-               ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
+               ret = kiocb->ki_filp->f_op->iopoll(kiocb, poll_flags);
                if (unlikely(ret < 0))
                        return ret;
                else if (ret)
-                       spin = false;
+                       poll_flags |= BLK_POLL_ONESHOT;
 
                /* iopoll may have completed current req */
                if (READ_ONCE(req->iopoll_completed))
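Stepping outside the diff for a moment: the io_uring policy above starts with poll_flags of 0 so the first ->iopoll call is allowed to spin, and flips to BLK_POLL_ONESHOT once any request has completed, so the remaining calls only reap what is already done. A compact simulation with an invented reap_one() and made-up numbers:

/* standalone sketch of the io_do_iopoll() poll_flags escalation */
#include <stdio.h>

#define BLK_POLL_ONESHOT (1 << 0)

/* invented per-request poll: >0 means this request has completed */
static int reap_one(int idx, unsigned int flags)
{
	(void)flags;
	return idx >= 1;	/* pretend everything after the first request is done */
}

int main(void)
{
	unsigned int poll_flags = 0;
	int nr_events = 0, min_events = 4;
	int multi_queue = 0;

	/* only spin when a single queue is involved and we still need events */
	if (multi_queue || nr_events >= min_events)
		poll_flags |= BLK_POLL_ONESHOT;

	for (int i = 0; i < 4; i++) {
		int ret = reap_one(i, poll_flags);

		if (ret < 0)
			return 1;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;	/* stop spinning, just reap */

		printf("req %d: ret=%d poll_flags=%#x\n", i, ret, poll_flags);
	}
	return 0;
}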
 
        };
 };
 
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
+int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags)
 {
        struct request_queue *q = READ_ONCE(kiocb->private);
 
        if (!q)
                return 0;
-       return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
+       return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
 }
 EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
 
                        if (!(iocb->ki_flags & IOCB_HIPRI) ||
                            !dio->submit.last_queue ||
                            !blk_poll(dio->submit.last_queue,
-                                        dio->submit.cookie, true))
+                                        dio->submit.cookie, 0))
                                blk_io_schedule();
                }
                __set_current_state(TASK_RUNNING);
 
 int blk_status_to_errno(blk_status_t status);
 blk_status_t errno_to_blk_status(int errno);
 
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
+/* poll the hardware only once; do not keep polling until a completion is found */
+#define BLK_POLL_ONESHOT               (1 << 0)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);
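For anyone converting further call sites, the old bool spin argument maps onto the new word by inversion: spin == true becomes flags == 0, spin == false becomes BLK_POLL_ONESHOT. A tiny self-contained check, with spin_to_flags invented purely for the illustration:

/* standalone sketch: mapping the old bool spin convention onto the new flags */
#include <assert.h>
#include <stdbool.h>

#define BLK_POLL_ONESHOT	(1 << 0)

static unsigned int spin_to_flags(bool spin)
{
	return spin ? 0 : BLK_POLL_ONESHOT;
}

int main(void)
{
	assert(spin_to_flags(true) == 0);			/* keep polling */
	assert(spin_to_flags(false) == BLK_POLL_ONESHOT);	/* single pass only */
	return 0;
}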
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
 
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
-       int (*iopoll)(struct kiocb *kiocb, bool spin);
+       int (*iopoll)(struct kiocb *kiocb, unsigned int flags);
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
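Any other ->iopoll implementation now takes the flags word and, like blkdev_iopoll() and iomap_dio_iopoll() above, is expected to forward it to blk_poll() unchanged. A standalone sketch with invented names (fake_kiocb, myfs_iopoll, fake_blk_poll), not an implementation taken from the patch:

/* standalone sketch: an ->iopoll-style hook that just forwards the flags */
#include <stdio.h>

#define BLK_POLL_ONESHOT (1 << 0)

struct fake_kiocb { unsigned int cookie; };

/* invented stand-in for blk_poll(q, cookie, flags) */
static int fake_blk_poll(unsigned int cookie, unsigned int flags)
{
	printf("poll cookie=%u oneshot=%d\n", cookie, !!(flags & BLK_POLL_ONESHOT));
	return 0;
}

/* mirrors blkdev_iopoll()/iomap_dio_iopoll(): no interpretation, pass through */
static int myfs_iopoll(struct fake_kiocb *kiocb, unsigned int flags)
{
	return fake_blk_poll(kiocb->cookie, flags);
}

struct fake_file_operations {
	int (*iopoll)(struct fake_kiocb *kiocb, unsigned int flags);
};

static const struct fake_file_operations myfs_fops = {
	.iopoll = myfs_iopoll,
};

int main(void)
{
	struct fake_kiocb kiocb = { .cookie = 42 };

	myfs_fops.iopoll(&kiocb, BLK_POLL_ONESHOT);
	return 0;
}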
 
                const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
                unsigned int dio_flags);
 ssize_t iomap_dio_complete(struct iomap_dio *dio);
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
+int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags);
 
 #ifdef CONFIG_SWAP
 struct file;
 
                if (!READ_ONCE(bio->bi_private))
                        break;
 
-               if (!blk_poll(disk->queue, qc, true))
+               if (!blk_poll(disk->queue, qc, 0))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);