        [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
        [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
        [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
+       [BLK_STS_AGAIN]         = { -EAGAIN,    "nonblocking retry" },
 
        /* device mapper special case, should not leak out: */
        [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },
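
Context for the new entry: blk_status_to_errno() indexes this table, so a bio
completed with BLK_STS_AGAIN surfaces to callers as -EAGAIN, and "nonblocking
retry" is the string print_req_error() would log. From memory the lookup is
roughly the following (treat the exact shape as an approximation):

	int blk_status_to_errno(blk_status_t status)
	{
		int idx = (__force int)status;

		if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
			return -EIO;
		return blk_errors[idx].errno;
	}
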
        if (!IS_ERR(rq))
                return rq;
 
+       if (op & REQ_NOWAIT) {
+               blk_put_rl(rl);
+               return ERR_PTR(-EAGAIN);
+       }
+
        if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
                blk_put_rl(rl);
                return rq;
        }
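
With the early REQ_NOWAIT check in place, a failed nonblocking allocation
returns ERR_PTR(-EAGAIN) before get_request() ever reaches its sleeping retry
path. The caller is then expected to finish the bio instead of blocking; a
hypothetical call-site sketch (the surrounding call site is assumed, only the
helper names come from this series):

	rq = get_request(q, bio->bi_opf, bio, GFP_NOIO);
	if (IS_ERR(rq)) {
		if (PTR_ERR(rq) == -EAGAIN)
			bio_wouldblock_error(bio); /* ends bio with BLK_STS_AGAIN */
		else
			bio_io_error(bio);
		return BLK_QC_T_NONE;
	}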
 
+       /*
+        * For a REQ_NOWAIT request, return -EOPNOTSUPP
+        * if the queue is not request based.
+        */
+
+       if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+               goto not_supported;
+
        part = bio->bi_bdev->bd_part;
        if (should_fail_request(part, bio->bi_iter.bi_size) ||
            should_fail_request(&part_to_disk(part)->part0,
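
The gate above exists because only request-based queues reach the request
allocator where -EAGAIN can be produced; a purely bio-based driver (some
device-mapper targets, for instance) could block arbitrarily with no way to
honor the flag. For reference, queue_is_rq_based() at the time was, as far as
I recall (treat as an assumption):

	static inline bool queue_is_rq_based(struct request_queue *q)
	{
		/* legacy request_fn drivers and blk-mq both qualify;
		 * bio-based drivers have neither */
		return q->request_fn || q->mq_ops;
	}
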
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-               if (likely(blk_queue_enter(q, false) == 0)) {
+               if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
                        struct bio_list lower, same;
 
                        /* Create a fresh bio_list for all subordinate requests */
                        bio_list_merge(&bio_list_on_stack[0], &same);
                        bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
                } else {
-                       bio_io_error(bio);
+                       if (unlikely(!blk_queue_dying(q) &&
+                                       (bio->bi_opf & REQ_NOWAIT)))
+                               bio_wouldblock_error(bio);
+                       else
+                               bio_io_error(bio);
                }
                bio = bio_list_pop(&bio_list_on_stack[0]);
        } while (bio);
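
The bool passed to blk_queue_enter() now reflects REQ_NOWAIT: a nowait
submitter fails immediately when the queue cannot be entered (e.g. it is
frozen) instead of sleeping on the freeze wait queue, and the new else branch
completes the bio with BLK_STS_AGAIN unless the queue is actually dying, which
stays a hard I/O error. My recollection of blk_queue_enter() around this
series, for orientation (details are an assumption):

	int blk_queue_enter(struct request_queue *q, bool nowait)
	{
		while (true) {
			if (percpu_ref_tryget_live(&q->q_usage_counter))
				return 0;
			if (nowait)
				return -EBUSY;	/* triggers the else branch above */
			wait_event(q->mq_freeze_wq,
				   !atomic_read(&q->mq_freeze_depth) ||
				   blk_queue_dying(q));
			if (blk_queue_dying(q))
				return -ENODEV;
		}
	}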
 
        if (likely(!data->ctx))
                data->ctx = blk_mq_get_ctx(q);
        if (likely(!data->hctx))
                data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
+       if (op & REQ_NOWAIT)
+               data->flags |= BLK_MQ_REQ_NOWAIT;
 
        if (e) {
                data->flags |= BLK_MQ_REQ_INTERNAL;
        rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
        if (unlikely(!rq)) {
                __wbt_done(q->rq_wb, wb_acct);
+               if (bio->bi_opf & REQ_NOWAIT)
+                       bio_wouldblock_error(bio);
                return BLK_QC_T_NONE;
        }
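
On the blk-mq side, BLK_MQ_REQ_NOWAIT is what keeps tag allocation from
sleeping: when no tag is free, blk_mq_get_tag() normally waits on the tag
waitqueue, but with the flag set it fails straight away, blk_mq_get_request()
returns NULL, and the hunk above turns that into BLK_STS_AGAIN for nowait
bios. A sketch from memory (the real function loops over sbitmap waitqueues):

	unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
	{
		int tag = __blk_mq_get_tag(data, bt);	/* sbitmap lookup */

		if (tag != -1)
			return tag;
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return BLK_MQ_TAG_FAIL;	/* rq allocation then fails -> NULL */

		/* otherwise: prepare_to_wait() on the tag waitqueue,
		 * io_schedule(), and retry the bitmap */
	}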
 
 
        unsigned i;
        blk_status_t err = bio->bi_status;
 
-       if (err)
-               dio->io_error = -EIO;
+       if (err) {
+               if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
+                       dio->io_error = -EAGAIN;
+               else
+                       dio->io_error = -EIO;
+       }
 
        if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
                bio_check_pages_dirty(bio);     /* transfers ownership */
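
Completion side: only a REQ_NOWAIT bio maps BLK_STS_AGAIN back to -EAGAIN;
everything else still collapses to -EIO, so existing callers see no new error
codes. The stored io_error then becomes the syscall result via dio_complete(),
which (if memory serves) prioritizes errors over the byte count roughly like
this:

	static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
	{
		...
		if (ret == 0)
			ret = dio->page_errors;
		if (ret == 0)
			ret = dio->io_error;	/* -EAGAIN propagates from here */
		if (ret == 0)
			ret = transferred;
		...
	}
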
        if (iov_iter_rw(iter) == WRITE) {
                dio->op = REQ_OP_WRITE;
                dio->op_flags = REQ_SYNC | REQ_IDLE;
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       dio->op_flags |= REQ_NOWAIT;
        } else {
                dio->op = REQ_OP_READ;
        }
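
End to end, the flag originates in userspace: preadv2()/pwritev2() with
RWF_NOWAIT set IOCB_NOWAIT on the kiocb, the hunk above translates that to
REQ_NOWAIT on the dio, and a congested device finally yields EAGAIN from the
syscall. A minimal userspace sketch, assuming a kernel and libc that expose
RWF_NOWAIT (it landed around the same time as this series):

	#define _GNU_SOURCE
	#include <sys/uio.h>
	#include <fcntl.h>
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		struct iovec iov;
		void *buf;
		int fd;

		fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
		if (fd < 0 || posix_memalign(&buf, 4096, 4096))
			return 1;
		iov.iov_base = buf;
		iov.iov_len = 4096;

		if (pwritev2(fd, &iov, 1, 0, RWF_NOWAIT) < 0) {
			if (errno == EAGAIN)
				fprintf(stderr, "would block; retry later\n");
			else if (errno == EOPNOTSUPP)
				fprintf(stderr, "nowait unsupported (bio-based queue?)\n");
			else
				perror("pwritev2");
		}
		return 0;
	}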
 
        bio_endio(bio);
 }
 
+static inline void bio_wouldblock_error(struct bio *bio)
+{
+       bio->bi_status = BLK_STS_AGAIN;
+       bio_endio(bio);
+}
+
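The new helper deliberately mirrors bio_io_error(), which sits just above it
in bio.h, so both submission paths (legacy and blk-mq) complete a nowait bio
the same way. For comparison, the existing helper:

	static inline void bio_io_error(struct bio *bio)
	{
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}
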
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
 
 
 /* hack for device mapper, don't use elsewhere: */
 #define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)
 
+#define BLK_STS_AGAIN          ((__force blk_status_t)12)
+
 struct blk_issue_stat {
        u64 stat;
 };
        /* command specific flags for REQ_OP_WRITE_ZEROES: */
        __REQ_NOUNMAP,          /* do not free blocks when zeroing */
 
+       __REQ_NOWAIT,           /* Don't wait if request will block */
        __REQ_NR_BITS,          /* stops here */
 };
 
 #define REQ_BACKGROUND         (1ULL << __REQ_BACKGROUND)
 
 #define REQ_NOUNMAP            (1ULL << __REQ_NOUNMAP)
+#define REQ_NOWAIT             (1ULL << __REQ_NOWAIT)
 
 #define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)