};
        struct request *rq;
 
-       if (unlikely(bio_queue_enter(bio)))
-               return NULL;
-       if (unlikely(!submit_bio_checks(bio)))
-               goto put_exit;
        if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
-               goto put_exit;
+               return NULL;
 
        rq_qos_throttle(q, bio);
 
        rq_qos_cleanup(q, bio);
        if (bio->bi_opf & REQ_NOWAIT)
                bio_wouldblock_error(bio);
-put_exit:
-       blk_queue_exit(q);
+
        return NULL;
 }
 
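+/*
+ * A cached request from the plug can only be reused for this bio if the
+ * bio maps to the same hctx type and both sides agree on whether they
+ * are flush requests.
+ */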
+static inline bool blk_mq_can_use_cached_rq(struct request *rq,
+               struct bio *bio)
+{
+       if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+               return false;
+
+       if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+               return false;
+
+       return true;
+}
+
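+/*
+ * Try to use a request cached in the plug first; if there is none, or it
+ * cannot be used for this bio, fall back to allocating a new request.
+ */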
 static inline struct request *blk_mq_get_request(struct request_queue *q,
                                                 struct blk_plug *plug,
                                                 struct bio *bio,
                                                 unsigned int nsegs,
                                                 bool *same_queue_rq)
 {
+       struct request *rq;
+       bool checked = false;
+
        if (plug) {
-               struct request *rq;
-
                rq = rq_list_peek(&plug->cached_rq);
                if (rq && rq->q == q) {
                        if (unlikely(!submit_bio_checks(bio)))
                                return NULL;
                        if (blk_mq_attempt_bio_merge(q, bio, nsegs,
                                                same_queue_rq))
                                return NULL;
+                       checked = true;
+                       if (!blk_mq_can_use_cached_rq(rq, bio))
+                               goto fallback;
+                       rq->cmd_flags = bio->bi_opf;
                        plug->cached_rq = rq_list_next(rq);
                        INIT_LIST_HEAD(&rq->queuelist);
                        rq_qos_throttle(q, bio);
                        return rq;
                }
        }
 
-       return blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+fallback:
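+       /*
+        * No usable cached request: take a queue reference and allocate a
+        * new request, running submit_bio_checks() first unless the cached
+        * path above already did so. The queue reference is dropped again
+        * on failure.
+        */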
+       if (unlikely(bio_queue_enter(bio)))
+               return NULL;
+       if (!checked && !submit_bio_checks(bio))
+               goto out_put;
+       rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+       if (rq)
+               return rq;
+out_put:
+       blk_queue_exit(q);
+       return NULL;
 }
 
 /**
diff --git a/block/blk-mq.h b/block/blk-mq.h
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
        return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
 }
 
-/*
- * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
- * @q: request queue
- * @flags: request command flags
- * @ctx: software queue cpu ctx
- */
-static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-                                                    unsigned int flags,
-                                                    struct blk_mq_ctx *ctx)
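+/*
+ * blk_mq_get_hctx_type() - map request/bio command flags to an hctx type
+ * @flags: request command flags
+ */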
+static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
 {
        enum hctx_type type = HCTX_TYPE_DEFAULT;
 
        if (flags & REQ_POLLED)
                type = HCTX_TYPE_POLL;
        else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
                type = HCTX_TYPE_READ;
-       
-       return ctx->hctxs[type];
+       return type;
+}
+
+/*
+ * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
+ * @q: request queue
+ * @flags: request command flags
+ * @ctx: software queue cpu ctx
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+                                                    unsigned int flags,
+                                                    struct blk_mq_ctx *ctx)
+{
+       return ctx->hctxs[blk_mq_get_hctx_type(flags)];
 }
 
 /*