* Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-                           unsigned int *request_count)
+                           unsigned int *request_count,
+                           struct request **same_queue_rq)
 {
        struct blk_plug *plug;
        struct request *rq;
        list_for_each_entry_reverse(rq, plug_list, queuelist) {
                int el_ret;
 
-               if (rq->q == q)
+               if (rq->q == q) {
                        (*request_count)++;
+                       /*
+                        * Only the blk-mq multiple-hardware-queues case checks
+                        * for a rq in the same queue; there should be only one
+                        * such rq in a queue.
+                        */
+                       if (same_queue_rq)
+                               *same_queue_rq = rq;
+               }
 
                if (rq->q != q || !blk_rq_merge_ok(rq, bio))
                        continue;
         * any locks.
         */
        if (!blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count))
+           blk_attempt_plug_merge(q, bio, &request_count, NULL))
                return;
 
        spin_lock_irq(q->queue_lock);
 
        struct request *rq;
        unsigned int request_count = 0;
        struct blk_plug *plug;
+       struct request *same_queue_rq = NULL;
 
        blk_queue_bounce(q, &bio);
 
        }
 
        if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count))
+           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                return;
 
        rq = blk_mq_map_request(q, bio, &data);
                 * issued. So the plug list will have one request at most
                 */
                if (plug) {
-                       if (!list_empty(&plug->mq_list)) {
-                               old_rq = list_first_entry(&plug->mq_list,
-                                       struct request, queuelist);
+                       /*
+                        * The plug list might get flushed before this. If that
+                        * happens, same_queue_rq is invalid and the plug list
+                        * is empty.
+                        */
+                       if (same_queue_rq && !list_empty(&plug->mq_list)) {
+                               old_rq = same_queue_rq;
                                list_del_init(&old_rq->queuelist);
                        }
                        list_add_tail(&rq->queuelist, &plug->mq_list);
        }
 
        if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count))
+           blk_attempt_plug_merge(q, bio, &request_count, NULL))
                return;
 
        rq = blk_mq_map_request(q, bio, &data);
 
 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
                            struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-                           unsigned int *request_count);
+                           unsigned int *request_count,
+                           struct request **same_queue_rq);
 
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);