* Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-                           unsigned int *request_count,
                            struct request **same_queue_rq)
 {
        struct blk_plug *plug;
        struct request *rq;
        struct list_head *plug_list;

        plug = current->plug;
        if (!plug)
                return false;
-       *request_count = 0;
 
        plug_list = &plug->mq_list;
 
        list_for_each_entry_reverse(rq, plug_list, queuelist) {
                bool merged = false;
 
-               if (rq->q == q) {
-                       (*request_count)++;
+               if (rq->q == q && same_queue_rq) {
                        /*
                         * Only the blk-mq multiple hardware queues case checks
                         * for a request on the same queue; there should be at
                         * most one such request per queue.
                         */
-                       if (same_queue_rq)
-                               *same_queue_rq = rq;
+                       *same_queue_rq = rq;
                }
 
                if (rq->q != q || !blk_rq_merge_ok(rq, bio))
        return false;
 }
 
-unsigned int blk_plug_queued_count(struct request_queue *q)
-{
-       struct blk_plug *plug;
-       struct request *rq;
-       struct list_head *plug_list;
-       unsigned int ret = 0;
-
-       plug = current->plug;
-       if (!plug)
-               goto out;
-
-       plug_list = &plug->mq_list;
-       list_for_each_entry(rq, plug_list, queuelist) {
-               if (rq->q == q)
-                       ret++;
-       }
-out:
-       return ret;
-}
-
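With blk_plug_queued_count() removed, the per-queue walk of the plug list is gone and the plug carries a running total instead. A minimal sketch of how a caller that only needs the cached count could read it (blk_plug_rq_count() is a hypothetical helper, not part of this patch):

    static inline unsigned short blk_plug_rq_count(const struct blk_plug *plug)
    {
            return plug ? plug->rq_count : 0;
    }

Note that the new counter sums requests across all queues in the plug, whereas the removed function counted only those matching @q.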
 void blk_init_request_from_bio(struct request *req, struct bio *bio)
 {
        if (bio->bi_opf & REQ_RAHEAD)
 
        INIT_LIST_HEAD(&plug->mq_list);
        INIT_LIST_HEAD(&plug->cb_list);
+       plug->rq_count = 0;
+
        /*
         * Store ordering should not be needed here, since a potential
         * preempt will imply a full memory barrier
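To make the ordering argument concrete: if the plug could be observed before its initialization completed, the stores above would need a write barrier ahead of publication, roughly as below (the smp_wmb() is the hypothetical alternative the comment rules out):

    INIT_LIST_HEAD(&plug->mq_list);
    INIT_LIST_HEAD(&plug->cb_list);
    plug->rq_count = 0;
    smp_wmb();              /* hypothetical: order init before publish */
    current->plug = plug;

The comment's point is that any preemption already implies a full memory barrier, so the plain store publishing the plug suffices.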
 
        unsigned int depth;
 
        list_splice_init(&plug->mq_list, &list);
+       plug->rq_count = 0;
 
        list_sort(NULL, &list, plug_rq_cmp);
 
        const int is_flush_fua = op_is_flush(bio->bi_opf);
        struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
        struct request *rq;
-       unsigned int request_count = 0;
        struct blk_plug *plug;
        struct request *same_queue_rq = NULL;
        blk_qc_t cookie;
                return BLK_QC_T_NONE;
 
        if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+           blk_attempt_plug_merge(q, bio, &same_queue_rq))
                return BLK_QC_T_NONE;
 
        if (blk_mq_sched_bio_merge(q, bio))
                blk_insert_flush(rq);
                blk_mq_run_hw_queue(data.hctx, true);
        } else if (plug && q->nr_hw_queues == 1) {
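+               /*
+                * Snapshot the plug count only now: getting the request
+                * above may have slept and flushed the plug, which resets
+                * rq_count, so this read cannot go stale the way the old
+                * pre-allocation request_count could.
+                */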
+               unsigned int request_count = plug->rq_count;
                struct request *last = NULL;
 
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
 
-               /*
-                * @request_count may become stale because of schedule
-                * out, so check the list again.
-                */
-               if (list_empty(&plug->mq_list))
-                       request_count = 0;
-               else if (blk_queue_nomerges(q))
-                       request_count = blk_plug_queued_count(q);
-
                if (!request_count)
                        trace_block_plug(q);
                else
                }
 
                list_add_tail(&rq->queuelist, &plug->mq_list);
+               plug->rq_count++;
        } else if (plug && !blk_queue_nomerges(q)) {
                blk_mq_bio_to_request(rq, bio);
 
                if (same_queue_rq)
                        list_del_init(&same_queue_rq->queuelist);
                list_add_tail(&rq->queuelist, &plug->mq_list);
+               plug->rq_count++;
 
                blk_mq_put_ctx(data.ctx);
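Both plug branches keep mq_list and rq_count in lockstep: each list_add_tail() is paired with a rq_count++, and the list_splice_init() in the flush path zeroes the counter. A minimal sketch of a hypothetical helper that would centralize that pairing (blk_plug_add_rq() is not part of this patch):

    static inline void blk_plug_add_rq(struct blk_plug *plug, struct request *rq)
    {
            list_add_tail(&rq->queuelist, &plug->mq_list);
            plug->rq_count++;
    }

The patch keeps the two updates open-coded at each call site instead; the invariant either way is that rq_count equals the length of mq_list.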
 
 
 bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
                struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-                           unsigned int *request_count,
                            struct request **same_queue_rq);
-unsigned int blk_plug_queued_count(struct request_queue *q);
 
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
 
 struct blk_plug {
        struct list_head mq_list; /* blk-mq requests */
        struct list_head cb_list; /* md requires an unplug callback */
+       unsigned short rq_count;
 };
 #define BLK_MAX_REQUEST_COUNT 16
 #define BLK_PLUG_FLUSH_SIZE (128 * 1024)
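For context, a minimal usage sketch of the plugging machinery this struct backs, using the long-standing blk_start_plug()/blk_finish_plug() API (bios[] and nr_bios are illustrative):

    struct blk_plug plug;
    int i;

    blk_start_plug(&plug);                  /* installs the plug on current */
    for (i = 0; i < nr_bios; i++)
            submit_bio(bios[i]);            /* requests gather on mq_list */
    blk_finish_plug(&plug);                 /* flush: splice list, rq_count = 0 */

BLK_MAX_REQUEST_COUNT bounds how many plugged requests may accumulate before the submit path flushes the list on its own.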