blk_mq_run_hw_queue(hctx, async);
 }
 
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-                                 struct blk_mq_ctx *ctx,
-                                 struct list_head *list, bool run_queue_async)
-{
-       struct elevator_queue *e;
-       struct request_queue *q = hctx->queue;
-
-       /*
-        * blk_mq_sched_insert_requests() is called from flush plug
-        * context only, and hold one usage counter to prevent queue
-        * from being released.
-        */
-       percpu_ref_get(&q->q_usage_counter);
-
-       e = hctx->queue->elevator;
-       if (e) {
-               e->type->ops.insert_requests(hctx, list, false);
-               blk_mq_run_hw_queue(hctx, run_queue_async);
-       } else {
-               blk_mq_insert_requests(hctx, ctx, list, run_queue_async);
-       }
-       percpu_ref_put(&q->q_usage_counter);
-}
-
 static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
                                          struct blk_mq_hw_ctx *hctx,
                                          unsigned int hctx_idx)
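
For readers skimming past the diff markers, the helper removed above reduces to the shape below: pin the queue, hand the batch to the elevator if one is attached, otherwise insert directly, then drop the reference. This is a condensed sketch, not part of the patch; the function name is hypothetical, and the calls are only the ones already visible in the hunk.

/* Condensed sketch of the removed helper; hypothetical name, not a drop-in. */
static void sched_insert_requests_sketch(struct blk_mq_hw_ctx *hctx,
                struct blk_mq_ctx *ctx, struct list_head *list,
                bool run_queue_async)
{
        struct request_queue *q = hctx->queue;

        /* Hold a q_usage_counter reference so the queue cannot be released. */
        percpu_ref_get(&q->q_usage_counter);
        if (q->elevator) {
                /* An I/O scheduler is attached: let it queue the batch. */
                q->elevator->type->ops.insert_requests(hctx, list, false);
                blk_mq_run_hw_queue(hctx, run_queue_async);
        } else {
                /* No scheduler: insert straight into the sw/hw context. */
                blk_mq_insert_requests(hctx, ctx, list, run_queue_async);
        }
        percpu_ref_put(&q->q_usage_counter);
}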
 
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async);
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-                                 struct blk_mq_ctx *ctx,
-                                 struct list_head *list, bool run_queue_async);
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 
 
                blk_mq_run_hw_queue(hctx, false);
 }
 
-void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-                           struct list_head *list, bool run_queue_async)
+static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
+               struct blk_mq_ctx *ctx, struct list_head *list,
+               bool run_queue_async)
 {
        struct request *rq;
        enum hctx_type type = hctx->type;
 
        plug->mq_list = requeue_list;
        trace_block_unplug(this_hctx->queue, depth, !from_sched);
-       blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
+
+       percpu_ref_get(&this_hctx->queue->q_usage_counter);
+       if (this_hctx->queue->elevator) {
+               this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
+                               &list, false);
+               blk_mq_run_hw_queue(this_hctx, from_sched);
+       } else {
+               blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
+       }
+       percpu_ref_put(&this_hctx->queue->q_usage_counter);
 }
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
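
Seen together with the removal above, the patch drops one hop from the plug flush path: the logic now sits directly in blk_mq_dispatch_plug_list(), whose caller is the blk_mq_flush_plug_list() shown in the context line above (the caller relationship is assumed from the surrounding code and the old "flush plug context" comment, not stated in this hunk). A call-flow sketch, not part of the patch:

/*
 * Call flow around this hunk (sketch only):
 *
 *   before:  blk_mq_flush_plug_list()
 *              -> blk_mq_dispatch_plug_list()
 *                   -> blk_mq_sched_insert_requests()
 *                        -> ops.insert_requests() + blk_mq_run_hw_queue(), or
 *                        -> blk_mq_insert_requests()
 *
 *   after:   blk_mq_flush_plug_list()
 *              -> blk_mq_dispatch_plug_list()
 *                   (q_usage_counter pinning and the elevator check are
 *                    open-coded here)
 *                   -> ops.insert_requests() + blk_mq_run_hw_queue(), or
 *                   -> blk_mq_insert_requests()   (now static)
 */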
 
                                bool at_head);
 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
                                  bool run_queue);
-void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-                               struct list_head *list, bool run_queue_async);
 
 /*
  * CPU -> queue mappings
 
 }
 
 /*
- * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
+ * Called from blk_mq_sched_insert_request() or blk_mq_dispatch_plug_list().
  */
 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)
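
The ops.insert_requests indirection in the open-coded path lands in dd_insert_requests() when mq-deadline is the active scheduler. The registration sketch below is from memory of the mq-deadline driver and is not part of this patch; unrelated ops and fields are omitted.

/* Abridged sketch of mq-deadline's elevator_type registration (not from this patch). */
static struct elevator_type mq_deadline = {
        .ops = {
                .insert_requests        = dd_insert_requests,
                /* ... dispatch_request, init_sched and the other ops omitted ... */
        },
        .elevator_name = "mq-deadline",
        .elevator_owner = THIS_MODULE,
};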