        case REQ_FSEQ_DATA:
                list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
-               blk_mq_add_to_requeue_list(rq, true);
+               blk_mq_add_to_requeue_list(rq, BLK_MQ_INSERT_AT_HEAD);
                blk_mq_kick_requeue_list(q);
                break;
 
        smp_wmb();
        req_ref_set(flush_rq, 1);
 
-       blk_mq_add_to_requeue_list(flush_rq, false);
+       blk_mq_add_to_requeue_list(flush_rq, 0);
        blk_mq_kick_requeue_list(q);
 }
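
For reference, blk_insert_t is a sparse __bitwise flag word rather than a
bool, so further insertion flags can be added later without another signature
change. A minimal sketch of the definitions this diff assumes (in the series
they sit with the other blk-mq internals, presumably in block/blk-mq.h):

	/*
	 * Flag word for request insertion; the __bitwise annotation lets
	 * sparse catch accidental mixing with plain integers.
	 */
	typedef unsigned int __bitwise blk_insert_t;

	/* Insert at the head of the requeue list instead of the tail. */
	#define BLK_MQ_INSERT_AT_HEAD	((__force blk_insert_t)0x01)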
 
 
        /* this request will be re-inserted to io scheduler queue */
        blk_mq_sched_requeue_request(rq);
 
-       blk_mq_add_to_requeue_list(rq, true);
+       blk_mq_add_to_requeue_list(rq, BLK_MQ_INSERT_AT_HEAD);
 
        if (kick_requeue_list)
                blk_mq_kick_requeue_list(q);
        blk_mq_run_hw_queues(q, false);
 }
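
The conversion rule is mechanical: at_head == true becomes
BLK_MQ_INSERT_AT_HEAD and at_head == false becomes 0 (which is why the
flush_rq hunk above must pass 0, not the head flag). A hypothetical caller
(example_requeue is not part of the patch) showing both modes:

	/* Sketch only: illustrates the two insertion modes after the conversion. */
	static void example_requeue(struct request *rq, bool urgent)
	{
		/* urgent requests go to the head, everything else to the tail */
		blk_insert_t flags = urgent ? BLK_MQ_INSERT_AT_HEAD : 0;

		blk_mq_add_to_requeue_list(rq, flags);
		blk_mq_kick_requeue_list(rq->q);
	}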
 
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
+void blk_mq_add_to_requeue_list(struct request *rq, blk_insert_t insert_flags)
 {
        struct request_queue *q = rq->q;
        unsigned long flags;
 
        /*
         * We abuse this flag that is otherwise used by the I/O scheduler to
         * request head insertion from the workqueue.
         */
        BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
 
        spin_lock_irqsave(&q->requeue_lock, flags);
-       if (at_head) {
+       if (insert_flags & BLK_MQ_INSERT_AT_HEAD) {
                rq->rq_flags |= RQF_SOFTBARRIER;
                list_add(&rq->queuelist, &q->requeue_list);
        } else {
 
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
                             unsigned int);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_add_to_requeue_list(struct request *rq, blk_insert_t insert_flags);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);
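
The blk_mq_add_to_requeue_list() hunk is cut off at the else branch; assuming
that branch is the usual plain tail insert, the converted function reads
roughly as follows (a sketch, not the verbatim result of applying the diff):

	void blk_mq_add_to_requeue_list(struct request *rq,
			blk_insert_t insert_flags)
	{
		struct request_queue *q = rq->q;
		unsigned long flags;

		BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

		spin_lock_irqsave(&q->requeue_lock, flags);
		if (insert_flags & BLK_MQ_INSERT_AT_HEAD) {
			/*
			 * RQF_SOFTBARRIER marks this request for head
			 * insertion when the requeue work runs.
			 */
			rq->rq_flags |= RQF_SOFTBARRIER;
			list_add(&rq->queuelist, &q->requeue_list);
		} else {
			list_add_tail(&rq->queuelist, &q->requeue_list);
		}
		spin_unlock_irqrestore(&q->requeue_lock, flags);
	}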