}
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
+static void blk_mq_finish_request(struct request *rq)
+{
+       struct request_queue *q = rq->q;
+
+       if (rq->rq_flags & RQF_USE_SCHED) {
+               q->elevator->type->ops.finish_request(rq);
+               /*
+                * A postflush request may be completed twice, so
+                * clear this flag to avoid calling finish_request()
+                * a second time on the same rq.
+                */
+               rq->rq_flags &= ~RQF_USE_SCHED;
+       }
+}
+
 static void __blk_mq_free_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
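The helper above pairs the elevator's finish_request callback with a clear of
RQF_USE_SCHED, so a request that reaches the completion path twice (the
postflush case) only notifies the scheduler once. Below is a minimal,
standalone sketch of that clear-on-first-call idiom; the names (fake_request,
FAKE_USE_SCHED, sched_finish) are illustrative stand-ins, not kernel APIs.

#include <stdio.h>

#define FAKE_USE_SCHED	(1U << 0)	/* stand-in for RQF_USE_SCHED */

struct fake_request {
	unsigned int rq_flags;
};

static void sched_finish(struct fake_request *rq)
{
	/* stands in for releasing per-rq scheduler state */
	printf("finish_request called\n");
}

static void finish_once(struct fake_request *rq)
{
	if (rq->rq_flags & FAKE_USE_SCHED) {
		sched_finish(rq);
		/* consume the flag: a second completion becomes a no-op */
		rq->rq_flags &= ~FAKE_USE_SCHED;
	}
}

int main(void)
{
	struct fake_request rq = { .rq_flags = FAKE_USE_SCHED };

	finish_once(&rq);	/* first completion: callback runs */
	finish_once(&rq);	/* second completion (postflush case): skipped */
	return 0;
}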
@@ ... @@ void blk_mq_free_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
 
-       if ((rq->rq_flags & RQF_USE_SCHED) &&
-           q->elevator->type->ops.finish_request)
-               q->elevator->type->ops.finish_request(rq);
+       blk_mq_finish_request(rq);
 
        if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
                laptop_io_completion(q->disk->bdi);
@@ ... @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
        if (blk_mq_need_time_stamp(rq))
                __blk_mq_end_request_acct(rq, ktime_get_ns());
 
+       blk_mq_finish_request(rq);
+
        if (rq->end_io) {
                rq_qos_done(rq->q, rq);
                if (rq->end_io(rq, error) == RQ_END_IO_FREE)
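Calling blk_mq_finish_request() before ->end_io() matters: the end_io handler
may free or reuse the request (RQ_END_IO_FREE), so the scheduler's per-request
state has to be released first. A standalone sketch of that ordering
constraint, with illustrative names (demo_rq, demo_complete, DEMO_END_IO_FREE),
assuming a handler that asks its caller to free the request:

#include <stdio.h>
#include <stdlib.h>

#define DEMO_END_IO_FREE 1	/* stand-in for RQ_END_IO_FREE */

struct demo_rq {
	void *sched_data;			/* per-request scheduler state */
	int (*end_io)(struct demo_rq *rq);
};

static void demo_finish(struct demo_rq *rq)
{
	free(rq->sched_data);
	rq->sched_data = NULL;
}

static int demo_end_io(struct demo_rq *rq)
{
	(void)rq;
	return DEMO_END_IO_FREE;	/* caller must free rq */
}

static void demo_complete(struct demo_rq *rq)
{
	demo_finish(rq);	/* must run first: rq may be gone below */
	if (rq->end_io && rq->end_io(rq) == DEMO_END_IO_FREE)
		free(rq);
}

int main(void)
{
	struct demo_rq *rq = calloc(1, sizeof(*rq));

	rq->sched_data = malloc(16);
	rq->end_io = demo_end_io;
	demo_complete(rq);	/* no leak, no use-after-free */
	return 0;
}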
@@ ... @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
                if (iob->need_ts)
                        __blk_mq_end_request_acct(rq, now);
 
+               blk_mq_finish_request(rq);
+
                rq_qos_done(rq->q, rq);
 
                /*
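In the batched completion loop the call is per request and sits before
rq_qos_done() and the free, mirroring the single-request path. A
self-contained sketch (illustrative names only) showing that requests which
never touched a scheduler skip the callback via the flag check:

#include <stdio.h>

#define USE_SCHED (1U << 0)

struct brq {
	unsigned int flags;
	int id;
};

static void finish_once(struct brq *rq)
{
	if (rq->flags & USE_SCHED) {
		printf("finish rq %d\n", rq->id);
		rq->flags &= ~USE_SCHED;
	}
}

int main(void)
{
	/* rq 2 never went through a scheduler: flag unset, call skipped */
	struct brq batch[] = { { USE_SCHED, 0 }, { USE_SCHED, 1 }, { 0, 2 } };
	unsigned int i;

	for (i = 0; i < sizeof(batch) / sizeof(batch[0]); i++)
		finish_once(&batch[i]);	/* per-request, inside the loop */
	return 0;
}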
diff --git a/block/elevator.c b/block/elevator.c
--- a/block/elevator.c
+++ b/block/elevator.c
@@ ... @@ void elv_unregister_queue(struct request_queue *q)
 
 int elv_register(struct elevator_type *e)
 {
+       /* finish request is mandatory */
+       if (WARN_ON_ONCE(!e->ops.finish_request))
+               return -EINVAL;
        /* insert_requests and dispatch_request are mandatory */
        if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
                return -EINVAL;
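With finish_request now called unconditionally on completion, the NULL check
at the call site is gone, so registration must reject elevators that do not
provide the callback; the in-tree schedulers (bfq, mq-deadline, kyber) each
already implement it. A standalone sketch of the validation (mock_ops and
mock_register are illustrative stand-ins for struct elevator_mq_ops and
elv_register, not the kernel types):

#include <stdio.h>
#include <errno.h>

struct mock_ops {
	void (*insert_requests)(void);
	void (*dispatch_request)(void);
	void (*finish_request)(void);
};

static int mock_register(const struct mock_ops *ops)
{
	/* finish request is mandatory */
	if (!ops->finish_request)
		return -EINVAL;
	/* insert_requests and dispatch_request are mandatory */
	if (!ops->insert_requests || !ops->dispatch_request)
		return -EINVAL;
	return 0;
}

static void noop(void) { }

int main(void)
{
	struct mock_ops incomplete = { .insert_requests = noop,
				       .dispatch_request = noop };
	struct mock_ops complete = incomplete;

	complete.finish_request = noop;
	printf("incomplete: %d\n", mock_register(&incomplete));	/* -EINVAL */
	printf("complete:   %d\n", mock_register(&complete));	/* 0 */
	return 0;
}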