It is reasonable to allocate the flush request in blk_mq_init_flush(): the flush machinery's setup then lives in one place, and the function can report allocation failure to its caller.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-void blk_mq_init_flush(struct request_queue *q)
+int blk_mq_init_flush(struct request_queue *q)
 {
+       struct blk_mq_tag_set *set = q->tag_set;
+
        spin_lock_init(&q->mq_flush_lock);
+
+       q->flush_rq = kzalloc(round_up(sizeof(struct request) +
+                               set->cmd_size, cache_line_size()),
+                               GFP_KERNEL);
+       if (!q->flush_rq)
+               return -ENOMEM;
+       return 0;
 }
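
For context, a minimal sketch (not part of this patch; the driver identifiers are hypothetical) of why the allocation reserves set->cmd_size extra bytes rounded up to a cache line: the driver's per-request payload (PDU) sits directly behind struct request, so the flush request can be handed to the driver like any other request and the PDU reached via the stock blk_mq_rq_to_pdu() accessor.

#include <linux/blk-mq.h>

/* Hypothetical driver PDU; set->cmd_size would be sizeof(struct my_cmd). */
struct my_cmd {
	int tag;
};

/* The flush request is laid out as [struct request][PDU], cache-line
 * aligned, so blk_mq_rq_to_pdu() (which returns rq + 1) works on it
 * exactly as on a normally allocated request. */
static void my_prep(struct request *rq)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->tag = rq->tag;
}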
 
        if (set->ops->complete)
                blk_queue_softirq_done(q, set->ops->complete);
 
-       blk_mq_init_flush(q);
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-       q->flush_rq = kzalloc(round_up(sizeof(struct request) +
-                               set->cmd_size, cache_line_size()),
-                               GFP_KERNEL);
-       if (!q->flush_rq)
-               goto err_hw;
-
        if (blk_mq_init_hw_queues(q, set))
-               goto err_flush_rq;
+               goto err_hw;
 
        mutex_lock(&all_q_mutex);
        list_add_tail(&q->all_q_node, &all_q_list);
 
        blk_mq_add_queue_tag_set(set, q);
 
+       if (blk_mq_init_flush(q))
+               goto err_hw_queues;
+
        blk_mq_map_swqueue(q);
 
        return q;
 
-err_flush_rq:
-       kfree(q->flush_rq);
+err_hw_queues:
+       blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 err_hw:
        blk_cleanup_queue(q);
 err_hctxs:
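
Since blk_mq_init_flush() can now fail after the hardware queues have been brought up, the error path gains an err_hw_queues label that tears them down before blk_cleanup_queue(). A minimal, self-contained sketch of this goto-unwind idiom (all names hypothetical, not kernel code):

#include <errno.h>

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return -ENOMEM; }	/* e.g. allocation failure */
static void exit_b(void) { }
static void exit_a(void) { }

/* Each label undoes exactly the steps that completed before the
 * failure, in reverse order of initialization. */
static int init_all(void)
{
	int ret;

	ret = init_a();
	if (ret)
		goto err;
	ret = init_b();
	if (ret)
		goto err_a;
	ret = init_c();
	if (ret)
		goto err_b;	/* c failed: undo b, then a */
	return 0;

err_b:
	exit_b();
err_a:
	exit_a();
err:
	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;
}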
 
 
 void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
-void blk_mq_init_flush(struct request_queue *q);
+int blk_mq_init_flush(struct request_queue *q);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,