if (!uninit_q)
                return NULL;
 
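+       /* Preallocate the request used to issue flushes on this queue */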
+       uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+       if (!uninit_q->flush_rq)
+               goto out_cleanup_queue;
+
        q = blk_init_allocated_queue(uninit_q, rfn, lock);
        if (!q)
-               blk_cleanup_queue(uninit_q);
-
+               goto out_free_flush_rq;
        return q;
+
+out_free_flush_rq:
+       kfree(uninit_q->flush_rq);
+out_cleanup_queue:
+       blk_cleanup_queue(uninit_q);
+       return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
        if (q->mq_ops)
-               return blk_mq_alloc_request(q, rw, gfp_mask, false);
+               return blk_mq_alloc_request(q, rw, gfp_mask);
        else
                return blk_old_get_request(q, rw, gfp_mask);
 }
 
        blk_clear_rq_complete(rq);
 }
 
-static void mq_flush_data_run(struct work_struct *work)
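+/* Issue a flush-sequence request from kblockd work context */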
+static void mq_flush_run(struct work_struct *work)
 {
        struct request *rq;
 
-       rq = container_of(work, struct request, mq_flush_data);
+       rq = container_of(work, struct request, mq_flush_work);
 
        memset(&rq->csd, 0, sizeof(rq->csd));
        blk_mq_run_request(rq, true, false);
 }
 
-static void blk_mq_flush_data_insert(struct request *rq)
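+/*
+ * Queue a request that is part of a flush sequence.  On blk-mq the request
+ * is handed off to kblockd and false is returned; on the legacy path it is
+ * added to the dispatch list and true is returned so the caller runs the
+ * queue.
+ */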
+static bool blk_flush_queue_rq(struct request *rq)
 {
-       INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
-       kblockd_schedule_work(rq->q, &rq->mq_flush_data);
+       if (rq->q->mq_ops) {
+               INIT_WORK(&rq->mq_flush_work, mq_flush_run);
+               kblockd_schedule_work(rq->q, &rq->mq_flush_work);
+               return false;
+       } else {
+               list_add_tail(&rq->queuelist, &rq->q->queue_head);
+               return true;
+       }
 }
 
 /**
 
        case REQ_FSEQ_DATA:
                list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
-               if (q->mq_ops)
-                       blk_mq_flush_data_insert(rq);
-               else {
-                       list_add(&rq->queuelist, &q->queue_head);
-                       queued = true;
-               }
+               queued = blk_flush_queue_rq(rq);
                break;
 
        case REQ_FSEQ_DONE:
        }
 
        kicked = blk_kick_flush(q);
-       /* blk_mq_run_flush will run queue */
-       if (q->mq_ops)
-               return queued;
        return kicked | queued;
 }
 
        struct request *rq, *n;
        unsigned long flags = 0;
 
-       if (q->mq_ops) {
-               blk_mq_free_request(flush_rq);
+       if (q->mq_ops)
                spin_lock_irqsave(&q->mq_flush_lock, flags);
-       }
+
        running = &q->flush_queue[q->flush_running_idx];
        BUG_ON(q->flush_pending_idx == q->flush_running_idx);
 
         * kblockd.
         */
        if (queued || q->flush_queue_delayed) {
-               if (!q->mq_ops)
-                       blk_run_queue_async(q);
-               else
-               /*
-                * This can be optimized to only run queues with requests
-                * queued if necessary.
-                */
-                       blk_mq_run_queues(q, true);
+               WARN_ON(q->mq_ops);
+               blk_run_queue_async(q);
        }
        q->flush_queue_delayed = 0;
        if (q->mq_ops)
                spin_unlock_irqrestore(&q->mq_flush_lock, flags);
 }
 
-static void mq_flush_work(struct work_struct *work)
-{
-       struct request_queue *q;
-       struct request *rq;
-
-       q = container_of(work, struct request_queue, mq_flush_work);
-
-       rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
-               __GFP_WAIT|GFP_ATOMIC, false);
-       rq->cmd_type = REQ_TYPE_FS;
-       rq->end_io = flush_end_io;
-
-       blk_mq_run_request(rq, true, false);
-}
-
-/*
- * We can't directly use q->flush_rq, because it doesn't have tag and is not in
- * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
- * so offload the work to workqueue.
- *
- * Note: we assume a flush request finished in any hardware queue will flush
- * the whole disk cache.
- */
-static void mq_run_flush(struct request_queue *q)
-{
-       kblockd_schedule_work(q, &q->mq_flush_work);
-}
-
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
         * different from running_idx, which means flush is in flight.
         */
        q->flush_pending_idx ^= 1;
+
        if (q->mq_ops) {
-               mq_run_flush(q);
-               return true;
+               struct blk_mq_ctx *ctx = first_rq->mq_ctx;
+               struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+               blk_mq_rq_init(hctx, q->flush_rq);
+               q->flush_rq->mq_ctx = ctx;
+
+               /*
+                * Reuse the tag value from the first waiting request;
+                * with blk-mq the tag is generated during request
+                * allocation and drivers can rely on it being inside
+                * the range they asked for.
+                */
+               q->flush_rq->tag = first_rq->tag;
+       } else {
+               blk_rq_init(q, q->flush_rq);
        }
 
-       blk_rq_init(q, &q->flush_rq);
-       q->flush_rq.cmd_type = REQ_TYPE_FS;
-       q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
-       q->flush_rq.rq_disk = first_rq->rq_disk;
-       q->flush_rq.end_io = flush_end_io;
+       q->flush_rq->cmd_type = REQ_TYPE_FS;
+       q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+       q->flush_rq->rq_disk = first_rq->rq_disk;
+       q->flush_rq->end_io = flush_end_io;
 
-       list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
-       return true;
+       return blk_flush_queue_rq(q->flush_rq);
 }
 
 static void flush_data_end_io(struct request *rq, int error)
        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_FLUSH and FUA for the driver.
-        * We keep REQ_FLUSH for mq to track flush requests. For !FUA,
-        * we never dispatch the request directly.
         */
-       if (rq->cmd_flags & REQ_FUA)
-               rq->cmd_flags &= ~REQ_FLUSH;
+       rq->cmd_flags &= ~REQ_FLUSH;
        if (!(fflags & REQ_FUA))
                rq->cmd_flags &= ~REQ_FUA;
 
 void blk_mq_init_flush(struct request_queue *q)
 {
        spin_lock_init(&q->mq_flush_lock);
-       INIT_WORK(&q->mq_flush_work, mq_flush_work);
 }
 
 }
 
 static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-                                             gfp_t gfp, bool reserved,
-                                             int rw)
+                                             gfp_t gfp, bool reserved)
 {
-       struct request *req;
-       bool is_flush = false;
-       /*
-        * flush need allocate a request, leave at least one request for
-        * non-flush IO to avoid deadlock
-        */
-       if ((rw & REQ_FLUSH) && !(rw & REQ_FLUSH_SEQ)) {
-               if (atomic_inc_return(&hctx->pending_flush) >=
-                   hctx->queue_depth - hctx->reserved_tags - 1) {
-                       atomic_dec(&hctx->pending_flush);
-                       return NULL;
-               }
-               is_flush = true;
-       }
-       req = blk_mq_alloc_rq(hctx, gfp, reserved);
-       if (!req && is_flush)
-               atomic_dec(&hctx->pending_flush);
-       return req;
+       return blk_mq_alloc_rq(hctx, gfp, reserved);
 }
 
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
                struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
                struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-               rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved, rw);
+               rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
                if (rq) {
                        blk_mq_rq_ctx_init(q, ctx, rq, rw);
                        break;
        return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
-               gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
 {
        struct request *rq;
 
        if (blk_mq_queue_enter(q))
                return NULL;
 
-       rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+       rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
        if (rq)
                blk_mq_put_ctx(rq->mq_ctx);
        return rq;
 /*
  * Re-init and set pdu, if we have it
  */
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
        blk_rq_init(hctx->queue, rq);
 
        const int tag = rq->tag;
        struct request_queue *q = rq->q;
 
-       if ((rq->cmd_flags & REQ_FLUSH) && !(rq->cmd_flags & REQ_FLUSH_SEQ))
-               atomic_dec(&hctx->pending_flush);
-
        blk_mq_rq_init(hctx, rq);
        blk_mq_put_tag(hctx->tags, tag);
 
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
        trace_block_getrq(q, bio, rw);
-       rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false, bio->bi_rw);
+       rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
        if (likely(rq))
-               blk_mq_rq_ctx_init(q, ctx, rq, bio->bi_rw);
+               blk_mq_rq_ctx_init(q, ctx, rq, rw);
        else {
                blk_mq_put_ctx(ctx);
                trace_block_sleeprq(q, bio, rw);
-               rq = blk_mq_alloc_request_pinned(q, bio->bi_rw,
-                               __GFP_WAIT|GFP_ATOMIC, false);
+               rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
+                                                       false);
                ctx = rq->mq_ctx;
                hctx = q->mq_ops->map_queue(q, ctx->cpu);
        }
                hctx->queue_num = i;
                hctx->flags = reg->flags;
                hctx->queue_depth = reg->queue_depth;
-               hctx->reserved_tags = reg->reserved_tags;
                hctx->cmd_size = reg->cmd_size;
-               atomic_set(&hctx->pending_flush, 0);
 
                blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                                blk_mq_hctx_notify, hctx);
        blk_mq_init_flush(q);
        blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
 
-       if (blk_mq_init_hw_queues(q, reg, driver_data))
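+       /* Preallocate the flush request, with room for per-driver command data */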
+       q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+                               cache_line_size()), GFP_KERNEL);
+       if (!q->flush_rq)
                goto err_hw;
 
+       if (blk_mq_init_hw_queues(q, reg, driver_data))
+               goto err_flush_rq;
+
        blk_mq_map_swqueue(q);
 
        mutex_lock(&all_q_mutex);
        mutex_unlock(&all_q_mutex);
 
        return q;
+
+err_flush_rq:
+       kfree(q->flush_rq);
 err_hw:
        kfree(q->mq_map);
 err_map:
 
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);
 
 /*
  * CPU hotplug helpers
 
        if (q->mq_ops)
                blk_mq_free_queue(q);
 
+       kfree(q->flush_rq);
+
        blk_trace_shutdown(q);
 
        bdi_destroy(&q->backing_dev_info);
 
        struct list_head        page_list;
        struct blk_mq_tags      *tags;
 
-       atomic_t                pending_flush;
-
        unsigned long           queued;
        unsigned long           run;
 #define BLK_MQ_MAX_DISPATCH_ORDER      10
        unsigned long           dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
        unsigned int            queue_depth;
-       unsigned int            reserved_tags;
        unsigned int            numa_node;
        unsigned int            cmd_size;       /* per-request extra data */
 
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
 
 
        };
        union {
                struct call_single_data csd;
-               struct work_struct mq_flush_data;
+               struct work_struct mq_flush_work;
        };
 
        struct request_queue *q;
        unsigned long           flush_pending_since;
        struct list_head        flush_queue[2];
        struct list_head        flush_data_in_flight;
-       union {
-               struct request  flush_rq;
-               struct {
-                       spinlock_t mq_flush_lock;
-                       struct work_struct mq_flush_work;
-               };
-       };
+       struct request          *flush_rq;
+       spinlock_t              mq_flush_lock;
 
        struct mutex            sysfs_lock;