&hctx->cpuhp_dead);
 }
 
+/*
+ * Before freeing the hw queue, clear the flush request reference in
+ * tags->rqs[] to avoid a potential use-after-free.
+ */
+static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
+               unsigned int queue_depth, struct request *flush_rq)
+{
+       int i;
+       unsigned long flags;
+
+       /* The hw queue may not be mapped yet */
+       if (!tags)
+               return;
+
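+       /* The flush request is expected to be idle (zero references) here */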
+       WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
+
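+       /*
+        * cmpxchg() only clears slots that still point at flush_rq, so
+        * entries concurrently reused for other requests are left intact.
+        */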
+       for (i = 0; i < queue_depth; i++)
+               cmpxchg(&tags->rqs[i], flush_rq, NULL);
+
+       /*
+        * Wait until all pending iterations over tags->rqs[] are done.
+        *
+        * The request references have been cleared above, and the clearing
+        * is guaranteed to be observed once the ->lock is released.
+        */
+       spin_lock_irqsave(&tags->lock, flags);
+       spin_unlock_irqrestore(&tags->lock, flags);
+}
+
 /* hctx->ctxs will be freed in queue's release handler */
 static void blk_mq_exit_hctx(struct request_queue *q,
                struct blk_mq_tag_set *set,
                struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
+       struct request *flush_rq = hctx->fq->flush_rq;
+
        if (blk_mq_hw_queue_mapped(hctx))
                blk_mq_tag_idle(hctx);
 
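+       /*
+        * Drop the flush request from tags->rqs[] before tearing it down,
+        * so concurrent tag iteration cannot dereference a stale pointer.
+        */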
+       blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+                       set->queue_depth, flush_rq);
        if (set->ops->exit_request)
-               set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
+               set->ops->exit_request(set, flush_rq, hctx_idx);
 
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);