return set->tags[hctx_idx];
 }
 
-static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
-                                        unsigned int hctx_idx)
+/*
+ * Free the tag map @tags and the requests it maps for hw queue @hctx_idx.
+ * A NULL @tags is a no-op.  Unlike the old blk_mq_free_map_and_requests(),
+ * this does not clear set->tags[hctx_idx]; the caller owns that reference
+ * and must clear it itself if @tags came from there.
+ */
+void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
+                            struct blk_mq_tags *tags,
+                            unsigned int hctx_idx)
 {
        unsigned int flags = set->flags;
 
-       if (set->tags && set->tags[hctx_idx]) {
-               blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
-               blk_mq_free_rq_map(set->tags[hctx_idx], flags);
-               set->tags[hctx_idx] = NULL;
+       if (tags) {
+               blk_mq_free_rqs(set, tags, hctx_idx);
+               blk_mq_free_rq_map(tags, flags);
        }
 }
 
                         * fallback in case of a new remap fails
                         * allocation
                         */
-                       if (i && set->tags[i])
-                               blk_mq_free_map_and_requests(set, i);
+                       if (i && set->tags[i]) {
+                               blk_mq_free_map_and_rqs(set, set->tags[i], i);
+                               set->tags[i] = NULL;
+                       }
 
                        hctx->tags = NULL;
                        continue;
                struct blk_mq_hw_ctx *hctx = hctxs[j];
 
                if (hctx) {
-                       if (hctx->tags)
-                               blk_mq_free_map_and_requests(set, j);
+                       blk_mq_free_map_and_rqs(set, set->tags[j], j);
+                       set->tags[j] = NULL;
                        blk_mq_exit_hctx(q, set, hctx, j);
                        hctxs[j] = NULL;
                }
        return 0;
 
 out_unwind:
-       while (--i >= 0)
-               blk_mq_free_map_and_requests(set, i);
+       while (--i >= 0) {
+               blk_mq_free_map_and_rqs(set, set->tags[i], i);
+               set->tags[i] = NULL;
+       }
 
        return -ENOMEM;
 }
        return 0;
 
 out_free_mq_rq_maps:
-       for (i = 0; i < set->nr_hw_queues; i++)
-               blk_mq_free_map_and_requests(set, i);
+       for (i = 0; i < set->nr_hw_queues; i++) {
+               blk_mq_free_map_and_rqs(set, set->tags[i], i);
+               set->tags[i] = NULL;
+       }
 out_free_mq_map:
        for (i = 0; i < set->nr_maps; i++) {
                kfree(set->map[i].mq_map);
 {
        int i, j;
 
-       for (i = 0; i < set->nr_hw_queues; i++)
-               blk_mq_free_map_and_requests(set, i);
+       for (i = 0; i < set->nr_hw_queues; i++) {
+               blk_mq_free_map_and_rqs(set, set->tags[i], i);
+               set->tags[i] = NULL;
+       }
 
        if (blk_mq_is_sbitmap_shared(set->flags))
                blk_mq_exit_shared_sbitmap(set);
 
 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
                                unsigned int hctx_idx, unsigned int depth);
-
+void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
+                            struct blk_mq_tags *tags,
+                            unsigned int hctx_idx);
 /*
  * Internal helpers for request insertion into sw queues
  */