                                          struct blk_mq_hw_ctx *hctx,
                                          unsigned int hctx_idx)
 {
-       struct blk_mq_tag_set *set = q->tag_set;
-       int ret;
+       hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
+                                                   q->nr_requests);
 
-       hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
-                                              set->reserved_tags, set->flags);
        if (!hctx->sched_tags)
                return -ENOMEM;
-
-       ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
-       if (ret) {
-               blk_mq_free_rq_map(hctx->sched_tags, set->flags);
-               hctx->sched_tags = NULL;
-       }
-
-       return ret;
+       return 0;
 }
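
Taken together, this hunk replaces a two-step allocation with hand-rolled rollback by a single call into the new helper. As a reading aid, the body after the patch reduces to the following (reconstructed directly from the added lines; the enclosing function's name is truncated in the excerpt above):

        hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
                                                    q->nr_requests);
        if (!hctx->sched_tags)
                return -ENOMEM;

        return 0;

Error handling moves wholesale into the helper: if allocating the static requests fails, the helper frees the tag map itself, so the caller never sees a half-built sched_tags.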
 
 /* called in queue's release handler, tagset has gone away */
 
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;
-               bool ret;
 
                if (!can_grow)
                        return -EINVAL;
                if (tdepth > MAX_SCHED_RQ)
                        return -EINVAL;
 
-               new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
-                               tags->nr_reserved_tags, set->flags);
+               new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
                if (!new)
                        return -ENOMEM;
-               ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
-               if (ret) {
-                       blk_mq_free_rq_map(new, set->flags);
-                       return -ENOMEM;
-               }
 
                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
                blk_mq_free_rq_map(*tagsptr, set->flags);
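
The same consolidation simplifies the depth-grow path. Because the helper is all-or-nothing, the intermediate "map allocated, requests not" state no longer exists, and the old map is dropped only once the replacement is fully built:

        /* All-or-nothing: on failure the helper has already freed the map,
         * so there is nothing for this caller to unwind. */
        new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
        if (!new)
                return -ENOMEM;

        /* Only now is it safe to release the old map and its requests. */
        blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
        blk_mq_free_rq_map(*tagsptr, set->flags);

(The excerpt truncates the rest of the hunk, which presumably installs new into *tagsptr; that line is not shown above.)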
 
        blk_mq_free_tags(tags, flags);
 }
 
-struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
-                                       unsigned int hctx_idx,
-                                       unsigned int nr_tags,
-                                       unsigned int reserved_tags,
-                                       unsigned int flags)
+static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
+                                              unsigned int hctx_idx,
+                                              unsigned int nr_tags,
+                                              unsigned int reserved_tags,
+                                              unsigned int flags)
 {
        struct blk_mq_tags *tags;
        int node;
        return 0;
 }
 
-int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
-                    unsigned int hctx_idx, unsigned int depth)
+static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
+                           struct blk_mq_tags *tags,
+                           unsigned int hctx_idx, unsigned int depth)
 {
        unsigned int i, j, entries_per_page, max_order = 4;
        size_t rq_size, left;
        }
 }
 
-static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
-                                       int hctx_idx)
+struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+                                            unsigned int hctx_idx,
+                                            unsigned int depth)
 {
-       unsigned int flags = set->flags;
-       int ret = 0;
+       struct blk_mq_tags *tags;
+       int ret;
 
-       set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
-                                       set->queue_depth, set->reserved_tags, flags);
-       if (!set->tags[hctx_idx])
-               return false;
+       tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags,
+                                  set->flags);
+       if (!tags)
+               return NULL;
 
-       ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
-                               set->queue_depth);
-       if (!ret)
-               return true;
+       ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
+       if (ret) {
+               blk_mq_free_rq_map(tags, set->flags);
+               return NULL;
+       }
 
-       blk_mq_free_rq_map(set->tags[hctx_idx], flags);
-       set->tags[hctx_idx] = NULL;
-       return false;
+       return tags;
+}
+
+static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+                                      int hctx_idx)
+{
+       set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
+                                                      set->queue_depth);
+
+       return set->tags[hctx_idx];
 }
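
Note the division of labor introduced here: blk_mq_alloc_map_and_rqs() is the reusable, all-or-nothing allocator that owns its own rollback (it frees the tag map and returns NULL if request allocation fails), while the double-underscore wrapper merely stores the result into set->tags[] and keeps the boolean convention its callers expect. The bare return set->tags[hctx_idx]; leans on C's implicit pointer-to-bool conversion; an equivalent, more explicit spelling (illustrative only, not part of the patch) would be:

        return set->tags[hctx_idx] != NULL;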
 
 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
                        hctx_idx = set->map[j].mq_map[i];
                        /* unmapped hw queue can be remapped after CPU topo changed */
                        if (!set->tags[hctx_idx] &&
-                           !__blk_mq_alloc_map_and_request(set, hctx_idx)) {
+                           !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
                                /*
                                 * If tags initialization fail for some hctx,
                                 * that hctx won't be brought online.  In this
        int i;
 
        for (i = 0; i < set->nr_hw_queues; i++) {
-               if (!__blk_mq_alloc_map_and_request(set, i))
+               if (!__blk_mq_alloc_map_and_rqs(set, i))
                        goto out_unwind;
                cond_resched();
        }
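
The set-wide path walks every hardware queue, bailing to an unwind label on the first failure; the cond_resched() keeps the loop preemption-friendly when nr_hw_queues is large. The unwind itself is truncated from the excerpt; a sketch of the usual idiom, using the free helper visible earlier (blk_mq_free_map_and_requests), would be:

out_unwind:
        /* Assumption: reconstructed for illustration, not shown in the
         * excerpt. Free every map allocated before the failure. */
        while (--i >= 0)
                blk_mq_free_map_and_requests(set, i);
        return -ENOMEM;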
  * may reduce the depth asked for, if memory is tight. set->queue_depth
  * will be updated to reflect the allocated depth.
  */
-static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set)
+static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
 {
        unsigned int depth;
        int err;
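
The comment above describes the policy: allocation may be retried at a smaller depth when memory is tight, and set->queue_depth ends up reflecting what was actually allocated. The body is truncated here; as a sketch of what that policy implies (the halving back-off is an assumption reconstructed from the comment, and __blk_mq_alloc_rq_maps names the per-hctx loop shown earlier, whose enclosing function name is not visible in the excerpt):

        depth = set->queue_depth;
        do {
                err = __blk_mq_alloc_rq_maps(set);
                if (!err)
                        break;
                /* Assumed back-off: shrink the depth and retry, giving up
                 * once it can no longer shrink. */
                set->queue_depth >>= 1;
        } while (set->queue_depth);

        if (err)
                return -ENOMEM;
        return 0;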
        if (ret)
                goto out_free_mq_map;
 
-       ret = blk_mq_alloc_map_and_requests(set);
+       ret = blk_mq_alloc_set_map_and_rqs(set);
        if (ret)
                goto out_free_mq_map;
 
 
 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
-struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
-                                       unsigned int hctx_idx,
-                                       unsigned int nr_tags,
-                                       unsigned int reserved_tags,
-                                       unsigned int flags);
-int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
-                    unsigned int hctx_idx, unsigned int depth);
+struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+                               unsigned int hctx_idx, unsigned int depth);
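
With blk_mq_alloc_rq_map() and blk_mq_alloc_rqs() now static to blk-mq.c, the header exports only the paired allocator. The lifetime pairing implied by these declarations, for a hypothetical caller outside blk-mq.c, looks like:

        struct blk_mq_tags *tags;

        tags = blk_mq_alloc_map_and_rqs(set, hctx_idx, depth);
        if (!tags)
                return -ENOMEM;
        /* ... use the map and its statically allocated requests ... */
        blk_mq_free_rqs(set, tags, hctx_idx);   /* requests first */
        blk_mq_free_rq_map(tags, set->flags);   /* then the map */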
 
 /*
  * Internal helpers for request insertion into sw queues