return cpu;
 }
 
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
-                           const struct cpumask *online_mask)
+int blk_mq_map_queues(struct blk_mq_tag_set *set)
 {
+       unsigned int *map = set->mq_map;
+       unsigned int nr_queues = set->nr_hw_queues;
+       const struct cpumask *online_mask = cpu_online_mask;
        unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
        cpumask_var_t cpus;
 
        if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
-               return 1;
+               return -ENOMEM;
 
        cpumask_clear(cpus);
        nr_cpus = nr_uniq_cpus = 0;
        free_cpumask_var(cpus);
        return 0;
 }
 
-unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
-{
-       unsigned int *map;
-
-       /* If cpus are offline, map them to first hctx */
-       map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
-                               set->numa_node);
-       if (!map)
-               return NULL;
-
-       if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
-               return map;
-
-       kfree(map);
-       return NULL;
-}
-
 /*
  * We have no quick way of doing reverse lookups. This is only used at
  * queue init time, so runtime isn't important.
  */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
+       int ret;
+
        BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
 
        if (!set->nr_hw_queues)
                return -EINVAL;
        if (!set->tags)
                return -ENOMEM;
 
-       set->mq_map = blk_mq_make_queue_map(set);
+       ret = -ENOMEM;
+       set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
+                       GFP_KERNEL, set->numa_node);
        if (!set->mq_map)
                goto out_free_tags;
 
-       if (blk_mq_alloc_rq_maps(set))
+       if (set->ops->map_queues)
+               ret = set->ops->map_queues(set);
+       else
+               ret = blk_mq_map_queues(set);
+       if (ret)
+               goto out_free_mq_map;
+
+       ret = blk_mq_alloc_rq_maps(set);
+       if (ret)
                goto out_free_mq_map;
 
        mutex_init(&set->tag_list_lock);
        INIT_LIST_HEAD(&set->tag_list);
        return 0;

 out_free_mq_map:
        kfree(set->mq_map);
        set->mq_map = NULL;
 out_free_tags:
        kfree(set->tags);
        set->tags = NULL;
-       return -ENOMEM;
+       return ret;
 }
 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
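
For illustration only (all names hypothetical, not part of this patch): a driver that knows its interrupt or hardware topology can now supply its own CPU-to-queue mapping through the new ->map_queues hook, while drivers that leave it NULL keep the blk_mq_map_queues() default above. A minimal sketch of such a callback might look like this:

#include <linux/blk-mq.h>
#include <linux/cpumask.h>

/* Sketch only: spread CPUs round-robin across the hardware queues. */
static int foo_map_queues(struct blk_mq_tag_set *set)
{
	unsigned int cpu;

	/*
	 * set->mq_map[] has nr_cpu_ids entries; each entry holds the
	 * hardware queue index chosen for that CPU.
	 */
	for_each_possible_cpu(cpu)
		set->mq_map[cpu] = cpu % set->nr_hw_queues;
	return 0;
}

Returning 0 reports success; a negative errno (as blk_mq_map_queues() now does with -ENOMEM) makes blk_mq_alloc_tag_set() fail and unwind.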
 
 
 /*
  * CPU -> queue mappings
  */
-extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
-extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
-                                  const struct cpumask *online_mask);
+int blk_mq_map_queues(struct blk_mq_tag_set *set);
 extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 
 typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
                bool);
 typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
 typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
 
 
 struct blk_mq_ops {
        init_request_fn         *init_request;
        exit_request_fn         *exit_request;
        reinit_request_fn       *reinit_request;
+
+       map_queues_fn           *map_queues;
 };
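
With the new member in place, a driver opts in simply by filling the field in its ops table; allocation of mq_map and the fallback to blk_mq_map_queues() are handled inside blk_mq_alloc_tag_set(). Sketch only, reusing the hypothetical names from above:

static struct blk_mq_ops foo_mq_ops = {
	.queue_rq	= foo_queue_rq,		/* hypothetical request handler */
	.map_queues	= foo_map_queues,	/* custom CPU -> hw queue mapping */
};

A driver that omits .map_queues behaves exactly as before the patch.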
 
 enum {