This patch does not change any functionality.
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Philipp Reisner <philipp.reisner@linbit.com>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
-       return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
+       return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
        kblockd_schedule_work(&q->timeout_work);
 }
 
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
+                                          spinlock_t *lock)
 {
        struct request_queue *q;
 
 {
        struct request_queue *q;
 
-       q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+       q = blk_alloc_queue_node(GFP_KERNEL, node_id, NULL);
        if (!q)
                return NULL;
 
 
 {
        struct request_queue *uninit_q, *q;
 
-       uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+       uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
        if (!uninit_q)
                return ERR_PTR(-ENOMEM);
 
 
                }
                null_init_queues(nullb);
        } else if (dev->queue_mode == NULL_Q_BIO) {
-               nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
+               nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node,
+                                               NULL);
                if (!nullb->q) {
                        rv = -ENOMEM;
                        goto out_cleanup_queues;
 
         *      limits and LBA48 we could raise it but as yet
         *      do not.
         */
-       q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif));
+       q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif), NULL);
        if (!q)
                return 1;
 
 
                goto err_dev;
        }
 
-       tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
+       tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
        if (!tqueue) {
                ret = -ENOMEM;
                goto err_disk;
 
        INIT_LIST_HEAD(&md->table_devices);
        spin_lock_init(&md->uevent_lock);
 
-       md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
+       md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL);
        if (!md->queue)
                goto bad;
        md->queue->queuedata = md;
 
                return -EBUSY;
        }
 
-       q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
+       q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL);
        if (!q)
                return -ENOMEM;
 
 
        if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
                return 0;
 
-       q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
+       q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
        if (!q)
                goto out;
        q->queuedata = head;
 
        struct Scsi_Host *shost = sdev->host;
        struct request_queue *q;
 
-       q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
+       q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
        if (!q)
                return NULL;
        q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
 
 
 bool __must_check blk_get_queue(struct request_queue *);
 struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t, int);
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
+                                          spinlock_t *lock);
 extern void blk_put_queue(struct request_queue *);
 extern void blk_set_queue_dying(struct request_queue *);