dev->nullb = NULL;
 }
 
-static void null_config_discard(struct nullb *nullb)
+static void null_config_discard(struct nullb *nullb, struct queue_limits *lim)
 {
        if (nullb->dev->discard == false)
                return;

        if (nullb->dev->zoned) {
                nullb->dev->discard = false;
                pr_info("discard option is ignored in zoned mode\n");
                return;
        }

        if (nullb->dev->memory_backed) {
                nullb->dev->discard = false;
                pr_info("discard option is ignored in memory-backed mode\n");
                return;
        }
 
-       blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
+       lim->max_hw_discard_sectors = UINT_MAX >> 9;
 }
 
 static const struct block_device_operations null_ops = {
 
 static int null_add_dev(struct nullb_device *dev)
 {
+       struct queue_limits lim = {
+               .logical_block_size     = dev->blocksize,
+               .physical_block_size    = dev->blocksize,
+               .max_hw_sectors         = dev->max_sectors,
+       };
+
        struct nullb *nullb;
        int rv;
 
        if (rv)
                goto out_cleanup_queues;
 
-       nullb->disk = blk_mq_alloc_disk(nullb->tag_set, NULL, nullb);
+       if (dev->virt_boundary)
+               lim.virt_boundary_mask = PAGE_SIZE - 1;
+       null_config_discard(nullb, &lim);
+       if (dev->zoned) {
+               rv = null_init_zoned_dev(dev, &lim);
+               if (rv)
+                       goto out_cleanup_tags;
+       }
+
+       nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
        if (IS_ERR(nullb->disk)) {
                rv = PTR_ERR(nullb->disk);
-               goto out_cleanup_tags;
+               goto out_cleanup_zone;
        }
        nullb->q = nullb->disk->queue;
 
                blk_queue_write_cache(nullb->q, true, true);
        }
 
-       if (dev->zoned) {
-               rv = null_init_zoned_dev(dev, nullb->q);
-               if (rv)
-                       goto out_cleanup_disk;
-       }
-
        nullb->q->queuedata = nullb;
        blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
 
        rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
        if (rv < 0) {
                mutex_unlock(&lock);
-               goto out_cleanup_zone;
+               goto out_cleanup_disk;
        }
        nullb->index = rv;
        dev->index = rv;
        mutex_unlock(&lock);
 
-       blk_queue_logical_block_size(nullb->q, dev->blocksize);
-       blk_queue_physical_block_size(nullb->q, dev->blocksize);
-       if (dev->max_sectors)
-               blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
-
-       if (dev->virt_boundary)
-               blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
-
-       null_config_discard(nullb);
-
        if (config_item_name(&dev->group.cg_item)) {
                /* Use configfs dir name as the device name */
                snprintf(nullb->disk_name, sizeof(nullb->disk_name),
 
                              sector_t sector, unsigned int nr_sectors);
 
 #ifdef CONFIG_BLK_DEV_ZONED
-int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
+int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim);
 int null_register_zoned_dev(struct nullb *nullb);
 void null_free_zoned_dev(struct nullb_device *dev);
 int null_report_zones(struct gendisk *disk, sector_t sector,
                        size_t count, enum blk_zone_cond cond);
 #else
 static inline int null_init_zoned_dev(struct nullb_device *dev,
-                                     struct request_queue *q)
+               struct queue_limits *lim)
 {
        pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
        return -EINVAL;
 
                mutex_unlock(&zone->mutex);
 }
 
-int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
+int null_init_zoned_dev(struct nullb_device *dev,
+                       struct queue_limits *lim)
 {
        sector_t dev_capacity_sects, zone_capacity_sects;
        struct nullb_zone *zone;
                sector += dev->zone_size_sects;
        }
 
+       lim->zoned = true;
+       lim->chunk_sectors = dev->zone_size_sects;
+       lim->max_zone_append_sectors = dev->zone_size_sects;
+       lim->max_open_zones = dev->zone_max_open;
+       lim->max_active_zones = dev->zone_max_active;
        return 0;
 }
 
 int null_register_zoned_dev(struct nullb *nullb)
 {
-       struct nullb_device *dev = nullb->dev;
        struct request_queue *q = nullb->q;
 
-       disk_set_zoned(nullb->disk);
        blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
        blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
-       blk_queue_chunk_sectors(q, dev->zone_size_sects);
        nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
-       blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
-       disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
-       disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
-
        return blk_revalidate_disk_zones(nullb->disk, NULL);
 }