        ns->lba_shift = id->lbaf[lbaf].ds;
        nvme_set_queue_limits(ns->ctrl, ns->queue);
 
+       ret = nvme_configure_metadata(ns, id);
+       if (ret)
+               goto out_unfreeze;
+       nvme_set_chunk_sectors(ns, id);
+       nvme_update_disk_info(ns->disk, ns, id);
+
        if (ns->head->ids.csi == NVME_CSI_ZNS) {
                ret = nvme_update_zone_info(ns, lbaf);
                if (ret)
                        goto out_unfreeze;
        }
 
-       ret = nvme_configure_metadata(ns, id);
-       if (ret)
-               goto out_unfreeze;
-       nvme_set_chunk_sectors(ns, id);
-       nvme_update_disk_info(ns->disk, ns, id);
        blk_mq_unfreeze_queue(ns->disk->queue);
 
        if (blk_queue_is_zoned(ns->queue)) {
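The reordering in this hunk is what enables the zns.c changes below: nvme_update_zone_info() will now set the zone-append queue limit itself, and that limit is derived from queue limits (max_hw_sectors, chunk_sectors) that are expected to be configured by the time it runs, which is what moving nvme_configure_metadata(), nvme_set_chunk_sectors() and nvme_update_disk_info() ahead of the ZNS branch arranges. A condensed sketch of the block-layer helper as I read it around this kernel version, not a verbatim copy of block/blk-settings.c (the real helper also warns when the resulting limit ends up zero):

/* Sketch of blk_queue_max_zone_append_sectors() behavior. */
void blk_queue_max_zone_append_sectors(struct request_queue *q,
                unsigned int max_zone_append_sectors)
{
        unsigned int max_sectors;

        /* The queue must already be marked zoned, which is why the
         * blk_queue_set_zoned() call comes first in the last hunk below. */
        if (WARN_ON(!blk_queue_is_zoned(q)))
                return;

        /* Clamp against queue limits configured earlier; if they were not
         * set yet, the append limit would be computed from stale values. */
        max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
        max_sectors = min(q->limits.chunk_sectors, max_sectors);

        q->limits.max_zone_append_sectors = max_sectors;
}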
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
@@ ... @@
 
 int nvme_revalidate_zones(struct nvme_ns *ns)
 {
-       struct request_queue *q = ns->queue;
-       int ret;
-
-       ret = blk_revalidate_disk_zones(ns->disk, NULL);
-       if (!ret)
-               blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
-       return ret;
+       return blk_revalidate_disk_zones(ns->disk, NULL);
 }
 
 static int nvme_set_max_append(struct nvme_ctrl *ctrl)
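With the append limit handled at namespace update time instead, zone revalidation loses its side effect on the queue limits; after this hunk the whole function reduces to the single call visible above:

int nvme_revalidate_zones(struct nvme_ns *ns)
{
        return blk_revalidate_disk_zones(ns->disk, NULL);
}

That looks like the right layering: the limit comes from ns->ctrl->max_zone_append, a controller property, so it belongs with the other zoned queue limits set up in nvme_update_zone_info() rather than being re-applied on every revalidation.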
@@ ... @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
        goto free_data;
        }
 
-       q->limits.zoned = BLK_ZONED_HM;
+       blk_queue_set_zoned(ns->disk, BLK_ZONED_HM);
        blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
        blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1);
        blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1);
+       blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
 free_data:
        kfree(id);
        return status;
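
Two things change in this final nvme_update_zone_info() hunk: the zoned model is set through blk_queue_set_zoned() instead of poking q->limits.zoned directly, and the zone-append limit moves here from nvme_revalidate_zones(). As far as I recall the helper from this era (a sketch under that assumption, details may differ), it mainly adds sanity checking over the open-coded store:

/* Sketch of blk_queue_set_zoned(), host-managed case only. */
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
        /* A host-managed model only makes sense when zoned block device
         * support is compiled in. */
        if (model == BLK_ZONED_HM)
                WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));

        disk->queue->limits.zoned = model;
}

By the time the added blk_queue_max_zone_append_sectors() call runs, the zoned model and the namespace queue limits are all in place, which is exactly what the reordering in the core.c hunk at the top arranges.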