return BLK_STS_IOERR;
 
        /* Make sure the BIO is small enough and will not get split */
-       if (nr_sectors > queue_max_zone_append_sectors(q))
+       if (nr_sectors > q->limits.max_zone_append_sectors)
                return BLK_STS_IOERR;
 
        bio->bi_opf |= REQ_NOMERGE;
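
[Note: with the limit pre-calculated at validation time, this hot-path check becomes a plain field read; the query-time helper it used to call is deleted at the end of this patch. REQ_NOMERGE pairs with the no-split guarantee enforced in bio_split_zone_append() below: a zone append must reach the device as a single command so its completion can report the one sector it landed on. A minimal userspace model of the check, with the kernel status codes stubbed out -- illustrative only, not part of the patch:

#include <assert.h>

/* Userspace stand-ins for the kernel's blk_status_t values. */
#define BLK_STS_OK	0
#define BLK_STS_IOERR	10

struct queue_limits {
	unsigned int max_zone_append_sectors;	/* pre-calculated limit */
};

/* Models the check above: oversized appends fail, they are never split. */
static int check_zone_append(const struct queue_limits *l, unsigned int nr)
{
	return nr > l->max_zone_append_sectors ? BLK_STS_IOERR : BLK_STS_OK;
}

int main(void)
{
	struct queue_limits l = { .max_zone_append_sectors = 128 };

	assert(check_zone_append(&l, 128) == BLK_STS_OK);
	assert(check_zone_append(&l, 129) == BLK_STS_IOERR);
	return 0;
}
]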
 
 struct bio *bio_split_zone_append(struct bio *bio,
                const struct queue_limits *lim, unsigned *nr_segs)
 {
-       unsigned int max_sectors = queue_limits_max_zone_append_sectors(lim);
        int split_sectors;
 
        split_sectors = bio_split_rw_at(bio, lim, nr_segs,
-                       max_sectors << SECTOR_SHIFT);
+                       lim->max_zone_append_sectors << SECTOR_SHIFT);
        if (WARN_ON_ONCE(split_sectors > 0))
                split_sectors = -EINVAL;
        return bio_submit_split(bio, split_sectors);
 
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
-       lim->max_zone_append_sectors = UINT_MAX;
+       lim->max_hw_zone_append_sectors = UINT_MAX;
        lim->max_user_discard_sectors = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
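
[Note: for stacking drivers, blk_set_stacking_limits() seeds each limit with its least restrictive value so that blk_stack_limits() (a later hunk) can simply take the min() against every bottom device. UINT_MAX rather than 0 is the right sentinel for the new field because 0 already means "no hardware limit, emulate in the block layer". A small self-contained sketch of the pattern, with min_not_zero() re-created to match the kernel macro's semantics:

#include <assert.h>
#include <limits.h>

#define min_u(a, b)		((a) < (b) ? (a) : (b))
/* Mirrors include/linux/minmax.h: a min() that treats zero as "unset". */
#define min_not_zero(a, b)	((a) == 0 ? (b) : ((b) == 0 ? (a) : min_u(a, b)))

int main(void)
{
	unsigned int top = UINT_MAX;	/* stacking sentinel, as above */

	/* Fold in two bottom devices; the weakest member wins. */
	top = min_u(top, 256u);
	top = min_u(top, 128u);
	assert(top == 128);

	/* 0 must stay meaningful as "unset", hence min_not_zero() where
	 * 0 is a valid driver input. */
	assert(min_not_zero(0u, 128u) == 128);
	return 0;
}
]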
        if (lim->zone_write_granularity < lim->logical_block_size)
                lim->zone_write_granularity = lim->logical_block_size;
 
-       if (lim->max_zone_append_sectors) {
-               /*
-                * The Zone Append size is limited by the maximum I/O size
-                * and the zone size given that it can't span zones.
-                */
-               lim->max_zone_append_sectors =
-                       min3(lim->max_hw_sectors,
-                            lim->max_zone_append_sectors,
-                            lim->chunk_sectors);
-       }
-
+       /*
+        * The Zone Append size is limited by the maximum I/O size and the zone
+        * size given that it can't span zones.
+        *
+        * If no max_hw_zone_append_sectors limit is provided, the block layer
+        * will emulate it; otherwise we're also bound by the hardware limit.
+        */
+       lim->max_zone_append_sectors =
+               min_not_zero(lim->max_hw_zone_append_sectors,
+                       min(lim->chunk_sectors, lim->max_hw_sectors));
        return 0;
 }
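
[Note: the effective limit is now computed once here instead of on every query. A worked example with hypothetical values -- chunk_sectors (the zone size) = 524288, max_hw_sectors = 2048, and a device reporting no zone-append limit of its own (max_hw_zone_append_sectors = 0): min(524288, 2048) = 2048, and min_not_zero(0, 2048) = 2048, which the block layer then emulates. A self-contained check of the arithmetic:

#include <assert.h>

#define min_u(a, b)		((a) < (b) ? (a) : (b))
#define min_not_zero(a, b)	((a) == 0 ? (b) : ((b) == 0 ? (a) : min_u(a, b)))

/* Models the calculation added above. */
static unsigned int effective_zone_append(unsigned int max_hw_zone_append,
					  unsigned int chunk_sectors,
					  unsigned int max_hw_sectors)
{
	return min_not_zero(max_hw_zone_append,
			    min_u(chunk_sectors, max_hw_sectors));
}

int main(void)
{
	/* No hardware limit: bounded by I/O size and zone size, emulated. */
	assert(effective_zone_append(0, 524288, 2048) == 2048);

	/* Hardware limit tighter than both: it wins. */
	assert(effective_zone_append(1024, 524288, 2048) == 1024);

	/* Tiny zones: an append can never span a zone boundary. */
	assert(effective_zone_append(4096, 256, 2048) == 256);
	return 0;
}
]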
 
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                                        b->max_write_zeroes_sectors);
-       t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
-                                        queue_limits_max_zone_append_sectors(b));
+       t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
+                                       b->max_hw_zone_append_sectors);
 
        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
 
 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors)
 
 #define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)                   \
 static ssize_t queue_##_field##_show(struct gendisk *disk, char *page) \
        return ret;
 }
 
-/*
- * For zone append queue_max_zone_append_sectors does not just return the
- * underlying queue limits, but actually contains a calculation.  Because of
- * that we can't simply use QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES here.
- */
-static ssize_t queue_zone_append_max_show(struct gendisk *disk, char *page)
-{
-       return sprintf(page, "%llu\n",
-               (u64)queue_max_zone_append_sectors(disk->queue) <<
-                       SECTOR_SHIFT);
-}
-
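
[Note: the comment deleted above also explains why the deletion is safe: the value no longer "contains a calculation" at show time, so the generic QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES macro (added in the earlier sysfs hunk) can report it like any other limit. The file stays in bytes, i.e. the sectors value shifted by SECTOR_SHIFT (9, for 512-byte sectors):

#include <assert.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the kernel */

int main(void)
{
	unsigned long long max_zone_append_sectors = 2048;

	/* zone_append_max_bytes as sysfs would report it: 1 MiB */
	assert((max_zone_append_sectors << SECTOR_SHIFT) == 1048576ULL);
	return 0;
}
]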
 static ssize_t
 queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count)
 {
 
 QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
 QUEUE_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
-QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
+QUEUE_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
 QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
 
 QUEUE_RO_ENTRY(queue_zoned, "zoned");
        &queue_atomic_write_unit_max_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_max_write_zeroes_sectors_entry.attr,
-       &queue_zone_append_max_entry.attr,
+       &queue_max_zone_append_sectors_entry.attr,
        &queue_zone_write_granularity_entry.attr,
        &queue_rotational_entry.attr,
        &queue_zoned_entry.attr,
 
 
        lim->features |= BLK_FEAT_ZONED;
        lim->chunk_sectors = dev->zone_size_sects;
-       lim->max_zone_append_sectors = dev->zone_append_max_sectors;
+       lim->max_hw_zone_append_sectors = dev->zone_append_max_sectors;
        lim->max_open_zones = dev->zone_max_open;
        lim->max_active_zones = dev->zone_max_active;
        return 0;
 
                lim.features |= BLK_FEAT_ZONED;
                lim.max_active_zones = p->max_active_zones;
                lim.max_open_zones = p->max_open_zones;
-               lim.max_zone_append_sectors = p->max_zone_append_sectors;
+               lim.max_hw_zone_append_sectors = p->max_zone_append_sectors;
        }
 
        if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) {
 
                        wg, v);
                return -ENODEV;
        }
-       lim->max_zone_append_sectors = v;
+       lim->max_hw_zone_append_sectors = v;
        dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
 
        return 0;
 
                clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
        } else {
                set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
-               lim->max_zone_append_sectors = 0;
+               lim->max_hw_zone_append_sectors = 0;
        }
 
        /*
        if (!zlim.mapped_nr_seq_zones) {
                lim->max_open_zones = 0;
                lim->max_active_zones = 0;
-               lim->max_zone_append_sectors = 0;
+               lim->max_hw_zone_append_sectors = 0;
                lim->zone_write_granularity = 0;
                lim->chunk_sectors = 0;
                lim->features &= ~BLK_FEAT_ZONED;
 
        if (head->ids.csi == NVME_CSI_ZNS)
                lim.features |= BLK_FEAT_ZONED;
        else
-               lim.max_zone_append_sectors = 0;
+               lim.max_hw_zone_append_sectors = 0;
 
        head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
        if (IS_ERR(head->disk))
 
        lim->features |= BLK_FEAT_ZONED;
        lim->max_open_zones = zi->max_open_zones;
        lim->max_active_zones = zi->max_active_zones;
-       lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
+       lim->max_hw_zone_append_sectors = ns->ctrl->max_zone_append;
        lim->chunk_sectors = ns->head->zsze =
                nvme_lba_to_sect(ns->head, zi->zone_size);
 }
 
                lim->max_open_zones = sdkp->zones_max_open;
        lim->max_active_zones = 0;
        lim->chunk_sectors = logical_to_sectors(sdkp->device, zone_blocks);
-       /* Enable block layer zone append emulation */
-       lim->max_zone_append_sectors = 0;
 
        return 0;
 
 
        unsigned int            max_user_discard_sectors;
        unsigned int            max_secure_erase_sectors;
        unsigned int            max_write_zeroes_sectors;
+       unsigned int            max_hw_zone_append_sectors;
        unsigned int            max_zone_append_sectors;
        unsigned int            discard_granularity;
        unsigned int            discard_alignment;
        return q->limits.max_segment_size;
 }
 
-static inline unsigned int
-queue_limits_max_zone_append_sectors(const struct queue_limits *l)
-{
-       unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors);
-
-       return min_not_zero(l->max_zone_append_sectors, max_sectors);
-}
-
-static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q)
-{
-       if (!blk_queue_is_zoned(q))
-               return 0;
-
-       return queue_limits_max_zone_append_sectors(&q->limits);
-}
-
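
[Note: these removed helpers were the per-query calculation every reader used to pay for. The same arithmetic now runs once in blk_validate_zoned_limits() (the min_not_zero() hunk earlier) and lands in the pre-calculated max_zone_append_sectors field, while max_hw_zone_append_sectors keeps the raw device value. The two schemes agree, as this self-contained comparison shows (field names follow the patch; the helpers are reproduced for illustration):

#include <assert.h>

#define min_u(a, b)		((a) < (b) ? (a) : (b))
#define min_not_zero(a, b)	((a) == 0 ? (b) : ((b) == 0 ? (a) : min_u(a, b)))

struct limits {
	unsigned int chunk_sectors;
	unsigned int max_hw_sectors;
	unsigned int max_hw_zone_append_sectors;	/* raw device value */
	unsigned int max_zone_append_sectors;		/* pre-calculated */
};

/* The deleted query-time helper, with the driver-set field under its
 * new max_hw_ name. */
static unsigned int old_query(const struct limits *l)
{
	unsigned int max = min_u(l->chunk_sectors, l->max_hw_sectors);

	return min_not_zero(l->max_hw_zone_append_sectors, max);
}

/* The new scheme: do the work once at validation time. */
static void validate(struct limits *l)
{
	l->max_zone_append_sectors =
		min_not_zero(l->max_hw_zone_append_sectors,
			     min_u(l->chunk_sectors, l->max_hw_sectors));
}

int main(void)
{
	struct limits l = {
		.chunk_sectors = 524288,
		.max_hw_sectors = 2048,
		.max_hw_zone_append_sectors = 1024,
	};

	validate(&l);
	assert(l.max_zone_append_sectors == 1024);
	assert(l.max_zone_append_sectors == old_query(&l));
	return 0;
}
]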
 static inline bool queue_emulates_zone_append(struct request_queue *q)
 {
-       return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors;
+       return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
 }
 
 static inline bool bdev_emulates_zone_append(struct block_device *bdev)
 static inline unsigned int
 bdev_max_zone_append_sectors(struct block_device *bdev)
 {
-       return queue_max_zone_append_sectors(bdev_get_queue(bdev));
+       return bdev_limits(bdev)->max_zone_append_sectors;
 }
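
[Note: bdev_max_zone_append_sectors() is now a single load through the bdev_limits() helper. An illustrative kernel-style fragment of a consumer -- hypothetical and not part of this patch; bdev_is_zoned() and min() are real kernel APIs:

	/* Hypothetical caller, for illustration only. */
	if (!bdev_is_zoned(bdev))
		return -EOPNOTSUPP;

	/* Cap each REQ_OP_ZONE_APPEND payload to the advertised limit;
	 * such bios are never split, so an oversized one would fail. */
	nr_sectors = min(nr_sectors, bdev_max_zone_append_sectors(bdev));
]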
 
 static inline unsigned int bdev_max_segments(struct block_device *bdev)