 #include <linux/blkdev.h>
 #include <linux/sched/mm.h>
 #include <linux/atomic.h>
+#include <linux/vmalloc.h>
 #include "ctree.h"
 #include "volumes.h"
 #include "zoned.h"
 static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
                               struct blk_zone *zones, unsigned int *nr_zones)
 {
+       struct btrfs_zoned_device_info *zinfo = device->zone_info;
+       u32 zno;
        int ret;
 
        if (!*nr_zones)
                return 0;
 
+       /* Check cache */
+       if (zinfo->zone_cache) {
+               unsigned int i;
+
+               ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
+               zno = pos >> zinfo->zone_size_shift;
+               /*
+                * We cannot report zones beyond the zone end, so it is OK to
+                * cap *nr_zones at the end.
+                */
+               *nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);
+
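+               /*
+                * Fall back to a real zone report if any zone in the range is
+                * not in the cache yet (a zero length marks an unset entry).
+                */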
+               for (i = 0; i < *nr_zones; i++) {
+                       struct blk_zone *zone_info;
+
+                       zone_info = &zinfo->zone_cache[zno + i];
+                       if (!zone_info->len)
+                               break;
+               }
+
+               if (i == *nr_zones) {
+                       /* Cache hit on all the zones */
+                       memcpy(zones, zinfo->zone_cache + zno,
+                              sizeof(*zinfo->zone_cache) * *nr_zones);
+                       return 0;
+               }
+       }
+
        ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
                                  copy_zone_info_cb, zones);
        if (ret < 0)
                return ret;
        *nr_zones = ret;
        if (!ret)
                return -EIO;
 
+       /* Populate cache */
+       if (zinfo->zone_cache)
+               memcpy(zinfo->zone_cache + zno, zones,
+                      sizeof(*zinfo->zone_cache) * *nr_zones);
+
        return 0;
 }
 
                if (!device->bdev)
                        continue;
 
-               ret = btrfs_get_dev_zone_info(device);
+               ret = btrfs_get_dev_zone_info(device, true);
                if (ret)
                        break;
        }
        return ret;
 }
 
-int btrfs_get_dev_zone_info(struct btrfs_device *device)
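+/*
+ * Read zone information for @device. When @populate_cache is true and the
+ * device is zoned, also allocate a zone cache so that subsequent zone
+ * reports for the device can be answered from memory.
+ */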
+int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
 {
        struct btrfs_fs_info *fs_info = device->fs_info;
        struct btrfs_zoned_device_info *zone_info = NULL;
        zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
        if (!zone_info)
                return -ENOMEM;
 
+       device->zone_info = zone_info;
+
        if (!bdev_is_zoned(bdev)) {
                if (!fs_info->zone_size) {
                        ret = calculate_emulated_zone_size(fs_info);
                        if (ret)
                                goto out;
                }
        }
 
+       /*
+        * Enable zone cache only for a zoned device. On a non-zoned device, we
+        * fill the zone info with emulated CONVENTIONAL zones, so no need to
+        * use the cache.
+        */
+       if (populate_cache && bdev_is_zoned(device->bdev)) {
+               zone_info->zone_cache = vzalloc(sizeof(struct blk_zone) *
+                                               zone_info->nr_zones);
+               if (!zone_info->zone_cache) {
+                       btrfs_err_in_rcu(device->fs_info,
+                               "zoned: failed to allocate zone cache for %s",
+                               rcu_str_deref(device->name));
+                       ret = -ENOMEM;
+                       goto out;
+               }
+       }
+
        /* Get zones type */
        nactive = 0;
        while (sector < nr_sectors) {
 
        kfree(zones);
 
-       device->zone_info = zone_info;
-
        switch (bdev_zoned_model(bdev)) {
        case BLK_ZONED_HM:
                model = "host-managed zoned";
 out:
        kfree(zones);
 out_free_zone_info:
-       bitmap_free(zone_info->active_zones);
-       bitmap_free(zone_info->empty_zones);
-       bitmap_free(zone_info->seq_zones);
-       kfree(zone_info);
-       device->zone_info = NULL;
+       btrfs_destroy_dev_zone_info(device);
 
        return ret;
 }
        bitmap_free(zone_info->active_zones);
        bitmap_free(zone_info->seq_zones);
        bitmap_free(zone_info->empty_zones);
+       vfree(zone_info->zone_cache);
        kfree(zone_info);
        device->zone_info = NULL;
 }
                fs_info->data_reloc_bg = 0;
        spin_unlock(&fs_info->relocation_bg_lock);
 }
+
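+/*
+ * Free the zone caches populated while reporting zones, typically once the
+ * mount-time zone scan is complete and the cached data is no longer needed.
+ */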
+void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+       struct btrfs_device *device;
+
+       if (!btrfs_is_zoned(fs_info))
+               return;
+
+       mutex_lock(&fs_devices->device_list_mutex);
+       list_for_each_entry(device, &fs_devices->devices, dev_list) {
+               if (device->zone_info) {
+                       vfree(device->zone_info->zone_cache);
+                       device->zone_info->zone_cache = NULL;
+               }
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+}
 
        unsigned long *seq_zones;
        unsigned long *empty_zones;
        unsigned long *active_zones;
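+       /*
+        * Cache of zone descriptors indexed by zone number; an entry with a
+        * zero length has not been cached yet.
+        */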
+       struct blk_zone *zone_cache;
        struct blk_zone sb_zones[2 * BTRFS_SUPER_MIRROR_MAX];
 };
 
 int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
                       struct blk_zone *zone);
 int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info);
-int btrfs_get_dev_zone_info(struct btrfs_device *device);
+int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache);
 void btrfs_destroy_dev_zone_info(struct btrfs_device *device);
 int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
 int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info);
 void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
                             u64 length);
 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg);
+void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
                                     struct blk_zone *zone)
        return 0;
 }
 
-static inline int btrfs_get_dev_zone_info(struct btrfs_device *device)
+static inline int btrfs_get_dev_zone_info(struct btrfs_device *device,
+                                         bool populate_cache)
 {
        return 0;
 }
 
 static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { }
 
+static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { }
 #endif
 
 static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)