#include "delalloc-space.h"
 #include "discard.h"
 #include "raid56.h"
+#include "zoned.h"
 
 /*
  * Return target flags in extended format or 0 if restripe for this chunk_type
                        goto error;
        }
 
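+       /* Load zone allocation info for the block group on zoned filesystems */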
+       ret = btrfs_load_block_group_zone_info(cache);
+       if (ret) {
+               btrfs_err(info, "zoned: failed to load zone info of bg %llu",
+                         cache->start);
+               goto error;
+       }
+
        /*
         * We need to exclude the super stripes now so that the space info has
         * super bytes accounted for, otherwise we'll think we have more space
        cache->cached = BTRFS_CACHE_FINISHED;
        if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
                cache->needs_free_space = 1;
+
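+       /* Set up the zone allocation state of the newly created block group */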
+       ret = btrfs_load_block_group_zone_info(cache);
+       if (ret) {
+               btrfs_put_block_group(cache);
+               return ret;
+       }
+
        ret = exclude_super_stripes(cache);
        if (ret) {
                /* We may have excluded something, so call this just in case */
 
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "volumes.h"
 #include "zoned.h"
 #include "rcu-string.h"
 #include "disk-io.h"
+#include "block-group.h"
 
 /* Maximum number of zones to report per blkdev_report_zones() call */
 #define BTRFS_REPORT_NR_ZONES   4096
+/* Invalid allocation pointer value for missing devices */
+#define WP_MISSING_DEV ((u64)-1)
+/* Pseudo write pointer value for conventional zones */
+#define WP_CONVENTIONAL ((u64)-2)
 
 /* Number of superblock log zones */
 #define BTRFS_NR_SB_LOG_ZONES 2
 
        return 0;
 }
+
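+/*
+ * Determine the allocation offset of a block group by reading the write
+ * pointers of the device zones backing its stripes.
+ */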
+int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
+{
+       struct btrfs_fs_info *fs_info = cache->fs_info;
+       struct extent_map_tree *em_tree = &fs_info->mapping_tree;
+       struct extent_map *em;
+       struct map_lookup *map;
+       struct btrfs_device *device;
+       u64 logical = cache->start;
+       u64 length = cache->length;
+       u64 physical = 0;
+       int ret = 0;
+       int i;
+       unsigned int nofs_flag;
+       u64 *alloc_offsets = NULL;
+       u32 num_sequential = 0, num_conventional = 0;
+
+       if (!btrfs_is_zoned(fs_info))
+               return 0;
+
+       /* Sanity check */
+       if (!IS_ALIGNED(length, fs_info->zone_size)) {
+               btrfs_err(fs_info,
+               "zoned: block group %llu len %llu unaligned to zone size %llu",
+                         logical, length, fs_info->zone_size);
+               return -EIO;
+       }
+
+       /* Get the chunk mapping */
+       read_lock(&em_tree->lock);
+       em = lookup_extent_mapping(em_tree, logical, length);
+       read_unlock(&em_tree->lock);
+
+       if (!em)
+               return -EINVAL;
+
+       map = em->map_lookup;
+
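+       /* Track one allocation offset per stripe of the chunk */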
+       alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets),
+                               GFP_NOFS);
+       if (!alloc_offsets) {
+               free_extent_map(em);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < map->num_stripes; i++) {
+               bool is_sequential;
+               struct blk_zone zone;
+
+               device = map->stripes[i].dev;
+               physical = map->stripes[i].physical;
+
+               if (!device->bdev) {
+                       alloc_offsets[i] = WP_MISSING_DEV;
+                       continue;
+               }
+
+               is_sequential = btrfs_dev_is_sequential(device, physical);
+               if (is_sequential) {
+                       num_sequential++;
+               } else {
+                       num_conventional++;
+                       alloc_offsets[i] = WP_CONVENTIONAL;
+                       continue;
+               }
+
+               /*
+                * This zone will be used for allocation, so mark this zone
+                * non-empty.
+                */
+               btrfs_dev_clear_zone_empty(device, physical);
+
+               /*
+                * The group is mapped to a sequential zone. Get the zone write
+                * pointer to determine the allocation offset within the zone.
+                */
+               WARN_ON(!IS_ALIGNED(physical, fs_info->zone_size));
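+               /*
+                * Zone reporting may allocate memory; use the NOFS scope so
+                * that reclaim cannot recurse back into the filesystem.
+                */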
+               nofs_flag = memalloc_nofs_save();
+               ret = btrfs_get_dev_zone(device, physical, &zone);
+               memalloc_nofs_restore(nofs_flag);
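+               /*
+                * Treat a failed zone report like a missing device so the
+                * remaining stripes can still be examined; any other error
+                * aborts loading the block group.
+                */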
+               if (ret == -EIO || ret == -EOPNOTSUPP) {
+                       ret = 0;
+                       alloc_offsets[i] = WP_MISSING_DEV;
+                       continue;
+               } else if (ret) {
+                       goto out;
+               }
+
+               switch (zone.cond) {
+               case BLK_ZONE_COND_OFFLINE:
+               case BLK_ZONE_COND_READONLY:
+                       btrfs_err(fs_info,
+               "zoned: offline/readonly zone %llu on device %s (devid %llu)",
+                                 physical >> device->zone_info->zone_size_shift,
+                                 rcu_str_deref(device->name), device->devid);
+                       alloc_offsets[i] = WP_MISSING_DEV;
+                       break;
+               case BLK_ZONE_COND_EMPTY:
+                       alloc_offsets[i] = 0;
+                       break;
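+               /* A full zone has its allocation offset at the zone end */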
+               case BLK_ZONE_COND_FULL:
+                       alloc_offsets[i] = fs_info->zone_size;
+                       break;
+               default:
+                       /* Partially used zone */
+                       alloc_offsets[i] = (zone.wp - zone.start) << SECTOR_SHIFT;
+                       break;
+               }
+       }
+
+       if (num_conventional > 0) {
+               /*
+                * Conventional zones do not maintain a write pointer, so the
+                * allocation offset cannot be determined from the device.
+                * Reject such block groups for now.
+                */
+               ret = -EINVAL;
+               goto out;
+       }
+
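+       /*
+        * Combine the per-stripe allocation offsets into a single offset for
+        * the block group; only the single profile is supported so far.
+        */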
+       switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+       case 0: /* single */
+               cache->alloc_offset = alloc_offsets[0];
+               break;
+       case BTRFS_BLOCK_GROUP_DUP:
+       case BTRFS_BLOCK_GROUP_RAID1:
+       case BTRFS_BLOCK_GROUP_RAID0:
+       case BTRFS_BLOCK_GROUP_RAID10:
+       case BTRFS_BLOCK_GROUP_RAID5:
+       case BTRFS_BLOCK_GROUP_RAID6:
+               /* non-single profiles are not supported yet */
+       default:
+               btrfs_err(fs_info, "zoned: profile %s not yet supported",
+                         btrfs_bg_type_to_raid_name(map->type));
+               ret = -EINVAL;
+               goto out;
+       }
+
+out:
+       kfree(alloc_offsets);
+       free_extent_map(em);
+
+       return ret;
+}