bio->bi_end_io = end_compressed_bio_write;
 
        if (use_append) {
-               struct extent_map *em;
-               struct map_lookup *map;
-               struct block_device *bdev;
+               struct btrfs_device *device;
 
-               em = btrfs_get_chunk_map(fs_info, disk_start, PAGE_SIZE);
-               if (IS_ERR(em)) {
+               device = btrfs_zoned_get_device(fs_info, disk_start, PAGE_SIZE);
+               if (IS_ERR(device)) {
                        kfree(cb);
                        bio_put(bio);
                        return BLK_STS_NOTSUPP;
                }
 
-               map = em->map_lookup;
-               /* We only support single profile for now */
-               ASSERT(map->num_stripes == 1);
-               bdev = map->stripes[0].dev->bdev;
-
-               bio_set_dev(bio, bdev);
-               free_extent_map(em);
+               bio_set_dev(bio, device->bdev);
        }
 
        if (blkcg_css) {
 
                wbc_account_cgroup_owner(wbc, page, io_size);
        }
        if (btrfs_is_zoned(fs_info) && bio_op(bio) == REQ_OP_ZONE_APPEND) {
-               struct extent_map *em;
-               struct map_lookup *map;
+               struct btrfs_device *device;
 
-               em = btrfs_get_chunk_map(fs_info, disk_bytenr, io_size);
-               if (IS_ERR(em))
-                       return PTR_ERR(em);
+               device = btrfs_zoned_get_device(fs_info, disk_bytenr, io_size);
+               if (IS_ERR(device))
+                       return PTR_ERR(device);
 
-               map = em->map_lookup;
-               /* We only support single profile for now */
-               ASSERT(map->num_stripes == 1);
-               btrfs_io_bio(bio)->device = map->stripes[0].dev;
-
-               free_extent_map(em);
+               btrfs_io_bio(bio)->device = device;
        }
 
        *bio_ret = bio;
 
        length = wp - physical_pos;
        return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
 }
+
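+/*
+ * Return the device backing the chunk that covers the given logical range.
+ *
+ * Only single-profile chunks (a single stripe) are supported, so the device
+ * of the first and only stripe is returned. Chunk map lookup errors are
+ * propagated, so callers must check the result with IS_ERR(). No extra
+ * reference is taken on the returned device; the extent map is released
+ * before returning.
+ */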
+struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
+                                           u64 logical, u64 length)
+{
+       struct btrfs_device *device;
+       struct extent_map *em;
+       struct map_lookup *map;
+
+       em = btrfs_get_chunk_map(fs_info, logical, length);
+       if (IS_ERR(em))
+               return ERR_CAST(em);
+
+       map = em->map_lookup;
+       /* We only support single profile for now */
+       ASSERT(map->num_stripes == 1);
+       device = map->stripes[0].dev;
+
+       free_extent_map(em);
+
+       return device;
+}
 
 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
 int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
                                  u64 physical_start, u64 physical_pos);
+struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
+                                           u64 logical, u64 length);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
                                     struct blk_zone *zone)
{
        return -EOPNOTSUPP;
 }
 
+static inline struct btrfs_device *btrfs_zoned_get_device(
+                                                 struct btrfs_fs_info *fs_info,
+                                                 u64 logical, u64 length)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
 #endif
 
 static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)