uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
           block-rsv.o delalloc-space.o block-group.o discard.o reflink.o \
           subpage.o tree-mod-log.o extent-io-tree.o fs.o messages.o bio.o \
-          lru_cache.o
+          lru_cache.o raid-stripe-tree.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_REF_VERIFY) += ref-verify.o
 
 #include "rcu-string.h"
 #include "zoned.h"
 #include "file-item.h"
+#include "raid-stripe-tree.h"
 
 static struct bio_set btrfs_bioset;
 static struct bio_set btrfs_clone_bioset;
        else
                bio->bi_status = BLK_STS_OK;
 
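+       /*
+        * Zone append bios report the sector that was actually written in
+        * bi_sector at completion, so record it as the stripe's physical
+        * location.
+        */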
+       if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
+               stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+
        btrfs_orig_bbio_end_io(bbio);
        btrfs_put_bioc(bioc);
 }
        if (bio->bi_status) {
                atomic_inc(&stripe->bioc->error);
                btrfs_log_dev_io_error(bio, stripe->dev);
+       } else if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+               stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
        }
 
        /* Pass on control to the original bio this one was cloned from */
        bio->bi_private = &bioc->stripes[dev_nr];
        bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
        bioc->stripes[dev_nr].bioc = bioc;
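+       /* Record the write size; it becomes the RAID stripe extent length. */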
+       bioc->size = bio->bi_iter.bi_size;
        btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
 }
 
        if (!bioc) {
                /* Single mirror read/write fast path. */
                btrfs_bio(bio)->mirror_num = mirror_num;
                bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;
+               if (bio_op(bio) != REQ_OP_READ)
+                       btrfs_bio(bio)->orig_physical = smap->physical;
                        bio->bi_opf |= REQ_OP_ZONE_APPEND;
                }
 
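+               /*
+                * Queue this mapping on the ordered extent so the RAID stripe
+                * tree entries can be inserted once the write completes.
+                */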
+               if (is_data_bbio(bbio) && bioc &&
+                   btrfs_need_stripe_tree_update(bioc->fs_info, bioc->map_type)) {
+                       /*
+                        * No locking for the list update, as we only add to
+                        * the list in the I/O submission path, and list
+                        * iteration only happens in the completion path, which
+                        * can't happen until after the last submission.
+                        */
+                       btrfs_get_bioc(bioc);
+                       list_add_tail(&bioc->rst_ordered_entry, &bbio->ordered->bioc_list);
+               }
+
                /*
                 * Csum items for reloc roots have already been cloned at this
                 * point, so they are handled as part of the no-checksum case.
 
 #include "file-item.h"
 #include "orphan.h"
 #include "tree-checker.h"
+#include "raid-stripe-tree.h"
 
 #undef SCRAMBLE_DELAYED_REFS
 
 
 #include "super.h"
 #include "orphan.h"
 #include "backref.h"
+#include "raid-stripe-tree.h"
 
 struct btrfs_iget_args {
        u64 ino;
 
        trans->block_rsv = &inode->block_rsv;
 
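+       /* Insert any RAID stripe extents queued for this ordered extent. */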
+       ret = btrfs_insert_raid_extent(trans, ordered_extent);
+       if (ret)
+               goto out;
+
        if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
                compress_type = ordered_extent->compress_type;
        if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
 {
        if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) &&
-           !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
+           !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
+           list_empty(&ordered->bioc_list))
                btrfs_finish_ordered_zoned(ordered);
        return btrfs_finish_one_ordered(ordered);
 }
 
        INIT_LIST_HEAD(&entry->log_list);
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
+       INIT_LIST_HEAD(&entry->bioc_list);
        init_completion(&entry->completion);
 
        /*
 
        struct completion completion;
        struct btrfs_work flush_work;
        struct list_head work_list;
+
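+       /* List of btrfs_io_contexts pending RAID stripe tree insertion. */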
+       struct list_head bioc_list;
 };
 
 static inline void
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/btrfs_tree.h>
+#include "ctree.h"
+#include "fs.h"
+#include "accessors.h"
+#include "transaction.h"
+#include "disk-io.h"
+#include "raid-stripe-tree.h"
+#include "volumes.h"
+#include "misc.h"
+#include "print-tree.h"
+
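+/*
+ * Insert one RAID_STRIPE item for @bioc, keyed by the logical address and
+ * length of the extent, with one stride per stripe recording the device and
+ * physical location of the data.
+ */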
+static int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
+                                       struct btrfs_io_context *bioc)
+{
+       struct btrfs_fs_info *fs_info = trans->fs_info;
+       struct btrfs_key stripe_key;
+       struct btrfs_root *stripe_root = fs_info->stripe_root;
+       const int num_stripes = btrfs_bg_type_to_factor(bioc->map_type);
+       u8 encoding = btrfs_bg_flags_to_raid_index(bioc->map_type);
+       struct btrfs_stripe_extent *stripe_extent;
+       const size_t item_size = struct_size(stripe_extent, strides, num_stripes);
+       int ret;
+
+       stripe_extent = kzalloc(item_size, GFP_NOFS);
+       if (!stripe_extent) {
+               btrfs_abort_transaction(trans, -ENOMEM);
+               btrfs_end_transaction(trans);
+               return -ENOMEM;
+       }
+
+       btrfs_set_stack_stripe_extent_encoding(stripe_extent, encoding);
+       for (int i = 0; i < num_stripes; i++) {
+               u64 devid = bioc->stripes[i].dev->devid;
+               u64 physical = bioc->stripes[i].physical;
+               u64 length = bioc->stripes[i].length;
+               struct btrfs_raid_stride *raid_stride = &stripe_extent->strides[i];
+
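+               /* A zero stripe length means the stride covers the whole extent. */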
+               if (length == 0)
+                       length = bioc->size;
+
+               btrfs_set_stack_raid_stride_devid(raid_stride, devid);
+               btrfs_set_stack_raid_stride_physical(raid_stride, physical);
+               btrfs_set_stack_raid_stride_length(raid_stride, length);
+       }
+
+       stripe_key.objectid = bioc->logical;
+       stripe_key.type = BTRFS_RAID_STRIPE_KEY;
+       stripe_key.offset = bioc->size;
+
+       ret = btrfs_insert_item(trans, stripe_root, &stripe_key, stripe_extent,
+                               item_size);
+       if (ret)
+               btrfs_abort_transaction(trans, ret);
+
+       kfree(stripe_extent);
+
+       return ret;
+}
+
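+/*
+ * Insert RAID stripe extents for all bios queued on @ordered_extent and drop
+ * the bioc references taken at submission time.
+ */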
+int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans,
+                            struct btrfs_ordered_extent *ordered_extent)
+{
+       struct btrfs_io_context *bioc;
+       int ret = 0;
+
+       if (!btrfs_fs_incompat(trans->fs_info, RAID_STRIPE_TREE))
+               return 0;
+
+       list_for_each_entry(bioc, &ordered_extent->bioc_list, rst_ordered_entry) {
+               ret = btrfs_insert_one_raid_extent(trans, bioc);
+               if (ret)
+                       return ret;
+       }
+
+       while (!list_empty(&ordered_extent->bioc_list)) {
+               bioc = list_first_entry(&ordered_extent->bioc_list,
+                                       typeof(*bioc), rst_ordered_entry);
+               list_del(&bioc->rst_ordered_entry);
+               btrfs_put_bioc(bioc);
+       }
+
+       return ret;
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef BTRFS_RAID_STRIPE_TREE_H
+#define BTRFS_RAID_STRIPE_TREE_H
+
+struct btrfs_io_context;
+struct btrfs_io_stripe;
+struct btrfs_ordered_extent;
+struct btrfs_trans_handle;
+
+int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans,
+                            struct btrfs_ordered_extent *ordered_extent);
+
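+/*
+ * RAID stripe tree updates are only needed for data block groups using a
+ * RAID1-type profile on a filesystem with the RAID_STRIPE_TREE incompat bit.
+ */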
+static inline bool btrfs_need_stripe_tree_update(struct btrfs_fs_info *fs_info,
+                                                u64 map_type)
+{
+       u64 type = map_type & BTRFS_BLOCK_GROUP_TYPE_MASK;
+       u64 profile = map_type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+       if (!btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE))
+               return false;
+
+       if (type != BTRFS_BLOCK_GROUP_DATA)
+               return false;
+
+       if (profile & BTRFS_BLOCK_GROUP_RAID1_MASK)
+               return true;
+
+       return false;
+}
+
+#endif
 
 }
 
 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
+                                                      u64 logical,
                                                       u16 total_stripes)
 {
        struct btrfs_io_context *bioc;
        bioc->fs_info = fs_info;
        bioc->replace_stripe_src = -1;
        bioc->full_stripe_logical = (u64)-1;
+       bioc->logical = logical;
 
        return bioc;
 }
                goto out;
        }
 
-       bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes);
+       bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes);
        if (!bioc) {
                ret = -ENOMEM;
                goto out;
 
 
 struct btrfs_io_stripe {
        struct btrfs_device *dev;
-       union {
-               /* Block mapping */
-               u64 physical;
-               /* For the endio handler */
-               struct btrfs_io_context *bioc;
-       };
+       /* Block mapping. */
+       u64 physical;
+       u64 length;
+       /* For the endio handler. */
+       struct btrfs_io_context *bioc;
 };
 
 struct btrfs_discard_stripe {
        atomic_t error;
        u16 max_errors;
 
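+       /*
+        * Logical address and size of this I/O, used to build the RAID
+        * stripe tree entry at ordered extent completion.
+        */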
+       u64 logical;
+       u64 size;
+       /* Raid stripe tree ordered entry. */
+       struct list_head rst_ordered_entry;
+
        /*
         * The total number of stripes, including the extra duplicated
         * stripe for replace.