}
 
 /*
- * Find extent buffer for a givne bytenr.
+ * Find extent buffer for a given bytenr.
  *
  * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
  * in endio context.
                return (struct extent_buffer *)page->private;
        }
 
-       /* For subpage case, we need to lookup buffer radix tree */
-       rcu_read_lock();
-       eb = radix_tree_lookup(&fs_info->buffer_radix,
-                              bytenr >> fs_info->sectorsize_bits);
-       rcu_read_unlock();
+       /* For the subpage case, look up the extent buffer in the xarray. */
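+       /*
+        * xa_load() takes the RCU read lock internally, so a bare lookup
+        * needs no explicit rcu_read_lock() here.
+        */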
+       eb = xa_load(&fs_info->extent_buffers,
+                    bytenr >> fs_info->sectorsize_bits);
        ASSERT(eb);
        return eb;
 }
        struct extent_buffer *eb;
 
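+       /*
+        * Pair the lookup with atomic_inc_not_zero() under the RCU read
+        * lock so the buffer cannot be freed before we take a reference.
+        */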
        rcu_read_lock();
-       eb = radix_tree_lookup(&fs_info->buffer_radix,
-                              start >> fs_info->sectorsize_bits);
+       eb = xa_load(&fs_info->extent_buffers,
+                    start >> fs_info->sectorsize_bits);
        if (eb && atomic_inc_not_zero(&eb->refs)) {
                rcu_read_unlock();
                return eb;
        if (!eb)
                return ERR_PTR(-ENOMEM);
        eb->fs_info = fs_info;
-again:
-       ret = radix_tree_preload(GFP_NOFS);
-       if (ret) {
-               exists = ERR_PTR(ret);
-               goto free_eb;
-       }
-       spin_lock(&fs_info->buffer_lock);
-       ret = radix_tree_insert(&fs_info->buffer_radix,
-                               start >> fs_info->sectorsize_bits, eb);
-       spin_unlock(&fs_info->buffer_lock);
-       radix_tree_preload_end();
-       if (ret == -EEXIST) {
-               exists = find_extent_buffer(fs_info, start);
-               if (exists)
+
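+       /*
+        * xa_insert() returns -EBUSY if the index is already occupied. If
+        * the existing buffer is mid-release, the lookup below returns
+        * NULL and we retry the insert.
+        */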
+       do {
+               ret = xa_insert(&fs_info->extent_buffers,
+                               start >> fs_info->sectorsize_bits,
+                               eb, GFP_NOFS);
+               if (ret == -ENOMEM) {
+                       exists = ERR_PTR(ret);
                        goto free_eb;
-               else
-                       goto again;
-       }
+               }
+               if (ret == -EBUSY) {
+                       exists = find_extent_buffer(fs_info, start);
+                       if (exists)
+                               goto free_eb;
+               }
+       } while (ret);
+
        check_buffer_tree_ref(eb);
        set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
 
        }
        if (uptodate)
                set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-again:
-       ret = radix_tree_preload(GFP_NOFS);
-       if (ret) {
-               exists = ERR_PTR(ret);
-               goto free_eb;
-       }
-
-       spin_lock(&fs_info->buffer_lock);
-       ret = radix_tree_insert(&fs_info->buffer_radix,
-                               start >> fs_info->sectorsize_bits, eb);
-       spin_unlock(&fs_info->buffer_lock);
-       radix_tree_preload_end();
-       if (ret == -EEXIST) {
-               exists = find_extent_buffer(fs_info, start);
-               if (exists)
+
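+       /* Same insert-or-lookup retry pattern as above. */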
+       do {
+               ret = xa_insert(&fs_info->extent_buffers,
+                               start >> fs_info->sectorsize_bits,
+                               eb, GFP_NOFS);
+               if (ret == -ENOMEM) {
+                       exists = ERR_PTR(ret);
                        goto free_eb;
-               else
-                       goto again;
-       }
+               }
+               if (ret == -EBUSY) {
+                       exists = find_extent_buffer(fs_info, start);
+                       if (exists)
+                               goto free_eb;
+               }
+       } while (ret);
+
        /* add one reference for the tree */
        check_buffer_tree_ref(eb);
        set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
 
                        spin_unlock(&eb->refs_lock);
 
-                       spin_lock(&fs_info->buffer_lock);
-                       radix_tree_delete(&fs_info->buffer_radix,
-                                         eb->start >> fs_info->sectorsize_bits);
-                       spin_unlock(&fs_info->buffer_lock);
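+                       /*
+                        * xa_erase() takes the xarray's internal spinlock,
+                        * so the external buffer_lock is not needed here.
+                        */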
+                       xa_erase(&fs_info->extent_buffers,
+                                eb->start >> fs_info->sectorsize_bits);
                } else {
                        spin_unlock(&eb->refs_lock);
                }
        }
 }
 
-#define GANG_LOOKUP_SIZE       16
 static struct extent_buffer *get_next_extent_buffer(
                struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
 {
-       struct extent_buffer *gang[GANG_LOOKUP_SIZE];
-       struct extent_buffer *found = NULL;
+       struct extent_buffer *eb;
+       unsigned long index;
        u64 page_start = page_offset(page);
-       u64 cur = page_start;
 
        ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
        lockdep_assert_held(&fs_info->buffer_lock);
 
-       while (cur < page_start + PAGE_SIZE) {
-               int ret;
-               int i;
-
-               ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
-                               (void **)gang, cur >> fs_info->sectorsize_bits,
-                               min_t(unsigned int, GANG_LOOKUP_SIZE,
-                                     PAGE_SIZE / fs_info->nodesize));
-               if (ret == 0)
-                       goto out;
-               for (i = 0; i < ret; i++) {
-                       /* Already beyond page end */
-                       if (gang[i]->start >= page_start + PAGE_SIZE)
-                               goto out;
-                       /* Found one */
-                       if (gang[i]->start >= bytenr) {
-                               found = gang[i];
-                               goto out;
-                       }
-               }
-               cur = gang[ret - 1]->start + gang[ret - 1]->len;
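+       /* Return the first cached extent buffer at or after @bytenr. */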
+       xa_for_each_start(&fs_info->extent_buffers, index, eb,
+                         bytenr >> fs_info->sectorsize_bits) {
+               if (in_range(eb->start, page_start, PAGE_SIZE))
+                       return eb;
+               /* Already beyond page end */
+               if (eb->start >= page_start + PAGE_SIZE)
+                       return NULL;
        }
-out:
-       return found;
+       return NULL;
 }
 
 static int try_release_subpage_extent_buffer(struct page *page)
 
 
 void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
 {
-       struct radix_tree_iter iter;
-       void **slot;
+       unsigned long index;
+       struct extent_buffer *eb;
        struct btrfs_device *dev, *tmp;
 
        if (!fs_info)
 
        test_mnt->mnt_sb->s_fs_info = NULL;
 
-       spin_lock(&fs_info->buffer_lock);
-       radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
-               struct extent_buffer *eb;
-
-               eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
-               if (!eb)
-                       continue;
-               /* Shouldn't happen but that kind of thinking creates CVE's */
-               if (radix_tree_exception(eb)) {
-                       if (radix_tree_deref_retry(eb))
-                               slot = radix_tree_iter_retry(&iter);
-                       continue;
-               }
-               slot = radix_tree_iter_resume(slot, &iter);
-               spin_unlock(&fs_info->buffer_lock);
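+       /*
+        * xa_for_each() handles its own RCU locking and tolerates entries
+        * being erased under it, so no buffer_lock is needed here.
+        */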
+       xa_for_each(&fs_info->extent_buffers, index, eb) {
                free_extent_buffer_stale(eb);
-               spin_lock(&fs_info->buffer_lock);
        }
-       spin_unlock(&fs_info->buffer_lock);
 
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
        list_for_each_entry_safe(dev, tmp, &fs_info->fs_devices->devices,