}
xa_init(&fs_info->fs_roots);
- INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
+ xa_init(&fs_info->buffer_array);
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->delayed_iputs);
spin_lock_init(&fs_info->defrag_inodes_lock);
spin_lock_init(&fs_info->tree_mod_seq_lock);
spin_lock_init(&fs_info->super_lock);
- spin_lock_init(&fs_info->buffer_lock);
spin_lock_init(&fs_info->unused_bgs_lock);
rwlock_init(&fs_info->tree_mod_log_lock);
mutex_init(&fs_info->unused_bg_unpin_mutex);
struct extent_buffer *eb;
rcu_read_lock();
- eb = radix_tree_lookup(&fs_info->buffer_radix,
- start >> PAGE_SHIFT);
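+ /* xa_load() is RCU-safe, so the rcu_read_lock() above is all we need. */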
+ eb = xa_load(&fs_info->buffer_array, start >> PAGE_SHIFT);
if (eb && atomic_inc_not_zero(&eb->refs)) {
rcu_read_unlock();
/*
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start)
{
- struct extent_buffer *eb, *exists = NULL;
- int ret;
+ struct extent_buffer *exists, *eb = NULL;
- eb = find_extent_buffer(fs_info, start);
- if (eb)
- return eb;
- eb = alloc_dummy_extent_buffer(fs_info, start);
- if (!eb)
- return NULL;
- eb->fs_info = fs_info;
again:
- ret = radix_tree_preload(GFP_NOFS);
- if (ret)
+ exists = find_extent_buffer(fs_info, start);
+ if (exists)
goto free_eb;
- spin_lock(&fs_info->buffer_lock);
- ret = radix_tree_insert(&fs_info->buffer_radix,
- start >> PAGE_SHIFT, eb);
- spin_unlock(&fs_info->buffer_lock);
- radix_tree_preload_end();
- if (ret == -EEXIST) {
- exists = find_extent_buffer(fs_info, start);
- if (exists)
+ if (!eb)
+ eb = alloc_dummy_extent_buffer(fs_info, start);
+ if (!eb)
+ return NULL;
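+ /*
+ * The XArray takes its own xa_lock for stores, so this replaces the
+ * radix_tree_preload() + buffer_lock + radix_tree_insert() sequence.
+ * Passing NULL as the old entry makes the cmpxchg an insert-only
+ * operation.
+ */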
+ exists = xa_cmpxchg(&fs_info->buffer_array, start >> PAGE_SHIFT,
+ NULL, eb, GFP_NOFS);
+ if (unlikely(exists)) {
+ if (xa_is_err(exists)) {
+ exists = NULL;
goto free_eb;
- else
- goto again;
+ }
+ goto again;
}
check_buffer_tree_ref(eb);
set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
return eb;
free_eb:
- btrfs_release_extent_buffer(eb);
+ if (eb)
+ btrfs_release_extent_buffer(eb);
return exists;
}
#endif
int num_pages;
int i;
unsigned long index = start >> PAGE_SHIFT;
- struct extent_buffer *eb;
+ struct extent_buffer *eb = NULL;
struct extent_buffer *exists = NULL;
struct page *p;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
int uptodate = 1;
- int ret;
if (!IS_ALIGNED(start, fs_info->sectorsize)) {
btrfs_err(fs_info, "bad tree block start %llu", start);
return ERR_PTR(-EINVAL);
}
- eb = find_extent_buffer(fs_info, start);
- if (eb)
- return eb;
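+ /*
+ * On an insertion collision we come back here; if the competing
+ * buffer has already gone away again, the previously allocated eb is
+ * reused and we jump straight to "add".
+ */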
+again:
+ exists = find_extent_buffer(fs_info, start);
+ if (exists)
+ goto free_eb;
+ if (eb)
+ goto add;
eb = __alloc_extent_buffer(fs_info, start, len);
if (!eb)
return ERR_PTR(-ENOMEM);
}
if (uptodate)
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-again:
- ret = radix_tree_preload(GFP_NOFS);
- if (ret) {
- exists = ERR_PTR(ret);
- goto free_eb;
- }
-
- spin_lock(&fs_info->buffer_lock);
- ret = radix_tree_insert(&fs_info->buffer_radix,
- start >> PAGE_SHIFT, eb);
- spin_unlock(&fs_info->buffer_lock);
- radix_tree_preload_end();
- if (ret == -EEXIST) {
- exists = find_extent_buffer(fs_info, start);
- if (exists)
+add:
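+ /*
+ * xa_cmpxchg() returns NULL on success, the buffer that beat us to
+ * the slot on a collision, or an xa_err() value if the XArray could
+ * not allocate memory.
+ */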
+ exists = xa_cmpxchg(&fs_info->buffer_array, start >> PAGE_SHIFT,
+ NULL, eb, GFP_NOFS);
+ if (unlikely(exists)) {
+ if (xa_is_err(exists)) {
+ exists = ERR_PTR(xa_err(exists));
goto free_eb;
- else
- goto again;
+ }
+ goto again;
}
/* add one reference for the tree */
check_buffer_tree_ref(eb);
return eb;
free_eb:
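+ /* eb is still NULL if an existing buffer was found before allocating one. */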
+ if (!eb)
+ return exists;
WARN_ON(!atomic_dec_and_test(&eb->refs));
for (i = 0; i < num_pages; i++) {
if (eb->pages[i])
spin_unlock(&eb->refs_lock);
- spin_lock(&fs_info->buffer_lock);
- radix_tree_delete(&fs_info->buffer_radix,
- eb->start >> PAGE_SHIFT);
- spin_unlock(&fs_info->buffer_lock);
+ xa_erase(&fs_info->buffer_array, eb->start >> PAGE_SHIFT);
} else {
spin_unlock(&eb->refs_lock);
}
return NULL;
}
- spin_lock_init(&fs_info->buffer_lock);
spin_lock_init(&fs_info->qgroup_lock);
spin_lock_init(&fs_info->super_lock);
spin_lock_init(&fs_info->tree_mod_seq_lock);
INIT_LIST_HEAD(&fs_info->dirty_qgroups);
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
- INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
+ xa_init(&fs_info->buffer_array);
xa_init(&fs_info->fs_roots);
extent_io_tree_init(fs_info, &fs_info->freed_extents[0],
IO_TREE_FS_INFO_FREED_EXTENTS0, NULL);
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
{
- struct radix_tree_iter iter;
- void **slot;
+ struct extent_buffer *eb;
+ unsigned long index;
if (!fs_info)
return;
test_mnt->mnt_sb->s_fs_info = NULL;
- spin_lock(&fs_info->buffer_lock);
- radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
- struct extent_buffer *eb;
-
- eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
- if (!eb)
- continue;
- /* Shouldn't happen but that kind of thinking creates CVE's */
- if (radix_tree_exception(eb)) {
- if (radix_tree_deref_retry(eb))
- slot = radix_tree_iter_retry(&iter);
- continue;
- }
- slot = radix_tree_iter_resume(slot, &iter);
- spin_unlock(&fs_info->buffer_lock);
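+ /*
+ * xa_for_each() takes care of its own locking, so the buffers can be
+ * freed directly from the loop body.
+ */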
+ xa_for_each(&fs_info->buffer_array, index, eb)
free_extent_buffer_stale(eb);
- spin_lock(&fs_info->buffer_lock);
- }
- spin_unlock(&fs_info->buffer_lock);
btrfs_free_qgroup_config(fs_info);
btrfs_free_fs_roots(fs_info);