/* the log root tree is a directory of all the other log roots */
struct btrfs_root *log_root_tree;
- spinlock_t fs_roots_radix_lock;
- struct radix_tree_root fs_roots_radix;
+ struct xarray fs_roots;
/* block group cache stuff */
spinlock_t block_group_cache_lock;
struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
u64 root_id)
{
- struct btrfs_root *root;
-
- spin_lock(&fs_info->fs_roots_radix_lock);
- root = radix_tree_lookup(&fs_info->fs_roots_radix,
- (unsigned long)root_id);
- spin_unlock(&fs_info->fs_roots_radix_lock);
- return root;
+ /*
+  * xa_load() takes the RCU read lock internally, so the external
+  * fs_roots_radix_lock round-trip is no longer needed for a plain
+  * lookup.
+  */
+ return xa_load(&fs_info->fs_roots, (unsigned long)root_id);
}
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
{
int ret;
- ret = radix_tree_preload(GFP_NOFS);
- if (ret)
- return ret;
-
- spin_lock(&fs_info->fs_roots_radix_lock);
- ret = radix_tree_insert(&fs_info->fs_roots_radix,
+ /*
+  * __xa_insert() expects the xa_lock to be held on entry; with a
+  * sleeping GFP such as GFP_NOFS it may drop and reacquire the lock
+  * internally to allocate, which replaces the old
+  * radix_tree_preload()/radix_tree_preload_end() pair.
+  * NOTE(review): xa_insert() reports an already-present entry as
+  * -EBUSY, not -EEXIST -- callers must be updated to match.
+  */
+ xa_lock(&fs_info->fs_roots);
+ ret = __xa_insert(&fs_info->fs_roots,
(unsigned long)root->root_key.objectid,
- root);
+ root, GFP_NOFS);
+ /* NOTE(review): flag name still says RADIX; candidate for a later rename. */
if (ret == 0)
set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
- spin_unlock(&fs_info->fs_roots_radix_lock);
- radix_tree_preload_end();
+ xa_unlock(&fs_info->fs_roots);
return ret;
}
ret = btrfs_insert_fs_root(fs_info, root);
if (ret) {
- if (ret == -EEXIST) {
+ if (ret == -EBUSY) {
btrfs_free_fs_root(root);
goto again;
}
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
{
- int ret;
- struct btrfs_root *gang[8];
- int i;
+ struct btrfs_root *root;
+ unsigned long i;
while (!list_empty(&fs_info->dead_roots)) {
- gang[0] = list_entry(fs_info->dead_roots.next,
+ root = list_entry(fs_info->dead_roots.next,
struct btrfs_root, root_list);
- list_del(&gang[0]->root_list);
+ list_del(&root->root_list);
- if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
- btrfs_drop_and_free_fs_root(fs_info, gang[0]);
+ if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
+ btrfs_drop_and_free_fs_root(fs_info, root);
} else {
- free_extent_buffer(gang[0]->node);
- free_extent_buffer(gang[0]->commit_root);
- btrfs_put_fs_root(gang[0]);
+ free_extent_buffer(root->node);
+ free_extent_buffer(root->commit_root);
+ btrfs_put_fs_root(root);
}
}
- while (1) {
- ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
- (void **)gang, 0,
- ARRAY_SIZE(gang));
- if (!ret)
- break;
- for (i = 0; i < ret; i++)
- btrfs_drop_and_free_fs_root(fs_info, gang[i]);
- }
+ /*
+  * xa_for_each() tolerates entries being erased during the walk, so
+  * it is safe for btrfs_drop_and_free_fs_root() to xa_erase() the
+  * current entry here; the old gang-lookup loop is no longer needed.
+  */
+ xa_for_each(&fs_info->fs_roots, i, root)
+ btrfs_drop_and_free_fs_root(fs_info, root);
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
btrfs_free_log_root_tree(NULL, fs_info);
goto fail_delalloc_bytes;
}
- INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
+ xa_init(&fs_info->fs_roots);
INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
spin_lock_init(&fs_info->pending_raid_kobjs_lock);
spin_lock_init(&fs_info->delalloc_root_lock);
spin_lock_init(&fs_info->trans_lock);
- spin_lock_init(&fs_info->fs_roots_radix_lock);
spin_lock_init(&fs_info->delayed_iput_lock);
spin_lock_init(&fs_info->defrag_inodes_lock);
spin_lock_init(&fs_info->tree_mod_seq_lock);
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root)
{
- spin_lock(&fs_info->fs_roots_radix_lock);
- radix_tree_delete(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid);
- spin_unlock(&fs_info->fs_roots_radix_lock);
+ xa_erase(&fs_info->fs_roots, (unsigned long)root->root_key.objectid);
if (btrfs_root_refs(&root->root_item) == 0)
synchronize_srcu(&fs_info->subvol_srcu);
while (1) {
index = srcu_read_lock(&fs_info->subvol_srcu);
- ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
- (void **)gang, root_objectid,
- ARRAY_SIZE(gang));
+ ret = xa_extract(&fs_info->fs_roots, (void **)gang,
+ root_objectid, ULONG_MAX, ARRAY_SIZE(gang),
+ XA_PRESENT);
if (!ret) {
srcu_read_unlock(&fs_info->subvol_srcu, index);
break;
err = btrfs_insert_fs_root(fs_info, root);
if (err) {
- BUG_ON(err == -EEXIST);
+ BUG_ON(err == -EBUSY);
btrfs_free_fs_root(root);
break;
}
spin_lock_init(&fs_info->buffer_lock);
spin_lock_init(&fs_info->qgroup_lock);
spin_lock_init(&fs_info->super_lock);
- spin_lock_init(&fs_info->fs_roots_radix_lock);
spin_lock_init(&fs_info->tree_mod_seq_lock);
mutex_init(&fs_info->qgroup_ioctl_lock);
mutex_init(&fs_info->qgroup_rescan_lock);
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
- INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
+ xa_init(&fs_info->fs_roots);
extent_io_tree_init(fs_info, &fs_info->freed_extents[0],
IO_TREE_FS_INFO_FREED_EXTENTS0, NULL);
extent_io_tree_init(fs_info, &fs_info->freed_extents[1],
#include "dev-replace.h"
#include "qgroup.h"
-#define BTRFS_ROOT_TRANS_TAG 0
+/* xarray mark set on fs roots modified in the current transaction */
+#define BTRFS_ROOT_TRANS_MARK XA_MARK_0
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
[TRANS_STATE_RUNNING] = 0U,
*/
smp_wmb();
- spin_lock(&fs_info->fs_roots_radix_lock);
+ xa_lock(&fs_info->fs_roots);
if (root->last_trans == trans->transid && !force) {
- spin_unlock(&fs_info->fs_roots_radix_lock);
+ xa_unlock(&fs_info->fs_roots);
return 0;
}
- radix_tree_tag_set(&fs_info->fs_roots_radix,
+ __xa_set_mark(&fs_info->fs_roots,
(unsigned long)root->root_key.objectid,
- BTRFS_ROOT_TRANS_TAG);
- spin_unlock(&fs_info->fs_roots_radix_lock);
+ BTRFS_ROOT_TRANS_MARK);
+ xa_unlock(&fs_info->fs_roots);
root->last_trans = trans->transid;
/* this is pretty tricky. We don't want to
spin_unlock(&cur_trans->dropped_roots_lock);
/* Make sure we don't try to update the root at commit time */
- spin_lock(&fs_info->fs_roots_radix_lock);
- radix_tree_tag_clear(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid,
- BTRFS_ROOT_TRANS_TAG);
- spin_unlock(&fs_info->fs_roots_radix_lock);
+ xa_clear_mark(&fs_info->fs_roots,
+ (unsigned long)root->root_key.objectid,
+ BTRFS_ROOT_TRANS_MARK);
}
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *gang[8];
- int i;
- int ret;
+ struct btrfs_root *root;
+ unsigned long index;
int err = 0;
- spin_lock(&fs_info->fs_roots_radix_lock);
- while (1) {
- ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
- (void **)gang, 0,
- ARRAY_SIZE(gang),
- BTRFS_ROOT_TRANS_TAG);
- if (ret == 0)
- break;
- for (i = 0; i < ret; i++) {
- struct btrfs_root *root = gang[i];
- radix_tree_tag_clear(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid,
- BTRFS_ROOT_TRANS_TAG);
- spin_unlock(&fs_info->fs_roots_radix_lock);
-
- btrfs_free_log(trans, root);
- btrfs_update_reloc_root(trans, root);
-
- btrfs_save_ino_cache(root, trans);
-
- /* see comments in should_cow_block() */
- clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
- smp_mb__after_atomic();
-
- if (root->commit_root != root->node) {
- list_add_tail(&root->dirty_list,
- &trans->transaction->switch_commits);
- btrfs_set_root_node(&root->root_item,
- root->node);
- }
+ /*
+  * The xa_lock is held across the iteration itself; the mark is
+  * cleared *before* the lock is dropped so a re-scan cannot pick
+  * this root up a second time.
+  */
+ xa_lock(&fs_info->fs_roots);
+ xa_for_each_marked(&fs_info->fs_roots, index, root,
+ BTRFS_ROOT_TRANS_MARK) {
+ __xa_clear_mark(&fs_info->fs_roots, index,
+ BTRFS_ROOT_TRANS_MARK);
+ xa_unlock(&fs_info->fs_roots);
- err = btrfs_update_root(trans, fs_info->tree_root,
- &root->root_key,
- &root->root_item);
- spin_lock(&fs_info->fs_roots_radix_lock);
- if (err)
- break;
- btrfs_qgroup_free_meta_all_pertrans(root);
+ btrfs_free_log(trans, root);
+ btrfs_update_reloc_root(trans, root);
+
+ btrfs_save_ino_cache(root, trans);
+
+ /* see comments in should_cow_block() */
+ clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
+ smp_mb__after_atomic();
+
+ if (root->commit_root != root->node) {
+ list_add_tail(&root->dirty_list,
+ &trans->transaction->switch_commits);
+ btrfs_set_root_node(&root->root_item, root->node);
}
+
+ err = btrfs_update_root(trans, fs_info->tree_root,
+ &root->root_key, &root->root_item);
+ xa_lock(&fs_info->fs_roots);
+ if (err)
+ break;
+ btrfs_qgroup_free_meta_all_pertrans(root);
+ /*
+  * The lock was dropped above, so other roots may have been
+  * marked meanwhile; reset the cursor to rescan from the start,
+  * mirroring the old gang-lookup-from-0 restart.
+  * NOTE(review): xa_find_after() resumes strictly *after*
+  * @index, so a marked entry at index 0 would be skipped on the
+  * rescan -- confirm objectid 0 is never used for an fs root.
+  */
+ index = 0;
}
- spin_unlock(&fs_info->fs_roots_radix_lock);
+ xa_unlock(&fs_info->fs_roots);
return err;
}