{
struct inode_entry *ie;
- ie = radix_tree_lookup(&gc_list->iroot, ino);
+ ie = xa_load(&gc_list->iroot, ino);
if (ie)
return ie->inode;
return NULL;
new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
new_ie->inode = inode;
- f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
+ xa_store(&gc_list->iroot, inode->i_ino, new_ie,
+ GFP_NOFS | __GFP_NOFAIL);
list_add_tail(&new_ie->list, &gc_list->ilist);
}
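
For reference, the XArray calls used above correspond one-to-one to the radix-tree helpers they replace: xa_load() stands in for radix_tree_lookup(), and xa_store() with GFP_NOFS | __GFP_NOFAIL stands in for the f2fs_radix_tree_insert() wrapper, which looped until the insertion succeeded. Below is a minimal sketch of the same pattern outside f2fs; the xarray, entry type, and helper names (example_iroot, example_entry, example_*) are hypothetical and only illustrate the API.

#include <linux/slab.h>
#include <linux/xarray.h>

/* Hypothetical table of entries indexed by inode number. */
static DEFINE_XARRAY(example_iroot);

struct example_entry {
	unsigned long ino;
};

/* Look up an entry; xa_load() returns NULL for an empty index. */
static struct example_entry *example_lookup(unsigned long ino)
{
	return xa_load(&example_iroot, ino);
}

/* Insert an entry; __GFP_NOFAIL mirrors the retry-until-success
 * behaviour of the old f2fs_radix_tree_insert() wrapper. */
static void example_insert(struct example_entry *e)
{
	xa_store(&example_iroot, e->ino, e, GFP_NOFS | __GFP_NOFAIL);
}

/* Remove an entry; xa_erase() returns whatever was stored at the index. */
static void example_remove(unsigned long ino)
{
	kfree(xa_erase(&example_iroot, ino));
}
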
{
struct inode_entry *ie, *next_ie;
list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
- radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
+ xa_erase(&gc_list->iroot, ie->inode->i_ino);
iput(ie->inode);
list_del(&ie->list);
kmem_cache_free(f2fs_inode_entry_slab, ie);
unsigned int init_segno = segno;
struct gc_inode_list gc_list = {
.ilist = LIST_HEAD_INIT(gc_list.ilist),
- .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
+ .iroot = XARRAY_INIT(gc_list.iroot, 0),
};
unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
unsigned long long first_skipped;
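
The initializer flags change because RADIX_TREE_INIT() recorded a GFP mask in the tree root, while an XArray takes the GFP mask at each xa_store() call; since no IRQ- or BH-safe locking is needed here, the XARRAY_INIT() flags can simply be 0. For a structure set up at run time rather than by a static initializer, the equivalent would be xa_init(), roughly as in the sketch below (gc_list_init() is a hypothetical helper, and struct gc_inode_list is assumed to already carry a struct xarray iroot).

#include <linux/list.h>
#include <linux/xarray.h>

/* Sketch: runtime equivalent of the designated initializer above. */
static void gc_list_init(struct gc_inode_list *gc_list)
{
	INIT_LIST_HEAD(&gc_list->ilist);
	xa_init(&gc_list->iroot);	/* flags 0, same as XARRAY_INIT(name, 0) */
}
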
for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
struct gc_inode_list gc_list = {
.ilist = LIST_HEAD_INIT(gc_list.ilist),
- .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
+ .iroot = XARRAY_INIT(gc_list.iroot, 0),
};
mutex_lock(&sbi->gc_mutex);