/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
- struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
+ struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, gfp_t gfp)
{
- if (no_fail)
- f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
- else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
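+ /*
+  * xa_store() returns the entry previously at this index, or an error
+  * entry (xa_is_err()) if it could not allocate.  Callers hold
+  * nat_tree_lock and only call this when no entry exists for
+  * nat_get_nid(ne), so a non-NULL return means @ne was not inserted.
+  */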
+ if (xa_store(&nm_i->nat_cache, nat_get_nid(ne), ne, gfp))
return NULL;
if (raw_ne)
{
struct nat_entry *ne;
- ne = radix_tree_lookup(&nm_i->nat_root, n);
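+ /* xa_load() is RCU-safe on its own; callers still hold nat_tree_lock */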
+ ne = xa_load(&nm_i->nat_cache, n);
/* for recent accessed nat entry, move it to tail of lru list */
if (ne && !get_nat_flag(ne, IS_DIRTY)) {
return ne;
}
-static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
- nid_t start, unsigned int nr, struct nat_entry **ep)
-{
- return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
-}
-
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
- radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
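+ /* xa_erase() never allocates and cannot fail */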
+ xa_erase(&nm_i->nat_cache, nat_get_nid(e));
nm_i->nat_cnt--;
__free_nat_entry(e);
}
down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
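+ /* plain GFP_NOIO: this caller tolerated a failed insert before (no_fail was false) */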
if (!e)
- e = __init_nat_entry(nm_i, new, ne, false);
+ e = __init_nat_entry(nm_i, new, ne, GFP_NOIO);
else
f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
nat_get_blkaddr(e) !=
down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ni->nid);
if (!e) {
- e = __init_nat_entry(nm_i, new, NULL, true);
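+ /* __GFP_NOFAIL preserves the old no_fail semantics: the insert must not fail here */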
+ e = __init_nat_entry(nm_i, new, NULL, GFP_NOIO | __GFP_NOFAIL);
copy_node_info(&e->ni, ni);
f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
} else if (new_blkaddr == NEW_ADDR) {
ne = __lookup_nat_cache(nm_i, nid);
if (!ne) {
ne = __alloc_nat_entry(nid, true);
- __init_nat_entry(nm_i, ne, &raw_ne, true);
+ __init_nat_entry(nm_i, ne, &raw_ne,
+ GFP_NOIO | __GFP_NOFAIL);
}
/*
nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
xa_init(&nm_i->free_nid_cache);
- INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
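+ /* gfp flags are no longer fixed at init time; each xa_store() passes its own */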
+ xa_init(&nm_i->nat_cache);
xa_init(&nm_i->nat_set_cache);
INIT_LIST_HEAD(&nm_i->nat_entries);
spin_lock_init(&nm_i->nat_list_lock);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct nat_entry *natvec[NATVEC_SIZE];
+ struct nat_entry *nat;
struct nat_entry_set *set;
unsigned long index;
- nid_t nid = 0;
- unsigned int found;
if (!nm_i)
return;
/* destroy nat cache */
down_write(&nm_i->nat_tree_lock);
- while ((found = __gang_lookup_nat_cache(nm_i,
- nid, NATVEC_SIZE, natvec))) {
- unsigned idx;
-
- nid = nat_get_nid(natvec[found - 1]) + 1;
- for (idx = 0; idx < found; idx++) {
- spin_lock(&nm_i->nat_list_lock);
- list_del(&natvec[idx]->list);
- spin_unlock(&nm_i->nat_list_lock);
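+ /* xa_for_each() tolerates entries being erased during the walk, so deleting as we go is safe */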
+ xa_for_each(&nm_i->nat_cache, index, nat) {
+ spin_lock(&nm_i->nat_list_lock);
+ list_del(&nat->list);
+ spin_unlock(&nm_i->nat_list_lock);
- __del_from_nat_cache(nm_i, natvec[idx]);
- }
+ __del_from_nat_cache(nm_i, nat);
}
+ f2fs_bug_on(sbi, !xa_empty(&nm_i->nat_cache));
f2fs_bug_on(sbi, nm_i->nat_cnt);
/* destroy nat set cache */