void (*sc_free)(struct nfs4_stid *))
{
struct nfs4_stid *stid;
- int new_id;
+ int ret;
stid = kmem_cache_zalloc(slab, GFP_KERNEL);
if (!stid)
return NULL;
- idr_preload(GFP_KERNEL);
- spin_lock(&cl->cl_lock);
- /* Reserving 0 for start of file in nfsdfs "states" file: */
- new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
- spin_unlock(&cl->cl_lock);
- idr_preload_end();
- if (new_id < 0)
+ ret = xa_alloc_cyclic(&cl->cl_stateids,
+ &stid->sc_stateid.si_opaque.so_id, stid,
+ xa_limit_32b, &cl->cl_stateid_next, GFP_KERNEL);
+ if (ret < 0)
goto out_free;
stid->sc_free = sc_free;
stid->sc_client = cl;
- stid->sc_stateid.si_opaque.so_id = new_id;
stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
/* Will be incremented before return to client: */
refcount_set(&stid->sc_count, 1);
spin_lock_init(&stid->sc_lock);
- /*
- * It shouldn't be a problem to reuse an opaque stateid value.
- * I don't think it is for 4.1. But with 4.0 I worry that, for
- * example, a stray write retransmission could be accepted by
- * the server when it should have been rejected. Therefore,
- * adopt a trick from the sctp code to attempt to maximize the
- * amount of time until an id is reused, by ensuring they always
- * "increase" (mod INT_MAX):
- */
return stid;
out_free:
kmem_cache_free(slab, stid);
return NULL;
}
if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
wake_up_all(&close_wq);
return;
}
- idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
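+ /* xa_erase() takes the xa_lock itself; cl_lock is still held from refcount_dec_and_lock() above */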
+ xa_erase(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
spin_unlock(&clp->cl_lock);
s->sc_free(s);
if (fp)
put_nfs4_file(fp);
}
- idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
+ xa_erase(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
list_add(&stp->st_locks, reaplist);
}
for (i = 0; i < OWNER_HASH_SIZE; i++)
INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
INIT_LIST_HEAD(&clp->cl_sessions);
- idr_init(&clp->cl_stateids);
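+ /* XA_FLAGS_ALLOC1 keeps id 0 reserved for the start of the nfsdfs "states" file */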
+ xa_init_flags(&clp->cl_stateids, XA_FLAGS_ALLOC1);
atomic_set(&clp->cl_rpc_users, 0);
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
INIT_LIST_HEAD(&clp->cl_idhash);
kfree(clp->cl_name.data);
kfree(clp->cl_nii_domain.data);
kfree(clp->cl_nii_name.data);
- idr_destroy(&clp->cl_stateids);
+ xa_destroy(&clp->cl_stateids);
kmem_cache_free(client_slab, clp);
}
{
struct nfs4_stid *ret;
- ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
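+ /* xa_load() takes the RCU read lock itself; no extra locking needed for lookup */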
+ ret = xa_load(&cl->cl_stateids, t->si_opaque.so_id);
if (!ret || !ret->sc_type)
return NULL;
return ret;
unsigned long id = *pos;
void *ret;
- spin_lock(&clp->cl_lock);
- ret = idr_get_next_ul(&clp->cl_stateids, &id);
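+ /* Take the xarray's own lock in place of cl_lock; dropped in states_stop() */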
+ xa_lock(&clp->cl_stateids);
+ ret = xa_find(&clp->cl_stateids, &id, ULONG_MAX, XA_PRESENT);
*pos = id;
return ret;
}
void *ret;
id = *pos;
- id++;
- ret = idr_get_next_ul(&clp->cl_stateids, &id);
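+ /* xa_find_after() looks strictly after *pos, replacing the manual id++ */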
+ ret = xa_find_after(&clp->cl_stateids, &id, ULONG_MAX, XA_PRESENT);
*pos = id;
return ret;
}
{
struct nfs4_client *clp = s->private;
- spin_unlock(&clp->cl_lock);
+ xa_unlock(&clp->cl_stateids);
}
static void nfs4_show_superblock(struct seq_file *s, struct file *f)