mutex_init(&lkb->lkb_cb_mutex);
INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
- idr_preload(GFP_NOFS);
- spin_lock(&ls->ls_lkbidr_spin);
- rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
- if (rv >= 0)
- lkb->lkb_id = rv;
- spin_unlock(&ls->ls_lkbidr_spin);
- idr_preload_end();
-
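+ /* xa_alloc() takes the xarray's internal lock and allocates memory itself,
+  * so the old idr_preload()/GFP_NOWAIT sequence is no longer needed */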
+ rv = xa_alloc(&ls->ls_lkbs, &lkb->lkb_id, lkb, xa_limit_31b, GFP_NOFS);
if (rv < 0) {
- log_error(ls, "create_lkb idr error %d", rv);
+ log_error(ls, "create_lkb xa error %d", rv);
dlm_free_lkb(lkb);
{
struct dlm_lkb *lkb;
- spin_lock(&ls->ls_lkbidr_spin);
- lkb = idr_find(&ls->ls_lkbidr, lkid);
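+ /* hold xa_lock so the lkb cannot be erased and freed between the
+  * lookup and taking a reference */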
+ xa_lock(&ls->ls_lkbs);
+ lkb = xa_load(&ls->ls_lkbs, lkid);
if (lkb)
kref_get(&lkb->lkb_ref);
- spin_unlock(&ls->ls_lkbidr_spin);
+ xa_unlock(&ls->ls_lkbs);
*lkb_ret = lkb;
return lkb ? 0 : -ENOENT;
{
uint32_t lkid = lkb->lkb_id;
- spin_lock(&ls->ls_lkbidr_spin);
+ xa_lock(&ls->ls_lkbs);
if (kref_put(&lkb->lkb_ref, kill_lkb)) {
- idr_remove(&ls->ls_lkbidr, lkid);
- spin_unlock(&ls->ls_lkbidr_spin);
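+ /* __xa_erase() is the locked variant; xa_lock is already held here */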
+ __xa_erase(&ls->ls_lkbs, lkid);
+ xa_unlock(&ls->ls_lkbs);
detach_lkb(lkb);
dlm_free_lkb(lkb);
return 1;
} else {
- spin_unlock(&ls->ls_lkbidr_spin);
+ xa_unlock(&ls->ls_lkbs);
return 0;
}
}
goto out_rsbtbl;
}
- idr_init(&ls->ls_lkbidr);
- spin_lock_init(&ls->ls_lkbidr_spin);
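+ /* XA_FLAGS_ALLOC1 hands out ids starting at 1, matching the old
+  * idr_alloc() start of 1, so lkid 0 is never used */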
+ xa_init_flags(&ls->ls_lkbs, XA_FLAGS_ALLOC1);
INIT_LIST_HEAD(&ls->ls_waiters);
mutex_init(&ls->ls_waiters_mutex);
ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
if (!ls->ls_recover_buf)
- goto out_lkbidr;
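+ /* no ls_lkbs cleanup needed on this path: nothing has been stored in
+  * the xarray yet */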
+ goto out_rsbtbl;
ls->ls_slot = 0;
ls->ls_num_slots = 0;
spin_unlock(&lslist_lock);
idr_destroy(&ls->ls_recover_idr);
kfree(ls->ls_recover_buf);
- out_lkbidr:
- idr_destroy(&ls->ls_lkbidr);
out_rsbtbl:
for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
kfree(ls->ls_remove_names[i]);
return error;
}
-static int lkb_idr_is_local(int id, void *p, void *data)
-{
- struct dlm_lkb *lkb = p;
-
- return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV;
-}
-
-static int lkb_idr_is_any(int id, void *p, void *data)
-{
- return 1;
-}
-
-static int lkb_idr_free(int id, void *p, void *data)
-{
- struct dlm_lkb *lkb = p;
-
- if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
- dlm_free_lvb(lkb->lkb_lvbptr);
-
- dlm_free_lkb(lkb);
- return 0;
-}
-
-/* NOTE: We check the lkbidr here rather than the resource table.
+/* NOTE: We check the lkbs xarray here rather than the resource table.
This is because there may be LKBs queued as ASTs that have been unlinked
from their RSBs and are pending deletion once the AST has been delivered */
static int lockspace_busy(struct dlm_ls *ls, int force)
{
- int rv;
+ struct dlm_lkb *lkb;
+ unsigned long index;
- spin_lock(&ls->ls_lkbidr_spin);
- if (force == 0) {
- rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
- } else if (force == 1) {
- rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
- } else {
- rv = 0;
+ if (force == 0)
+ return !xa_empty(&ls->ls_lkbs);
+ if (force != 1)
+ return 0;
+
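+ /* hold xa_lock while walking ls_lkbs so no lkb can be erased and
+  * freed while we dereference it */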
+ xa_lock(&ls->ls_lkbs);
+ xa_for_each(&ls->ls_lkbs, index, lkb) {
+ if (lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV) {
+ xa_unlock(&ls->ls_lkbs);
+ return 1;
+ }
}
- spin_unlock(&ls->ls_lkbidr_spin);
- return rv;
+ xa_unlock(&ls->ls_lkbs);
+
+ return 0;
}
static int release_lockspace(struct dlm_ls *ls, int force)
{
+ struct dlm_lkb *lkb;
struct dlm_rsb *rsb;
struct rb_node *n;
+ unsigned long index;
int i, busy, rv;
busy = lockspace_busy(ls, force);
kfree(ls->ls_recover_buf);
/*
- * Free all lkb's in idr
+ * Free all lkb's
*/
-
- idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
- idr_destroy(&ls->ls_lkbidr);
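+ /* entries are not erased individually; xa_destroy() below frees the
+  * xarray nodes once all lkbs have been freed */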
+ xa_for_each(&ls->ls_lkbs, index, lkb) {
+ if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
+ dlm_free_lvb(lkb->lkb_lvbptr);
+ dlm_free_lkb(lkb);
+ }
+ xa_destroy(&ls->ls_lkbs);
/*
* Free all rsb's on rsbtbl[] lists