#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
-#include <linux/idr.h>
#include <linux/sched/mm.h>
+#include <linux/xarray.h>
#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
INIT_LIST_HEAD(&ctx->irq_names);
/*
- * When we have to destroy all contexts in cxl_context_detach_all() we
- * end up with afu_release_irqs() called from inside a
- * idr_for_each_entry(). Hence we need to make sure that anything
- * dereferenced from this IDR is ok before we allocate the IDR here.
- * This clears out the IRQ ranges to ensure this.
+ * When we have to destroy all contexts in cxl_context_detach_all()
+ * we end up with afu_release_irqs() called from inside a
+ * xa_for_each() loop. Hence we need to make sure that anything
+ * dereferenced from this context is valid before the context is
+ * stored in the array. Clearing the IRQ ranges here ensures that.
*/
for (i = 0; i < CXL_IRQ_RANGES; i++)
ctx->irqs.range[i] = 0;
ctx->status = OPENED;
- /*
- * Allocating IDR! We better make sure everything's setup that
- * dereferences from it.
- */
+ /* Prevent resets and mode changes */
mutex_lock(&afu->contexts_lock);
- idr_preload(GFP_KERNEL);
- i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
- ctx->afu->num_procs, GFP_NOWAIT);
- idr_preload_end();
+ i = xa_alloc(&ctx->afu->contexts, &ctx->pe, ctx,
+ XA_LIMIT(0, ctx->afu->num_procs - 1), GFP_KERNEL);
mutex_unlock(&afu->contexts_lock);
if (i < 0)
return i;
- ctx->pe = i;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
- ctx->elem = &ctx->afu->native->spa[i];
+ ctx->elem = &ctx->afu->native->spa[ctx->pe];
ctx->external_pe = ctx->pe;
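/*
 * Illustrative sketch, not part of the patch (the example_* helpers are
 * made up): idr_alloc() treats its "end" argument as exclusive and
 * returns the new ID, whereas xa_alloc() takes an inclusive XA_LIMIT()
 * and returns 0 on success, writing the allocated index through its
 * second argument. The two helpers below therefore allocate from the
 * same range, 0 .. max - 1.
 */
static int example_idr_id(struct idr *idr, void *ptr, int max)
{
	return idr_alloc(idr, ptr, 0, max, GFP_KERNEL);	/* new ID or -errno */
}

static int example_xa_id(struct xarray *xa, void *ptr, u32 max, u32 *id)
{
	return xa_alloc(xa, id, ptr, XA_LIMIT(0, max - 1), GFP_KERNEL);	/* 0 or -errno */
}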
void cxl_context_detach_all(struct cxl_afu *afu)
{
struct cxl_context *ctx;
- int tmp;
+ unsigned long index;
- mutex_lock(&afu->contexts_lock);
- idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
+ xa_for_each(&afu->contexts, index, ctx) {
/*
- * Anything done in here needs to be setup before the IDR is
- * created and torn down after the IDR removed
+ * Anything done in here needs to be set up before the context
+ * is added to the array and torn down after it is removed.
*/
cxl_context_detach(ctx);
unmap_mapping_range(ctx->mapping, 0, 0, 1);
mutex_unlock(&ctx->mapping_lock);
}
- mutex_unlock(&afu->contexts_lock);
}
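/*
 * Illustrative sketch, not part of the patch: xa_for_each() takes the
 * RCU read lock internally for each lookup and drops it between
 * entries, so the loop body may sleep and no external lock is needed
 * just to walk the array. If exclusion against concurrent context
 * creation were still wanted, the old mutex could simply be kept
 * around the loop:
 */
static void example_detach_all_locked(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	unsigned long index;

	mutex_lock(&afu->contexts_lock);
	xa_for_each(&afu->contexts, index, ctx)
		cxl_context_detach(ctx);
	mutex_unlock(&afu->contexts_lock);
}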
static void reclaim_ctx(struct rcu_head *rcu)
{
if (ctx->kernelapi && ctx->mapping)
cxl_release_mapping(ctx);
- mutex_lock(&ctx->afu->contexts_lock);
- idr_remove(&ctx->afu->contexts_idr, ctx->pe);
- mutex_unlock(&ctx->afu->contexts_lock);
+ xa_erase(&ctx->afu->contexts, ctx->pe);
call_rcu(&ctx->rcu, reclaim_ctx);
}
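/*
 * Illustrative sketch, not part of the patch: xa_erase() serialises
 * against other updates with the XArray's own internal spinlock, which
 * is why contexts_lock no longer needs to be taken around the removal
 * above. The call expands to roughly:
 */
static void *example_erase(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock(xa);			/* the array's internal spinlock */
	entry = __xa_erase(xa, index);	/* lockless variant, lock already held */
	xa_unlock(xa);

	return entry;
}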
struct cxl *adapter;
struct cxl_afu *afu;
struct cxl_context *ctx;
- int card, slice, id;
+ unsigned long index;
+ int card, slice;
pr_devel("%s called\n", __func__);
afu = adapter->afu[slice];
if (!afu || !afu->enabled)
continue;
- rcu_read_lock();
- idr_for_each_entry(&afu->contexts_idr, ctx, id)
+ xa_for_each(&afu->contexts, index, ctx)
_cxl_slbia(ctx, mm);
- rcu_read_unlock();
}
spin_unlock(&adapter->afu_list_lock);
}
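/*
 * Illustrative sketch, not part of the patch ("pe" is a made-up
 * parameter): xa_load() and xa_for_each() take the RCU read lock
 * internally for the lookup itself; protection of the entry while it
 * is used is a separate question (contexts here are freed via
 * call_rcu()). A single-entry lookup that keeps the context alive
 * across its use might look like this:
 */
static void example_slbia_one(struct cxl_afu *afu, struct mm_struct *mm,
			      unsigned long pe)
{
	struct cxl_context *ctx;

	rcu_read_lock();	/* keeps ctx alive across the call below */
	ctx = xa_load(&afu->contexts, pe);
	if (ctx)
		_cxl_slbia(ctx, mm);
	rcu_read_unlock();
}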
afu->dev.parent = &adapter->dev;
afu->dev.release = cxl_ops->release_afu;
afu->slice = slice;
- idr_init(&afu->contexts_idr);
+ xa_init_flags(&afu->contexts, XA_FLAGS_ALLOC);
mutex_init(&afu->contexts_lock);
spin_lock_init(&afu->afu_cntl_lock);
atomic_set(&afu->configured_state, -1);
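/*
 * Illustrative sketch, not part of the patch: xa_alloc() is only valid
 * on an XArray initialised in allocating mode, either dynamically with
 * xa_init_flags(..., XA_FLAGS_ALLOC) as above or statically:
 */
static DEFINE_XARRAY_ALLOC(example_contexts);	/* allocating XArray, IDs start at 0 */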