/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
-static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
+static DEFINE_XARRAY_ALLOC(worker_pools); /* PR: xarray of all pools */
/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
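For reference, and not part of the patch: DEFINE_XARRAY_ALLOC() initialises the array with XA_FLAGS_ALLOC, which is what lets xa_alloc() hand out IDs from it; a plain DEFINE_XARRAY() would WARN and fail the allocation. A minimal standalone sketch of the pattern, using hypothetical demo_* names:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(demo_ids);	/* ID-allocating xarray, IDs start at 0 */

static int demo_store(void *object, u32 *out_id)
{
	/* Allocate the lowest free ID in [0, 31] (inclusive) and store @object there. */
	return xa_alloc(&demo_ids, out_id, object, XA_LIMIT(0, 31), GFP_KERNEL);
}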
* ignored.
*/
#define for_each_pool(pool, pi) \
-	idr_for_each_entry(&worker_pool_idr, pool, pi) \
+	xa_for_each(&worker_pools, pi, pool) \
		if (({ assert_rcu_or_pool_mutex(); false; })) { } \
		else
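A hedged illustration of a for_each_pool() caller after the conversion (not part of the patch; assumes it lives in kernel/workqueue.c): xa_for_each() takes its arguments as (xarray, index, entry) rather than idr_for_each_entry()'s (idr, entry, id), and the index must be an unsigned long, which is why every @pi below changes type.

static void demo_count_pools(void)
{
	struct worker_pool *pool;
	unsigned long pi;		/* xa_for_each() requires an unsigned long index */
	int nr = 0;

	rcu_read_lock();		/* satisfies assert_rcu_or_pool_mutex() */
	for_each_pool(pool, pi)
		nr++;
	rcu_read_unlock();

	pr_info("%d worker pools\n", nr);
}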
*/
static int worker_pool_assign_id(struct worker_pool *pool)
{
-	int ret;
-
	lockdep_assert_held(&wq_pool_mutex);
-	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
-			GFP_KERNEL);
-	if (ret >= 0) {
-		pool->id = ret;
-		return 0;
-	}
-	return ret;
+	return xa_alloc(&worker_pools, &pool->id, pool,
+			XA_LIMIT(0, WORK_OFFQ_POOL_NONE - 1), GFP_KERNEL);
}
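xa_alloc() writes the new ID through its u32 * argument and returns 0 or a negative errno (-ENOMEM, or -EBUSY when no ID is free), so the separate ret/pool->id dance disappears; XA_LIMIT() bounds are inclusive, hence idr_alloc()'s exclusive end of WORK_OFFQ_POOL_NONE becomes WORK_OFFQ_POOL_NONE - 1. This assumes pool->id is compatible with the u32 * parameter. A hypothetical caller-side sketch, not part of the patch:

static int demo_register_pool(struct worker_pool *pool)
{
	int ret;

	mutex_lock(&wq_pool_mutex);
	ret = worker_pool_assign_id(pool);	/* 0 on success, -ENOMEM/-EBUSY on failure */
	mutex_unlock(&wq_pool_mutex);

	if (ret < 0)
		return ret;

	pr_debug("pool registered with id %d\n", pool->id);	/* filled in by xa_alloc() */
	return 0;
}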
/**
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;
-	return idr_find(&worker_pool_idr, pool_id);
+	return xa_load(&worker_pools, pool_id);
}
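xa_load() is the drop-in replacement for idr_find(): a lookup that returns NULL when nothing is stored at the index and is safe from pure RCU read-side context, matching how get_work_pool() is called. A small sketch with a hypothetical demo_* name, not part of the patch:

static bool demo_pool_id_in_use(int pool_id)
{
	struct worker_pool *pool;

	rcu_read_lock();
	pool = xa_load(&worker_pools, pool_id);	/* NULL if the ID is unallocated */
	rcu_read_unlock();

	return pool != NULL;
}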
/**
		return;
	/* release id and unhash */
-	if (pool->id >= 0)
-		idr_remove(&worker_pool_idr, pool->id);
+	xa_erase(&worker_pools, pool->id);
	hash_del(&pool->hash_node);
	/*
	struct workqueue_struct *wq;
	struct worker_pool *pool;
	unsigned long flags;
-	int pi;
+	unsigned long pi;
	rcu_read_lock();
{
	struct worker_pool *pool;
	struct workqueue_struct *wq;
-	int pi;
+	unsigned long pi;
	mutex_lock(&wq_pool_mutex);
	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
	bool lockup_detected = false;
	struct worker_pool *pool;
-	int pi;
+	unsigned long pi;
	if (!thresh)
		return;
*
* This is the first half of two-staged workqueue subsystem initialization
* and invoked as soon as the bare basics - memory allocation, cpumasks and
- * idr are up. It sets up all the data structures and system workqueues
+ * xarray are up. It sets up all the data structures and system workqueues
* and allows early boot code to create workqueues and queue/cancel work
* items. Actual work item execution starts only after kthreads can be
* created and scheduled right before early initcalls.