*/
#define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
-static DEFINE_IDR(shrinker_idr);
+static DEFINE_XARRAY_ALLOC(memcg_shrinkers);
static int shrinker_nr_max;
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
- int id, ret = -ENOMEM;
+ int ret;
down_write(&shrinker_rwsem);
/* This may call shrinker, so it must use down_read_trylock() */
- id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
- if (id < 0)
+ ret = xa_alloc(&memcg_shrinkers, &shrinker->id, SHRINKER_REGISTERING,
+ xa_limit_31b, GFP_KERNEL);
+ if (ret < 0)
goto unlock;
- if (id >= shrinker_nr_max) {
- if (memcg_expand_shrinker_maps(id)) {
- idr_remove(&shrinker_idr, id);
+ if (shrinker->id >= shrinker_nr_max) {
+ if (memcg_expand_shrinker_maps(shrinker->id)) {
+ xa_erase(&memcg_shrinkers, shrinker->id);
+ ret = -ENOMEM;
goto unlock;
}
- shrinker_nr_max = id + 1;
+ shrinker_nr_max = shrinker->id + 1;
}
- shrinker->id = id;
- ret = 0;
unlock:
up_write(&shrinker_rwsem);
return ret;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
	int id = shrinker->id;

	BUG_ON(id < 0);

- down_write(&shrinker_rwsem);
- idr_remove(&shrinker_idr, id);
- up_write(&shrinker_rwsem);
+ xa_erase(&memcg_shrinkers, id);
}
#else /* CONFIG_MEMCG_KMEM */
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	return 0;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
}
#endif /* CONFIG_MEMCG_KMEM */

void register_shrinker_prepared(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
#ifdef CONFIG_MEMCG_KMEM
if (shrinker->flags & SHRINKER_MEMCG_AWARE)
- idr_replace(&shrinker_idr, shrinker, shrinker->id);
+ xa_store(&memcg_shrinkers, shrinker->id, shrinker, GFP_KERNEL);
#endif
up_write(&shrinker_rwsem);
}
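/*
 * Illustrative sketch, not part of the patch: the allocate-placeholder /
 * publish / lookup / erase lifecycle the conversion above relies on,
 * written against the stock <linux/xarray.h> API.  The names example_objs,
 * struct example_obj, EXAMPLE_REGISTERING and the example_* helpers are
 * hypothetical; only DEFINE_XARRAY_ALLOC(), xa_alloc(), xa_store(),
 * xa_load(), xa_erase() and xa_limit_31b are real kernel interfaces.
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/xarray.h>

struct example_obj {
	u32 id;		/* xa_alloc() writes the allocated index here */
};

#define EXAMPLE_REGISTERING ((struct example_obj *)~0UL)

static DEFINE_XARRAY_ALLOC(example_objs);

/* Reserve an index, storing a placeholder until the object is ready. */
static int example_prealloc(struct example_obj *obj)
{
	return xa_alloc(&example_objs, &obj->id, EXAMPLE_REGISTERING,
			xa_limit_31b, GFP_KERNEL);
}

/* Publish the real pointer; lookups may now return the object. */
static void example_publish(struct example_obj *obj)
{
	xa_store(&example_objs, obj->id, obj, GFP_KERNEL);
}

/* Lookups skip indexes that are absent or still being registered. */
static struct example_obj *example_lookup(unsigned long id)
{
	struct example_obj *obj = xa_load(&example_objs, id);

	if (!obj || obj == EXAMPLE_REGISTERING)
		return NULL;
	return obj;
}

/* The XArray takes its own lock, so no external rwsem is needed here. */
static void example_remove(struct example_obj *obj)
{
	xa_erase(&example_objs, obj->id);
}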
	for_each_set_bit(i, map->map, shrinker_nr_max) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};
		struct shrinker *shrinker;

- shrinker = idr_find(&shrinker_idr, i);
+ shrinker = xa_load(&memcg_shrinkers, i);
if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
if (!shrinker)
clear_bit(i, map->map);