*/
struct kmem_cache *radix_tree_node_cachep;
-/*
- * The radix tree is variable-height, so an insert operation not only has
- * to build the branch to its corresponding item, it also has to build the
- * branch to existing items if the size has to be increased (by
- * radix_tree_extend).
- *
- * The worst case is a zero height tree with just a single item at index 0,
- * and then inserting an item at index ULONG_MAX. This requires 2 new branches
- * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
- * Hence:
- */
-#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
-
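/*
 * Illustrative sketch (editor's addition, not part of the patch): concrete
 * numbers for the sizing above, assuming a 64-bit build with
 * RADIX_TREE_MAP_SHIFT == 6, i.e. 64 slots per node.  The SKETCH_* names
 * are hypothetical and exist only to make the arithmetic visible.
 */
enum {
	SKETCH_MAP_SHIFT	= 6,	/* assumed: 6 index bits per level */
	SKETCH_INDEX_BITS	= 64,	/* assumed: bits in unsigned long */
	/* DIV_ROUND_UP(64, 6) == 11 levels from the root down to index ULONG_MAX */
	SKETCH_MAX_PATH		= (SKETCH_INDEX_BITS + SKETCH_MAP_SHIFT - 1) /
				  SKETCH_MAP_SHIFT,
	/* two full branches sharing only the root node: 11 * 2 - 1 == 21 nodes */
	SKETCH_PRELOAD_SIZE	= SKETCH_MAX_PATH * 2 - 1,
};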
-/*
- * The IDR does not have to be as high as the radix tree since it uses
- * signed integers, not unsigned longs.
- */
-#define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1)
-#define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \
- RADIX_TREE_MAP_SHIFT))
-#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
-
-/*
- * The IDA is even shorter since it uses a bitmap at the last level.
- */
-#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
-#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \
- RADIX_TREE_MAP_SHIFT))
-#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
-
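/*
 * Illustrative sketch (editor's addition, not part of the patch): the same
 * arithmetic for the IDR and IDA pools, again assuming a shift of 6 and
 * IDA_BITMAP_BITS == 1024 (a 128-byte bitmap per leaf).  The SKETCH_IDR_*
 * and SKETCH_IDA_* names are hypothetical.
 */
enum {
	SKETCH_IDR_INDEX_BITS	= 31,			/* non-negative ints only */
	SKETCH_IDR_MAX_PATH	= (31 + 5) / 6,		/* DIV_ROUND_UP(31, 6) == 6 */
	SKETCH_IDR_PRELOAD	= SKETCH_IDR_MAX_PATH * 2 - 1,	/* 11 nodes */

	SKETCH_IDA_INDEX_BITS	= 31 - 10,		/* ilog2(1024) == 10 */
	SKETCH_IDA_MAX_PATH	= (21 + 5) / 6,		/* DIV_ROUND_UP(21, 6) == 4 */
	SKETCH_IDA_PRELOAD	= SKETCH_IDA_MAX_PATH * 2 - 1,	/* 7 nodes */
};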
-/*
- * Per-cpu pool of preloaded nodes
- */
-struct radix_tree_preload {
- unsigned nr;
- /* nodes->parent points to next preallocated node */
- struct radix_tree_node *nodes;
-};
-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
-
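/*
 * Illustrative sketch (editor's addition, not part of the patch): the pool
 * above is a per-CPU stack of free nodes threaded through ->parent.  The
 * simplified types and functions below are hypothetical; they only show the
 * push (preloading) and pop (node allocation) operations on that stack.
 */
struct sketch_node {
	struct sketch_node *parent;	/* reused as the "next" link while pooled */
};

struct sketch_preload {
	unsigned nr;			/* how many nodes are stacked */
	struct sketch_node *nodes;	/* top of the stack */
};

static void sketch_push(struct sketch_preload *rtp, struct sketch_node *node)
{
	node->parent = rtp->nodes;	/* link the new node above the old top */
	rtp->nodes = node;
	rtp->nr++;
}

static struct sketch_node *sketch_pop(struct sketch_preload *rtp)
{
	struct sketch_node *node = rtp->nodes;

	if (!node)			/* pool exhausted */
		return NULL;
	rtp->nodes = node->parent;	/* unlink the top node */
	rtp->nr--;
	return node;
}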
static inline struct radix_tree_node *entry_to_node(void *ptr)
{
return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
return (index & ~node_maxindex(node)) + (offset << node->shift);
}
-/*
- * This assumes that the caller has performed appropriate preallocation, and
- * that the caller has pinned this thread of control to the current CPU.
- */
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
struct radix_tree_root *root,
unsigned int shift, unsigned int offset,
unsigned int count, unsigned int nr_values)
{
- struct radix_tree_node *ret = NULL;
-
- /*
- * Preload code isn't irq safe and it doesn't make sense to use
- * preloading during an interrupt anyway as all the allocations have
- * to be atomic. So just do normal allocation when in interrupt.
- */
- if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
- struct radix_tree_preload *rtp;
-
- /*
- * Even if the caller has preloaded, try to allocate from the
- * cache first for the new node to get accounted to the memory
- * cgroup.
- */
- ret = kmem_cache_alloc(radix_tree_node_cachep,
- gfp_mask | __GFP_NOWARN);
- if (ret)
- goto out;
+ struct radix_tree_node *ret;
- /*
- * Provided the caller has preloaded here, we will always
- * succeed in getting a node here (and never reach
- * kmem_cache_alloc)
- */
- rtp = this_cpu_ptr(&radix_tree_preloads);
- if (rtp->nr) {
- ret = rtp->nodes;
- rtp->nodes = ret->parent;
- rtp->nr--;
- }
- /*
- * Update the allocation stack trace as this is more useful
- * for debugging.
- */
- kmemleak_update_trace(ret);
- goto out;
- }
ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
-out:
BUG_ON(radix_tree_is_internal_node(ret));
if (ret) {
ret->shift = shift;
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
-/*
- * Load up this CPU's radix_tree_node buffer with sufficient objects to
- * ensure that the addition of a single element in the tree cannot fail. On
- * success, return zero, with preemption disabled. On error, return -ENOMEM
- * with preemption not disabled.
- *
- * To make use of this facility, the radix tree must be initialised without
- * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
- */
-static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
-{
- struct radix_tree_preload *rtp;
- struct radix_tree_node *node;
- int ret = -ENOMEM;
-
- /*
- * Nodes preloaded by one cgroup can be used by another cgroup, so
- * they should never be accounted to any particular memory cgroup.
- */
- gfp_mask &= ~__GFP_ACCOUNT;
-
- preempt_disable();
- rtp = this_cpu_ptr(&radix_tree_preloads);
- while (rtp->nr < nr) {
- preempt_enable();
- node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
- if (node == NULL)
- goto out;
- preempt_disable();
- rtp = this_cpu_ptr(&radix_tree_preloads);
- if (rtp->nr < nr) {
- node->parent = rtp->nodes;
- rtp->nodes = node;
- rtp->nr++;
- } else {
- kmem_cache_free(radix_tree_node_cachep, node);
- }
- }
- ret = 0;
-out:
- return ret;
-}
-
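/*
 * Illustrative sketch (editor's addition, not part of the patch): the
 * caller-side pattern that the function above exists to serve, using the
 * public radix_tree_preload()/radix_tree_preload_end() wrappers.
 * radix_tree_preload() returns with preemption disabled, so the insert runs
 * pinned to the CPU whose pool was filled.  The lock and function names are
 * hypothetical.
 */
static DEFINE_SPINLOCK(sketch_tree_lock);

static int sketch_insert(struct radix_tree_root *root, unsigned long index,
			 void *item)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* may sleep while filling the pool */
	if (err)
		return err;

	spin_lock(&sketch_tree_lock);
	/* cannot fail with -ENOMEM while the preload is held */
	err = radix_tree_insert(root, index, item);
	spin_unlock(&sketch_tree_lock);

	radix_tree_preload_end();		/* re-enables preemption */
	return err;
}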
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex)
{
}
EXPORT_SYMBOL(radix_tree_tagged);
-/**
- * idr_preload - preload for idr_alloc()
- * @gfp_mask: allocation mask to use for preloading
- *
- * Preallocate memory to use for the next call to idr_alloc(). This function
- * returns with preemption disabled. It will be enabled by idr_preload_end().
- */
-void idr_preload(gfp_t gfp_mask)
-{
- if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
- preempt_disable();
-}
-EXPORT_SYMBOL(idr_preload);
-
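/*
 * Illustrative sketch (editor's addition, not part of the patch): how
 * idr_preload() pairs with idr_alloc() under a spinlock.  Because
 * idr_preload() returns with preemption disabled, GFP_NOWAIT is enough for
 * the allocation itself; idr_preload_end() re-enables preemption.  The idr,
 * lock and function names are hypothetical.
 */
static DEFINE_IDR(sketch_idr);
static DEFINE_SPINLOCK(sketch_idr_lock);

static int sketch_assign_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);		/* sleepable preallocation */
	spin_lock(&sketch_idr_lock);
	/* end == 0 means no upper bound on the allocated ID */
	id = idr_alloc(&sketch_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&sketch_idr_lock);
	idr_preload_end();			/* re-enables preemption */

	return id;				/* new ID, or a negative errno */
}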
void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max)
INIT_LIST_HEAD(&node->private_list);
}
-static int radix_tree_cpu_dead(unsigned int cpu)
-{
- struct radix_tree_preload *rtp;
- struct radix_tree_node *node;
-
- /* Free per-cpu pool of preloaded nodes */
- rtp = &per_cpu(radix_tree_preloads, cpu);
- while (rtp->nr) {
- node = rtp->nodes;
- rtp->nodes = node->parent;
- kmem_cache_free(radix_tree_node_cachep, node);
- rtp->nr--;
- }
- return 0;
-}
-
void __init radix_tree_init(void)
{
- int ret;
-
BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
sizeof(struct radix_tree_node), 0,
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
radix_tree_node_ctor);
- ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
- NULL, radix_tree_cpu_dead);
- WARN_ON(ret < 0);
}