From 359334bdac2edd90105fb33b759c41836b1fb335 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox <willy@infradead.org>
Date: Fri, 15 Feb 2019 08:16:26 -0500
Subject: [PATCH] Remove idr_preload

With all users gone, we can delete the implementation and various
pieces of infrastructure that it used.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 Documentation/core-api/idr.rst        |   6 -
 include/linux/idr.h                   |  13 ---
 lib/radix-tree.c                      | 161 +-------------------------
 tools/testing/radix-tree/idr-test.c   |  24 ----
 tools/testing/radix-tree/main.c       |   3 -
 tools/testing/radix-tree/multiorder.c |   2 -
 tools/testing/radix-tree/test.h       |   8 --
 tools/testing/radix-tree/xarray.c     |   1 -
 8 files changed, 1 insertion(+), 217 deletions(-)

diff --git a/Documentation/core-api/idr.rst b/Documentation/core-api/idr.rst
index 00ce8f24a6ba..e81e48909e31 100644
--- a/Documentation/core-api/idr.rst
+++ b/Documentation/core-api/idr.rst
@@ -42,12 +42,6 @@ to do it.
 You can use :c:func:`idr_is_empty` to find out whether there are any
 IDs currently allocated.
 
-If you need to take a lock while allocating a new ID from the IDR,
-you may need to pass a restrictive set of GFP flags, which can lead
-to the IDR being unable to allocate memory.  To work around this,
-you can call :c:func:`idr_preload` before taking the lock, and then
-:c:func:`idr_preload_end` after the allocation.
-
 .. kernel-doc:: include/linux/idr.h
    :doc: idr sync
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 90b0213c1c0d..e805371dd1b2 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -81,8 +81,6 @@ struct idr {
 #define idr_unlock_irqrestore(idr, flags) \
 				xa_unlock_irqrestore(&(idr)->idr_rt, flags)
 
-void idr_preload(gfp_t gfp_mask);
-
 int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
 void *idr_remove(struct idr *, unsigned long id);
 void *idr_find(const struct idr *, unsigned long id);
@@ -129,17 +127,6 @@ static inline bool idr_is_empty(const struct idr *idr)
 		radix_tree_tagged(&idr->idr_rt, IDR_FREE);
 }
 
-/**
- * idr_preload_end - end preload section started with idr_preload()
- *
- * Each idr_preload() should be matched with an invocation of this
- * function.  See idr_preload() for details.
- */
-static inline void idr_preload_end(void)
-{
-	preempt_enable();
-}
-
 /**
  * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
  * @idr: IDR handle.
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 9d3ba80206f7..4a7369612db4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -33,46 +33,6 @@
  */
 struct kmem_cache *radix_tree_node_cachep;
 
-/*
- * The radix tree is variable-height, so an insert operation not only has
- * to build the branch to its corresponding item, it also has to build the
- * branch to existing items if the size has to be increased (by
- * radix_tree_extend).
- *
- * The worst case is a zero height tree with just a single item at index 0,
- * and then inserting an item at index ULONG_MAX. This requires 2 new branches
- * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
- * Hence:
- */
-#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
-
-/*
- * The IDR does not have to be as high as the radix tree since it uses
- * signed integers, not unsigned longs.
- */
-#define IDR_INDEX_BITS		(8 /* CHAR_BIT */ * sizeof(int) - 1)
-#define IDR_MAX_PATH		(DIV_ROUND_UP(IDR_INDEX_BITS, \
-						RADIX_TREE_MAP_SHIFT))
-#define IDR_PRELOAD_SIZE	(IDR_MAX_PATH * 2 - 1)
-
-/*
- * The IDA is even shorter since it uses a bitmap at the last level.
- */
-#define IDA_INDEX_BITS		(8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
-#define IDA_MAX_PATH		(DIV_ROUND_UP(IDA_INDEX_BITS, \
-						RADIX_TREE_MAP_SHIFT))
-#define IDA_PRELOAD_SIZE	(IDA_MAX_PATH * 2 - 1)
-
-/*
- * Per-cpu pool of preloaded nodes
- */
-struct radix_tree_preload {
-	unsigned nr;
-	/* nodes->parent points to next preallocated node */
-	struct radix_tree_node *nodes;
-};
-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
-
 static inline struct radix_tree_node *entry_to_node(void *ptr)
 {
 	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
@@ -229,56 +189,15 @@ static unsigned long next_index(unsigned long index,
 	return (index & ~node_maxindex(node)) + (offset << node->shift);
 }
 
-/*
- * This assumes that the caller has performed appropriate preallocation, and
- * that the caller has pinned this thread of control to the current CPU.
- */
 static struct radix_tree_node *
 radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
 			struct radix_tree_root *root,
 			unsigned int shift, unsigned int offset,
 			unsigned int count, unsigned int nr_values)
 {
-	struct radix_tree_node *ret = NULL;
-
-	/*
-	 * Preload code isn't irq safe and it doesn't make sense to use
-	 * preloading during an interrupt anyway as all the allocations have
-	 * to be atomic. So just do normal allocation when in interrupt.
-	 */
-	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
-		struct radix_tree_preload *rtp;
-
-		/*
-		 * Even if the caller has preloaded, try to allocate from the
-		 * cache first for the new node to get accounted to the memory
-		 * cgroup.
-		 */
-		ret = kmem_cache_alloc(radix_tree_node_cachep,
-				       gfp_mask | __GFP_NOWARN);
-		if (ret)
-			goto out;
+	struct radix_tree_node *ret;
 
-		/*
-		 * Provided the caller has preloaded here, we will always
-		 * succeed in getting a node here (and never reach
-		 * kmem_cache_alloc)
-		 */
-		rtp = this_cpu_ptr(&radix_tree_preloads);
-		if (rtp->nr) {
-			ret = rtp->nodes;
-			rtp->nodes = ret->parent;
-			rtp->nr--;
-		}
-		/*
-		 * Update the allocation stack trace as this is more useful
-		 * for debugging.
-		 */
-		kmemleak_update_trace(ret);
-		goto out;
-	}
 	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
-out:
 	BUG_ON(radix_tree_is_internal_node(ret));
 	if (ret) {
 		ret->shift = shift;
@@ -314,49 +233,6 @@ radix_tree_node_free(struct radix_tree_node *node)
 	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
 }
 
-/*
- * Load up this CPU's radix_tree_node buffer with sufficient objects to
- * ensure that the addition of a single element in the tree cannot fail.  On
- * success, return zero, with preemption disabled.  On error, return -ENOMEM
- * with preemption not disabled.
- *
- * To make use of this facility, the radix tree must be initialised without
- * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
- */
-static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
-{
-	struct radix_tree_preload *rtp;
-	struct radix_tree_node *node;
-	int ret = -ENOMEM;
-
-	/*
-	 * Nodes preloaded by one cgroup can be used by another cgroup, so
-	 * they should never be accounted to any particular memory cgroup.
-	 */
-	gfp_mask &= ~__GFP_ACCOUNT;
-
-	preempt_disable();
-	rtp = this_cpu_ptr(&radix_tree_preloads);
-	while (rtp->nr < nr) {
-		preempt_enable();
-		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
-		if (node == NULL)
-			goto out;
-		preempt_disable();
-		rtp = this_cpu_ptr(&radix_tree_preloads);
-		if (rtp->nr < nr) {
-			node->parent = rtp->nodes;
-			rtp->nodes = node;
-			rtp->nr++;
-		} else {
-			kmem_cache_free(radix_tree_node_cachep, node);
-		}
-	}
-	ret = 0;
-out:
-	return ret;
-}
-
 static unsigned radix_tree_load_root(const struct radix_tree_root *root,
 		struct radix_tree_node **nodep, unsigned long *maxindex)
 {
@@ -986,20 +862,6 @@ int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
 }
 EXPORT_SYMBOL(radix_tree_tagged);
 
-/**
- * idr_preload - preload for idr_alloc()
- * @gfp_mask: allocation mask to use for preloading
- *
- * Preallocate memory to use for the next call to idr_alloc().  This function
- * returns with preemption disabled.  It will be enabled by idr_preload_end().
- */
-void idr_preload(gfp_t gfp_mask)
-{
-	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
-		preempt_disable();
-}
-EXPORT_SYMBOL(idr_preload);
-
 void __rcu **idr_get_free(struct radix_tree_root *root,
 			      struct radix_tree_iter *iter, gfp_t gfp,
 			      unsigned long max)
@@ -1101,26 +963,8 @@ radix_tree_node_ctor(void *arg)
 	INIT_LIST_HEAD(&node->private_list);
 }
 
-static int radix_tree_cpu_dead(unsigned int cpu)
-{
-	struct radix_tree_preload *rtp;
-	struct radix_tree_node *node;
-
-	/* Free per-cpu pool of preloaded nodes */
-	rtp = &per_cpu(radix_tree_preloads, cpu);
-	while (rtp->nr) {
-		node = rtp->nodes;
-		rtp->nodes = node->parent;
-		kmem_cache_free(radix_tree_node_cachep, node);
-		rtp->nr--;
-	}
-	return 0;
-}
-
 void __init radix_tree_init(void)
 {
-	int ret;
-
 	BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
 	BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
 	BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
@@ -1128,7 +972,4 @@ void __init radix_tree_init(void)
 			sizeof(struct radix_tree_node), 0,
 			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
 			radix_tree_node_ctor);
-	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
-					NULL, radix_tree_cpu_dead);
-	WARN_ON(ret < 0);
 }
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index a5ab6b54d9d9..ce97e50e8f0f 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -83,24 +83,6 @@ void idr_null_test(void)
 	assert(idr_is_empty(&idr));
 }
 
-void idr_nowait_test(void)
-{
-	unsigned int i;
-	DEFINE_IDR(idr);
-
-	idr_preload(GFP_KERNEL);
-
-	for (i = 0; i < 3; i++) {
-		struct item *item = item_create(i, 0);
-		assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
-	}
-
-	idr_preload_end();
-
-	idr_for_each(&idr, item_idr_free, &idr);
-	idr_destroy(&idr);
-}
-
 void idr_get_next_test(int base)
 {
 	unsigned long i;
@@ -274,7 +256,6 @@ void idr_checks(void)
 	idr_destroy(&idr);
 
 	idr_null_test();
-	idr_nowait_test();
 	idr_get_next_test(0);
 	idr_get_next_test(1);
 	idr_get_next_test(4);
@@ -379,14 +360,10 @@ void ida_simple_get_remove_test(void)
 
 void user_ida_checks(void)
 {
-	radix_tree_cpu_dead(1);
-
 	ida_check_nomem();
 	ida_check_conv_user();
 	ida_check_random();
 	ida_simple_get_remove_test();
-
-	radix_tree_cpu_dead(1);
 }
 
 static void *ida_random_fn(void *arg)
@@ -425,7 +402,6 @@ int __weak main(void)
 	radix_tree_init();
 	idr_checks();
 	ida_tests();
-	radix_tree_cpu_dead(1);
 	rcu_barrier();
 	if (nr_allocated)
 		printf("nr_allocated = %d\n", nr_allocated);
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
index 55495e5b0fdc..68f99311ea18 100644
--- a/tools/testing/radix-tree/main.c
+++ b/tools/testing/radix-tree/main.c
@@ -312,9 +312,6 @@ int main(int argc, char **argv)
 	iteration_test(7, 10 + 90 * long_run);
 	single_thread_tests(long_run);
 
-	/* Free any remaining preallocated nodes */
-	radix_tree_cpu_dead(0);
-
 	benchmark();
 
 	rcu_barrier();
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 88b1761490b0..e602c8f8c49d 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -218,8 +218,6 @@ void multiorder_checks(void)
 	multiorder_iteration(&array);
 	multiorder_tagged_iteration(&array);
 	multiorder_iteration_race(&array);
-
-	radix_tree_cpu_dead(0);
 }
 
 int __weak main(void)
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
index 1ef9db77bfa0..700ed02eb2df 100644
--- a/tools/testing/radix-tree/test.h
+++ b/tools/testing/radix-tree/test.h
@@ -42,11 +42,3 @@ void tree_verify_min_height(struct xarray *, unsigned long maxindex);
 void verify_mark_consistency(struct xarray *, xa_mark_t mark);
 
 extern int nr_allocated;
-
-/* Normally private parts of lib/radix-tree.c */
-int radix_tree_cpu_dead(unsigned int cpu);
-struct radix_tree_preload {
-	unsigned nr;
-	struct xa_node *nodes;
-};
-extern struct radix_tree_preload radix_tree_preloads;
diff --git a/tools/testing/radix-tree/xarray.c b/tools/testing/radix-tree/xarray.c
index e61e43efe463..0ba0595ab07c 100644
--- a/tools/testing/radix-tree/xarray.c
+++ b/tools/testing/radix-tree/xarray.c
@@ -27,7 +27,6 @@ int __weak main(void)
 {
 	radix_tree_init();
 	xarray_tests();
-	radix_tree_cpu_dead(1);
 	rcu_barrier();
 	if (nr_allocated)
 		printf("nr_allocated = %d\n", nr_allocated);
-- 
2.50.1
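
Note: with idr_preload()/idr_preload_end() gone, a caller that used to
preload before taking a spinlock can instead pass a non-sleeping GFP mask
to idr_alloc() and handle allocation failure itself.  A minimal sketch of
that pattern follows; struct foo, foo_lock, foo_idr and foo_assign_id()
are hypothetical names invented for illustration, while idr_alloc() is
used with the signature declared in include/linux/idr.h above.

	#include <linux/gfp.h>
	#include <linux/idr.h>
	#include <linux/spinlock.h>

	struct foo;			/* opaque payload type for this sketch */

	static DEFINE_SPINLOCK(foo_lock);
	static DEFINE_IDR(foo_idr);

	static int foo_assign_id(struct foo *f)
	{
		int id;

		spin_lock(&foo_lock);
		/*
		 * GFP_NOWAIT: never sleep while foo_lock is held.  Without
		 * preloading there is no guarantee that memory is available,
		 * so the caller must handle a negative return.
		 */
		id = idr_alloc(&foo_idr, f, 0, 0, GFP_NOWAIT);
		spin_unlock(&foo_lock);

		return id;	/* new ID, or -ENOMEM/-ENOSPC on failure */
	}

The tradeoff is that GFP_NOWAIT offers no guarantee: where a preloaded
allocation could not fail, this one can return -ENOMEM, so callers that
cannot tolerate failure must restructure to allocate the ID before taking
their lock.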
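
For reference, the preload sizes deleted above come from the worst case
described in the removed lib/radix-tree.c comment: inserting at index
ULONG_MAX into a tree holding only index 0 builds two full-height branches
that share just the root node, hence the "* 2 - 1".  A worked check of
that arithmetic, assuming a typical 64-bit configuration where
RADIX_TREE_MAP_SHIFT is 6 and IDA_BITMAP_BITS is 1024 (both values are
configuration-dependent):

	#include <assert.h>

	/* Assumed config values -- both vary with kernel configuration. */
	enum {
		MAP_SHIFT	= 6,	/* RADIX_TREE_MAP_SHIFT */
		IDA_BMAP_LOG2	= 10,	/* ilog2(1024-bit IDA bitmap) */
	};

	/* DIV_ROUND_UP(bits, MAP_SHIFT): tree levels covering 'bits'. */
	#define MAX_PATH(bits)	(((bits) + MAP_SHIFT - 1) / MAP_SHIFT)

	/* unsigned long indices: 64 bits -> 11 levels -> 21 nodes */
	static_assert(MAX_PATH(64) * 2 - 1 == 21, "radix tree preload");
	/* signed int IDs: 31 bits -> 6 levels -> 11 nodes */
	static_assert(MAX_PATH(31) * 2 - 1 == 11, "IDR preload");
	/* IDA: 31 - 10 = 21 index bits -> 4 levels -> 7 nodes */
	static_assert(MAX_PATH(31 - IDA_BMAP_LOG2) * 2 - 1 == 7, "IDA preload");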