From 7a7ea8234d54e40b996833cd72ba1a5445e23d4d Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Tue, 11 Aug 2020 12:44:11 -0400
Subject: [PATCH] maple_tree: Add 256-byte nodes and make them the default.

Add a NODE256 define to select the node size: 128 or 256 bytes.  The
default is 256 bytes.

Signed-off-by: Liam R. Howlett
---
 include/linux/maple_tree.h |  40 ++++++---
 lib/maple_tree.c           |   6 +-
 lib/test_maple_tree.c      | 175 ++++++++++++++++++++++++++-----------
 3 files changed, 153 insertions(+), 68 deletions(-)

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 951d3c45ce26..5d38c1eb5b24 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -23,21 +23,33 @@
  *
  * Nodes in the tree point to their parent unless bit 0 is set.
  */
+#define NODE256
 #if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
-#define MAPLE_NODE_SLOTS	15	/* 128 bytes including ->parent */
-#define MAPLE_RANGE64_SLOTS	8	/* 128 bytes */
-#define MAPLE_ARANGE64_SLOTS	5	/* 120 bytes */
-#define MAPLE_RANGE32_SLOTS	10	/* 124 bytes */
-#define MAPLE_RANGE16_SLOTS	12	/* 126 bytes */
-#define MAPLE_SPARSE64_SLOTS	7	/* 120 bytes */
-#define MAPLE_SPARSE32_SLOTS	10	/* 128 bytes */
-#define MAPLE_SPARSE21_SLOTS	11	/* 128 bytes */
-#define MAPLE_SPARSE16_SLOTS	12	/* 128 bytes */
-#define MAPLE_SPARSE9_SLOTS	13	/* 127 bytes */
-#define MAPLE_SPARSE6_SLOTS	14	/* 128 bytes */
-
-#define MAPLE_TOPIARY_SLOTS	(MAPLE_NODE_SLOTS - 1)
-#define MAPLE_NODE_COALESCE	2	/* Limit on coalesce count */
+#if defined(NODE256)
+#define MAPLE_NODE_SLOTS	31	/* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS	16	/* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS	10	/* 240 bytes */
+#define MAPLE_RANGE32_SLOTS	21	/* 256 bytes */
+#define MAPLE_RANGE16_SLOTS	25	/* 256 bytes */
+#define MAPLE_SPARSE64_SLOTS	15	/* 248 bytes */
+#define MAPLE_SPARSE32_SLOTS	20	/* 248 bytes */
+#define MAPLE_SPARSE21_SLOTS	23	/* 256 bytes */
+#define MAPLE_SPARSE16_SLOTS	24	/* 248 bytes */
+#define MAPLE_SPARSE9_SLOTS	27	/* 256 bytes */
+#define MAPLE_SPARSE6_SLOTS	30	/* 256 bytes */
+#else
+#define MAPLE_NODE_SLOTS	15	/* 128 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS	8	/* 128 bytes */
+#define MAPLE_ARANGE64_SLOTS	5	/* 120 bytes */
+#define MAPLE_RANGE32_SLOTS	10	/* 124 bytes */
+#define MAPLE_RANGE16_SLOTS	12	/* 126 bytes */
+#define MAPLE_SPARSE64_SLOTS	7	/* 120 bytes */
+#define MAPLE_SPARSE32_SLOTS	10	/* 128 bytes */
+#define MAPLE_SPARSE21_SLOTS	11	/* 128 bytes */
+#define MAPLE_SPARSE16_SLOTS	12	/* 128 bytes */
+#define MAPLE_SPARSE9_SLOTS	13	/* 127 bytes */
+#define MAPLE_SPARSE6_SLOTS	14	/* 128 bytes */
+#endif // End NODE256
 #else
 /* Need to do corresponding calculations for 32-bit kernels */
 
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 4abaf15c3ee4..dc78f3ddc88f 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -96,7 +96,11 @@ unsigned char mt_min_slots[] = {
 	[maple_range_16] = MAPLE_RANGE16_SLOTS / 2,
 	[maple_range_32] = MAPLE_RANGE32_SLOTS / 2,
 	[maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
-	[maple_arange_64] = (MAPLE_ARANGE64_SLOTS) / 2,
+#if defined(NODE256)
+	[maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
+#else
+	[maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2),
+#endif
 };
 
 #define mt_min_slot_cnt(x) mt_min_slots[mte_node_type(x)]
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 1c9d8a1c5331..20ae441e7c50 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -31447,8 +31447,9 @@ static noinline void check_alloc_range(struct maple_tree *mt)
 	for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
 #if DEBUG_ALLOC_RANGE
-		pr_debug("\tGet empty %lu-%lu size %lu\n", min >> 12,
-				holes[i+1] >> 12, holes[i+2] >> 12);
+		pr_debug("\tGet empty %lu-%lu size %lu (%lx-%lx)\n", min >> 12,
+				holes[i+1] >> 12, holes[i+2] >> 12,
+				min, holes[i+1]);
 #endif
 		MT_BUG_ON(mt, mas_get_empty_area(&mas, min >> 12,
 					holes[i+1] >> 12,
@@ -31459,11 +31460,10 @@ static noinline void check_alloc_range(struct maple_tree *mt)
 	}
 	for (i = 0; i < req_range_cnt; i += 5) {
 #if DEBUG_ALLOC_RANGE
-		pr_debug("\tTest %d: %lu-%lu size %lu expected %lu\n", i/5,
-				req_range[i] >> 12,
-				req_range[i + 1] >> 12,
-				req_range[i + 2] >> 12,
-				req_range[i + 3] >> 12);
+		pr_debug("\tTest %d: %lu-%lu size %lu expected %lu (%lu-%lu)\n",
+				i/5, req_range[i] >> 12, req_range[i + 1] >> 12,
+				req_range[i + 2] >> 12, req_range[i + 3] >> 12,
+				req_range[i], req_range[i+1]);
 #endif
 		check_mtree_alloc_range(mt,
 				req_range[i] >> 12, // start
@@ -31473,6 +31473,9 @@ static noinline void check_alloc_range(struct maple_tree *mt)
 				req_range[i+4], // expected return
 				xa_mk_value(req_range[i] >> 12)); // pointer
 		mt_validate(mt);
+#if DEBUG_ALLOC_RANGE
+		mt_dump(mt);
+#endif
 	}
 
 	mtree_destroy(mt);
@@ -31771,7 +31774,57 @@ static noinline void check_gap_combining(struct maple_tree *mt)
 {
 	struct maple_enode *mn1, *mn2;
 	void *entry;
-	unsigned long index = 82;
+
+#if defined(NODE256)
+	unsigned long seq100[] = {
+		/* 0-5 */
+		78, 79, 80,
+		50, 100, 2,
+
+		/* 6-12 */
+		44, 45, 46, 43,
+		20, 50, 3,
+
+		/* 13-20 */
+		80, 81, 82,
+		76, 2, 79, 85, 4,
+	};
+	unsigned long seq2000[] = {
+		1152, 1151,
+		1100, 1200, 2,
+	};
+	unsigned long seq400[] = {
+		286, 318,
+		256, 260, 266, 270, 275, 280, 290, 398,
+		286, 310,
+	};
+#else
+	// Seq test 100 data
+	unsigned long seq100[] = {
+		82, 83, 84,
+		50, 100, 2,
+
+		38, 39, 40, 37,
+		20, 50, 3,
+
+		79, 80, 81,
+		76, 2, 78, 85, 4,
+	};
+	// seq test 2000 data
+	unsigned long seq2000[] = {
+		1792, 1791,
+		1700, 1800, 2,
+	};
+	// seq test 400 data
+	unsigned long seq400[] = {
+		376, 391,
+		352, 356, 360, 364, 378, 384, 388, 398,
+		374, 375,
+	};
+#endif
+
+	unsigned long index = seq100[0];
 
 	MA_STATE(mas, mt, index, index);
 
@@ -31779,10 +31832,10 @@ static noinline void check_gap_combining(struct maple_tree *mt)
 	check_seq(mt, 100, false); // create 100 singletons.
 
 	mt_set_non_kernel(1);
-	mtree_test_erase(mt, 83);
-	check_load(mt, 83, NULL);
-	mtree_test_erase(mt, 84);
-	check_load(mt, 84, NULL);
+	mtree_test_erase(mt, seq100[2]);
+	check_load(mt, seq100[2], NULL);
+	mtree_test_erase(mt, seq100[1]);
+	check_load(mt, seq100[1], NULL);
 
 	rcu_read_lock();
 	entry = mas_find(&mas, ULONG_MAX);
@@ -31794,21 +31847,22 @@ static noinline void check_gap_combining(struct maple_tree *mt)
 	mn2 = mas.node;
 	MT_BUG_ON(mt, mn1 == mn2); // test the test.
 
-	/* At this point, there is a gap of 2 at index + 1 between 50-100.
-	 * Search for the gap.
+	/* At this point, there is a gap of 2 at index + 1 between seq100[3] and
+	 * seq100[4].  Search for the gap.
 	 */
 	mt_set_non_kernel(1);
 	mas_reset(&mas);
-	MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 50, 100, 2));
+	MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, seq100[3], seq100[4],
+				seq100[5]));
 	MT_BUG_ON(mt, mas.index != index + 1);
 	rcu_read_unlock();
 
-	mtree_test_erase(mt, 38);
-	check_load(mt, 38, NULL);
-	mtree_test_erase(mt, 39);
-	check_load(mt, 39, NULL);
-	mtree_test_erase(mt, 40);
-	index = 37;
+	mtree_test_erase(mt, seq100[6]);
+	check_load(mt, seq100[6], NULL);
+	mtree_test_erase(mt, seq100[7]);
+	check_load(mt, seq100[7], NULL);
+	mtree_test_erase(mt, seq100[8]);
+	index = seq100[9];
 
 	rcu_read_lock();
 	mas.index = index;
@@ -31819,79 +31873,94 @@ static noinline void check_gap_combining(struct maple_tree *mt)
 	mn1 = mas.node;
 	entry = mas_next(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, entry != xa_mk_value(index + 4));
-	mas_next(&mas, ULONG_MAX); // go to the next node.
+	mas_next(&mas, ULONG_MAX); // go to the next entry.
 	mn2 = mas.node;
-	MT_BUG_ON(mt, mn1 == mn2);
+	MT_BUG_ON(mt, mn1 == mn2); // test the next entry is in the next node.
 
-	/* At this point, there is a gap of 3 at 38.  Find it by searching 20 -
+	/* At this point, there is a gap of 3 at seq100[6].  Find it by searching 20 -
 	 * 50 for size 3.
 	 */
 	mas_reset(&mas);
-	MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 20, 50, 3));
-	MT_BUG_ON(mt, mas.index != 38);
+	MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, seq100[10], seq100[11],
+				seq100[12]));
+	MT_BUG_ON(mt, mas.index != seq100[6]);
 	rcu_read_unlock();
 
 	mt_set_non_kernel(1);
-	mtree_store(mt, 79, NULL, GFP_KERNEL);
-	check_load(mt, 79, NULL);
-	check_load(mt, 80, xa_mk_value(80));
-	mtree_store(mt, 80, NULL, GFP_KERNEL);
-	check_load(mt, 79, NULL);
-	check_load(mt, 80, NULL);
+	mtree_store(mt, seq100[13], NULL, GFP_KERNEL);
+	check_load(mt, seq100[13], NULL);
+	check_load(mt, seq100[14], xa_mk_value(seq100[14]));
+	mtree_store(mt, seq100[14], NULL, GFP_KERNEL);
+	check_load(mt, seq100[13], NULL);
+	check_load(mt, seq100[14], NULL);
 
 	mas_reset(&mas);
 	rcu_read_lock();
-	MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 76, 81, 2));
-	MT_BUG_ON(mt, mas.index != 79);
+	MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, seq100[16], seq100[15],
+				seq100[17]));
+	MT_BUG_ON(mt, mas.index != seq100[13]);
 	mt_validate(mt);
 	rcu_read_unlock();
 
-	// Test retry entry in the start of a gap.
+	// *DEPRECATED: no retries anymore* Test retry entry in the start of a gap.
 	mt_set_non_kernel(2);
-	mtree_test_store_range(mt, 78, 80, NULL);
-	mtree_test_erase(mt, 81);
+	mtree_test_store_range(mt, seq100[18], seq100[14], NULL);
+	mtree_test_erase(mt, seq100[15]);
 	mas_reset(&mas);
 	rcu_read_lock();
-	MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 76, 85, 4));
+	MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, seq100[16], seq100[19],
+				seq100[20]));
 	rcu_read_unlock();
-	MT_BUG_ON(mt, mas.index != 78);
+	MT_BUG_ON(mt, mas.index != seq100[18]);
 	mt_validate(mt);
 	mtree_destroy(mt);
 
+	/* seq 2000 tests are for multi-level tree gaps */
 	mtree_init(mt, MAPLE_ALLOC_RANGE);
 	check_seq(mt, 2000, false);
 	mt_set_non_kernel(1);
-	mtree_test_erase(mt, 1792);
-	mtree_test_erase(mt, 1791);
+	mtree_test_erase(mt, seq2000[0]);
+	mtree_test_erase(mt, seq2000[1]);
 
 	mt_set_non_kernel(2);
 	mas_reset(&mas);
 	rcu_read_lock();
-	MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 1700, 1800, 2));
-	MT_BUG_ON(mt, mas.index != 1791);
+	MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, seq2000[2], seq2000[3],
+				seq2000[4]));
+	MT_BUG_ON(mt, mas.index != seq2000[1]);
 	rcu_read_unlock();
 	mt_validate(mt);
 	mtree_destroy(mt);
 
+	/* seq 400 tests rebalancing over two levels.
+	 */
 	mt_set_non_kernel(99);
 	mtree_init(mt, MAPLE_ALLOC_RANGE);
 	check_seq(mt, 400, false);
-	mtree_test_store_range(mt, 376, 391, NULL);
+	mtree_test_store_range(mt, seq400[0], seq400[1], NULL);
 	mt_set_non_kernel(0);
 	mtree_destroy(mt);
 
 	mtree_init(mt, MAPLE_ALLOC_RANGE);
 	check_seq(mt, 400, false);
 	mt_set_non_kernel(50);
-	mtree_test_store_range(mt, 352, 398, xa_mk_value(352));
-	mtree_test_store_range(mt, 356, 398, xa_mk_value(356));
-	mtree_test_store_range(mt, 360, 398, xa_mk_value(360));
-	mtree_test_store_range(mt, 364, 398, xa_mk_value(364));
-	mtree_test_store_range(mt, 376, 398, xa_mk_value(376));
-	mtree_test_store_range(mt, 378, 398, xa_mk_value(378));
-	mtree_test_store_range(mt, 384, 398, xa_mk_value(384));
-	mtree_test_store_range(mt, 388, 398, xa_mk_value(388));
-	mtree_test_store_range(mt, 374, 375, xa_mk_value(374));
+	mtree_test_store_range(mt, seq400[2], seq400[9],
+			xa_mk_value(seq400[2]));
+	mtree_test_store_range(mt, seq400[3], seq400[9],
+			xa_mk_value(seq400[3]));
+	mtree_test_store_range(mt, seq400[4], seq400[9],
+			xa_mk_value(seq400[4]));
+	mtree_test_store_range(mt, seq400[5], seq400[9],
+			xa_mk_value(seq400[5]));
+	mtree_test_store_range(mt, seq400[0], seq400[9],
+			xa_mk_value(seq400[0]));
+	mtree_test_store_range(mt, seq400[6], seq400[9],
+			xa_mk_value(seq400[6]));
+	mtree_test_store_range(mt, seq400[7], seq400[9],
+			xa_mk_value(seq400[7]));
+	mtree_test_store_range(mt, seq400[8], seq400[9],
+			xa_mk_value(seq400[8]));
+	mtree_test_store_range(mt, seq400[10], seq400[11],
+			xa_mk_value(seq400[10]));
 	mt_set_non_kernel(0);
 	mtree_destroy(mt);
 }
-- 
2.50.1
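
The slot counts in the header hunk can be sanity-checked against the byte
counts quoted in its comments: slots and pivots are 8 bytes each on 64-bit,
and every layout starts with an 8-byte ->parent pointer, so
MAPLE_NODE_SLOTS = 31 gives 8 + 31 * 8 = 256 bytes.  Below is a minimal
userspace sketch of that arithmetic.  The struct names and field layouts are
simplified stand-ins assumed for illustration (one fewer pivot than slots per
range node, plus a per-slot gap array for the allocation range variant, which
is what the 240-byte figure implies); they are not the real definitions from
include/linux/maple_tree.h.

#include <stdio.h>

#define MAPLE_NODE_SLOTS	31	/* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS	16	/* 256 bytes */
#define MAPLE_ARANGE64_SLOTS	10	/* 240 bytes */

/* Stand-in for the dense node: a parent pointer plus one pointer per slot. */
struct fake_dense {
	void *parent;
	void *slot[MAPLE_NODE_SLOTS];
};

/* Stand-in for struct maple_range_64: n slots need n - 1 pivots. */
struct fake_range_64 {
	void *parent;
	unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
	void *slot[MAPLE_RANGE64_SLOTS];
};

/* Stand-in for struct maple_arange_64: as above, plus one gap per slot. */
struct fake_arange_64 {
	void *parent;
	unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
	void *slot[MAPLE_ARANGE64_SLOTS];
	unsigned long gap[MAPLE_ARANGE64_SLOTS];
};

int main(void)
{
	/* On an LP64 target this prints 256, 256 and 240, matching the
	 * size comments in the header hunk above. */
	printf("dense:     %zu\n", sizeof(struct fake_dense));
	printf("range_64:  %zu\n", sizeof(struct fake_range_64));
	printf("arange_64: %zu\n", sizeof(struct fake_arange_64));
	return 0;
}

The same arithmetic covers the 128-byte branch, e.g. 8 + 15 * 8 = 128 for
MAPLE_NODE_SLOTS = 15.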