From f57b393a0b29f375599d94c2d1df0b51d06bc0e5 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Wed, 12 Feb 2020 21:42:24 -0500 Subject: [PATCH] maple_tree: Fix mas_set_rev_index, _mas_get_unmapped_area, and code cleanup Signed-off-by: Liam R. Howlett --- include/linux/maple_tree.h | 35 +++++++++- lib/maple_tree.c | 136 ++++++++++++++++++++----------------- lib/test_maple_tree.c | 61 +++++++++++++++-- 3 files changed, 165 insertions(+), 67 deletions(-) diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index 26f79e1164ce..33b19cec9364 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -36,7 +36,7 @@ #define MAPLE_SPARSE9_SLOTS 13 /* 127 bytes */ #define MAPLE_SPARSE6_SLOTS 14 /* 128 bytes */ -#define MAPLE_NODE_COALESCE 2 /* If we have larger than 1 space, attempt to reallocate the node(s). */ +#define MAPLE_NODE_COALESCE 2 /* Limit on coalesce count */ #else /* Need to do corresponding calculations for 32-bit kernels */ @@ -324,7 +324,7 @@ static inline void mas_reset(struct ma_state *mas) * */ #define mas_for_each(mas, entry, max) \ - while ((entry = mas_find(mas, max)) != NULL) + while (((entry) = mas_find((mas), (max))) != NULL) bool mas_retry(struct ma_state *mas, const void *entry); @@ -362,6 +362,37 @@ static inline void mas_set(struct ma_state *mas, unsigned long index) mas_set_range(mas, index, index); } +/** + * mt_init_flags() - Initialise an empty maple tree with flags. + * @mt: Maple Tree + * @flags: maple tree flags. + * + * If you need to initialise a Maple Tree with special flags (e.g., an + * allocation tree), use this function. + * + * Context: Any context. + * + */ +static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags) +{ + spin_lock_init(&mt->ma_lock); + mt->ma_flags = flags; + mt->ma_root = NULL; +} + +/** + * mt_init() - Initialise an empty maple tree. + * @mt: Maple Tree + * + * An empty Maple Tree. + * + * Context: Any context. 
+ */ +static inline void mt_init(struct maple_tree *mt) +{ + mt_init_flags(mt, 0); +} + void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max, bool start); /** diff --git a/lib/maple_tree.c b/lib/maple_tree.c index a4a6c36e6803..20a7f0c96f9e 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -14,7 +14,7 @@ #include #include #include -#include // for task_size +//#include // for task_size #define MA_ROOT_PARENT 1 #define ma_parent_ptr(x) ((struct maple_pnode *)(x)) @@ -502,6 +502,7 @@ static inline unsigned long mas_get_safe_pivot(const struct ma_state *mas, unsigned char slot) { enum maple_type type = mte_node_type(mas->node); + return _mas_get_safe_pivot(mas, slot, type); } @@ -671,6 +672,7 @@ static inline void mas_dup_state(struct ma_state *dst, struct ma_state *src) static inline void mas_descend(struct ma_state *mas) { unsigned char slot = mas_get_slot(mas); + if (slot) mas->min = mas_get_safe_pivot(mas, slot - 1) + 1; mas->max = mas_get_safe_pivot(mas, slot); @@ -869,7 +871,6 @@ no_parent: mas->max = max; mas->min = min; mas->node = p_enode; - return; } static inline void mas_set_safe_pivot(struct ma_state *mas, unsigned char slot, @@ -935,12 +936,12 @@ static inline struct maple_node *mas_next_alloc(struct ma_state *ms) if (cnt == 1) { ms->alloc = NULL; } else if (cnt <= 16) { - cnt-=2; - smn = mn->slot[cnt]; - mn->slot[cnt] = NULL; - mn = smn; + cnt -= 2; + smn = mn->slot[cnt]; + mn->slot[cnt] = NULL; + mn = smn; } else if (cnt > 16) { - cnt-=2; + cnt -= 2; smn = mn->slot[(cnt / 15) - 1]; mn = smn->slot[(cnt % 15)]; smn->slot[cnt % 15] = NULL; @@ -1018,9 +1019,8 @@ slot_failed: mas_set_alloc_req(ms, req); list_failed: - if (req > 0) { + if (req > 0) mas_set_err(ms, -ENOMEM); - } } // Free the allocations. 
@@ -1165,14 +1165,13 @@ static inline unsigned char mas_data_end(const struct ma_state *mas, entry = _mte_get_rcu_slot(mn, slot, type); if (mt_will_coalesce(entry)) { - if (piv == prev_piv || !slot) { + if (piv == prev_piv || !slot) (*coalesce)++; - } - //counted_null = true; + } else if (entry == NULL) { - if (counted_null) { + if (counted_null) (*coalesce)++; - } + counted_null++; } else { counted_null = 0; @@ -1389,7 +1388,7 @@ static inline unsigned char mas_append_calc_split(struct ma_state *mas, } } - } else + } return half; @@ -2253,15 +2252,15 @@ static inline int __mas_add(struct ma_state *mas, void *entry, mas_set_slot(mas, data_end + 1); mas_append_entry(mas, entry); } else { - MA_STATE(cp, mas->tree, mas->index, mas->last); - mas_dup_state(&cp, mas); - unsigned char slot = mas_get_slot(mas); unsigned char end_slot = slot; unsigned long src_max = mas->max; unsigned long piv, prev_piv = mas->min - 1; void *existing_entry = NULL; + MA_STATE(cp, mas->tree, mas->index, mas->last); + mas_dup_state(&cp, mas); + if (slot) prev_piv = mte_get_pivot(mas->node, slot - 1); @@ -2363,10 +2362,11 @@ static inline int mas_spanning_add(struct ma_state *mas, void *entry, p_slot = mte_parent_slot(mas->node); do { + MA_STATE(tmp, mas->tree, mas->index, mas->last); + mas_set_slot(mas, p_slot); // for mas_next_node. mas_set_slot(&p_mas, p_slot); // for pivot changes in parent. - MA_STATE(tmp, mas->tree, mas->index, mas->last); mas_dup_state(&r_mas, mas); // point to the start node. 
while (!mas_is_none(&r_mas) && r_mas.max <= r_mas.last) { mas_dup_state(&tmp, &r_mas); @@ -3591,10 +3591,11 @@ static inline bool _mas_rev_awalk(struct ma_state *mas, unsigned long size) if (entry) goto next_slot; - this_gap = max - mas->index + 1; + this_gap = max - min + 1; if (this_gap >= size) { /* within range and large enough */ mas->max = max; + mas->min = min; found = true; break; } @@ -4404,25 +4405,36 @@ void mas_set_fwd_index(struct ma_state *mas, unsigned long size) { unsigned long min = mas->min; unsigned char slot = mas_get_slot(mas); + // At this point, mas->node points to the right node and we have a + // slot that has a sufficient gap. if (slot) min = mte_get_pivot(mas->node, slot - 1) + 1; + mas->min = min; + mas->max = mas_get_safe_pivot(mas, slot); + if (mas->index < min) mas->index = min; mas->last = mas->index + size - 1; } void mas_set_rev_index(struct ma_state *mas, unsigned long size) { - // At this point, mas->node points to the right node and we have a - // slot that has a sufficient gap. + unsigned long gap_max = mas->max; + unsigned long range_max = mas->last; // Includes subtracted size. + + // rev_awalk has set mas->min and mas->max to the gap values. // If the maximum is outside the window we are searching, then use the // last location in the search. - if (mas->max > mas->last) - mas->index = mas->last; - else - mas->index = mas->max - size + 1; - - mas->last = mas->index + size - 1; + // mas->max and mas->min is the range of the gap. + // mas->index and mas->last are currently set to the search range. + + // Trim the upper limit to the max. 
+ if (gap_max > range_max) + gap_max = range_max; + + mas->last = gap_max; + mas->index = mas->last - size + 1; + return; } static inline int _mas_get_unmapped_area(struct ma_state *mas, unsigned long min, unsigned long max, unsigned long size, bool forward) @@ -4444,7 +4456,7 @@ static inline int _mas_get_unmapped_area(struct ma_state *mas, unsigned long min // The start of the window can only be within these values. mas->index = min; - mas->last = max - size; + mas->last = max; if (forward) mas_awalk(mas, size); @@ -4909,7 +4921,23 @@ void mtree_destroy(struct maple_tree *mt) EXPORT_SYMBOL(mtree_destroy); #ifdef CONFIG_DEBUG_MAPLE_TREE +#ifndef __KERNEL__ +extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int); +void mt_set_non_kernel(unsigned int val) +{ + kmem_cache_set_non_kernel(maple_node_cache, val); +} +extern unsigned long kmem_cache_get_alloc(struct kmem_cache *); +unsigned long mt_get_alloc_size(void) +{ + return kmem_cache_get_alloc(maple_node_cache); +} +#define MA_PTR "%p" +#else +#define MA_PTR "%px" +#endif +// Tree validations void mt_dump_node(void *entry, unsigned long min, unsigned long max, unsigned int depth); void mt_dump_range(unsigned long min, unsigned long max, unsigned int depth) @@ -4928,7 +4956,7 @@ void mt_dump_entry(void *entry, unsigned long min, unsigned long max, mt_dump_range(min, max, depth); if (xa_is_value(entry)) - pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry), + pr_cont("value %ld (0x%lx) ["MA_PTR"]\n", xa_to_value(entry), xa_to_value(entry), entry); else if (xa_is_zero(entry)) pr_cont("zero (%ld)\n", xa_to_internal(entry)); @@ -4939,9 +4967,9 @@ void mt_dump_entry(void *entry, unsigned long min, unsigned long max, else if (xa_is_retry(entry)) pr_cont("retry (%ld)\n", xa_to_internal(entry)); else if (mt_is_reserved(entry)) - pr_cont("UNKNOWN ENTRY (%p)\n", entry); + pr_cont("UNKNOWN ENTRY ("MA_PTR")\n", entry); else - pr_cont("%p\n", entry); + pr_cont(""MA_PTR"\n", entry); } @@ -4955,8 +4983,8 @@ 
void mt_dump_range64(void *entry, unsigned long min, unsigned long max, pr_cont(" contents: "); for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) - pr_cont("%p %lu ", node->slot[i], node->pivot[i]); - pr_cont("%p\n", node->slot[i]); + pr_cont(""MA_PTR" %lu ", node->slot[i], node->pivot[i]); + pr_cont(""MA_PTR"\n", node->slot[i]); for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) { unsigned long last = max; @@ -4978,7 +5006,7 @@ void mt_dump_range64(void *entry, unsigned long min, unsigned long max, if (last == max) break; if (last > max) { - pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", + pr_err("node "MA_PTR" last (%lu) > max (%lu) at pivot %d!\n", node, last, max, i); break; } @@ -4998,8 +5026,8 @@ void mt_dump_arange64(void *entry, unsigned long min, unsigned long max, pr_cont("%lu ", node->gap[i]); pr_cont("| "); for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) - pr_cont("%p %lu ", node->slot[i], node->pivot[i]); - pr_cont("%p\n", node->slot[i]); + pr_cont(MA_PTR" %lu ", node->slot[i], node->pivot[i]); + pr_cont(MA_PTR"\n", node->slot[i]); for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { unsigned long last = max; @@ -5023,7 +5051,7 @@ void mt_dump_arange64(void *entry, unsigned long min, unsigned long max, if (last == max) break; if (last > max) { - pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", + pr_err("node "MA_PTR" last (%lu) > max (%lu) at pivot %d!\n", node, last, max, i); break; } @@ -5040,7 +5068,7 @@ void mt_dump_node(void *entry, unsigned long min, unsigned long max, mt_dump_range(min, max, depth); - pr_cont("node %p depth %d type %d parent %p", node, depth, type, + pr_cont("node "MA_PTR" depth %d type %d parent "MA_PTR, node, depth, type, node ? 
node->parent : NULL); switch (type) { case maple_dense: @@ -5068,27 +5096,13 @@ void mt_dump(const struct maple_tree *mt) { void *entry = mt->ma_root; - pr_info("maple_tree(%p) flags %X, root %p\n", + pr_info("maple_tree("MA_PTR") flags %X, root "MA_PTR"\n", mt, mt->ma_flags, entry); if (!xa_is_node(entry)) mt_dump_entry(entry, 0, 0, 0); else if (entry) mt_dump_node(entry, 0, mt_max[mte_node_type(entry)], 0); } -#ifndef __KERNEL__ -extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int); -void mt_set_non_kernel(unsigned int val) -{ - kmem_cache_set_non_kernel(maple_node_cache, val); -} - -extern unsigned long kmem_cache_get_alloc(struct kmem_cache *); -unsigned long mt_get_alloc_size(void) -{ - return kmem_cache_get_alloc(maple_node_cache); -} -#endif -// Tree validations /** * Calculate the maximum gap in a node and check if that's what is reported in @@ -5133,7 +5147,7 @@ void mas_validate_gaps(struct ma_state *mas) if (xa_is_retry(entry)) pr_err("retry\n"); - pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n", + pr_err(MA_PTR"[%u] -> "MA_PTR" %lu != %lu - %lu + 1\n", mas_mn(mas), i, mte_get_rcu_slot(mas->node, i), gap, p_end, p_start); @@ -5143,7 +5157,7 @@ void mas_validate_gaps(struct ma_state *mas) } } else { if (gap >= p_end - p_start + 1) { - pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n", + pr_err(MA_PTR"[%u] %lu >= %lu - %lu + 1 (%lu)\n", mas_mn(mas), i, gap, p_end, p_start, p_end - p_start + 1); MT_BUG_ON(mas->tree, @@ -5168,7 +5182,7 @@ counted: p_mn = mte_parent(mte); MT_BUG_ON(mas->tree, max_gap > mas->max); if (ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) != max_gap) - pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap); + pr_err("gap "MA_PTR"[%u] != %lu\n", p_mn, p_slot, max_gap); MT_BUG_ON(mas->tree, ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) != @@ -5195,8 +5209,8 @@ void mas_validate_parent_slot(struct ma_state *mas) MT_BUG_ON(mas->tree, ma_get_rcu_slot(parent, i, p_type) != mas->node); else if (ma_get_rcu_slot(parent, i, 
p_type) == mas->node) { - pr_err("parent contains invalid child at %p[%u] %p\n", - parent, i, mas_mn(mas)); + pr_err("parent contains invalid child at "MA_PTR"[%u] " + MA_PTR"\n", parent, i, mas_mn(mas)); MT_BUG_ON(mas->tree, ma_get_rcu_slot(parent, i, p_type) == mas->node); } @@ -5223,13 +5237,13 @@ void mas_validate_limits(struct ma_state *mas) if (!mt_will_coalesce(entry)) { if (piv < mas->min) mt_dump(mas->tree); - pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i, + pr_err(MA_PTR"[%u] %lu < %lu\n", mas_mn(mas), i, piv, mas->min); MT_BUG_ON(mas->tree, piv < mas->min); } } if (piv > mas->max) { - pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i, piv, + pr_err(MA_PTR"[%u] %lu > %lu\n", mas_mn(mas), i, piv, mas->max); MT_BUG_ON(mas->tree, piv > mas->max); } diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 0a21aee5706c..22cdcb771e28 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -9366,6 +9366,32 @@ STORE, 139726599680000, 139726599684095, ERASE, 47906195480576, 47906195480576, STORE, 94641242615808, 94641242750975, }; + unsigned long set11[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140732658499584, 140737488351231, +ERASE, 140732658499584, 140732658499584, +STORE, 140732658499584, 140732658503679, +STORE, 94029856579584, 94029856751615, +ERASE, 94029856579584, 94029856579584, +STORE, 94029856579584, 94029856595967, +STORE, 94029856595968, 94029856751615, +ERASE, 94029856595968, 94029856595968, +STORE, 94029856595968, 94029856698367, +STORE, 94029856698368, 94029856739327, +STORE, 94029856739328, 94029856751615, +STORE, 140014592573440, 140014592745471, +ERASE, 140014592573440, 140014592573440, +STORE, 140014592573440, 140014592577535, +STORE, 140014592577536, 140014592745471, +ERASE, 140014592577536, 140014592577536, +STORE, 140014592577536, 140014592700415, +STORE, 140014592700416, 140014592733183, +STORE, 140014592733184, 140014592741375, +STORE, 140014592741376, 140014592745471, +STORE, 140732658565120, 140732658569215, 
+STORE, 140732658552832, 140732658565119, + }; + mt_set_non_kernel(3); check_erase2_testset(mt, set, ARRAY_SIZE(set)); @@ -9424,6 +9450,14 @@ STORE, 94641242615808, 94641242750975, check_erase2_testset(mt, set10, ARRAY_SIZE(set10)); rcu_barrier(); mtree_destroy(mt); + + mas_reset(&mas); + mtree_init(mt, MAPLE_ALLOC_RANGE); + check_erase2_testset(mt, set11, ARRAY_SIZE(set11)); + rcu_barrier(); + mas_get_unmapped_area_rev(&mas, 12288, 140014592737280, 0x2000); + MT_BUG_ON(mt, mas.index != 140014592565248); + mtree_destroy(mt); } static noinline void check_alloc_rev_range(struct maple_tree *mt) @@ -9529,28 +9563,43 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) for (i = 0; i < range_cnt; i += 2) { /* Inclusive, Inclusive (with the -1) */ - pr_debug("\tInsert %lu-%lu\n", range[i] >> 12, + /* + pr_debug("\t%s: Insert %lu-%lu\n", __func__, range[i] >> 12, (range[i + 1] >> 12) - 1); + */ check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1, xa_mk_value(range[i] >> 12), 0); mt_validate(mt); } + //printk("Done insert\n"); for (i = 0; i < ARRAY_SIZE(holes); i += 3) { + /* + pr_debug("Search from %lu-%lu for gap %lu should be at %lu\n", + min, holes[i+1]>>12, holes[i+2]>>12, + holes[i] >> 12); + */ MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, min, holes[i+1] >> 12, holes[i+2] >> 12)); - MT_BUG_ON(mt, mas.index != holes[i] >> 12); +// printk("Found %lu %lu\n", mas.index, mas.last); +// printk("gap %lu %lu\n", (holes[i] >> 12), +// (holes[i+1] >> 12)); + MT_BUG_ON(mt, mas.last + 1 != (holes[i+1] >> 12)); + MT_BUG_ON(mt, mas.index != (holes[i+1] >> 12) - (holes[i+2] >> 12)); min = holes[i+1] >> 12; mas_reset(&mas); } for (i = 0; i < req_range_cnt; i += 5) { - pr_debug("\tRequest between %lu-%lu size %lu\n", + /* + pr_debug("\tRequest between %lu-%lu size %lu, should get %lu\n", req_range[i] >> 12, (req_range[i + 1] >> 12) - 1, - req_range[i+2] >> 12); + req_range[i+2] >> 12, + req_range[i+3] >> 12); + */ check_mtree_alloc_rrange(mt, req_range[i] 
>> 12, // start req_range[i+1] >> 12, // end @@ -9664,17 +9713,21 @@ static noinline void check_alloc_range(struct maple_tree *mt) int req_range_cnt = ARRAY_SIZE(req_range); for (i = 0; i < range_cnt; i += 2) { + /* pr_debug("\tInsert %lu-%lu\n", range[i] >> 12, (range[i + 1] >> 12) - 1); + */ check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1, xa_mk_value(range[i] >> 12), 0); mt_validate(mt); } + //printk("Checking stuff now\n\n"); MA_STATE(mas, mt, 0, 0); unsigned long min = 0x565234af2000; for (i = 0; i < ARRAY_SIZE(holes); i+= 3) { +// printk("\t check gap %i\n", i/3); MT_BUG_ON(mt, mas_get_unmapped_area(&mas, min >> 12, holes[i+1] >> 12, holes[i+2] >> 12)); -- 2.50.1