From 532a1fa4a6a90eb00a546493f4cb9daebc438bb6 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 12 Dec 2019 11:18:29 -0500 Subject: [PATCH] maple_tree: Rewrite mas_data_end to return slot not occupied count. Rewrite mas_data_end to return slot, not occupied count and fix coalesce calculation for a run of NULLs, etc. Move MT_BUG_ON for use in verification functions. Add mas_mn for easier mas->node enode to node conversion. Drop __mte_get_rcu_slot as the dead_node detect doesn't work right. Make sure hard_data doesn't return a negative. Fix ma_state on calls to mas_update_gaps. Fix end pivots on the right side of a split. Fix number of slots being occupied calculation when _mas_add is using the last slot and the new range doesn't run to mas->max. Fix mas_rebalance when right node is empty and fix mas_data_end uses. Rewrite mas_coalesce for new mas_data_end uses. Fix mt_find overflow of index when there is a value at ULONG_MAX. Add a testcase for mt_find overflow. Fix mas_rev_awalk to check for errors prior to trying to walk. Signed-off-by: Liam R. 
Howlett --- include/linux/maple_tree.h | 29 ++- lib/maple_tree.c | 405 +++++++++++++++++++++---------------- lib/test_maple_tree.c | 173 ++++++++++------ 3 files changed, 373 insertions(+), 234 deletions(-) diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index d78f4df58b2d..522819689e38 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -395,6 +395,33 @@ void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max, */ #define mt_for_each(tree, entry, index, max) \ for (entry = mt_find(tree, &index, max, true); \ - entry; entry = mt_find(tree, &index, max, false)) + entry; entry = mt_find(tree, &index, max, false)) + + +#ifdef CONFIG_DEBUG_MAPLE_TREE + +static unsigned int tests_run; +static unsigned int tests_passed; + +#ifndef MAPLE_TREE_DEBUG +# ifdef __KERNEL__ +void mt_dump(const struct maple_tree *mt) { } +# endif +#undef MT_BUG_ON +#define MT_BUG_ON(tree, x) do { \ + tests_run++; \ + if (x) { \ + pr_info("BUG at %s:%d (%u)\n", \ + __func__, __LINE__, x); \ + mt_dump(tree); \ + pr_info("Pass: %u Run:%u\n", tests_passed, tests_run); \ + dump_stack(); \ + } else { \ + tests_passed++; \ + } \ +} while (0) +#endif + +#endif /* CONFIG_DEBUG_MAPLE_TREE */ #endif /*_LINUX_MAPLE_TREE_H */ diff --git a/lib/maple_tree.c b/lib/maple_tree.c index ab799b36e629..67b23aa78745 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -6,6 +6,8 @@ * Matthew Wilcox */ +#define CONFIG_DEBUG_MAPLE_TREE + #include #include #include @@ -205,6 +207,10 @@ static inline struct maple_node *mte_to_node(const struct maple_enode *entry) { return (struct maple_node *)((unsigned long)entry & ~127); } +static inline struct maple_node *mas_mn(const struct ma_state *mas) +{ + return mte_to_node(mas->node); +} static void mte_free(struct maple_enode *enode) { @@ -555,7 +561,7 @@ static inline void mas_set_safe_pivot(struct ma_state *mas, unsigned char slot, } mte_set_pivot(mas->node, slot, val); } -static inline struct maple_enode 
*__mte_get_rcu_slot( +static inline struct maple_enode *_mte_get_rcu_slot( const struct maple_enode *mn, unsigned char slot, enum maple_type type) { @@ -589,17 +595,6 @@ static inline struct maple_enode *__mte_get_rcu_slot( } } -static inline struct maple_enode *_mte_get_rcu_slot( - const struct maple_enode *mn, unsigned char slot, - enum maple_type type) -{ - struct maple_enode *p = __mte_get_rcu_slot(mn, slot, type); - - if (mte_dead_node(mn)) - return MAS_START; - return p; -} - static inline struct maple_enode *mte_get_rcu_slot(const struct maple_enode *mn, unsigned char slot) { @@ -1046,36 +1041,50 @@ done: } /* Private - * Returns the last slot that contains data. + * mas_data_end() - Find the end of the data (slot +1). Sets the value of the + * last pivot to @last_piv, sets @coalesce to the number of slots that can be + * removed by coalescing. */ static inline unsigned char mas_data_end(const struct ma_state *mas, const enum maple_type type, unsigned long *last_piv, unsigned char *coalesce) { - unsigned char data_end = 0; - unsigned long prev_piv = mas->max; struct maple_enode *mn = mas->node; + unsigned long piv = mas->min, prev_piv = mas->min; + unsigned char slot; + bool counted_null = false; + *coalesce = 0; + for (slot = 0; slot < mt_slot_count(mn); slot++) { + void *entry; - prev_piv = _mas_get_safe_pivot(mas, 0, type); - for (data_end = 1; data_end < mt_slot_count(mn); data_end++) { - *last_piv = _mas_get_safe_pivot(mas, data_end, type); - if (*last_piv == 0) { + piv = _mas_get_safe_pivot(mas, slot, type); + if (!piv && slot) { // Past the end of data. 
+ slot--; *last_piv = prev_piv; - return data_end - 1; + return slot; } - if (prev_piv == *last_piv) - (*coalesce)++; - else if (mt_will_coalesce(_mte_get_rcu_slot(mn, data_end, type))) - (*coalesce)++; + entry = _mte_get_rcu_slot(mn, slot, type); + if (mt_will_coalesce(entry)) { + if (piv == prev_piv) + (*coalesce)++; + } else if (!entry) { + if (counted_null) + (*coalesce)++; + counted_null = true; + } else { + counted_null = false; + } - if (*last_piv == mas->max) - return data_end; + if (piv == mas->max) + break; - prev_piv = *last_piv; + prev_piv = piv; } - return data_end; + + *last_piv = piv; + return slot; } /** Private @@ -1089,6 +1098,8 @@ static inline unsigned char mas_data_end(const struct ma_state *mas, static inline int ma_hard_data(unsigned long end, unsigned long coalesce) { + if (end < coalesce) + return 0; return end - coalesce; } @@ -1101,7 +1112,7 @@ static inline unsigned char mas_no_dense_calc_split(struct ma_state *mas, unsigned long min = mas->min; unsigned long max = min; enum maple_type type = mte_node_type(mas->node); - unsigned long pivot_cnt = mt_pivots[type]; + unsigned long slot_cnt = mt_slots[type]; unsigned long half = mt_slots[type] / 2; unsigned long last_pivot; unsigned char coalesce; @@ -1114,8 +1125,9 @@ static inline unsigned char mas_no_dense_calc_split(struct ma_state *mas, if (mte_get_pivot(mas->node, half) > mas->index) - return half; - return half + 1; + return half - 1; + + return half; if (mte_is_root(mas->node)) return half; @@ -1138,8 +1150,8 @@ static inline unsigned char mas_no_dense_calc_split(struct ma_state *mas, if (i >= half) return i; - if (data_end < pivot_cnt) - max = mte_get_pivot(mas->node, data_end); + if (data_end < slot_cnt) + max = mte_get_pivot(mas->node, data_end - 1); else max = mas->max; @@ -1294,6 +1306,8 @@ static inline unsigned long mas_leaf_max_gap(struct ma_state *mas) pstart = mas->min; for (i = 0; i < mt_slots[mt]; i++) { pend = mas_get_safe_pivot(mas, i); + if (!pend && i) + pend = 
mas->max; gap = pend - pstart + 1; if (!mt_is_empty(mte_get_rcu_slot(mas->node, i))) @@ -1303,7 +1317,7 @@ static inline unsigned long mas_leaf_max_gap(struct ma_state *mas) max_gap = gap; next: - if (pend == mas->max) + if (pend >= mas->max) break; pstart = pend + 1; @@ -1353,6 +1367,8 @@ static inline void mas_update_gap(struct ma_state *mas) { unsigned char pslot; unsigned long p_gap, max_gap = 0; + unsigned long max, min; + struct maple_enode *mn; if (!mte_is_leaf(mas->node)) @@ -1361,6 +1377,9 @@ static inline void mas_update_gap(struct ma_state *mas) if (mte_is_root(mas->node)) return; + mn = mas->node; + max = mas->max; + min = mas->min; /* Get the largest gap in this leaf and check it against the reported * largest. */ @@ -1371,6 +1390,10 @@ static inline void mas_update_gap(struct ma_state *mas) if (p_gap != max_gap) mas_parent_gap(mas, pslot, max_gap); + + mas->max = max; + mas->min = min; + mas->node = mn; } @@ -1549,7 +1572,8 @@ next_src_slot: cp->src_start = sloc; } static inline int mas_split_data(struct ma_state *mas, struct maple_enode *left, - struct maple_enode *right, unsigned char split) + struct maple_enode *right, unsigned char split, + unsigned long r_max) { MA_CP(cp, mas->node, left, 0, split); @@ -1565,6 +1589,13 @@ static inline int mas_split_data(struct ma_state *mas, struct maple_enode *left, cp.dst = right; cp.dst_start = 0; mas_copy(mas, &cp); + if (cp.dst_start + split >= mt_pivot_count(cp.dst)) { + unsigned char slot = cp.dst_start; + + if (mt_is_empty(mte_get_rcu_slot(right, slot))) + slot--; + mte_set_pivot(right, slot, r_max); + } } return split; } @@ -1596,7 +1627,7 @@ static inline void mte_adopt_children(struct maple_enode *parent) */ static inline void _mas_replace(struct ma_state *mas, bool free, bool push) { - struct maple_node *mn = mte_to_node(mas->node); + struct maple_node *mn = mas_mn(mas); struct maple_enode *parent = NULL; struct maple_enode *prev; unsigned char slot = 0; @@ -1717,7 +1748,7 @@ static inline int 
mas_split(struct ma_state *mas, unsigned char slot, struct maple_enode *left = NULL, *right = NULL; enum maple_type ptype; // parent type. unsigned long pivot; - unsigned long p_max = 0; + unsigned long r_max, p_max = ULONG_MAX; if (mte_is_root(mas->node)) { old_parent = full; @@ -1785,9 +1816,12 @@ static inline int mas_split(struct ma_state *mas, unsigned char slot, // calculate the split location. split = mas_calc_split(mas, &left, &right); + r_max = mas->max; + if (p_slot == p_end) // Right will be the end of the parent node. + r_max = p_max; // the node type for the children types. // Node types must be set to copy data into them. - mas_split_data(mas, left, right, split); + mas_split_data(mas, left, right, split, r_max); if (right) { pivot = mte_get_pivot(left, split); if (!pivot) // dense node @@ -1802,9 +1836,7 @@ static inline int mas_split(struct ma_state *mas, unsigned char slot, mte_link(left, new_parent, p_slot, pivot, ptype); if (right) { - unsigned long r_max = mas->max; - - if (mt_node_max(right) < mas->max) + if (mt_node_max(right) < r_max) r_max = pivot + mt_node_max(right); mte_link(right, new_parent, p_slot + 1, r_max, ptype); } @@ -1965,13 +1997,14 @@ static inline int _mas_add(struct ma_state *mas, void *entry, bool overwrite, unsigned char slot_cnt = mt_slots[this_type] - 1; unsigned char pivot_cnt = mt_pivots[this_type]; unsigned char coalesce; - unsigned char slot = mas_get_slot(mas); + unsigned char data_slot, slot = mas_get_slot(mas); bool spans_range = false; bool append = false; + bool split = false; bool null_entry = false; int old_end = mas_data_end(mas, this_type, &last_piv, &coalesce); int new_end = old_end; - int ret = 1; + int ret = 0; if (mas->last > mas->max) { BUG_ON(!active); @@ -2009,23 +2042,21 @@ static inline int _mas_add(struct ma_state *mas, void *entry, bool overwrite, if (!overwrite) { void *entry; - if (spans_range) - goto busy; + if (spans_range) { + mas_set_err(mas, -ERANGE); + return 0; + } entry = 
mte_get_rcu_slot(mas->node, slot); - if (!mt_is_empty(entry)) - goto busy; + if (!mt_is_empty(entry)) { + mas_set_err(mas, -EBUSY); + return 0; + } } - if (last_piv == mas->max) - slot_cnt++; - - if (slot > old_end) + if (slot >= old_end) append = true; - if (mas->index && min != mas->index) - null_entry = true; - if (mas->index == min && mas->last == max) { mte_set_rcu_slot(mas->node, slot, entry); ret = 1; @@ -2034,36 +2065,47 @@ static inline int _mas_add(struct ma_state *mas, void *entry, bool overwrite, /* Calculate how many new pivots we will need. */ // Possible pivot before the new range. - if (mas->index > min) - ret++; - - // Possible pivot after the new range. - if (mas->last < max) - ret++; + data_slot = slot; + if (mas->index > min) { + if (!slot) { // storing to slot 0 means we need the null. + ret++; + } else if (append) { // Appending will need a null. + ret++; + } else { // a starting null is only needed if there isn't one + // there. + struct maple_enode *slot_val; - // Detect duplicate null entries and don't copy them. - if (null_entry) { - struct maple_enode *slot_val; - slot_val = mte_get_rcu_slot(mas->node, slot + 1); - if (mt_will_coalesce(slot_val)) { - if (ret) - ret--; - } - if (slot > 0) { - slot_val = mte_get_rcu_slot(mas->node, slot - 1); - if (mt_is_empty(slot_val)) { - slot--; - if (ret) - ret--; - } + slot_val = mte_get_rcu_slot(mas->node, slot); + if (!mt_is_empty(slot_val)) + ret++; } + + null_entry = true; + } + data_slot += ret; + + // Possible pivot after the new range - should exists, but make sure + // we don't overflow to the last slot which implies a pivot. + if (mas->last < max) { + unsigned char max_slots = old_end + ret - coalesce + 2; + + if (max_slots >= slot_cnt) + ret++; } + /* Check for splitting */ - new_end += ret - coalesce; - if (new_end > slot_cnt) { + new_end += ret - coalesce + 1; + if (new_end == slot_cnt && mas->max == mt_node_max(mas->node)) + split = true; // Need a NULL for the maximum range. 
+ else if (new_end > slot_cnt) + split = true; // There is not enough space. + + + if (split) { /* Not enough space in the node */ unsigned char split = mas_split(mas, slot, active); + if (mas_is_err(mas)) return 0; @@ -2121,6 +2163,7 @@ static inline int _mas_add(struct ma_state *mas, void *entry, bool overwrite, mas_partial_copy(mas, slot); if (mas_is_err(mas)) return 0; + old_end = slot; } @@ -2193,23 +2236,14 @@ update_gap: mas_update_gap(mas); return ret; - -busy: - mas_set_err(mas, -EBUSY); - return 0; } + static inline int _mas_insert(struct ma_state *mas, void *entry, unsigned char slot) { mas_set_slot(mas, slot); return _mas_add(mas, entry, false, true); } -static inline int _ma_store(struct ma_state *mas, void *entry, - unsigned char slot) -{ - mas_set_slot(mas, slot); - return _mas_add(mas, entry, true, true); -} static inline void mas_root_expand(struct ma_state *mas, void *entry) { @@ -2755,7 +2789,7 @@ EXPORT_SYMBOL_GPL(mas_next); /* Private * - * _mas_prev() - Find the previous entry from the current mas state. + * _mas_prev() - Find the previous entry from the current ma state. * @mas the current maple state (must have a valid slot) */ static inline void* _mas_prev(struct ma_state *mas, unsigned long limit) @@ -2882,7 +2916,7 @@ static inline void mas_coalesce_empty(struct ma_state *mas, * Try to allocate 1-2 nodes, depending on if the right node will be * completely consumed. * - * returns 0 on success or the number of slots that were not filled. + * returns 0 on failure or the number of slots that were not filled. 
* */ /* @@ -2910,9 +2944,6 @@ static inline int mas_rebalance(struct ma_state *mas, unsigned char end, MA_CP(cp, this_enode, NULL, 0, end); - if (coalesce) - coalesce--; - l_slot_cnt = ma_hard_data(end, coalesce); if (mte_is_root(this_enode)) return mas_coalesce_node(mas, end, coalesce, true); @@ -2942,7 +2973,7 @@ static inline int mas_rebalance(struct ma_state *mas, unsigned char end, mas->max = this_max; mas->min = this_min; mas_set_slot(mas, this_p_slot); - if (l_slot_cnt <= 1) { + if (end < coalesce) { enum maple_type p_type = mas_parent_enum(mas, this_enode); struct maple_enode *eparent = @@ -2967,8 +2998,15 @@ static inline int mas_rebalance(struct ma_state *mas, unsigned char end, r_end = mas_data_end(mas, mte_node_type(r_enode), &l_piv, &r_coalesce); r_slot_cnt = ma_hard_data(r_end, r_coalesce); - // Add 1 for slot 0 on the right. - all_slots = r_slot_cnt + 1 + l_slot_cnt; + if (r_end < r_coalesce) { + all_slots = l_slot_cnt; + new_type = mte_node_type(this_enode); + l_piv = r_max; + mas->min = this_min; + goto right_empty; + } + // Add 1 for each slot 0. + all_slots = r_slot_cnt + 2 + l_slot_cnt; // check if left ends in NULL, right start in NULL.. if ((mt_will_coalesce(mte_get_rcu_slot(this_enode, end)) || @@ -2985,7 +3023,7 @@ static inline int mas_rebalance(struct ma_state *mas, unsigned char end, mas_node_cnt(mas, node_cnt); if (mas_is_err(mas)) - return ret; + return 0; // Restore the left node (this_enode) mas->node = this_enode; @@ -3019,6 +3057,7 @@ static inline int mas_rebalance(struct ma_state *mas, unsigned char end, mas->node = r_enode; mas->max = r_max; mas->min = r_min; +right_empty: if (all_slots <= mt_slots[new_type] - 1) { // Right was entirely consumed. 
void *entry = XA_SKIP_ENTRY; @@ -3027,9 +3066,14 @@ static inline int mas_rebalance(struct ma_state *mas, unsigned char end, entry = NULL; ret = mt_slots[new_type] - all_slots; + if (!ret) + ret = 1; r_p_slot = mte_parent_slot(r_enode); mas_encoded_parent(mas); mte_set_rcu_slot(mas->node, r_p_slot, entry); + if (xa_is_skip(entry) && + r_p_slot < mt_pivot_count(mas->node)) + mte_set_pivot(mas->node, r_p_slot, l_piv); mte_free(r_enode); p_coalesce = true; goto right_done; @@ -3046,13 +3090,14 @@ static inline int mas_rebalance(struct ma_state *mas, unsigned char end, r_p_slot = mte_parent_slot(r_enode); mas_replace(mas); mas_encoded_parent(mas); - mte_set_pivot(mas->node, r_p_slot, r_piv); + if (r_p_slot < mt_pivot_count(mas->node)-1) + mte_set_pivot(mas->node, r_p_slot, r_piv); right_done: - while (r_p_slot-- > this_p_slot) { + // Set the parent slots to l_piv for all skip slots.. + while (r_p_slot-- > this_p_slot) mte_set_pivot(mas->node, r_p_slot, l_piv); - } /* If there is a freed node, then mas->node must point to the parent * which contained the freed node so it can also be checked for @@ -3068,76 +3113,72 @@ right_done: */ static inline void mas_coalesce_root(struct ma_state *mas) { - unsigned char end, coalesce, hard_data; - struct maple_enode *this_enode; - enum maple_type this_type; + struct maple_enode *this_enode = mas->node; + enum maple_type this_type = mte_node_type(this_enode); unsigned long piv; unsigned long min, max; + unsigned char coalesce, hard_data; + unsigned char end = mas_data_end(mas, this_type, &piv, &coalesce); - this_enode = mas->node; - this_type = mte_node_type(this_enode); - end = mas_data_end(mas, this_type, &piv, &coalesce); - hard_data = ma_hard_data(end, coalesce); - if (end == 1 && coalesce == 1) { - min = mas->min; - max = mas->max; - mas_first_node(mas, ULONG_MAX); - if (mas_is_none(mas)) { - // Allocation failures - - mas->tree->ma_root = NULL; - mte_free(this_enode); - return; - } - mas->node = this_enode; - mas->min = min; - 
mas->max = max; - } - - if ((end <= coalesce)) { - if(!mte_is_leaf(this_enode)) { - // Remove level in tree. - mas_set_slot(mas, 0); - mas_first_node(mas, ULONG_MAX); - - mte_to_node(mas->node)->parent = - mte_to_node(this_enode)->parent; - mas->node = mte_mk_root(mas->node); - mas_replace(mas); - return; - } + MA_CP(cp, mas->node, NULL, 0, end); - mas->tree->ma_root = NULL; - mte_free(this_enode); + hard_data = ma_hard_data(end, coalesce); + if (hard_data > mt_min_slots[this_type] - 1) return; - } - if (mte_is_leaf(this_enode) && hard_data == 1) { - void *entry; + /* Check for a single entry in the root node. + * 1. 0-oo => node + * 2. slot count == coalesce + * 3. one entry and one null. + */ + if (!hard_data || + (end + 1 == coalesce) || + (end == 1 && !mte_get_rcu_slot(this_enode, 1))) { + unsigned long piv; + + min = mas->min; + max = mas->max; mas_set_slot(mas, 0); - if (!mas_first_entry(mas, ULONG_MAX)) { - entry = mte_get_rcu_slot(mas->node, - mas_get_slot(mas)); - if (((unsigned long) (entry) & 3) != 2) { + piv = mas_first_node(mas, ULONG_MAX); + if (mte_is_leaf(this_enode)) { + if (!piv) { + void *entry = mte_get_rcu_slot(this_enode, + mas_get_slot(mas)); rcu_assign_pointer(mas->tree->ma_root, entry); mte_free(this_enode); return; } + // coalesce the node.. + mas->min = min; + mas->max = max; + mas->node = this_enode; + goto coalesce; + } else if (mas_is_none(mas)) { + /* allocation failed to create a leaf for this empty + * node. + */ + rcu_assign_pointer(mas->tree->ma_root, NULL); + mte_free(this_enode); + return; } - } - if (hard_data < mt_min_slots[this_type] - 1) { - MA_CP(cp, mas->node, NULL, 0, end); + /* it's not a leaf, remove a level from the tree. */ + goto remove_level; + } else if (hard_data < mt_min_slots[this_type] - 1) + goto coalesce; // Compact the node. 
+ + return; + +coalesce: mas_copy(mas, &cp); if (mas_is_err(mas)) return; mas->node = cp.dst; - mte_to_node(mas->node)->parent = - mte_to_node(this_enode)->parent; +remove_level: + mas_mn(mas)->parent = mte_to_node(this_enode)->parent; mas->node = mte_mk_root(mas->node); mas_replace(mas); - } } /** Private @@ -3204,7 +3245,7 @@ start: if (!mt_is_empty(entry)) goto check_start; - if (!end || end + 1 <= coalesce) { + if (end <= coalesce) { mas_coalesce_empty(mas, eparent, p_slot, p_type); check_parent = true; } @@ -3221,10 +3262,9 @@ start: if (check_parent) slot = 0; - while (slot > 0) { + while (--slot > 0) { if (!mt_is_empty(mte_get_rcu_slot(this_enode, slot))) break; - slot--; } mas->node = eparent; @@ -3297,7 +3337,7 @@ static inline bool _mas_rev_awalk(struct ma_state *mas, unsigned long size) { enum maple_type type; unsigned long max, min; - unsigned char pivot_cnt, i; + unsigned char i; bool found = false; type = mte_node_type(mas->node); @@ -3308,11 +3348,7 @@ static inline bool _mas_rev_awalk(struct ma_state *mas, unsigned long size) switch (type) { case maple_leaf_64: - pivot_cnt = mt_pivots[type]; - if (i >= pivot_cnt - 1) - max = mas->max; - else - max = _mte_get_pivot(mas->node, i, type); + max = _mas_get_safe_pivot(mas, i, type); do { unsigned long this_gap = 0; @@ -3776,6 +3812,9 @@ void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max, unsigned char slot; MA_STATE(mas, mt, *index, *index); + if (!start && !(*index)) + return NULL; + rcu_read_lock(); leaf = _mas_range_walk(&mas, &range_start, &range_end); slot = mas_get_slot(&mas); @@ -3783,7 +3822,6 @@ void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max, entry = mte_get_rcu_slot(mas.node, slot); mas.last = range_end; - if (mt_is_empty(entry) || xa_is_zero(entry)) entry = NULL; @@ -3793,10 +3831,11 @@ void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max, entry = NULL; } + rcu_read_unlock(); + if (entry) *index = mas.last + 1; - 
rcu_read_unlock(); return entry; } @@ -3984,6 +4023,8 @@ static inline void mas_rev_awalk(struct ma_state *mas, unsigned long size) if (mas_is_ptr(mas)) return; + if (mas_is_err(mas)) + return; slot = mas_data_end(mas, mte_node_type(mas->node), &last_piv, &coalesce); mas_set_slot(mas, slot); @@ -3995,7 +4036,7 @@ static inline void mas_rev_awalk(struct ma_state *mas, unsigned long size) * no gap found. (return, slot == MAPLE_NODE_SLOTS) * found the gap. (return, slot != MAPLE_NODE_SLOTS) */ - while (!_mas_rev_awalk(mas, size) && !mas_is_err(mas)) { + while (!mas_is_err(mas) && !_mas_rev_awalk(mas, size)) { if (last == mas->node) mas_rewind_node(mas); else @@ -4080,7 +4121,11 @@ static inline int mas_add(struct ma_state *mas, void *entry, bool overwrite, } /* Do the add */ - return _mas_add(mas, entry, overwrite, active); + ret = _mas_add(mas, entry, overwrite, active); + if (mas_is_err(mas) && xa_err(mas->node) == -ERANGE) + mas_set_err(mas, -EEXIST); + + return ret; exists: mas_set_err(mas, -EEXIST); @@ -4099,6 +4144,8 @@ static inline void mas_insert(struct ma_state *mas, void *entry) static int mas_fill_gap(struct ma_state *mas, void *entry, unsigned char slot, unsigned long size, unsigned long *index) { + unsigned char pslot = mte_parent_slot(mas->node); + struct maple_enode *mn = mas->node; /* mas->index is the start address for the search * which may no longer be needed. 
* mas->last is the end address for the search @@ -4106,6 +4153,17 @@ static int mas_fill_gap(struct ma_state *mas, void *entry, unsigned char slot, *index = mas->index; mas->last = mas->index + size - 1; + + /* It is possible that using mas->max and mas->min to correctly + * calculate the index and last will cause an issue in the gap + * calculation, so fix the ma_state here + */ + mas_encoded_parent(mas); + mas->max = mas_get_safe_pivot(mas, pslot); + if (pslot) + mas->min = mas_get_safe_pivot(mas, pslot - 1) + 1; + + mas->node = mn; _mas_insert(mas, entry, slot); return 0; } @@ -4154,7 +4212,7 @@ static inline int _mas_get_unmapped_area(struct ma_state *mas, unsigned long min // The start of the window can only be within these values. mas->index = min; - mas->last = max - size + 1; + mas->last = max - size; if (forward) mas_awalk(mas, size); @@ -4165,7 +4223,7 @@ static inline int _mas_get_unmapped_area(struct ma_state *mas, unsigned long min return xa_err(mas->node); if (mas_get_slot(mas) == MAPLE_NODE_SLOTS) - goto no_gap; + return -EBUSY; if (forward) mas_set_fwd_index(mas, size); @@ -4173,12 +4231,8 @@ static inline int _mas_get_unmapped_area(struct ma_state *mas, unsigned long min mas_set_rev_index(mas, size); return 0; - -no_gap: - return -EBUSY; - - } + int mas_get_unmapped_area(struct ma_state *mas, unsigned long min, unsigned long max, unsigned long size) { @@ -4539,6 +4593,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, mtree_lock(mas.tree); retry: + mas_set_slot(&mas, 0); mas.index = min; mas.last = max - size; ret = mas_alloc(&mas, entry, size, startp); @@ -4625,8 +4680,8 @@ void mtree_destroy(struct maple_tree *mt) } EXPORT_SYMBOL(mtree_destroy); -#define CONFIG_DEBUG_MAPLE_TREE #ifdef CONFIG_DEBUG_MAPLE_TREE + void mt_dump_node(void *entry, unsigned long min, unsigned long max, unsigned int depth); void mt_dump_range(unsigned long min, unsigned long max, unsigned int depth) @@ -4833,8 +4888,8 @@ void mas_validate_gaps(struct 
ma_state *mas) for(i = 0; i < mt_slot_count(mte); i++) { p_end = mas_get_safe_pivot(mas, i); - if (!p_end && i != 0) - break; + if (!p_end && i) + p_end = mas->max; if (mte_is_leaf(mte)) { gap = p_end - p_start + 1; @@ -4862,8 +4917,10 @@ counted: p_slot = mte_parent_slot(mas->node); p_mn = mte_parent(mte); - BUG_ON(_ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) - != max_gap); + MT_BUG_ON(mas->tree, max_gap > mas->max); + MT_BUG_ON(mas->tree, + _ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) != + max_gap); } /** diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 172ab8f76006..fd0447753f37 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -8,27 +8,6 @@ #include #include -static unsigned int tests_run; -static unsigned int tests_passed; - -#ifndef MAPLE_TREE_DEBUG -# ifdef __KERNEL__ -void mt_dump(const struct maple_tree *mt) { } -# endif -#undef MT_BUG_ON -#define MT_BUG_ON(tree, x) do { \ - tests_run++; \ - if (x) { \ - pr_info("BUG at %s:%d (%u)\n", \ - __func__, __LINE__, x); \ - mt_dump(tree); \ - pr_info("Pass: %u Run:%u\n", tests_passed, tests_run); \ - dump_stack(); \ - } else { \ - tests_passed++; \ - } \ -} while (0) -#endif #define MTREE_ALLOC_MAX 0x2000000000000Ul static int mtree_insert_index(struct maple_tree *mt, unsigned long index, gfp_t gfp) @@ -464,6 +443,27 @@ static noinline void check_find(struct maple_tree *mt) val = 1; } + val = 0; + max = 0; + index = 0; + MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL)); + mt_for_each(mt, entry, index, ULONG_MAX) { + if (val == 4398046511104) + MT_BUG_ON(mt, entry != + xa_mk_value(ULONG_MAX & LONG_MAX)); + else + MT_BUG_ON(mt, xa_mk_value(val) != entry); + val <<= 2; + if (val == 64) // Skip zero entry. + val <<= 2; + // For zero check. 
+ if (!val) + val = 1; + max++; + MT_BUG_ON(mt, max > 25); + } + mtree_erase_index(mt, ULONG_MAX); + mas_reset(&mas); index = 17; entry = mt_find(mt, &index, 512, true); @@ -744,7 +744,6 @@ static noinline void check_erase_testset(struct maple_tree *mt) erase_check_load(mt, i); } - mt_set_non_kernel(1); erase_check_erase(mt, 18); //7012 for (int i = 0; i < 25; i++) { if (i <= 18 && i >= 13) @@ -753,6 +752,7 @@ static noinline void check_erase_testset(struct maple_tree *mt) erase_check_load(mt, i); } + mt_set_non_kernel(2); erase_check_erase(mt, 19); //7015 for (int i = 0; i < 25; i++) { if (i <= 19 && i >= 13) @@ -799,8 +799,9 @@ static noinline void check_erase_testset(struct maple_tree *mt) // Shrinking tree test. // - for (int i = 13; i < ARRAY_SIZE(set); i++) + for (int i = 13; i < ARRAY_SIZE(set); i++) { erase_check_insert(mt, i); + } mt_set_non_kernel(99); for (int i = 18; i < ARRAY_SIZE(set); i++) { @@ -812,7 +813,7 @@ static noinline void check_erase_testset(struct maple_tree *mt) check_load(mt, set[j], NULL); } } - mt_set_non_kernel(30); + mt_set_non_kernel(35); for (int i = 0; i < 18; i++) { erase_check_erase(mt, i); for (int j = 0; j < ARRAY_SIZE(set); j++) { @@ -825,12 +826,10 @@ static noinline void check_erase_testset(struct maple_tree *mt) erase_check_insert(mt, 8); erase_check_insert(mt, 9); erase_check_erase(mt, 8); - - } #define erase_check_store_range(mt, a, i, ptr) mtree_test_store_range(mt, a[i],\ - a[i + 1], ptr) + a[i + 1] - 1, ptr) #define STORE 1 #define ERASE 2 static noinline void check_erase2_testset(struct maple_tree *mt, @@ -841,31 +840,31 @@ static noinline void check_erase2_testset(struct maple_tree *mt, void *foo; unsigned long addr = 0; - mt_dump(mt); - for (int i = 0; i < size; i+=3){ + for (int i = 0; i < size; i += 3) { switch(set[i]) { - case STORE: - if (!mtree_test_insert_range(mt, set[i + 1], - set[i + 2], &set)) - entry_cnt++; - else - erase_check_store_range(mt, set, i + 1, &set); - break; - case ERASE: - check_erase(mt, 
set[i+1], &set); - entry_cnt--; + case STORE: + if (!mtree_test_insert_range(mt, set[i + 1], + set[i + 2] - 1, &set)) + entry_cnt++; + else + erase_check_store_range(mt, set, i + 1, &set); + break; + case ERASE: + check_erase(mt, set[i+1], &set); + entry_cnt--; + break; + } + + check = 0; + addr = 0; + mt_for_each(mt, foo, addr, ULONG_MAX) { + check++; + if (check > entry_cnt) break; } - mt_dump(mt); - } - mt_for_each(mt, foo, addr, ULONG_MAX) { - printk("addr %lu -> %p\n", addr, foo); - check++; + MT_BUG_ON(mt, check != entry_cnt); } - - printk("test %d != %d\n", check, entry_cnt); - MT_BUG_ON(mt, check != entry_cnt); } @@ -915,13 +914,62 @@ STORE, 140277094813696, 140277094821888, STORE, 140277094821888, 140277094825984, STORE, 140735933906944, 140735933911040, }; + unsigned long set3[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140735790264320, 140737488351231, +ERASE, 140735790264320, 140737488351231, +STORE, 140735790264320, 140735790268415, +STORE, 94016597282816, 94016597454847, +ERASE, 94016597282816, 94016597454847, +STORE, 94016597282816, 94016597299199, +STORE, 94016597299200, 94016597454847, +ERASE, 94016597299200, 94016597454847, +STORE, 94016597299200, 94016597401599, +STORE, 94016597401600, 94016597442559, +STORE, 94016597442560, 94016597454847, +STORE, 140496959283200, 140496959455231, +ERASE, 140496959283200, 140496959455231, +STORE, 140496959283200, 140496959287295, +STORE, 140496959287296, 140496959455231, +ERASE, 140496959287296, 140496959455231, +STORE, 140496959287296, 140496959410175, +STORE, 140496959410176, 140496959442943, +STORE, 140496959442944, 140496959451135, +STORE, 140496959451136, 140496959455231, +STORE, 140735791718400, 140735791722495, +STORE, 140735791706112, 140735791718399, +STORE, 47135835713536, 47135835721727, +STORE, 47135835721728, 47135835729919, +STORE, 47135835729920, 47135835893759, +ERASE, 47135835729920, 47135835893759, +STORE, 47135835729920, 47135835742207, +STORE, 47135835742208, 47135835893759, 
+STORE, 47135835840512, 47135835893759, +STORE, 47135835742208, 47135835840511, +ERASE, 47135835742208, 47135835840511, +STORE, 47135835742208, 47135835840511, +STORE, 47135835885568, 47135835893759, +STORE, 47135835840512, 47135835885567, +ERASE, 47135835840512, 47135835885567, +STORE, 47135835840512, 47135835893759, +ERASE, 47135835840512, 47135835893759, +STORE, 47135835840512, 47135835885567, +STORE, 47135835885568, 47135835893759, + + }; + check_erase2_testset(mt, set, ARRAY_SIZE(set)); mtree_destroy(mt); + mtree_init(mt, 0); check_erase2_testset(mt, set2, ARRAY_SIZE(set2)); start = 140735933894656; - printk("mt_find check %lu-%lu\n", start, 140735933906943UL); - MT_BUG_ON(mt, mt_find(mt, &start, 140735933906943UL, true)); + MT_BUG_ON(mt, !!mt_find(mt, &start, 140735933906943UL, true)); + mtree_destroy(mt); + + mt_set_non_kernel(2); + mtree_init(mt, 0); + check_erase2_testset(mt, set3, ARRAY_SIZE(set3)); } static noinline void check_alloc_rev_range(struct maple_tree *mt) @@ -960,7 +1008,11 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) }; unsigned long holes[] = { - // Start of hole, end of hole, size of hole (+1) + /* Note: start of hole is INCLUSIVE + * end of hole is EXCLUSIVE + * (opposite of the above table.) 
+ * Start of hole, end of hole, size of hole (+1) + */ 0x565234afb000, 0x565234afc000, 0x1000, 0x565234afe000, 0x565235def000, 0x12F1000, 0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000, @@ -1014,8 +1066,6 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) }; - MT_BUG_ON(mt, !mtree_empty(mt)); - int i, range_cnt = ARRAY_SIZE(range); int req_range_cnt = ARRAY_SIZE(req_range); @@ -1030,10 +1080,10 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) unsigned long min = 0; for (i = 0; i < ARRAY_SIZE(holes); i+= 3) { MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, min, - holes[i+1], - holes[i+2])); - MT_BUG_ON(mt, mas.index != holes[i + 1] - holes[i+2] + 1); - min = holes[i+1]; + holes[i+1] >> 12, + holes[i+2] >> 12)); + MT_BUG_ON(mt, mas.index != holes[i] >> 12); + min = holes[i+1] >> 12; mas_reset(&mas); } @@ -1150,8 +1200,6 @@ static noinline void check_alloc_range(struct maple_tree *mt) int i, range_cnt = ARRAY_SIZE(range); int req_range_cnt = ARRAY_SIZE(req_range); - MT_BUG_ON(mt, !mtree_empty(mt)); - for (i = 0; i < range_cnt; i += 2) { check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1, xa_mk_value(range[i] >> 12), 0); @@ -1197,7 +1245,7 @@ static noinline void check_ranges(struct maple_tree *mt) MT_BUG_ON(mt, !mtree_empty(mt)); check_insert_range(mt, r[0], r[1], xa_mk_value(r[0]), 0); check_insert_range(mt, r[2], r[3], xa_mk_value(r[2]), 0); - check_insert_range(mt, r[4], r[5], xa_mk_value(r[4]), -EBUSY); + check_insert_range(mt, r[4], r[5], xa_mk_value(r[4]), -EEXIST); // Store check_store_range(mt, r[4], r[5], xa_mk_value(r[4]), 0); check_store_range(mt, r[6], r[7], xa_mk_value(r[6]), 0); @@ -1286,17 +1334,24 @@ static int maple_tree_seed(void) mtree_init(&tree, 0); check_new_node(&tree); + mtree_destroy(&tree); + mtree_init(&tree, 0); + check_erase2_sets(&tree); + mtree_destroy(&tree); /* Test ranges (store and insert) */ mtree_init(&tree, 0); check_ranges(&tree); + mtree_destroy(&tree); mtree_init(&tree, 
MAPLE_ALLOC_RANGE); check_alloc_range(&tree); + mtree_destroy(&tree); mtree_init(&tree, MAPLE_ALLOC_RANGE); check_alloc_rev_range(&tree); + mtree_destroy(&tree); mtree_init(&tree, 0); -- 2.50.1