From 4718fda07d389f183412e3e8075ebdffc251f532 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Date: Tue, 4 Aug 2020 13:25:36 -0400
Subject: [PATCH] maple_tree: Rename unmapped area to empty area, remove the mas->node == NULL possibility, fix 3-way split, slot overflow, and calc split issues, and rework the descend adopt algorithm.

The changes:
1. unmapped area => empty area
2. mas->node can no longer be NULL
3. splitting into 3 nodes can occur at non-leaf nodes.
4. calc split could potentially create an overflow or a deficient node.
5. drop Private from comments
6. comment fixes
7. rename combine_separate to mas_spanning_rebalance

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 include/linux/maple_tree.h | 16 +-
 lib/maple_tree.c | 585 +++++++++++++++++++++----------------
 lib/test_maple_tree.c | 189 +++++++-----
 3 files changed, 461 insertions(+), 329 deletions(-)

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 86fdf158db29..5d5e53316be2 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -36,7 +36,7 @@
 #define MAPLE_SPARSE9_SLOTS 13 /* 127 bytes */
 #define MAPLE_SPARSE6_SLOTS 14 /* 128 bytes */
-#define MAPLE_TOPIARY_SLOTS MAPLE_NODE_SLOTS - 1
+#define MAPLE_TOPIARY_SLOTS (MAPLE_NODE_SLOTS - 1)
 #define MAPLE_NODE_COALESCE 2 /* Limit on coalesce count */
 #else
@@ -254,7 +254,7 @@ struct ma_state {
 unsigned long max; /* The maximum index of this node - implied pivot max */
 struct maple_node *alloc; /* Allocated nodes for this operation */
 struct maple_enode *span_enode; /* Pointer to maple parent/slot that set the max */
- int full_cnt; /* (+ full nodes) / (- number of almost empty) nodes above */
+ int full_cnt; /* (+ full nodes) / (- almost empty) nodes above */
 unsigned char depth; /* depth of tree descent during write */
 };
@@ -312,13 +312,19 @@ void *mas_prev(struct ma_state *mas, unsigned long min);
 void *mas_next(struct ma_state *mas, unsigned long max);
 /* Finds a sufficient hole */
-int mas_get_unmapped_area(struct ma_state *mas, unsigned long min,
+int mas_get_empty_area(struct ma_state *mas, unsigned long min,
 unsigned long max, unsigned long size);
-/* This finds an unmapped area from the highest address to the lowest.
+/* Checks if a mas has not found anything */
+static inline bool mas_is_none(struct ma_state *mas)
+{
+ return mas->node == MAS_NONE;
+}
+
+/* This finds an empty area from the highest address to the lowest.
 * AKA "Topdown" version,
 */
-int mas_get_unmapped_area_rev(struct ma_state *mas, unsigned long min,
+int mas_get_empty_area_rev(struct ma_state *mas, unsigned long min,
 unsigned long max, unsigned long size);
/**
 * mas_reset() - Reset a Maple Tree operation state.
diff --git a/lib/maple_tree.c b/lib/maple_tree.c index f4e905a96741..742c5454d2b3 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -15,7 +15,6 @@ #include //#include // for task_size -#define CONFIG_DEBUG_MAPLE_TREE #define MA_ROOT_PARENT 1 #define ma_parent_ptr(x) ((struct maple_pnode *)(x)) #define ma_mnode_ptr(x) ((struct maple_node *)(x)) @@ -164,7 +163,7 @@ static inline bool mte_is_leaf(const struct maple_enode *entry) return ma_is_leaf(mte_node_type(entry)); } -/** Private +/* * We also reserve values with the bottom two bits set to '10' which are * below 4096 */ @@ -185,11 +184,6 @@ static inline bool mas_is_ptr(struct ma_state *mas) { return mas->node == MAS_ROOT; } -static inline bool mas_is_none(struct ma_state *mas) -{ - return mas->node == MAS_NONE; -} - static inline bool mas_is_start(struct ma_state *mas) { return mas->node == MAS_START; @@ -202,9 +196,6 @@ static inline bool mas_is_err(struct ma_state *mas) static inline bool mas_searchable(struct ma_state *mas) { - if (!mas->node) - return false; - if (mas_is_none(mas)) return false; @@ -316,7 +307,8 @@ static inline enum maple_type mas_parent_enum(struct ma_state *mas, return mte_parent_range_enum(parent); } -/** Private +/* + * mte_set_parent() - Set the parent node and encode the slot. * * Type is encoded in the node->parent * bit 0: 1 = root, 0 otherwise @@ -341,7 +333,7 @@ static inline void mte_set_parent(struct maple_enode *node, case maple_range_64: case maple_arange_64: type |= 4; - /* fallthrough */ + fallthrough; case maple_range_32: type |= 2; break; @@ -352,7 +344,6 @@ static inline void mte_set_parent(struct maple_enode *node, break; } - BUG_ON(slot > MAPLE_NODE_SLOTS); // Only 4 bits to use. val &= ~bitmask; // Remove any old slot number. val |= (slot << slot_shift); // Set the slot. val |= type; @@ -495,7 +486,7 @@ static inline unsigned long _mas_get_safe_pivot(const struct ma_state *mas, return _mte_get_pivot(mas->node, slot, type); } -/** Private +/* * mas_get_safe_pivot() - Return the pivot or the mas->max. * * Return: The pivot (including mas->max for the final slot) @@ -520,7 +511,7 @@ static inline unsigned long mas_get_safe_lower_bound(struct ma_state *mas, static inline void ma_set_pivot(struct maple_node *mn, unsigned char slot, enum maple_type type, unsigned long val) { - BUG_ON(slot >= mt_slots[type]); + BUG_ON(slot >= mt_pivots[type]); switch (type) { default: @@ -627,7 +618,7 @@ static inline struct maple_enode *mas_get_rcu_slot(const struct ma_state *mas, { return mte_get_rcu_slot(mas->node, slot, mas->tree); } -/** Private +/* * mte_destroy_walk() - Free the sub-tree from @mn and below. * * @mn - the head of the sub-tree to free. @@ -657,8 +648,10 @@ void mte_destroy_walk(struct maple_enode *mn, struct maple_tree *mtree) mte_free(mn); } -/** Private - * matadd() - Add a @dead_enode to the ma_topiary of a list of dead nodes. +/* + * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes. + * + * Add the %dead_enode to the linked list in %mat. * * @mat - the ma_topiary, a linked list of dead nodes. * @dead_enode - the node to be marked as dead and added to the tail of the list @@ -673,13 +666,14 @@ static inline void mat_add(struct ma_topiary *mat, return; } - //* Set the next entry. mte_to_mat(mat->tail)->next = dead_enode; mat->tail = dead_enode; } -/** Private +/* * mat_free() - Free all nodes in a dead list. * + * Free or destroy walk a dead list. + * * @mat - the ma_topiary linked list of dead nodes to free. 
* @recursive - specifies if this sub-tree is to be freed or just the single * node. @@ -698,7 +692,7 @@ static inline void mat_free(struct ma_topiary *mat, bool recursive) } } -/** Private +/* * ma_set_rcu_slot() - Set a nodes rcu slot. * * @mn - the maple node for the operation @@ -709,6 +703,7 @@ static inline void mat_free(struct ma_topiary *mat, bool recursive) static inline void ma_set_rcu_slot(struct maple_node *mn, unsigned char slot, enum maple_type type, void *val) { + BUG_ON(slot >= mt_slots[type]); switch (type) { default: @@ -743,16 +738,14 @@ static inline void ma_set_rcu_slot(struct maple_node *mn, break; case maple_range_64: case maple_leaf_64: - BUG_ON(slot >= MAPLE_RANGE64_SLOTS); rcu_assign_pointer(mn->mr64.slot[slot], val); break; case maple_arange_64: - BUG_ON(slot >= MAPLE_ARANGE64_SLOTS); rcu_assign_pointer(mn->ma64.slot[slot], val); break; } } -/** Private +/* * mte_set_rcu_slot() - Set an encoded nodes rcu slot. */ static inline void mte_set_rcu_slot(const struct maple_enode *mn, @@ -760,7 +753,7 @@ static inline void mte_set_rcu_slot(const struct maple_enode *mn, { ma_set_rcu_slot(mte_to_node(mn), slot, mte_node_type(mn), val); } -/** Private +/* * mas_dup_state() - duplicate the internal state of a ma_state. * * @dst - the destination to store the state information @@ -776,7 +769,7 @@ static inline void mas_dup_state(struct ma_state *dst, struct ma_state *src) dst->min = src->min; mas_set_slot(dst, mas_get_slot(src)); } -/** Private +/* * mas_descend() - Descend into the slot stored in the ma_state. * * @mas - the maple state. @@ -868,7 +861,6 @@ no_parent: if (!max || min == ULONG_MAX) { if (mas->node == a_enode) { //FIXME: Restart and retry? - printk("Dead node %p\n", mas_mn(mas)); MT_BUG_ON(mas->tree, mas->node == a_enode); } mas->node = a_enode; @@ -1003,7 +995,7 @@ void mas_empty_alloc(struct ma_state *mas) ma_free_alloc(node); mas->alloc = NULL; } -/** Private +/* * Check if there was an error allocating and do the allocation if necessary * If there are allocations, then free them. */ @@ -1040,7 +1032,7 @@ static inline struct maple_node *mas_node_cnt(struct ma_state *mas, int count) return mas->alloc; } -/** Private +/* * Sets up maple state for operations by setting mas->min = 0 & mas->node to * certain values. * returns: @@ -1085,7 +1077,7 @@ done: return entry; } -/** Private +/* * mas_data_end() - Find the end of the data (slot). Sets the value of the * last pivot to @last_piv. * @@ -1098,6 +1090,7 @@ static inline unsigned char _mas_data_end(const struct ma_state *mas, { int slot = 0; unsigned long piv = mas->min, prev_piv = mas->min; + for (; slot < mt_slot_count(mas->node); slot++) { piv = _mas_get_safe_pivot(mas, slot, type); if (piv >= mas->max) @@ -1118,9 +1111,10 @@ static inline unsigned char _mas_data_end(const struct ma_state *mas, static inline unsigned char mas_data_end(const struct ma_state *mas) { unsigned long l; + return _mas_data_end(mas, mte_node_type(mas->node), &l); } -/** Private +/* * mas_leaf_max_gap() - Returns the largest gap in a leaf node * * @mas - the maple state @@ -1175,7 +1169,7 @@ done: return max_gap; } -/** Private +/* * mas_max_gap() - find the largest gap in a non-leaf node and set the slot. */ static inline unsigned long mas_max_gap(struct ma_state *mas) @@ -1225,7 +1219,7 @@ ascend: slot = mte_parent_slot(gaps.node); goto ascend; } -/** Private +/* * mas_update_gap() - Update a nodes gaps and propagate up if necessary. * * @mas - the maple state. 
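 *
 * An illustrative example (values assumed, not from this patch): if a store
 * shrinks a leaf's largest gap from 10 to 2, the gap recorded in the
 * parent's gap slot for that leaf is rewritten, and the update repeats
 * upwards while it still changes an ancestor's maximum gap.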
@@ -1256,7 +1250,7 @@ static inline void mas_update_gap(struct ma_state *mas) } -/** Private +/* * mas_first_node() - Finds the first node in mas->node and returns the pivot, * mas->max if no node is found. Node is returned as mas->node which may be * MAS_NONE. @@ -1301,7 +1295,7 @@ no_entry: mas->node = MAS_NONE; return mas->max; } -/** Private +/* * mas_first_entry() - * Returns the pivot which points to the entry with the * lowest index. * @@ -1332,7 +1326,7 @@ static inline unsigned long mas_first_entry(struct ma_state *mas, } } -/** Private +/* * mas_adopt_children() - Set the parent pointer of all nodes in @parent to * @parent with the slot encoded. * @@ -1359,7 +1353,7 @@ static inline void mas_adopt_children(struct ma_state *mas, } } -/** Private +/* * mas_replace() - Replace a maple node in the tree with mas->node. Uses the * parent encoding to locate the maple node in the tree. * @@ -1405,54 +1399,34 @@ static inline void mas_replace(struct ma_state *mas, bool advanced) } } -/** Private - * mas_check_split_parent() - Check to see if this node has the correct parent - * set or not. - * @mas - the maple state - * @slot - the slot to examine. +/* + * mas_new_child() - Find the new child of a node. + * @mas: the maple state + * @child: the maple state to store the child. + * */ -static inline struct maple_enode *mas_check_split_parent(struct ma_state *mas, - unsigned char slot) -{ - void *entry = mas_get_rcu_slot(mas, slot); - - if (!entry) - return NULL; - - if (mte_parent(entry) == entry) { - printk("%s: Dead node %p", __func__, entry); - MT_BUG_ON(mas->tree, 1); - } - if (mte_parent(entry) != mas_mn(mas)) - return NULL; - - mas_set_slot(mas, slot); - return entry; -} - -static inline struct maple_enode *mas_find_l_split(struct ma_state *mas) +static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child) { - unsigned char i, end = mas_data_end(mas); - struct maple_enode *en = NULL; + unsigned char slot, end = mt_slot_count(mas->node); + struct maple_enode *entry; - for (i = 0; i <= end; i++) { - if ((en = mas_check_split_parent(mas, i))) + for (slot = mas_get_slot(mas); slot < end; slot++) { + entry = mas_get_rcu_slot(mas, slot); + if (!entry) // end of node data. break; + + if (mte_parent(entry) == mas_mn(mas)) { + mas_set_slot(mas, slot); + mas_dup_state(child, mas); + mas_set_slot(mas, slot + 1); + mas_descend(child); + return true; + } } - return en; + return false; } -static inline struct maple_enode *mas_find_r_split(struct ma_state *mas) -{ - unsigned char i = mas_data_end(mas); - struct maple_enode *en; - - do { - en = mas_check_split_parent(mas, i); - } while (!en && i--); - return en; -} -/** Private +/* * mab_shift_right() - Shift the data in mab right. Note, does not clean out the * old data or set b_node->b_end. * @@ -1463,6 +1437,7 @@ static inline void mab_shift_right(struct maple_big_node *b_node, unsigned char shift) { unsigned char b_end = b_node->b_end - 1; + do { b_node->pivot[b_end + shift] = b_node->pivot[b_end]; b_node->slot[b_end + shift] = b_node->slot[b_end]; @@ -1470,7 +1445,7 @@ static inline void mab_shift_right(struct maple_big_node *b_node, } while (b_end--); } -/** Private +/* * mab_middle_node() - Check if a middle node is needed (unlikely) * * @b_node: the maple_big_node that contains the data. 
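 *
 * When a middle node is needed, mab_calc_split() below switches to a
 * three-way split: split is set to b_end / 3 and mid_split to twice that,
 * distributing the data across three roughly equal nodes.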
@@ -1492,7 +1467,7 @@ static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
 return false;
}
-/** Private
+/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 *
 * @b_node: the maple_big_node with the data
@@ -1504,7 +1479,11 @@ static inline int mab_no_null_split(struct maple_big_node *b_node,
 unsigned char split, unsigned char slot_cnt)
{
 if (!b_node->slot[split]) {
- if (split < slot_cnt - 1)
+ /* If the split is less than the max slot and the right side will
+ * still be sufficient, then increment the split on NULL.
+ */
+ if ((split < slot_cnt - 1) &&
+ (b_node->b_end - split) < (mt_min_slots[b_node->type]))
 split++;
 else
 split--;
@@ -1512,7 +1491,7 @@ static inline int mab_no_null_split(struct maple_big_node *b_node,
 return split;
}
-/** Private
+/*
 * mab_calc_split() - Calculate the split location and if there needs to be two
 * splits.
 *
@@ -1526,20 +1505,18 @@ static inline int mab_calc_split(struct maple_big_node *b_node,
 int split = b_node->b_end / 2; // Assume equal split.
 unsigned char slot_cnt = mt_slots[b_node->type];
- if (ma_is_leaf(b_node->type) &&
- mab_middle_node(b_node, split, slot_cnt)) {
+ if (mab_middle_node(b_node, split, slot_cnt)) {
 split = b_node->b_end / 3;
 *mid_split = split * 2;
 } else {
 *mid_split = 0;
 /* Avoid having a range less than the slot count unless it
- * causes one node to be deficient. */
- /* FIXME: Linear allocations will cause either a wasted slot
- * as is, or will cause a deficient node with not enough entries
+ * causes one node to be deficient.
+ * NOTE: mt_min_slots is 1 based; b_end and split are zero based.
 */
 while (((b_node->pivot[split] - b_node->min) < slot_cnt - 1) &&
- (split < slot_cnt) &&
- (b_node->b_end - split > mt_min_slots[b_node->type]))
+ (split < slot_cnt - 1) &&
+ (b_node->b_end - split > mt_min_slots[b_node->type] - 1))
 split++;
 }
@@ -1553,7 +1530,7 @@ static inline int mab_calc_split(struct maple_big_node *b_node,
 return split;
}
-/** Private
+/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 *
@@ -1568,11 +1545,12 @@ static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
 unsigned char mab_start)
{
 int i, j;
+
 for (i = mas_start, j = mab_start; i <= mas_end; i++, j++) {
 b_node->slot[j] = mas_get_rcu_slot(mas, i);
- if (!mte_is_leaf(mas->node) && mt_is_alloc(mas->tree)) {
+ if (!mte_is_leaf(mas->node) && mt_is_alloc(mas->tree))
 b_node->gap[j] = mte_get_gap(mas->node, i);
- }
+
 if (i < mt_pivot_count(mas->node)) {
 b_node->pivot[j] = mas_get_safe_pivot(mas, i);
 } else {
@@ -1589,7 +1567,7 @@ static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
 }
 b_node->b_end = j;
}
-/** Private
+/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 *
 * @b_node: the maple_big_node that has the data
@@ -1604,7 +1582,7 @@ static inline void mab_mas_cp(struct maple_big_node *b_node,
 int i, j = 0;
 for (i = mab_start; i <= mab_end; i++, j++) {
- if(j && !b_node->pivot[i])
+ if (j && !b_node->pivot[i])
 break;
 mas->max = b_node->pivot[i];
@@ -1617,7 +1595,7 @@ static inline void mab_mas_cp(struct maple_big_node *b_node,
 }
}
-/** Private
+/*
 * mas_descend_adopt() - Descend through a sub-tree and adopt children who do
 * not have the correct parents set. 
Follow the parents which have the correct * parents as they are the new entries which need to be followed to find other @@ -1627,40 +1605,43 @@ static inline void mab_mas_cp(struct maple_big_node *b_node, */ static inline void mas_descend_adopt(struct ma_state *mas) { - struct maple_enode *l_enode, *r_enode; + struct ma_state list[3], next[3]; + int i, n; - MA_STATE(l_mas, mas->tree, mas->index, mas->last); - MA_STATE(r_mas, mas->tree, mas->index, mas->last); + mas_dup_state(&next[0], mas); + for (i = 0; i < 3; i++) { + mas_dup_state(&list[i], mas); + mas_set_slot(&list[i], 0); + } - mas_dup_state(&l_mas, mas); - mas_dup_state(&r_mas, mas); - while (!mte_is_leaf(l_mas.node)) { - if (!(l_enode = mas_find_l_split(&l_mas))) { - mas_adopt_children(&l_mas, l_mas.node); - mas_dup_state(&l_mas, &r_mas); - if (!(l_enode = mas_find_l_split(&l_mas))) { - mas_adopt_children(&r_mas, r_mas.node); - break; - } - } + while (!mte_is_leaf(list[0].node)) + { + n = 0; + for (i = 0; i < 3; i++) { + if (mas_is_none(&list[i])) + continue; - if (!(r_enode = mas_find_r_split(&r_mas))) { - mas_adopt_children(&r_mas, r_mas.node); - mas_dup_state(&r_mas, &l_mas); - r_enode = mas_find_r_split(&r_mas); + if (i && list[i-1].node == list[i].node) + continue; + + while ((n < 3) && (mas_new_child(&list[i], &next[n]))) + n++; + + mas_adopt_children(&list[i], list[i].node); } - mas_adopt_children(&l_mas, l_mas.node); - if (r_mas.node != l_mas.node) - mas_adopt_children(&r_mas, r_mas.node); + while (n < 3) + next[n++].node = MAS_NONE; - mas_descend(&l_mas); - mas_descend(&r_mas); + for (i = 0; i < 3; i++) { // descend. + mas_dup_state(&list[i], &next[i]); + mas_set_slot(&list[i], 0); + } } } -/** Private +/* * mas_store_b_node() - Store an @entry into the b_node while also copying the * data from a maple encoded node. * @@ -1723,7 +1704,7 @@ static inline unsigned char mas_store_b_node(struct ma_state *mas, static inline bool mas_node_walk(struct ma_state *mas, enum maple_type type, unsigned long *range_min, unsigned long *range_max); -/** Private +/* * mas_prev_sibling() - Find the previous node with the same parent. * * @mas: the maple state @@ -1731,7 +1712,7 @@ static inline bool mas_node_walk(struct ma_state *mas, enum maple_type type, */ static inline bool mas_prev_sibling(struct ma_state *mas) { - unsigned p_slot = mte_parent_slot(mas->node); + unsigned int p_slot = mte_parent_slot(mas->node); if (mte_is_root(mas->node)) return false; @@ -1745,7 +1726,7 @@ static inline bool mas_prev_sibling(struct ma_state *mas) return true; } -/** Private +/* * mas_next_sibling() - Find the next node with the same parent. * * @mas: the maple state @@ -1754,6 +1735,7 @@ static inline bool mas_prev_sibling(struct ma_state *mas) static inline bool mas_next_sibling(struct ma_state *mas) { unsigned char p_end, p_slot = mte_parent_slot(mas->node); + MA_STATE(parent, mas->tree, mas->index, mas->last); if (mte_is_root(mas->node)) @@ -1772,7 +1754,14 @@ static inline bool mas_next_sibling(struct ma_state *mas) return true; } -/** Private +static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode) +{ + if (enode) + return enode; + + return ma_enode_ptr(MAS_NONE); +} +/* * mast_topiary() - Add the portions of the tree to the removal list; either to * be freed or discarded (destroy walk). 
* @@ -1810,7 +1799,7 @@ static inline void mast_topiary(struct maple_subtree_state *mast) mat_add(mast->destroy, mas_get_rcu_slot(mast->orig_r, slot)); } -/** Private +/* * mast_rebalance_from_siblings() - Rebalance from nodes with the same parents. * Check the right side, then the left. Data is copied into the @mast->bn. * @@ -1818,13 +1807,14 @@ static inline void mast_topiary(struct maple_subtree_state *mast) */ static inline bool mast_rebalance_from_siblings(struct maple_subtree_state *mast) { - unsigned char end; struct maple_enode *left = mast->orig_l->node; struct maple_enode *right = mast->orig_r->node; + unsigned char b_end = mast->bn->b_end; + unsigned char end; if (mas_next_sibling(mast->orig_r)) { end = mas_data_end(mast->orig_r); - mas_mab_cp(mast->orig_r, 0, end, mast->bn, mast->bn->b_end); + mas_mab_cp(mast->orig_r, 0, end, mast->bn, b_end); mat_add(mast->free, right); if (right == left) mast->orig_l->node = mast->orig_r->node; @@ -1832,7 +1822,6 @@ static inline bool mast_rebalance_from_siblings(struct maple_subtree_state *mast return true; } if (mas_prev_sibling(mast->orig_l)) { - unsigned char b_end = mast->bn->b_end; end = mas_data_end(mast->orig_l); mab_shift_right(mast->bn, end + 1); mas_mab_cp(mast->orig_l, 0, end, mast->bn, 0); @@ -1851,7 +1840,7 @@ static inline bool mast_rebalance_from_siblings(struct maple_subtree_state *mast static inline void mas_prev_node(struct ma_state *mas, unsigned long limit); static inline unsigned long mas_next_node(struct ma_state *mas, unsigned long max); -/** Private +/* * mast_rebalance_from_cousins() - Rebalance from nodes with different parents. * Check the right side, then the left. Data is copied into the @mast->bn. * @@ -1900,12 +1889,13 @@ static inline bool mast_rebalance_from_cousins(struct maple_subtree_state *mast) mas_set_slot(mast->l, mas_get_slot(mast->l) + end + 1); return true; } -/** Private - * mast_ascend_free() - ascend the original left and right sides and add the - * previous nodes to the free list. Set the slots to point to the correct location - * in the new nodes. - * +/* + * mast_ascend_free() - Add current original maple state nodes to the free list + * and ascend. * @mast: the maple subtree state. + * + * Ascend the original left and right sides and add the previous nodes to the + * free list. Set the slots to point to the correct location in the new nodes. */ static inline void mast_ascend_free(struct maple_subtree_state *mast) @@ -1940,11 +1930,14 @@ mast_ascend_free(struct maple_subtree_state *mast) &range_min, &range_max); } -/** Private +/* * mas_new_ma_node() - Create and return a new maple node. Helper function. - * * @mas: the maple state with the allocations. * @b_node: the maple_big_node with the type encoding. + * + * Use the node type from the maple_big_node to allocate a new node from the + * maple_state. This function exists mainly for code readability. + * * Returns: A new maple encoded node */ static inline struct maple_enode @@ -1952,7 +1945,7 @@ static inline struct maple_enode { return mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), b_node->type); } -/** Private +/* * mas_mab_to_node() - Set up right and middle nodes * * @mas: the maple state that contains the allocations. @@ -1993,7 +1986,7 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas, } -/** Private +/* * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end * pointer. 
* @b_node - the big node to add the entry
@@ -2013,8 +2006,8 @@ static inline void mab_set_b_end(struct maple_big_node *b_node,
 b_node->pivot[b_node->b_end++] = mas->max;
}
-/** Private
- * mas_set_split_parent() - combine_separate helper function. Sets the parent
+/*
+ * mas_set_split_parent() - mas_spanning_rebalance() helper function. Sets the parent
 * of @mas->node to either @left or @right, depending on @slot and @split
 *
 * @mas - the maple state with the node that needs a parent
@@ -2028,18 +2021,39 @@ static inline void mas_set_split_parent(struct ma_state *mas,
 struct maple_enode *right,
 unsigned char *slot, unsigned char split)
{
- if (!mas->node)
+ if (mas_is_none(mas))
 return;
 if ((*slot) <= split)
 mte_set_parent(mas->node, left, *slot);
- else
+ else if (right)
 mte_set_parent(mas->node, right, (*slot) - split - 1);
 (*slot)++;
}
-/** Private
+/*
+ * mte_mid_split_check() - Check if the next node passes the mid-split
+ */
+static inline void mte_mid_split_check(struct maple_enode **l,
+ struct maple_enode **r,
+ struct maple_enode *right,
+ unsigned char slot,
+ unsigned char *split,
+ unsigned char mid_split)
+{
+ if (*r == right)
+ return;
+
+ if (slot < mid_split)
+ return;
+
+ *l = *r;
+ *r = right;
+ *split = mid_split;
+}
+
+/*
 * mast_set_split_parents() - Helper function to set three nodes parents. Slot
 * is taken from @mast->l.
 *
@@ -2050,19 +2064,42 @@ static inline void mas_set_split_parent(struct ma_state *mas,
 */
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
 struct maple_enode *left,
+ struct maple_enode *middle,
 struct maple_enode *right,
- unsigned char split)
+ unsigned char split,
+ unsigned char mid_split)
{
- unsigned char slot = mas_get_slot(mast->l);
+ unsigned char slot;
+ struct maple_enode *l = left;
+ struct maple_enode *r = right;
+
+ if (mas_is_none(mast->l))
+ return;
+
+ if (middle)
+ r = middle;
+
+ slot = mas_get_slot(mast->l);
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ // Set left parent.
+ mas_set_split_parent(mast->l, l, r, &slot, split);
- mas_set_split_parent(mast->l, left, right, &slot, split);
- mas_set_split_parent(mast->m, left, right, &slot, split);
- mas_set_split_parent(mast->r, left, right, &slot, split);
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ // Set middle parent.
+ mas_set_split_parent(mast->m, l, r, &slot, split);
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ // Set right parent. 
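+	// Illustrative walk-through (values assumed, not from this patch):
+	// with split = 4 and mid_split = 7, slots 0-4 are parented to @left,
+	// slots 5-7 to @middle, and the remaining slots to @right; each
+	// mte_mid_split_check() call slides the (l, r) window once @slot
+	// crosses a boundary.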
+ mas_set_split_parent(mast->r, l, r, &slot, split);
}
static inline void mas_wmb_replace(struct ma_state *mas, struct ma_topiary *free,
 struct ma_topiary *destroy)
{
+ /* Before replacing the tree items, be sure the old data is marked dead
+ * across all CPUs
+ */
 smp_wmb();
 // Insert the new data in the tree
@@ -2097,7 +2134,7 @@ static inline bool mast_new_root(struct maple_subtree_state *mast,
 } while (!mte_is_root(mast->orig_l->node));
 } else if ((mast->orig_l->node != mas->node) &&
 (mast->l->depth > mas->tree->ma_height)) {
- mat_add(mast->free, mas->node);
+ mat_add(mast->free, mas->node);
 }
 mat_add(mast->free, mast->orig_l->node);
@@ -2112,9 +2149,9 @@ static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
 unsigned char split, unsigned char mid_split)
{
- mast->l->node = left;
- mast->m->node = middle;
- mast->r->node = right;
+ mast->l->node = mte_node_or_none(left);
+ mast->m->node = mte_node_or_none(middle);
+ mast->r->node = mte_node_or_none(right);
 mast->l->min = mast->orig_l->min;
 mast->l->max = mast->bn->pivot[split];
@@ -2180,26 +2217,28 @@ static inline void mast_setup_bnode_for_split(struct maple_subtree_state *mast)
 mast->bn->type = mte_node_type(mast->orig_l->node);
}
-/** Private
- *
- * mas_combine_separate() - Follow the tree upwards from @l_mas and @r_mas for
- * @count, or until the root is hit. First @b_node is split into two entries
- * which are inserted into the next iteration of the loop. @b_node is returned
- * populated with the final iteration. @mas is used to obtain allocations.
+/*
+ * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
+ * @mas: The starting maple state
+ * @mast: The maple_subtree_state, keeps track of 4 maple states.
+ * @count: The estimated count of iterations needed.
 *
- * orig_l_mas keeps track of the nodes that will remain active by using
- * orig_l_mas->index and orig_l_mas->last to account of what has been copied
- * into the new sub-tree. The update of orig_l_mas->last is used in mas_consume
- * to find the slots that will need to be either freed or destroyed.
- * orig_l_mas->depth keeps track of the height of the new sub-tree in case the
- * sub-tree becomes the full tree.
+ * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
+ * is hit. First @b_node is split into two entries which are inserted into the
+ * next iteration of the loop. @b_node is returned populated with the final
+ * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
+ * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
+ * to account for what has been copied into the new sub-tree. The update of
+ * orig_l_mas->last is used in mas_consume to find the slots that will need to
+ * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
+ * the new sub-tree in case the sub-tree becomes the full tree.
 *
 * Returns the number of elements in b_node during the last loop.
 */
-static inline int mas_combine_separate(struct ma_state *mas,
- struct maple_subtree_state *mast,
- unsigned char count)
+static inline int mas_spanning_rebalance(struct ma_state *mas,
+ struct maple_subtree_state *mast,
+ unsigned char count)
{
 unsigned char split, mid_split;
 unsigned char slot = 0;
@@ -2216,16 +2255,21 @@ static inline int mas_combine_separate,
 mast->r = &r_mas;
 mast->free = &free;
 mast->destroy = &destroy;
+ l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
- /* MA_START doesn't work here */
- l_mas.node = r_mas.node = m_mas.node = NULL;
-
+ MT_BUG_ON(mas->tree, mast->orig_l->depth != mast->orig_r->depth);
+ mast->orig_l->depth = 0;
 mast_topiary(mast);
- while(count--) {
+ while (count--) {
 mast_setup_bnode_for_split(mast);
 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
 &mid_split);
- mast_set_split_parents(mast, left, right, split);
+ mast_set_split_parents(mast, left, middle, right, split,
+ mid_split);
 mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
 /* Copy data from next level in the tree to mast->bn from next iteration */
@@ -2247,7 +2291,7 @@
 mast_topiary(mast);
 mast->orig_l->last = mast->orig_l->max;
- if(mast_sufficient(mast))
+ if (mast_sufficient(mast))
 continue;
 // Try to get enough data for the next iteration.
@@ -2257,11 +2301,10 @@
 if (!count)
 count++;
 }
-
 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
 mte_node_type(mast->orig_l->node));
 mast->orig_l->depth++;
- mab_mas_cp(mast->bn, 0, mast->bn->b_end, &l_mas);
+ mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas);
 mte_set_parent(left, l_mas.node, slot);
 if (middle)
 mte_set_parent(middle, l_mas.node, ++slot);
@@ -2288,11 +2331,23 @@ new_root:
 return mast->bn->b_end;
}
+/*
+ * mas_rebalance() - Rebalance a given node.
+ *
+ * @mas: The maple state
+ * @b_node: The big maple node.
+ *
+ * Rebalance two nodes into a single node or two new nodes that are sufficient.
+ * Continue upwards until tree is sufficient.
+ *
+ * Returns the number of elements in b_node during the last loop. 
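+ *
+ * For example (illustrative): a node left deficient by an erase copies in
+ * its right sibling's data via mas_mab_cp(); when there is no right
+ * sibling, the left sibling's data is shifted in instead, and the combined
+ * data is redistributed through mas_spanning_rebalance().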
+ */ static inline int mas_rebalance(struct ma_state *mas, struct maple_big_node *b_node) { char empty_cnt = mas->full_cnt * -1; struct maple_subtree_state mast; + unsigned char shift, b_end = ++b_node->b_end; MA_STATE(l_mas, mas->tree, mas->index, mas->last); MA_STATE(r_mas, mas->tree, mas->index, mas->last); @@ -2309,14 +2364,12 @@ static inline int mas_rebalance(struct ma_state *mas, mas_dup_state(&l_mas, mas); mas_dup_state(&r_mas, mas); - b_node->b_end++; if (mas_next_sibling(&r_mas)) { - mas_mab_cp(&r_mas, 0, mas_data_end(&r_mas), b_node, - b_node->b_end); + mas_mab_cp(&r_mas, 0, mas_data_end(&r_mas), b_node, b_end); r_mas.last = r_mas.index = r_mas.max; } else { - unsigned char shift, b_end = b_node->b_end; + mas_prev_sibling(&l_mas); shift = mas_data_end(&l_mas) + 1; mab_shift_right(b_node, shift); @@ -2325,7 +2378,7 @@ static inline int mas_rebalance(struct ma_state *mas, l_mas.index = l_mas.last = l_mas.min; } - return mas_combine_separate(mas, &mast, empty_cnt); + return mas_spanning_rebalance(mas, &mast, empty_cnt); } static inline bool mas_split_final_node(struct maple_subtree_state *mast, @@ -2456,7 +2509,7 @@ static inline int mas_commit_b_node(struct ma_state *mas, if ((b_node->b_end < mt_min_slot_cnt(mas->node)) && (!mte_is_root(mas->node)) && - (mas->tree->ma_height > 1) ) + (mas->tree->ma_height > 1)) return mas_rebalance(mas, b_node); if (b_node->b_end >= mt_slot_count(mas->node)) @@ -2509,8 +2562,9 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry) if (mt_is_alloc(mas->tree)) { //FIXME: arch_get_mmap_end? mas->index = TASK_SIZE / PAGE_SIZE - 1; unsigned long mmap_end = 0x2000000000000UL; + if (mas->index < mmap_end - 1) - mte_set_pivot(mas->node, slot++, mmap_end - 1); + mte_set_pivot(mas->node, slot++, mmap_end - 1); mte_set_rcu_slot(mas->node, slot, XA_ZERO_ENTRY); mte_set_pivot(mas->node, slot++, mt_max[type]); } @@ -2524,6 +2578,7 @@ static inline int ma_root_ptr(struct ma_state *mas, void *entry, void *contents, bool overwrite) { int ret = 1; + if (xa_is_node(mas->tree->ma_root)) return 0; @@ -2548,7 +2603,7 @@ exists: mas_set_err(mas, -EEXIST); return 0; } -/** Private +/* * * mas_is_span_() - Set span_enode if there is no value already and the * entry being written spans this nodes slot or touches the end of this slot and @@ -2575,10 +2630,10 @@ static inline bool mas_is_span_wr(struct ma_state *mas, unsigned long piv, return false; if (!mte_is_leaf(mas->node)) { // Internal nodes. - if ( mas->last < piv) // Fits in the slot. + if (mas->last < piv) // Fits in the slot. return false; - if (entry && piv == mas->last) // Writes to the end of the child node, but has a value. + if (entry && piv == mas->last) // Writes a value to the end of the child node return false; } else { // A leaf node. if (mas->last < mas->max) // Fits in the node, but may span slots. @@ -2654,7 +2709,7 @@ static inline void mas_cnt_empty(struct ma_state *mas) else mas->full_cnt--; } -/** Private +/* * * mas_wr_walk(): Walk the tree for a write. Tracks extra information which * is used in special cases of a write. 
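 * The extra information is mas->span_enode, set when the write spans a
 * slot boundary, and mas->full_cnt, which goes up for each full node and
 * down for each almost empty node on the way down.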
@@ -2713,7 +2768,7 @@ static inline bool mas_wr_walk(struct ma_state *mas, unsigned long *range_min, return ret; } -static inline unsigned char mas_extend_null( struct ma_state *l_mas, +static inline unsigned char mas_extend_null(struct ma_state *l_mas, struct ma_state *r_mas) { unsigned char l_slot = mas_get_slot(l_mas); @@ -2759,7 +2814,7 @@ static inline unsigned char mas_extend_null( struct ma_state *l_mas, return r_slot; } /* - * Private + * * __mas_walk(): Locates a value and sets the mas->node and slot accordingly. * range_min and range_max are set to the range which the entry is valid. * Returns true if mas->node is a leaf. @@ -2787,6 +2842,7 @@ static inline bool __mas_walk(struct ma_state *mas, unsigned long *range_min, next = mas_get_rcu_slot(mas, mas_get_slot(mas)); // Traverse. + mas->depth++; mas->max = *range_max; mas->min = *range_min; if (unlikely(mt_is_empty(next))) @@ -2797,7 +2853,7 @@ static inline bool __mas_walk(struct ma_state *mas, unsigned long *range_min, } return ret; } -/** Private +/* * * mas_spanning_store() - Create a subtree with the store operation completed * and new nodes where necessary, then place the sub-tree in the actual tree. @@ -2823,8 +2879,9 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry) else node_cnt = 1 + -1 * mas->full_cnt * 2; // For rebalance upwards. - /* Node rebalancing may occur due to a store, so there may be two new - * entries per level plus a new root. */ + /* Node rebalancing may occur due to this store, so there may be two new + * entries per level plus a new root. + */ node_cnt += 1 + mas->tree->ma_height * 2; mas_node_cnt(mas, node_cnt); if (mas_is_err(mas)) @@ -2834,17 +2891,23 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry) mast.orig_l = &l_mas; mast.orig_r = &r_mas; + // FIXME: Is this needed? +#if 0 mas_dup_state(&l_mas, mas); mas->last = mas->index; mas_node_walk(mas, mte_node_type(mas->node), &range_min, &range_max); mas->index = mas->last = l_mas.last; mas_node_walk(mas, mte_node_type(mas->node), &range_min, &range_max); - mas_dup_state(mas, &l_mas); +#endif + // Set up right side. mas_dup_state(&r_mas, mas); - r_mas.last++; + r_mas.depth = mas->depth; + if (r_mas.last + 1) // Avoid overflow. + r_mas.last++; + r_mas.index = r_mas.last; mas_set_slot(&r_mas, 0); __mas_walk(&r_mas, &range_min, &range_max); @@ -2852,9 +2915,12 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry) // Set up left side. mas_dup_state(&l_mas, mas); + l_mas.depth = mas->depth; mas_set_slot(&l_mas, 0); __mas_walk(&l_mas, &range_min, &range_max); + MT_BUG_ON(mas->tree, l_mas.depth != r_mas.depth); + if (!entry) { mas_extend_null(&l_mas, &r_mas); mas->index = l_mas.index; @@ -2875,8 +2941,7 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry) count = mas_cnt_positive(mas) + mas->tree->ma_height - mas->depth + 1; // Combine l_mas and r_mas and split them up evenly again. 
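+	// Illustrative example (values assumed, not from this patch): storing
+	// 5-15 over leaves holding [0-9] and [10-19] walks l_mas to the leaf
+	// containing 5 and r_mas to the leaf containing 16 (last + 1), then
+	// rebuilds the span between them as a single balanced sub-tree.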
- l_mas.depth = 0; - return mas_combine_separate(mas, &mast, count); + return mas_spanning_rebalance(mas, &mast, count); } static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite) @@ -2890,7 +2955,7 @@ static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite int ret = 0; if (mas_start(mas) || (mas_is_none(mas) || mas->node == MAS_ROOT)) - ret = ma_root_ptr(mas, entry, content, overwrite); + ret = ma_root_ptr(mas, entry, content, overwrite); if (mas_is_err(mas)) return NULL; @@ -2917,7 +2982,7 @@ static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite slot = mas_get_slot(mas); slot_cnt = mt_slot_count(mas->node); content = mas_get_rcu_slot(mas, slot); - if (!overwrite && ((mas->last > r_max) || content )) { + if (!overwrite && ((mas->last > r_max) || content)) { mas_set_err(mas, -EEXIST); goto exists; } @@ -2973,15 +3038,16 @@ void *mas_store(struct ma_state *mas, void *entry) } static inline int mas_dead_node(struct ma_state *mas, unsigned long index); -/** Private +/* * mas_prev_node() - Find the prev non-null entry at the same level in the * tree. The prev value will be mas->node[mas_get_slot(mas)] or MAS_NONE. */ static inline void mas_prev_node(struct ma_state *mas, unsigned long limit) { - int level; + unsigned long pivot, start_piv, last_pivot, min; int slot = mas_get_slot(mas); - unsigned long start_piv; + struct maple_enode *mn; + int level; start_piv = mas_get_safe_pivot(mas, slot); restart_prev_node: @@ -2990,7 +3056,7 @@ restart_prev_node: goto no_entry; while (1) { - unsigned long min; + slot = mte_parent_slot(mas->node); mas_ascend(mas); level++; @@ -3003,9 +3069,7 @@ restart_prev_node: slot--; do { - struct maple_enode *mn; - unsigned long last_pivot; - unsigned long pivot = mas_get_safe_pivot(mas, slot); + pivot = mas_get_safe_pivot(mas, slot); min = mas_get_safe_lower_bound(mas, slot); if (pivot < limit) @@ -3101,9 +3165,9 @@ restart_next_node: if (level == 1) { mas_set_slot(mas, slot); mas->node = mn; - if (mas_dead_node(mas, start_piv)) { + if (mas_dead_node(mas, start_piv)) goto restart_next_node; - } + return pivot; } @@ -3124,7 +3188,7 @@ no_entry: } -/** Private +/* * prev node entry */ static inline bool mas_prev_nentry(struct ma_state *mas, unsigned long limit, @@ -3155,7 +3219,7 @@ static inline bool mas_prev_nentry(struct ma_state *mas, unsigned long limit, mas_set_slot(mas, slot); return true; } -/** Private +/* * mas_next_nentry() - Next node entry. Set the @mas slot to the next valid * entry and range_start to the start value for that entry. If there is no * entry, returns false. @@ -3208,18 +3272,19 @@ found: mas_set_slot(mas, slot); return true; } -/** Private +/* * * Returns the pivot which points to the entry with the highest index. * @mas slot is set to the entry location. * @limit is the minimum index to check. 
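 * mas_prev(), for example, calls this with a @limit of ULONG_MAX on its
 * first run to start the reverse walk from the last entry in the tree.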
* */ -static inline void* mas_last_entry(struct ma_state *mas, +static inline void *mas_last_entry(struct ma_state *mas, unsigned long limit) { unsigned long prev_min, prev_max, range_start = 0; unsigned char slot = 1; + void *entry; if (mas_start(mas) || mas_is_none(mas)) return NULL; @@ -3229,12 +3294,13 @@ static inline void* mas_last_entry(struct ma_state *mas, while (range_start < limit) { mas_set_slot(mas, slot); if (!mas_next_nentry(mas, limit, &range_start)) { - void *entry = mas_get_rcu_slot(mas, slot - 1); + entry = mas_get_rcu_slot(mas, slot - 1); if (mte_is_leaf(mas->node)) { mas->index = range_start - 1; mas->index = mte_get_pivot(mas->node, slot - 1); return entry; } + mas->max = prev_max; mas->min = prev_min; mas->node = entry; @@ -3251,7 +3317,7 @@ static inline void* mas_last_entry(struct ma_state *mas, return NULL; } -/** Private +/* * * __mas_next() Set the @mas->node to the next entry and the range_start to * the beginning value for the entry. Does not check beyond @limit. @@ -3265,6 +3331,7 @@ static inline void *__mas_next(struct ma_state *mas, unsigned long limit, void *entry = NULL; unsigned long index = mas->index; unsigned char slot = mas_get_slot(mas); + mas_set_slot(mas, slot + 1); retry: @@ -3309,12 +3376,12 @@ next_node: return entry; } -/** Private +/* * * _mas_prev() - Find the previous entry from the current ma state. * @mas the current maple state (must have a valid slot) */ -static inline void* _mas_prev(struct ma_state *mas, unsigned long limit) +static inline void *_mas_prev(struct ma_state *mas, unsigned long limit) { unsigned long max = mas->max; unsigned char slot; @@ -3327,8 +3394,10 @@ static inline void* _mas_prev(struct ma_state *mas, unsigned long limit) mas_set_slot(mas, mt_slot_count(mas->node)); } - if (mas_is_none(mas)) + if (mas_is_none(mas)) { + mas->index = 0; return NULL; + } mas->last = max; slot = mas_get_slot(mas); @@ -3344,12 +3413,16 @@ static inline void* _mas_prev(struct ma_state *mas, unsigned long limit) void *mas_prev(struct ma_state *mas, unsigned long min) { void *entry; - if (mas->node && !mas_searchable(mas)) + + if (!mas->index) // Nothing comes before 0. return NULL; - if (!mas->node) + if (mas_is_none(mas)) mas->node = MAS_START; + if (!mas_searchable(mas)) + return NULL; + if (mas_is_start(mas)) { mas_start(mas); return mas_last_entry(mas, ULONG_MAX); @@ -3458,6 +3531,7 @@ static inline bool _mas_awalk(struct ma_state *mas, unsigned long size) switch (type) { default: slot = mas_get_slot(mas); + fallthrough; case maple_leaf_64: min = mas_get_safe_lower_bound(mas, slot); for (; slot <= pivot_cnt; slot++) { @@ -3521,7 +3595,7 @@ next_slot: return found; } -/** Private +/* * _mas_range_walk(): A walk that supports returning the range in which an * index is located. * @@ -3589,7 +3663,7 @@ static inline bool mas_search_cont(struct ma_state *mas, unsigned long index, return true; } -/** +/* * mas_pause() - Pause a mas_find/mas_for_each to drop the lock. 
* * Some users need to pause a walk and drop the lock they're holding in @@ -3778,11 +3852,12 @@ void mas_set_rev_index(struct ma_state *mas, unsigned long size) mas->last = gap_max; mas->index = mas->last - size + 1; } -static void _mas_empty_or_single_unmapped_area(struct ma_state *mas, +static void _mas_empty_or_single_empty_area(struct ma_state *mas, unsigned long min, unsigned long max, unsigned long size, bool fwd) { unsigned long start = 0; + if (!mas_is_none(mas)) start++; // mas_is_ptr @@ -3797,16 +3872,16 @@ static void _mas_empty_or_single_unmapped_area(struct ma_state *mas, mas->index = max; } -static inline int _mas_get_unmapped_area(struct ma_state *mas, - unsigned long min, unsigned long max, unsigned long size, bool - forward) +static inline int _mas_get_empty_area(struct ma_state *mas, + unsigned long min, unsigned long max, + unsigned long size, bool forward) { mas_start(mas); max--; // Convert to inclusive. // Empty set. if (mas_is_none(mas) || mas_is_ptr(mas)) { - _mas_empty_or_single_unmapped_area(mas, min, max, size, forward); + _mas_empty_or_single_empty_area(mas, min, max, size, forward); return 0; } @@ -3834,17 +3909,17 @@ static inline int _mas_get_unmapped_area(struct ma_state *mas, return 0; } -int mas_get_unmapped_area(struct ma_state *mas, unsigned long min, +int mas_get_empty_area(struct ma_state *mas, unsigned long min, unsigned long max, unsigned long size) { - return _mas_get_unmapped_area(mas, min, max, size, true); + return _mas_get_empty_area(mas, min, max, size, true); } -int mas_get_unmapped_area_rev(struct ma_state *mas, unsigned long min, +int mas_get_empty_area_rev(struct ma_state *mas, unsigned long min, unsigned long max, unsigned long size) { - return _mas_get_unmapped_area(mas, min, max, size, false); + return _mas_get_empty_area(mas, min, max, size, false); } -/** Private +/* * mas_alloc() - Allocate a range. * * Give a size, a minimum starting point (mas->index), a maximum (mas->last), @@ -3858,8 +3933,8 @@ static inline int mas_alloc(struct ma_state *mas, void *entry, { unsigned char slot = MAPLE_NODE_SLOTS; unsigned long min; - mas_start(mas); + mas_start(mas); if (mas_is_none(mas) || mas_is_ptr(mas)) { mas_root_expand(mas, entry); if (mas_is_err(mas)) @@ -3871,7 +3946,6 @@ static inline int mas_alloc(struct ma_state *mas, void *entry, } mas_awalk(mas, size); // Must be walking a tree. - if (mas_is_err(mas)) return xa_err(mas->node); @@ -3893,7 +3967,7 @@ static inline int mas_alloc(struct ma_state *mas, void *entry, no_gap: return -EBUSY; } -/** Private +/* * mas_rev_alloc() - Reverse allocate a range. * * Give a size, a minimum value (mas->index), a maximum starting point @@ -3909,7 +3983,7 @@ static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min, unsigned char slot = MAPLE_NODE_SLOTS; int ret = 0; - ret = _mas_get_unmapped_area(mas, min, max, size, false); + ret = _mas_get_empty_area(mas, min, max, size, false); if (ret) return ret; @@ -3926,7 +4000,7 @@ no_gap: return -EBUSY; } -/** +/* * * Must hold rcu_read_lock or the write lock. * @@ -3973,7 +4047,7 @@ void *mas_load(struct ma_state *mas) return mas_range_load(mas, &range_min, &range_max); } -/** Private +/* * * _mas_next() - Finds the next entry, sets index to the start of the range. * @@ -3984,10 +4058,10 @@ static inline void *_mas_next(struct ma_state *mas, unsigned long limit, void *entry = NULL; unsigned long range_max; - if (mas->node && !mas_searchable(mas)) + if (!mas_searchable(mas)) return NULL; - if (!mas->node || mas_is_start(mas)) {// First run. 
+ if (mas_is_start(mas)) {// First run. *range_start = 0; mas_start(mas); entry = mas_range_load(mas, range_start, &range_max); @@ -4000,7 +4074,7 @@ static inline void *_mas_next(struct ma_state *mas, unsigned long limit, return __mas_next(mas, limit, range_start); } -/** +/* * mas_find: If mas->node == MAS_START, find the first * non-NULL entry >= mas->index. * Otherwise, find the first non-NULL entry > mas->index @@ -4067,7 +4141,8 @@ void *_mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max, return entry; } -void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) { +void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) +{ return _mt_find(mt, index, max, true); } EXPORT_SYMBOL(mt_find); @@ -4084,7 +4159,7 @@ void *mas_next(struct ma_state *mas, unsigned long max) return _mas_next(mas, max, &index); } EXPORT_SYMBOL_GPL(mas_next); -/** Private +/* * mas_erase() - Find the range in which index resides and erase the entire * range. * @@ -4208,8 +4283,8 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, unsigned long max, gfp_t gfp) { int ret = 0; - MA_STATE(mas, mt, min, max - size); + MA_STATE(mas, mt, min, max - size); if (!mt_is_alloc(mt)) return -EINVAL; @@ -4242,8 +4317,8 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, unsigned long max, gfp_t gfp) { int ret = 0; - MA_STATE(mas, mt, min, max - size); + MA_STATE(mas, mt, min, max - size); if (!mt_is_alloc(mt)) return -EINVAL; @@ -4315,8 +4390,8 @@ EXPORT_SYMBOL(mtree_destroy); #ifdef CONFIG_DEBUG_MAPLE_TREE unsigned int maple_tree_tests_run; -unsigned int maple_tree_tests_passed; EXPORT_SYMBOL_GPL(maple_tree_tests_run); +unsigned int maple_tree_tests_passed; EXPORT_SYMBOL_GPL(maple_tree_tests_passed); #ifndef __KERNEL__ @@ -4332,7 +4407,7 @@ unsigned long mt_get_alloc_size(void) return kmem_cache_get_alloc(maple_node_cache); } #define MA_PTR "%p" -#else +#else // __KERNEL__ is defined. #define MA_PTR "%px" #endif // Tree validations @@ -4381,7 +4456,7 @@ void mt_dump_range64(void *entry, unsigned long min, unsigned long max, if (i < (MAPLE_RANGE64_SLOTS - 1)) last = node->pivot[i]; - else if (node->slot[i] == NULL && max != mt_max[mte_node_type(entry)]) + else if (!node->slot[i] && max != mt_max[mte_node_type(entry)]) break; if (last == 0 && i > 0) break; @@ -4421,7 +4496,7 @@ void mt_dump_arange64(void *entry, unsigned long min, unsigned long max, if (i < (MAPLE_ARANGE64_SLOTS - 1)) last = node->pivot[i]; - else if (node->slot[i] == NULL) + else if (!node->slot[i]) break; if (last == 0 && i > 0) break; @@ -4486,7 +4561,7 @@ void mt_dump(const struct maple_tree *mt) mt_dump_node(entry, 0, mt_max[mte_node_type(entry)], 0); } -/** +/* * Calculate the maximum gap in a node and check if that's what is reported in * the parent (unless root). */ @@ -4526,6 +4601,7 @@ void mas_validate_gaps(struct ma_state *mas) gap += p_end - p_start + 1; } else { void *entry = mas_get_rcu_slot(mas, i); + gap = mte_get_gap(mte, i); if (mt_is_empty(entry)) { if (gap != p_end - p_start + 1) { @@ -4605,7 +4681,8 @@ void mas_validate_parent_slot(struct ma_state *mas) } } } -void mas_validate_child_slot(struct ma_state *mas) { +void mas_validate_child_slot(struct ma_state *mas) +{ enum maple_type type = mte_node_type(mas->node); struct maple_enode *child; unsigned char i; @@ -4633,7 +4710,7 @@ void mas_validate_child_slot(struct ma_state *mas) { } } } -/** +/* * Validate all pivots are within mas->min and mas->max. 
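 * A pivot outside this window would indicate a node that was split or
 * copied without its parent's pivots being updated to match.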
*/ void mas_validate_limits(struct ma_state *mas) @@ -4665,7 +4742,7 @@ void mas_validate_limits(struct ma_state *mas) mt_dump(mas->tree); MT_BUG_ON(mas->tree, piv < mas->min); } - if ((piv > mas->max)) { + if (piv > mas->max) { pr_err(MA_PTR"[%u] %lu > %lu\n", mas_mn(mas), i, piv, mas->max); mt_dump(mas->tree); @@ -4684,7 +4761,7 @@ static inline void mas_dfs_postorder(struct ma_state *mas, unsigned long max) mas_set_slot(mas, mte_parent_slot(mas->node)); mas_next_node(mas, max); - if (mas->node != MAS_NONE) + if (!mas_is_none(mas)) return; if (mte_is_root(mn)) @@ -4706,7 +4783,7 @@ static inline void mas_dfs_postorder(struct ma_state *mas, unsigned long max) mas->max = p_max; mas->min = p_min; } -/** +/* * validate a maple tree by checking: * 1. The limits (pivots are within mas->min to mas->max) * 2. The gap is correctly set in the parents @@ -4714,16 +4791,16 @@ static inline void mas_dfs_postorder(struct ma_state *mas, unsigned long max) void mt_validate(struct maple_tree *mt) { unsigned char end; - MA_STATE(mas, mt, 0, 0); - + MA_STATE(mas, mt, 0, 0); rcu_read_lock(); mas_start(&mas); mas_first_entry(&mas, ULONG_MAX); while (mas.node != MAS_NONE) { if (!mte_is_root(mas.node)) { end = mas_data_end(&mas); - if (end < mt_min_slot_cnt(mas.node)) { + if ((end < mt_min_slot_cnt(mas.node)) && + (mas.max != ULONG_MAX)) { pr_err("Invalid size %u of "MA_PTR"\n", end, mas_mn(&mas)); MT_BUG_ON(mas.tree, 1); diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 2c52798e0729..e81c8d585ba4 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -284,6 +284,7 @@ static noinline void check_new_node(struct maple_tree *mt) MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0); for (int i = 1; i < 128; i++) { int j; + mas_node_cnt(&mas, i); // Request mas_nomem(&mas, GFP_KERNEL); // Fill request MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != i); // check request filled @@ -296,15 +297,15 @@ static noinline void check_new_node(struct maple_tree *mt) } for (int i = 1; i < 128; i++) { int j; - MA_STATE(mas2, mt, 0, 0); + MA_STATE(mas2, mt, 0, 0); mas_node_cnt(&mas, i); // Request mas_nomem(&mas, GFP_KERNEL); // Fill request MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != i); // check request filled for (j = 1; j <= i; j++) { // Move the allocations to mas2 mn = mas_next_alloc(&mas); // get the next node. 
MT_BUG_ON(mt, mn == NULL); - mas_push_node(&mas2, (struct maple_enode*)mn); + mas_push_node(&mas2, (struct maple_enode *)mn); MT_BUG_ON(mt, mas_get_alloc_cnt(&mas2) != j); } MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0); @@ -392,7 +393,7 @@ static noinline void check_mid_split(struct maple_tree *mt) unsigned long huge = 8000UL * 1000 * 1000; check_insert(mt, huge, (void *) huge); - check_insert(mt, 0, xa_mk_value(0) ); + check_insert(mt, 0, xa_mk_value(0)); check_lb_not_empty(mt); } @@ -569,6 +570,19 @@ static noinline void check_find(struct maple_tree *mt) MT_BUG_ON(mt, entry != entry2); MT_BUG_ON(mt, index != mas.index); MT_BUG_ON(mt, last != mas.last); + + + mas.node = MAS_NONE; + mas.index = ULONG_MAX; + mas.last = ULONG_MAX; + entry2 = mas_prev(&mas, 0); + MT_BUG_ON(mt, entry != entry2); + + mas_reset(&mas); + mas.index = 0; + mas.last = 0; + MT_BUG_ON(mt, !!mas_prev(&mas, 0)); + mas_unlock(&mas); mtree_destroy(mt); } @@ -577,8 +591,8 @@ static noinline void check_find_2(struct maple_tree *mt) { unsigned long i, j; void *entry; - MA_STATE(mas, mt, 0, 0); + MA_STATE(mas, mt, 0, 0); rcu_read_lock(); mas_for_each(&mas, entry, ULONG_MAX) MT_BUG_ON(mt, true); @@ -604,7 +618,8 @@ static noinline void check_find_2(struct maple_tree *mt) rcu_read_lock(); mas_for_each(&mas, entry, ULONG_MAX) { if (xa_is_zero(entry)) - continue; + continue; + MT_BUG_ON(mt, entry != xa_mk_value(j)); j++; } @@ -739,9 +754,8 @@ static noinline void check_erase_testset(struct maple_tree *mt) for (int i = 5; i < 25; i++) { erase_check_insert(mt, i); - for (int j = i; j >= 0; j--) { + for (int j = i; j >= 0; j--) erase_check_load(mt, j); - } } erase_check_erase(mt, 14); //6015 @@ -827,9 +841,9 @@ static noinline void check_erase_testset(struct maple_tree *mt) else erase_check_load(mt, i); } - for (int i = 23; i < 25; i++) { + for (int i = 23; i < 25; i++) erase_check_erase(mt, i); - } + for (int i = 0; i < 25; i++) { if (i <= 25 && i >= 13) check_load(mt, set[i], NULL); @@ -874,7 +888,7 @@ static noinline void check_erase_testset(struct maple_tree *mt) #define SNULL 2 #define ERASE 3 #define ec_type_str(x) \ - ( ((x) == STORE) ? \ + (((x) == STORE) ? \ "STORE" : \ (((x) == SNULL) ? \ "SNULL" : "ERASE") \ @@ -888,14 +902,16 @@ static noinline void check_erase2_testset(struct maple_tree *mt, int check = 0; void *foo; unsigned long addr = 0; - MA_STATE(mas, mt, 0, 0); void *s_entry = NULL, *e_entry = NULL; unsigned long retry = 0; + MA_STATE(mas, mt, 0, 0); + for (int i = 0; i < size; i += 3) { unsigned long s_min, s_max; unsigned long e_min, e_max; void *value = NULL; + MA_STATE(mas_start, mt, set[i+1], set[i+1]); MA_STATE(mas_end, mt, set[i+2], set[i+2]); mt_set_non_kernel(127); @@ -922,13 +938,14 @@ static noinline void check_erase2_testset(struct maple_tree *mt, if ((s_min == e_min) && (s_max == e_max)) { if (!entry_cnt) entry_cnt++; + else if (!mt_is_empty(s_entry)) { - if (e_max > mas_end.last) { + if (e_max > mas_end.last) entry_cnt++; - } - if (s_min < mas_start.index) { + + if (s_min < mas_start.index) entry_cnt++; - } + } else { entry_cnt++; } @@ -945,7 +962,7 @@ static noinline void check_erase2_testset(struct maple_tree *mt, s_entry = mas_next(&mas_start, set[i+2]); while (!mas_is_none(&mas_start) && - (mas_start.last != e_max) ) { + (mas_start.last != e_max)) { BUG_ON(retry > 50); // stop infinite retry on testing. 
if (xa_is_zero(s_entry)) { retry++; @@ -963,6 +980,8 @@ static noinline void check_erase2_testset(struct maple_tree *mt, erase_check_store_range(mt, set, i + 1, value); break; case ERASE: + if (!s_entry) + break; check_erase(mt, set[i+1], xa_mk_value(set[i+1])); entry_cnt--; break; @@ -10765,10 +10784,6 @@ SNULL, 140590386823167, 140590386827263, STORE, 140590386819072, 140590386823167, STORE, 140590386823168, 140590386827263, SNULL, 140590376591359, 140590376595455, -/* -STORE, 140590376587264, 140590376591359, -STORE, 140590376591360, 140590376595455, -*/ }; unsigned long set21[] = { STORE, 93874710941696, 93874711363583, @@ -29669,16 +29684,6 @@ ERASE, 140612707799040, 140612707803135, ERASE, 140612707803136, 140612716191743, ERASE, 140613504712704, 140613504716799, ERASE, 140613504716800, 140613513105407, -#if 0 -ERASE, 140613546643456, 140613546647551, -ERASE, 140613546647552, 140613555036159, -ERASE, 140613387280384, 140613387284479, -ERASE, 140613387284480, 140613395673087, -ERASE, 140613538250752, 140613538254847, -ERASE, 140613538254848, 140613546643455, -ERASE, 140612699406336, 140612699410431, -ERASE, 140612699410432, 140612707799039, -#endif }; unsigned long set39[] = { @@ -30807,6 +30812,7 @@ ERASE, 140320163409920, 140320163414015, int cnt = 0; void *ptr = NULL; + MA_STATE(mas, mt, 0, 0); mt_set_non_kernel(3); @@ -30870,7 +30876,7 @@ ERASE, 140320163409920, 140320163414015, mtree_init(mt, MAPLE_ALLOC_RANGE); check_erase2_testset(mt, set11, ARRAY_SIZE(set11)); rcu_barrier(); - mas_get_unmapped_area_rev(&mas, 12288, 140014592737280, 0x2000); + mas_get_empty_area_rev(&mas, 12288, 140014592737280, 0x2000); MT_BUG_ON(mt, mas.index != 140014592565248); mtree_destroy(mt); @@ -30894,7 +30900,7 @@ ERASE, 140320163409920, 140320163414015, check_erase2_testset(mt, set13, ARRAY_SIZE(set13)); mtree_erase(mt, 140373516443648); rcu_read_lock(); - mas_get_unmapped_area_rev(&mas, 0, 140373518663680, 4096); + mas_get_empty_area_rev(&mas, 0, 140373518663680, 4096); rcu_read_unlock(); mtree_destroy(mt); mtree_init(mt, MAPLE_ALLOC_RANGE); @@ -30912,7 +30918,7 @@ ERASE, 140320163409920, 140320163414015, mtree_init(mt, MAPLE_ALLOC_RANGE); check_erase2_testset(mt, set16, ARRAY_SIZE(set16)); rcu_barrier(); - mas_get_unmapped_area_rev(&mas, 4096, 139921865637888, 0x6000); + mas_get_empty_area_rev(&mas, 4096, 139921865637888, 0x6000); MT_BUG_ON(mt, mas.index != 139921865523200); mtree_destroy(mt); @@ -30925,7 +30931,7 @@ ERASE, 140320163409920, 140320163414015, mtree_init(mt, MAPLE_ALLOC_RANGE); check_erase2_testset(mt, set17, ARRAY_SIZE(set17)); rcu_barrier(); - mas_get_unmapped_area_rev(&mas, 4096, 139953197334528, 0x1000); + mas_get_empty_area_rev(&mas, 4096, 139953197334528, 0x1000); MT_BUG_ON(mt, mas.index != 139953197318144); mtree_destroy(mt); @@ -30938,7 +30944,7 @@ ERASE, 140320163409920, 140320163414015, mtree_init(mt, MAPLE_ALLOC_RANGE); check_erase2_testset(mt, set18, ARRAY_SIZE(set18)); rcu_barrier(); - mas_get_unmapped_area_rev(&mas, 4096, 140222972858368, 2215936); + mas_get_empty_area_rev(&mas, 4096, 140222972858368, 2215936); MT_BUG_ON(mt, mas.index != 140222966259712); mtree_destroy(mt); @@ -31018,7 +31024,7 @@ ERASE, 140320163409920, 140320163414015, mtree_init(mt, MAPLE_ALLOC_RANGE); check_erase2_testset(mt, set26, ARRAY_SIZE(set26)); rcu_barrier(); - mas_get_unmapped_area_rev(&mas, 4096, 140109042671616, 409600); + mas_get_empty_area_rev(&mas, 4096, 140109042671616, 409600); MT_BUG_ON(mt, mas.index != 140109040549888); mt_set_non_kernel(0); mt_validate(mt); @@ -31040,8 +31046,8 @@ 
ERASE, 140320163409920, 140320163414015,
 	mtree_init(mt, MAPLE_ALLOC_RANGE);
 	check_erase2_testset(mt, set28, ARRAY_SIZE(set28));
 	rcu_barrier();
-	mas_get_unmapped_area_rev(&mas, 4096, 139918413357056, 4190208);
-	mas.index = (mas.index + 2093056 - 0) &(~2093056); // align_mast = 2093056 offset = 0
+	mas_get_empty_area_rev(&mas, 4096, 139918413357056, 4190208);
+	mas.index = (mas.index + 2093056 - 0) & (~2093056); // align_mask = 2093056 offset = 0
 	MT_BUG_ON(mt, mas.index != 139918401601536);
 	mt_set_non_kernel(0);
 	mt_validate(mt);
@@ -31091,11 +31097,13 @@ ERASE, 140320163409920, 140320163414015,
 	mt_validate(mt);
 	mtree_destroy(mt);
 
-//mmap: unmapped_area_topdown: ffff88821c9cb600 Gap was found: mt 140582827569152 gap_end 140582869532672
-//mmap: window was 140583656296448 - 4096 size 134217728
-//mmap: mas.min 94133881868288 max 140582961786879 mas.last 140582961786879
-//mmap: mas.index 140582827569152 align mask 0 offset 0
-//mmap: rb_find_vma find on 140582827569152 => ffff88821c5bad00 (ffff88821c5bad00)
+/* mmap: empty_area_topdown: ffff88821c9cb600 Gap was found:
+ * mt 140582827569152 gap_end 140582869532672
+ * mmap: window was 140583656296448 - 4096 size 134217728
+ * mmap: mas.min 94133881868288 max 140582961786879 mas.last 140582961786879
+ * mmap: mas.index 140582827569152 align mask 0 offset 0
+ * mmap: rb_find_vma find on 140582827569152 => ffff88821c5bad00 (ffff88821c5bad00)
+ */
 
 // move gap failed due to an entirely empty node.
 	mt_set_non_kernel(99);
@@ -31103,7 +31111,7 @@ ERASE, 140320163409920, 140320163414015,
 	mtree_init(mt, MAPLE_ALLOC_RANGE);
 	check_erase2_testset(mt, set33, ARRAY_SIZE(set33));
 	rcu_barrier();
-	mas_get_unmapped_area_rev(&mas, 4096, 140583656296448, 134217728);
+	mas_get_empty_area_rev(&mas, 4096, 140583656296448, 134217728);
 	MT_BUG_ON(mt, mas.index != 140582869532672);
 	mt_set_non_kernel(0);
 	mt_validate(mt);
@@ -31171,9 +31179,6 @@ ERASE, 140320163409920, 140320163414015,
 	rcu_barrier();
 	mt_validate(mt);
 	mtree_destroy(mt);
-
-
-
 }
 
 static noinline void check_alloc_rev_range(struct maple_tree *mt)
@@ -31296,12 +31301,12 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
 				min, holes[i+1]>>12, holes[i+2]>>12,
 				holes[i] >> 12);
 #endif
-		MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, min,
+		MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, min,
 					holes[i+1] >> 12,
 					holes[i+2] >> 12));
 #if DEBUG_REV_RANGE
-		printk("Found %lu %lu\n", mas.index, mas.last);
-		printk("gap %lu %lu\n", (holes[i] >> 12),
+		pr_debug("Found %lu %lu\n", mas.index, mas.last);
+		pr_debug("gap %lu %lu\n", (holes[i] >> 12),
 				(holes[i+1] >> 12));
 #endif
 		MT_BUG_ON(mt, mas.last + 1 != (holes[i+1] >> 12));
@@ -31434,6 +31439,7 @@ static noinline void check_alloc_range(struct maple_tree *mt)
 	};
 	int i, range_cnt = ARRAY_SIZE(range);
 	int req_range_cnt = ARRAY_SIZE(req_range);
+	unsigned long min = 0x565234af2000;
 
 	for (i = 0; i < range_cnt; i += 2) {
#define DEBUG_ALLOC_RANGE 0
@@ -31449,14 +31455,14 @@ static noinline void check_alloc_range(struct maple_tree *mt)
 
 	MA_STATE(mas, mt, 0, 0);
-	unsigned long min = 0x565234af2000;
-	for (i = 0; i < ARRAY_SIZE(holes); i+= 3) {
+
+	for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
 #if DEBUG_ALLOC_RANGE
-		pr_debug("\tGet unmapped %lu-%lu size %lu\n", min >> 12,
+		pr_debug("\tGet empty %lu-%lu size %lu\n", min >> 12,
 				holes[i+1] >> 12, holes[i+2] >> 12);
 #endif
-		MT_BUG_ON(mt, mas_get_unmapped_area(&mas, min >> 12,
+		MT_BUG_ON(mt, mas_get_empty_area(&mas, min >> 12,
 					holes[i+1] >> 12,
 					holes[i+2] >> 12));
 		MT_BUG_ON(mt, mas.index != holes[i] >> 12);
@@ -31486,7
+31492,7 @@ static noinline void check_alloc_range(struct maple_tree *mt) static noinline void check_ranges(struct maple_tree *mt) { - int i; + int i, val, val2; unsigned long r[] = { 10, 15, 20, 25, @@ -31624,8 +31630,8 @@ static noinline void check_ranges(struct maple_tree *mt) mt_set_non_kernel(50); for (i = 0; i <= 500; i++) { - int val = i*5; - int val2 = (i+1)*5; + val = i*5; + val2 = (i+1)*5; check_store_range(mt, val, val2, xa_mk_value(val), 0); } check_store_range(mt, 2400, 2400, xa_mk_value(2400), 0); @@ -31638,8 +31644,8 @@ static noinline void check_ranges(struct maple_tree *mt) mt_set_non_kernel(50); for (i = 0; i <= 500; i++) { - int val = i*5; - int val2 = (i+1)*5; + val = i*5; + val2 = (i+1)*5; check_store_range(mt, val, val2, xa_mk_value(val), 0); } check_store_range(mt, 2422, 2422, xa_mk_value(2422), 0); @@ -31655,8 +31661,8 @@ static noinline void check_ranges(struct maple_tree *mt) mtree_init(mt, MAPLE_ALLOC_RANGE); mt_set_non_kernel(50); for (i = 0; i <= 50; i++) { - int val = i*10; - int val2 = (i+1)*10; + val = i*10; + val2 = (i+1)*10; check_store_range(mt, val, val2, xa_mk_value(val), 0); } check_store_range(mt, 161, 161, xa_mk_value(161), 0); @@ -31672,27 +31678,70 @@ static noinline void check_ranges(struct maple_tree *mt) mtree_init(mt, MAPLE_ALLOC_RANGE); for (i = 0; i <= 500; i++) { - int val = i*10; - int val2 = (i+1)*10; + val = i*10; + val2 = (i+1)*10; check_store_range(mt, val, val2, xa_mk_value(val), 0); } check_store_range(mt, 4600, 4959, xa_mk_value(1), 0); mt_validate(mt); mtree_destroy(mt); + mtree_init(mt, MAPLE_ALLOC_RANGE); + for (i = 0; i <= 500; i++) { + val = i*10; + val2 = (i+1)*10; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + check_store_range(mt, 4811, 4811, xa_mk_value(4811), 0); + check_store_range(mt, 4812, 4812, xa_mk_value(4812), 0); + check_store_range(mt, 4861, 4861, xa_mk_value(4861), 0); + check_store_range(mt, 4862, 4862, xa_mk_value(4862), 0); + check_store_range(mt, 4842, 4849, NULL, 0); + mt_validate(mt); + mtree_destroy(mt); + + mtree_init(mt, MAPLE_ALLOC_RANGE); + for (i = 0; i <= 200; i++) { + val = i*10; + val2 = (i+1)*10; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + + check_store_range(mt, 1655, 1655, xa_mk_value(1655), 0); + check_store_range(mt, 1656, 1656, xa_mk_value(1656), 0); + check_store_range(mt, 1666, 1666, xa_mk_value(1666), 0); + + check_store_range(mt, 1705, 1705, xa_mk_value(1705), 0); + check_store_range(mt, 1706, 1706, xa_mk_value(1706), 0); + check_store_range(mt, 1716, 1716, xa_mk_value(1716), 0); + + check_store_range(mt, 1755, 1755, xa_mk_value(1755), 0); + check_store_range(mt, 1756, 1756, xa_mk_value(1756), 0); + + check_store_range(mt, 1805, 1806, xa_mk_value(1805), 0); + check_store_range(mt, 1806, 1806, xa_mk_value(1806), 0); + + check_store_range(mt, 1855, 1855, xa_mk_value(1855), 0); + check_store_range(mt, 1856, 1856, xa_mk_value(1856), 0); + check_store_range(mt, 1866, 1866, xa_mk_value(1866), 0); + // Cause a 3 child split. 
+ check_store_range(mt, 1792, 1799, NULL, 0); + mt_validate(mt); + mtree_destroy(mt); } static noinline void check_next_entry(struct maple_tree *mt) { void *entry = NULL; unsigned long limit = 30, i = 0; + MT_BUG_ON(mt, !mtree_empty(mt)); MA_STATE(mas, mt, i, i); check_seq(mt, limit, false); rcu_read_lock(); - for (;i <= limit + 1; i++) { + for ( ; i <= limit + 1; i++) { entry = mas_next(&mas, limit); if (i > limit) MT_BUG_ON(mt, entry != NULL); @@ -31707,6 +31756,7 @@ static noinline void check_prev_entry(struct maple_tree *mt) { unsigned long index = 16; void *value; + MA_STATE(mas, mt, index, index); MT_BUG_ON(mt, !mtree_empty(mt)); @@ -31753,7 +31803,7 @@ static noinline void check_gap_combining(struct maple_tree *mt) */ mt_set_non_kernel(1); mas_reset(&mas); - MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 50, 100, 2)); + MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 50, 100, 2)); MT_BUG_ON(mt, mas.index != index + 1); rcu_read_unlock(); @@ -31781,7 +31831,7 @@ static noinline void check_gap_combining(struct maple_tree *mt) * 50 for size 3. */ mas_reset(&mas); - MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 20, 50, 3)); + MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 20, 50, 3)); MT_BUG_ON(mt, mas.index != 38); rcu_read_unlock(); @@ -31795,7 +31845,7 @@ static noinline void check_gap_combining(struct maple_tree *mt) mas_reset(&mas); rcu_read_lock(); - MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 76, 81, 2)); + MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 76, 81, 2)); MT_BUG_ON(mt, mas.index != 79); mt_validate(mt); rcu_read_unlock(); @@ -31806,7 +31856,7 @@ static noinline void check_gap_combining(struct maple_tree *mt) mtree_test_erase(mt, 81); mas_reset(&mas); rcu_read_lock(); - MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 76, 85, 4)); + MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 76, 85, 4)); rcu_read_unlock(); MT_BUG_ON(mt, mas.index != 78); mt_validate(mt); @@ -31821,7 +31871,7 @@ static noinline void check_gap_combining(struct maple_tree *mt) mt_set_non_kernel(2); mas_reset(&mas); rcu_read_lock(); - MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 1700, 1800, 2)); + MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 1700, 1800, 2)); MT_BUG_ON(mt, mas.index != 1791); rcu_read_unlock(); mt_validate(mt); @@ -31831,7 +31881,6 @@ static noinline void check_gap_combining(struct maple_tree *mt) mtree_init(mt, MAPLE_ALLOC_RANGE); check_seq(mt, 400, false); mtree_test_store_range(mt, 376, 391, NULL); - mt_dump(mt); mt_set_non_kernel(0); mtree_destroy(mt); -- 2.50.1
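
P.S. (editor's sketch, not part of the patch): the empty-area checks above all
follow one pattern -- build a tree, punch a hole with a NULL store, search
top-down under RCU, then assert on mas.index. A minimal, self-contained version
of that pattern is sketched below. check_empty_area_example() is a made-up
name, the window (0-395) and size (10) are illustrative constants, and the
bound on mas.index assumes the search semantics shown by the checks above
rather than anything this patch guarantees.

static noinline void check_empty_area_example(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);

	mt_set_non_kernel(2);
	mtree_init(mt, MAPLE_ALLOC_RANGE);
	check_seq(mt, 400, false);			/* entries at indices 0..400 */
	mtree_test_store_range(mt, 376, 391, NULL);	/* punch a 16-slot hole */

	rcu_read_lock();
	/* Search the window 0..395, top-down, for a gap of at least 10 slots. */
	MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 0, 395, 10));
	/* The highest fitting gap should start inside the hole punched above. */
	MT_BUG_ON(mt, mas.index < 376);
	rcu_read_unlock();

	mt_set_non_kernel(0);
	mt_validate(mt);
	mtree_destroy(mt);
}

A fresh MA_STATE starts the search from the root, and the rcu_read_lock()/
rcu_read_unlock() pairing mirrors the set13 check above; on success the start
of the found gap is left in mas.index, ready for alignment arithmetic like the
mask-and-round step in the set28 check.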