From: Liam R. Howlett
Date: Fri, 4 Sep 2020 20:16:34 +0000 (-0400)
Subject: maple_tree: mas_data_end reduction.
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=173f10c2e356d6df3f93b10fbe88f7c621e05c73;p=users%2Fjedix%2Flinux-maple.git

maple_tree: mas_data_end reduction.

Signed-off-by: Liam R. Howlett
---

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 2ec96a6796f3..153b5dfd6e91 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -947,14 +947,14 @@ done:
 
 /*
  * mas_data_end() - Find the end of the data (slot).
+ * @mas: the maple state
+ * @type: the type of maple node
  *
- * @mas - the maple state
- * @type - the type of maple node
+ * Returns: The zero indexed last slot with data (may be null).
  */
 static inline unsigned char mas_data_end(const struct ma_state *mas)
-
 {
-	int slot = 0;
+	unsigned char slot = 0;
 	enum maple_type type = mte_node_type(mas->node);
 	unsigned long piv = mas->min;
 
@@ -1596,7 +1596,7 @@ static inline bool mas_prev_sibling(struct ma_state *mas)
  */
 static inline bool mas_next_sibling(struct ma_state *mas)
 {
-	unsigned char p_end, p_slot = mte_parent_slot(mas->node);
+	unsigned char p_slot = mte_parent_slot(mas->node) + 1;
 
 	MA_STATE(parent, mas->tree, mas->index, mas->last);
 
@@ -1605,14 +1605,15 @@ static inline bool mas_next_sibling(struct ma_state *mas)
 
 	mas_dup_state(&parent, mas);
 	mas_ascend(&parent);
-	p_end = mas_data_end(&parent);
+	if (p_slot == mt_slot_count(parent.node))
+		return false;
 
-	if (p_end == p_slot)
+	if (!mas_get_slot(&parent, p_slot))
 		return false;
 
 	mas_dup_state(mas, &parent);
-	mas_set_offset(mas, p_slot + 1);
+	mas_set_offset(mas, p_slot);
 	mas_descend(mas);
 	return true;
 }
@@ -1666,9 +1667,9 @@ static inline void mast_rebalance_next(struct maple_subtree_state *mast,
 			struct maple_enode *old_r)
 {
 	unsigned char b_end = mast->bn->b_end;
-	unsigned char end = mas_data_end(mast->orig_r);
 
-	mas_mab_cp(mast->orig_r, 0, end, mast->bn, b_end);
+	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
+		   mast->bn, b_end);
 	mat_add(mast->free, old_r);
 	mast->orig_r->last = mast->orig_r->max;
 	if (old_r == mast->orig_l->node)
@@ -2068,7 +2069,8 @@ static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
 		return;
 
 	mas_mab_cp(mast->orig_r, mas_offset(mast->orig_r) + 1,
-		   mas_data_end(mast->orig_r), mast->bn, mast->bn->b_end);
+		   mt_slot_count(mast->orig_r->node), mast->bn,
+		   mast->bn->b_end);
 	mast->orig_r->last = mast->orig_r->max;
 }
 
@@ -2262,7 +2264,8 @@ static inline int mas_rebalance(struct ma_state *mas,
 
 	mas_dup_state(&r_mas, mas);
 	if (mas_next_sibling(&r_mas)) {
-		mas_mab_cp(&r_mas, 0, mas_data_end(&r_mas), b_node, b_end);
+		MT_BUG_ON(r_mas.tree, mas_is_none(&r_mas));
+		mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
 		r_mas.last = r_mas.index = r_mas.max;
 	} else {
@@ -2938,8 +2941,8 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
 	// Copy l_mas and store the value in b_node.
 	b_node.b_end = mas_store_b_node(&l_mas, &b_node, entry);
 	// Copy r_mas into b_node.
-	mas_mab_cp(&r_mas, mas_offset(&r_mas), mas_data_end(&r_mas), &b_node,
-		   b_node.b_end + 1);
+	mas_mab_cp(&r_mas, mas_offset(&r_mas), mt_slot_count(r_mas.node),
+		   &b_node, b_node.b_end + 1);
 	// Stop spanning searches by searching for just index.
 	l_mas.index = l_mas.last = mas->index;
 	// Calc the number of iterations of combining and splitting that will
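
Below is a minimal, self-contained C sketch of the pattern the callers above move toward: instead of first scanning for the last occupied slot (what mas_data_end() does) and comparing against it, bound the walk by the node's slot capacity and test slot occupancy directly, as the reworked mas_next_sibling() now does with mt_slot_count() and mas_get_slot(). It is not the kernel code; toy_node, TOY_SLOT_COUNT, toy_data_end() and toy_next_sibling() are invented stand-ins for struct maple_node, mt_slot_count(), mas_data_end() and mas_next_sibling(), with all locking, encoding and pivot handling left out.

#include <stdio.h>

#define TOY_SLOT_COUNT 8			/* stand-in for mt_slot_count() */

struct toy_node {
	void *slot[TOY_SLOT_COUNT];		/* NULL means the slot is empty */
};

/* Old pattern: scan for the last occupied slot, then compare against it. */
static unsigned char toy_data_end(const struct toy_node *node)
{
	unsigned char end = 0;

	while (end < TOY_SLOT_COUNT - 1 && node->slot[end + 1])
		end++;
	return end;				/* zero indexed; slot 0 may be NULL */
}

/*
 * New pattern: no pre-scan.  Bail out when the candidate slot is past the
 * node's capacity or holds nothing; otherwise it is the slot to descend into.
 */
static int toy_next_sibling(const struct toy_node *parent, unsigned char p_slot)
{
	if (p_slot == TOY_SLOT_COUNT)
		return -1;			/* ran off the end of the node */
	if (!parent->slot[p_slot])
		return -1;			/* no child stored here */
	return p_slot;
}

int main(void)
{
	int a, b, c;
	struct toy_node parent = { .slot = { &a, &b, &c } };

	printf("last slot with data: %d\n", toy_data_end(&parent));	    /* 2 */
	printf("sibling after slot 1: %d\n", toy_next_sibling(&parent, 2)); /* 2 */
	printf("sibling after slot 2: %d\n", toy_next_sibling(&parent, 3)); /* -1 */
	return 0;
}

Under these assumptions, both variants agree on where the data ends; the capacity-plus-occupancy check simply avoids walking the node twice when only the next slot matters.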