From: Liam R. Howlett
Date: Wed, 8 Oct 2025 23:20:32 +0000 (-0400)
Subject: FIXME maple_tree reduce ascending by tracking parent
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=f0ca5bbf400495d10a2b7732ea840ff9e1d28c43;p=users%2Fjedix%2Flinux-maple.git

FIXME maple_tree reduce ascending by tracking parent

rebalance change is more..

Signed-off-by: Liam R. Howlett
---

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index f7c2806f0f07..b2300c798fea 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1931,15 +1931,11 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
 	wr_mas->offset_end = mas->offset = offset;
 }
 
-static inline void rebalance_sib(struct ma_state *mas, struct ma_state *sib)
+static inline void rebalance_sib(struct ma_state *parent, struct ma_state *sib)
 {
-	unsigned char end;
-
-	*sib = *mas;
-	mas_ascend(sib);
-	end = mas_data_end(sib);
+	*sib = *parent;
 	/* Prioritize move right to pull data left */
-	if (sib->offset < end)
+	if (sib->offset < sib->end)
 		sib->offset++;
 	else
 		sib->offset--;
@@ -2478,15 +2474,15 @@ static bool data_fits(struct ma_state *sib, struct ma_state *mas,
 	return false;
 }
 
-static void push_data_sib(struct maple_copy *cp, struct ma_state *mas,
-		struct ma_state *sib)
+static inline void push_data_sib(struct maple_copy *cp, struct ma_state *mas,
+		struct ma_state *sib, struct ma_state *parent)
 {
+
 	if (mte_is_root(mas->node))
 		goto no_push;
 
-	*sib = *mas;
-	mas_ascend(sib);
+	*sib = *parent;
 
 	if (sib->offset) {
 		sib->offset--;
 		mas_descend(sib);
@@ -2494,11 +2490,9 @@ static void push_data_sib(struct maple_copy *cp, struct ma_state *mas,
 		if (data_fits(sib, mas, cp))
 			/* Push left */
 			return;
-		mas_ascend(sib);
-		sib->offset++;
+		*sib = *parent;
 	}
 
-	sib->end = mas_data_end(sib);
 	if (sib->offset >= sib->end)
 		goto no_push;
 
@@ -2524,20 +2518,21 @@ no_push:
  *
  */
 static inline void rebalance_data(struct maple_copy *cp,
-		struct ma_wr_state *wr_mas, struct ma_state *sib)
+		struct ma_wr_state *wr_mas, struct ma_state *sib,
+		struct ma_state *parent)
 {
 	cp_data_calc(cp, wr_mas, wr_mas);
 	sib->end = 0;
 	if (cp->data >= mt_slots[wr_mas->type]) {
-		push_data_sib(cp, wr_mas->mas, sib);
+		push_data_sib(cp, wr_mas->mas, sib, parent);
 		if (sib->end)
 			goto use_sib;
-	}
-
-	if (((wr_mas->mas->min != 0) || (wr_mas->mas->max != ULONG_MAX)) &&
-	    (cp->data <= mt_min_slots[wr_mas->type])) {
-		rebalance_sib(wr_mas->mas, sib);
-		goto use_sib;
+	} else if (cp->data <= mt_min_slots[wr_mas->type]) {
+		if ((wr_mas->mas->min != 0) ||
+		    (wr_mas->mas->max != ULONG_MAX)) {
+			rebalance_sib(parent, sib);
+			goto use_sib;
+		}
 	}
 
 	return;
@@ -2912,7 +2907,8 @@ static bool spanning_ascend(struct maple_copy *cp, struct ma_state *mas,
  * needed, false otherwise.
  */
 static inline bool rebalance_ascend(struct maple_copy *cp,
-		struct ma_wr_state *wr_mas, struct ma_state *sib)
+		struct ma_wr_state *wr_mas, struct ma_state *sib,
+		struct ma_state *parent)
 {
 	struct ma_state *mas;
 	unsigned long min;
@@ -2938,7 +2934,8 @@ static inline bool rebalance_ascend(struct maple_copy *cp,
 		return false;
 
 	cp->height++;
-	mas_ascend(mas);
+	parent->alloc = mas->alloc;
+	*mas = *parent;
 	wr_mas_setup(wr_mas, mas);
 	wr_mas->offset_end = mas->offset;
 	if (r == sib)
@@ -3732,8 +3729,9 @@ static inline void mas_wr_append(struct ma_wr_state *wr_mas)
 * Return: true if another split operation on the next level is needed, false
 * otherwise
 */
-static bool split_ascend(struct maple_copy *cp,
-		struct ma_wr_state *wr_mas, struct ma_state *sib)
+static inline bool split_ascend(struct maple_copy *cp,
+		struct ma_wr_state *wr_mas, struct ma_state *sib,
+		struct ma_state *parent)
 {
 	struct ma_state *mas;
 	unsigned long min, max;
@@ -3756,7 +3754,9 @@ static bool split_ascend(struct maple_copy *cp,
 		return false;
 
 	cp->height++;
-	mas_ascend(mas);
+	BUG_ON(mas->node == parent->node);
+	parent->alloc = mas->alloc;
+	*mas = *parent;
 	wr_mas_setup(wr_mas, mas);
 	wr_mas->offset_end = mas->offset;
 	if (sib->end) {
@@ -3780,8 +3780,9 @@ static bool split_ascend(struct maple_copy *cp,
 * indicate it will not be used.
 *
 */
-static void split_data(struct maple_copy *cp,
-		struct ma_wr_state *wr_mas, struct ma_state *sib)
+static inline void split_data(struct maple_copy *cp,
+		struct ma_wr_state *wr_mas, struct ma_state *sib,
+		struct ma_state *parent)
 {
 	cp_data_calc(cp, wr_mas, wr_mas);
 	if (cp->data <= mt_slots[wr_mas->type]) {
@@ -3790,7 +3791,7 @@ static void split_data(struct maple_copy *cp,
 		return;
 	}
 
-	push_data_sib(cp, wr_mas->mas, sib);
+	push_data_sib(cp, wr_mas->mas, sib, parent);
 	if (sib->end)
 		cp->data += sib->end + 1;
 }
@@ -3804,16 +3805,22 @@ static void mas_wr_split(struct ma_wr_state *wr_mas)
 	struct ma_state *mas;
 	struct maple_copy cp;
 	struct ma_state sib;
+	struct ma_state parent;
 
 	mas = wr_mas->mas;
+	parent = *mas;
 	trace_ma_op(__func__, mas);
 	cp_leaf_init(&cp, mas, wr_mas, wr_mas);
 	do {
-		split_data(&cp, wr_mas, &sib);
+		if (!mte_is_root(parent.node)) {
+			mas_ascend(&parent);
+			parent.end = mas_data_end(&parent);
+		}
+		split_data(&cp, wr_mas, &sib, &parent);
 		multi_src_setup(&cp, wr_mas, wr_mas, &sib);
 		dst_setup(&cp, mas, wr_mas->type);
 		cp_data_write(&cp, mas);
-	} while (split_ascend(&cp, wr_mas, &sib));
+	} while (split_ascend(&cp, wr_mas, &sib, &parent));
 
 	mas_wmb_replace(mas, &cp);
 }
@@ -3832,6 +3839,7 @@ static void mas_wr_rebalance(struct ma_wr_state *wr_mas)
 	struct ma_state *mas;
 	struct maple_copy cp;
 	struct ma_state sib;
+	struct ma_state parent;
 
 	/*
 	 * Rebalancing occurs if a node is insufficient. Data is rebalanced
@@ -3845,13 +3853,18 @@ static void mas_wr_rebalance(struct ma_wr_state *wr_mas)
 
 	mas = wr_mas->mas;
 	trace_ma_op(__func__, mas);
+	parent = *mas;
 	cp_leaf_init(&cp, mas, wr_mas, wr_mas);
 	do {
-		rebalance_data(&cp, wr_mas, &sib);
+		if (!mte_is_root(parent.node)) {
+			mas_ascend(&parent);
+			parent.end = mas_data_end(&parent);
+		}
+		rebalance_data(&cp, wr_mas, &sib, &parent);
 		multi_src_setup(&cp, wr_mas, wr_mas, &sib);
 		dst_setup(&cp, mas, wr_mas->type);
 		cp_data_write(&cp, mas);
-	} while (rebalance_ascend(&cp, wr_mas, &sib));
+	} while (rebalance_ascend(&cp, wr_mas, &sib, &parent));
 
 	mas_wmb_replace(mas, &cp);
 }
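
Illustrative note (not part of the patch): the change replaces the mas_ascend() calls inside rebalance_sib() and push_data_sib() with a caller-maintained "parent" ma_state that mas_wr_split()/mas_wr_rebalance() refresh once per level and pass down, so sibling selection no longer re-ascends and re-reads the parent's end. Below is a minimal stand-alone C sketch of that caching pattern; toy_node, toy_cursor, sibling_old() and sibling_new() are hypothetical names, not maple tree APIs.

/* Hypothetical sketch, not maple tree code: toy_node/toy_cursor stand in
 * for maple_node/ma_state, and nr_entries - 1 stands in for mas_data_end().
 */
#include <stdio.h>

struct toy_node {
	struct toy_node *parent;	/* link used when ascending */
	int nr_entries;			/* number of used slots */
};

struct toy_cursor {
	struct toy_node *node;		/* node the cursor points at */
	int offset;			/* slot of the relevant child in the parent */
	int end;			/* cached last used slot (mas_data_end() analogue) */
};

/* Old shape of rebalance_sib(): every call walks back up from the child and
 * recomputes the parent's end, so each level pays for the upward walk again.
 */
static void sibling_old(const struct toy_cursor *cur, struct toy_cursor *sib)
{
	*sib = *cur;
	sib->node = cur->node->parent;		/* ascend */
	sib->end = sib->node->nr_entries - 1;	/* recompute the end */
	if (sib->offset < sib->end)		/* prefer the right sibling */
		sib->offset++;
	else
		sib->offset--;
}

/* New shape: the caller ascends once per level, caches node + end in a
 * parent cursor, and the helper only copies it.
 */
static void sibling_new(const struct toy_cursor *parent, struct toy_cursor *sib)
{
	*sib = *parent;
	if (sib->offset < sib->end)		/* prefer the right sibling */
		sib->offset++;
	else
		sib->offset--;
}

int main(void)
{
	struct toy_node root = { .parent = NULL, .nr_entries = 3 };
	struct toy_node child = { .parent = &root, .nr_entries = 5 };
	/* Cursor on "child", which sits in slot 1 of "root". */
	struct toy_cursor cur = { .node = &child, .offset = 1, .end = 4 };
	struct toy_cursor parent, sib;

	/* Caller-side refresh, done once per level as in the loops of
	 * mas_wr_split()/mas_wr_rebalance() after this patch.
	 */
	parent = cur;
	parent.node = cur.node->parent;
	parent.end = parent.node->nr_entries - 1;

	sibling_old(&cur, &sib);
	printf("old: sibling slot %d\n", sib.offset);

	sibling_new(&parent, &sib);
	printf("new: sibling slot %d\n", sib.offset);
	return 0;
}

Both helpers pick the same sibling slot; the difference is only that sibling_new() reuses the cached parent cursor instead of ascending and re-reading the parent's end on every call, which is the effect the patch aims for across split and rebalance.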