From 8fcca759fc7edf8c4b310bc7f4b578262c1d0b72 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Thu, 25 Jun 2020 19:13:24 -0400
Subject: [PATCH] wip, maple_tree: Pass 850785 - added check for min node
 size. need to rebalance spanning store

Signed-off-by: Liam R. Howlett
---
 lib/maple_tree.c | 143 +++++++++++++++++++++++++++++++++--------------
 1 file changed, 100 insertions(+), 43 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index a24af99f13e8..1687189e53f6 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1652,10 +1652,10 @@ static inline void mas_descend_adopt(struct ma_state *mas)
	 * then set the correct parent in all of of the parent's children.
	 */
	while (!mte_is_leaf(l_mas.node)) {
-		printk("start of l %p is %p\n", mas_mn(&l_mas), l_mas.node);
-		printk("start of r %p is %p\n", mas_mn(&r_mas), r_mas.node);
+		//printk("start of l %p is %p\n", mas_mn(&l_mas), l_mas.node);
+		//printk("start of r %p is %p\n", mas_mn(&r_mas), r_mas.node);
		if (!(l_enode = mas_find_l_split(&l_mas))) {
-			printk("\tChecking right\n");
+			//printk("\tChecking right\n");
			mas_adopt_children(&l_mas, l_mas.node);
			mas_dup_state(&l_mas, &r_mas);
			if (!(l_enode = mas_find_l_split(&l_mas))) {
@@ -1664,28 +1664,28 @@ static inline void mas_descend_adopt(struct ma_state *mas)
			}
		}

-		printk("%d New node is %p\n", __LINE__, mte_to_node(l_enode));
+		//printk("%d New node is %p\n", __LINE__, mte_to_node(l_enode));
		if (!(r_enode = mas_find_r_split(&r_mas))) {
-			printk("\tChecking left\n");
+			//printk("\tChecking left\n");
			mas_adopt_children(&r_mas, r_mas.node);
			mas_dup_state(&r_mas, &l_mas);
			r_enode = mas_find_r_split(&r_mas);
		}
-		printk("%d New node is %p\n", __LINE__, mte_to_node(r_enode));
+		//printk("%d New node is %p\n", __LINE__, mte_to_node(r_enode));

-		printk("Adopt children of l %p\n", mas_mn(&l_mas));
+		//printk("Adopt children of l %p\n", mas_mn(&l_mas));
		mas_adopt_children(&l_mas, l_mas.node);
		if (r_mas.node != l_mas.node) {
-			printk("Adopt children of r %p\n", mas_mn(&r_mas));
+			//printk("Adopt children of r %p\n", mas_mn(&r_mas));
			mas_adopt_children(&r_mas, r_mas.node);
		}

		l_mas.node = l_enode;
		r_mas.node = r_enode;
-		printk("%d New node is %p\n", __LINE__, mas_mn(&l_mas));
-		printk("%d New node is %p\n", __LINE__, mas_mn(&r_mas));
-		printk("\n");
+		//printk("%d New node is %p\n", __LINE__, mas_mn(&l_mas));
+		//printk("%d New node is %p\n", __LINE__, mas_mn(&r_mas));
+		//printk("\n");
	}
 }

@@ -1875,7 +1875,9 @@ new_root_node:
	}

	/* Insert new data */
-	_mas_replace(mas, false, false, true);
+	_mas_replace(mas, false, false, false);
+
+	mas_descend_adopt(mas);

	if (mt_is_alloc(mas->tree))
		mas_update_gap(mas, false);
@@ -1883,6 +1885,7 @@ new_root_node:
+
	return 0;
 }

@@ -2013,7 +2016,7 @@ static inline int mas_split(struct ma_state *mas,
	 */

	/* internal node split - cut in half for now. */
-	split = (slot_cnt + 1) / 2;
+	split = slot_cnt / 2;
	printk("split is %u %u %u\n", split, mas_get_slot(&l_mas),
	       mas_get_slot(&r_mas));
	i = mab_mas_cp(b_node, 0, split + 1, &l_mas, 0);
@@ -2452,6 +2455,8 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)

	if (mas->full_cnt > 0)
		node_cnt = 1 + mas->full_cnt * 2; // For split upwards.
+	else
+		node_cnt = 1 + -1 * mas->full_cnt * 2; // For rebalance upwards.

	/* one for parent, one for potential 3rd node at bottom,
	 * two for every level below the split location.
@@ -2522,7 +2527,7 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
		memset(&b_node, 0, sizeof(struct maple_big_node));
		// Add l_mas.node to clean_list.
		// Fill out big node with left contents.
-		printk("%s: %p and %p\n", __func__, mas_mn(&orig_l_mas),
+		printk("\n%s: %p and %p\n", __func__, mas_mn(&orig_l_mas),
		       mas_mn(&orig_r_mas));
		printk("l_slot %u r_slot %u\n", l_slot, r_slot);
		b_end = mas_mab_cp(&orig_l_mas, 0, l_slot, &b_node, 0);
@@ -2538,7 +2543,7 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)

			// Insert new entry.
			b_node.slot[b_end] = entry;
-			b_node.pivot[b_end++] = mas->last;
+			b_node.pivot[b_end] = mas->last;
			printk("Set %lu to %p\n", mas->last, entry);
		} else {
			// Not a leaf means new l_mas is placed at b_end.
@@ -2547,45 +2552,75 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
				b_node.gap[b_end] = mas_find_gap(&l_mas);
				printk("gap %u is %lu\n", b_end, b_node.gap[b_end]);
			}
-			b_node.pivot[b_end++] = l_mas.max;
+			b_node.pivot[b_end] = l_mas.max;
+			printk("l_mas max is %lu\n", b_node.pivot[b_end - 1]);
			if (next) {
				// FIXME: What about gap?
-				b_node.slot[b_end] = next;
-				b_node.pivot[b_end++] = r_mas.min - 1;
+				// FIXME: What about descend_adopt?
+				b_node.slot[++b_end] = next;
+				b_node.pivot[b_end] = r_mas.min - 1;
			}

			if (r_mas.node != l_mas.node) {
-				b_node.slot[b_end] = r_mas.node;
+				b_node.slot[++b_end] = r_mas.node;
				if(mt_is_alloc(mas->tree)) {
					b_node.gap[b_end] = mas_find_gap(&r_mas);
					printk("gap %u is %lu\n", b_end, b_node.gap[b_end]);
				}
-				b_node.pivot[b_end++] = r_mas.max;
+				b_node.pivot[b_end] = r_mas.max;
+				printk("r_mas max is %lu\n", b_node.pivot[b_end - 1]);
			}
		}

-		// Add r_mas.node to clean_list.
-		b_end = mas_mab_cp(&orig_r_mas, r_slot,
-				   mas_data_end(&orig_r_mas) + 1, &b_node,
-				   b_end);
+		// Add orig_r_mas.node to clean_list.
+		if (b_node.pivot[b_end] < ULONG_MAX) {
+			b_end = mas_mab_cp(&orig_r_mas, r_slot,
+					   mas_data_end(&orig_r_mas) + 1,
+					   &b_node, b_end + 1);
+		}

		if (b_end < mt_min_slot_cnt(orig_l_mas.node)) {
-			printk("Steal some data from right\n");
+			unsigned char end;
+			printk("\tSteal some data from right\n");
			mas_set_slot(&orig_r_mas, mte_parent_slot(orig_r_mas.node));
			mas_next_node(&orig_r_mas, ULONG_MAX);
			if (!mas_is_none(&orig_r_mas)) {
				// Add r_mas.node to clean_list.
				// r_mas is now at the next node..
-				b_end = mas_mab_cp(&orig_r_mas, 0,
-						   mas_data_end(&orig_r_mas) + 1,
-						   &b_node, b_end);
+				end = mas_data_end(&orig_r_mas);
+				b_end = mas_mab_cp(&orig_r_mas, 0, end + 1,
+						   &b_node, b_end + 1);
			} else {
-				// Use prev.
+				// Put left into right.
+				mas_dup_state(&orig_r_mas, &orig_l_mas);
+				mas_dup_state(&r_mas, &l_mas);
+				r_slot = l_slot;
+				// Use prev for left.
+				mas_prev_node(&orig_l_mas, 0);
+				if (mas_is_none(&orig_l_mas)) {
+					// This is going to be a new root of
+					// only what is in b_node
+					printk("%s: %d New root?\n", __func__, __LINE__);
+					return 0;
+				}
+
+				mas_set_slot(&orig_l_mas, 0);
+				l_slot = 0;
+//				printk("Walk %p for %lu\n", mas_mn(&orig_l_mas), orig_l_mas.index);
+//				mas_node_walk(&orig_l_mas, mte_node_type(orig_l_mas.node),
+//					      &range_min, &range_max);
+//				l_slot = mas_get_slot(&orig_l_mas);
+
+				end = mas_data_end(&orig_l_mas); // shift b_node by prev size
+				mab_shift_right(&b_node, b_end, end,
+						(mt_is_alloc(mas->tree) ? true : false));
				// copy in prev.
-				// fix l_slot/r_slot ?
+				mas_mab_cp(&orig_l_mas, 0, end, &b_node, 0);
+				b_end += end + 1;
+				printk("b_end is %u\n", b_end);
			}
		}

		/* Now that l_mas data and r_mas data is in big_node, divide the data
@@ -2598,7 +2633,7 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
		if (b_end < slot_cnt) {
			split = b_end;
		} else {
-			split = mab_calc_split(&b_node, b_end, slot_cnt,
+			split = mab_calc_split(&b_node, b_end - 1, slot_cnt,
					       orig_l_mas.min);
		}

@@ -2686,20 +2721,24 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
			mas_ascend(&orig_r_mas);
			mas_set_slot(&orig_r_mas, 0);
-			orig_r_mas.index = orig_r_mas.last = r_mas.max + 1;
-			printk("node walk %p for %lu\n", mas_mn(&orig_r_mas), r_mas.max + 1);
+			orig_r_mas.index = r_mas.max;
+			if (r_mas.max != ULONG_MAX)
+				orig_r_mas.index++;
+
+			orig_r_mas.last = orig_r_mas.index;
+			printk("node r walk %p for %lu\n", mas_mn(&orig_r_mas), orig_r_mas.index);
			mas_node_walk(&orig_r_mas, mte_node_type(orig_r_mas.node),
				      &range_min, &range_max);
			r_slot = mas_get_slot(&orig_r_mas);
			orig_r_mas.index = orig_r_mas.last = r_mas.max;

			mas_set_slot(&orig_l_mas, 0);
-			printk("Walk %p for %lu\n", mas_mn(&orig_l_mas), orig_l_mas.index);
+			printk("Walk l %p for %lu\n", mas_mn(&orig_l_mas), orig_l_mas.index);
			mas_node_walk(&orig_l_mas, mte_node_type(orig_l_mas.node),
				      &range_min, &range_max);
			l_slot = mas_get_slot(&orig_l_mas);
		}
-		printk("Done looping over spanning mess\n");
+		printk("Done looping over spanning mess at %p\n", mas_mn(&orig_l_mas));

		if (!l_slot && mte_is_root(orig_l_mas.node) &&
		    l_mas.max == orig_l_mas.max) {
@@ -2713,19 +2752,19 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
			b_node.gap[b_end] = mas_find_gap(&l_mas);
			printk("Set b_end %u to left %p gap %lu\n", b_end, l_mas.node, b_node.gap[b_end]);
			printk("l_mas.max = %lu\n", l_mas.max);
-		b_node.pivot[b_end++] = l_mas.max;
+		b_node.pivot[b_end] = l_mas.max;

		if (next) {
-			b_node.slot[b_end] = next;
+			b_node.slot[++b_end] = next;
			// FIXME: Need to find gap.
			printk("Set b_end %u to next %p\n", b_end, next);
-			b_node.pivot[b_end++] = r_mas.min - 1;
+			b_node.pivot[++b_end] = r_mas.min - 1;
		}

		if (l_mas.node != r_mas.node) {
-			b_node.slot[b_end] = r_mas.node;
+			b_node.slot[++b_end] = r_mas.node;
			b_node.gap[b_end] = mas_find_gap(&r_mas);
			printk("Set b_end %u to right %p %lu gap %lu\n", b_end, r_mas.node, r_mas.max, b_node.gap[b_end]);
-			b_node.pivot[b_end++] = r_mas.max;
+			b_node.pivot[b_end] = r_mas.max;
		}

		while (mas_get_safe_pivot(&orig_l_mas, r_slot) <= mas->last &&
@@ -2738,12 +2777,18 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
		    orig_l_mas.max != l_mas.max)
			b_end = mas_mab_cp(&orig_l_mas, r_slot,
					   mas_data_end(&orig_l_mas) + 1, &b_node,
-					   b_end);
+					   b_end + 1);

-		// Check for overfull node and split upwards.
+		// Check for overfull node and split upwards or minimum entries.
		if (b_end > mt_slot_count(orig_l_mas.node)) {
+			mas_cnt_full(mas);
			printk("Splitting up required (%u)\n", b_end);
+		} else if (!mte_is_root(orig_l_mas.node) &&
+			   b_end < mt_min_slot_cnt(orig_l_mas.node)) {
+			printk("Rebalance needed (%u)\n", b_end);
+			mas_cnt_empty(mas);
+			_mas_rebalance(mas, &b_node, b_end);
		}

		l = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
@@ -2769,7 +2814,7 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)

 new_root:
	// FIXME: Mark modified nodes as dead.
	// Insert new sub-tree
-	_mas_replace(mas, false, false, true);
+	_mas_replace(mas, false, false, false);

	if (mte_is_leaf(mas->node))
		return 1;
@@ -5009,12 +5054,24 @@ static inline void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
  */
 void mt_validate(struct maple_tree *mt)
 {
+	unsigned char end;
	MA_STATE(mas, mt, 0, 0);
+
+	rcu_read_lock();
	mas_start(&mas);
	mas_first_entry(&mas, ULONG_MAX);
	while (mas.node != MAS_NONE) {
		printk("Checking %p\n", mas_mn(&mas));
+		if (!mte_is_root(mas.node)) {
+			end = mas_data_end(&mas);
+			if (end < mt_min_slot_cnt(mas.node)) {
+				pr_err("Invalid size %u of "MA_PTR"\n", end,
+				       mas_mn(&mas));
+				MT_BUG_ON(mas.tree, 1);
+			}
+
+		}
		mas_validate_parent_slot(&mas);
		mas_validate_child_slot(&mas);
		mas_validate_limits(&mas);
-- 
2.50.1