From: Liam R. Howlett Date: Mon, 1 Sep 2025 06:26:03 +0000 (-0400) Subject: debug removal but broken somehow X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=1aaf2519fa9c4ba7d675d366dcefb682c44b2d28;p=users%2Fjedix%2Flinux-maple.git debug removal but broken somehow Signed-off-by: Liam R. Howlett --- diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 90c080bb318b..ed546a9236c2 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -1749,7 +1749,6 @@ static inline void mas_adopt_children(struct ma_state *mas, offset = ma_data_end(node, type, pivots, mas->max); WARN_ON_ONCE(offset == 0); - //printk("Adopt %p 0 - %u\n", node, offset); do { child = mas_slot_locked(mas, slots, offset); mas_set_parent(mas, child, parent, offset); @@ -1823,7 +1822,6 @@ static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child) end = ma_data_end(node, mt, pivots, mas->max); for (offset = mas->offset; offset <= end; offset++) { entry = mas_slot_locked(mas, slots, offset); - //printk("%p[%u] entry %p parent %p\n", node, offset, entry, mte_parent(entry)); if (mte_parent(entry) == node) { *child = *mas; mas->offset = offset + 1; @@ -2909,9 +2907,6 @@ unsigned long node_copy(struct ma_state *mas, struct maple_node *src, - printk("cp src %p[%u-%u] => %p[%u]\n", src, start, start + size - 1, - dst, d_start); - printk("size is %u\n", size); d_slots = ma_slots(dst, d_mt) + d_start; d_pivots = ma_pivots(dst, d_mt) + d_start; s_slots = ma_slots(src, s_mt) + start; @@ -2920,13 +2915,8 @@ unsigned long node_copy(struct ma_state *mas, struct maple_node *src, if (!ma_is_leaf(d_mt) && s_mt == maple_copy) { struct maple_enode *edst = mt_mk_node(dst, d_mt); - printk("dst %p d_slots %p %p\n", dst, d_slots, *d_slots); - printk("d_start %u\n", d_start); - for (int i = 0; i < size; i++) { - printk("set slot %u parent\n", i); - printk("d_slots = %p\n", d_slots[i]); + for (int i = 0; i < size; i++) mas_set_parent(mas, d_slots[i], edst, d_start + i); - } } d_gaps = ma_gaps(dst, d_mt); @@ -2936,32 +2926,15 @@ unsigned long node_copy(struct ma_state *mas, struct maple_node *src, memcpy(d_gaps, s_gaps, size * sizeof(unsigned long)); } - /* This is wrong.. 
*/ - printk("\t\tAssume s_max..\n"); - printk("\t\tLast src is %u\n", start + size); -#if 0 - d_max = 0; - if (start + size <= mt_pivots[s_mt]) - d_max = s_pivots[size - 1]; - - if (d_max > s_max) - d_max = s_max; -#else d_max = s_max; - printk("\t\t d_start + size = %u\n", d_start + size); if (start + size < mt_pivots[s_mt]) { if (s_max > s_pivots[size - 1]) d_max = s_pivots[size - 1]; - printk("\t\td_max %lx\n", d_max); } -#endif - if (d_start + size <= mt_pivots[d_mt]) { - printk("Set final pivot at %u to %lx\n", size + d_start - 1, d_max); + + if (d_start + size <= mt_pivots[d_mt]) d_pivots[size - 1] = d_max; - } -// printk("cp from %p -> %p %u\n", d_pivots, s_pivots, size); - fflush(stdout); size--; if (size) memcpy(d_pivots, s_pivots, size * sizeof(unsigned long)); @@ -2977,7 +2950,6 @@ void node_finalise(struct maple_node *node, enum maple_type mt, unsigned char en unsigned long *gaps; unsigned char gap_slot; - printk("Finalise %p end %u max end %u\n", node, end, max_end); gaps = ma_gaps(node, mt); if (end < max_end - 1) { size = max_end - end; @@ -3021,11 +2993,6 @@ static inline void append_node_cp(struct maple_copy *cp, cp->s_count++; } -static void mt_dump_node(const struct maple_tree *mt, void *entry, - unsigned long min, unsigned long max, unsigned int depth, - enum mt_dump_format format); - - static inline void spanning_leaf_init(struct maple_copy *cp, struct ma_state *mas, struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas) @@ -3049,8 +3016,6 @@ static inline void spanning_leaf_init(struct maple_copy *cp, else cp->gap[end] = mas->last - mas->index + 1; - printk("r_wr_mas->r_max %lx mas last %lx offset %u", - r_wr_mas->r_max, mas->last, r_wr_mas->mas->offset); if (r_wr_mas->r_max > mas->last) { end++; cp->slot[end] = r_wr_mas->content; @@ -3076,22 +3041,16 @@ static inline void spanning_data_calc(struct maple_copy *cp, /* Add 1 every time for the 0th element */ cp->data = l_wr_mas->mas->offset; - printk("left: cp %p 0 - %u (%u)\n", l_wr_mas->mas->node, cp->data - 1, cp->data); cp->data += cp->end + 1; - printk("insert: %p data + end = %u\n", cp, cp->data); /* Data from right (offset + 1 to end), +1 for zero */ cp->data += r_wr_mas->mas->end - r_wr_mas->mas->offset; - printk("end %u - off %u + 1\n", r_wr_mas->mas->end, r_wr_mas->mas->offset); - printk("right: %p data = %u\n", r_wr_mas->mas->node, cp->data); if (((l_wr_mas->mas->min != 0) || (r_wr_mas->mas->max != ULONG_MAX)) && (cp->data <= mt_min_slots[l_wr_mas->type])) { - printk("MOVING!\n"); mas_spanning_move(l_wr_mas, r_wr_mas, sib); cp->data += sib->end + 1; - printk("%p data = %u\n", sib->node, cp->data); } else { sib->end = 0; } @@ -3118,9 +3077,7 @@ void spanning_split_dest_setup(struct maple_copy *cp, struct ma_state *mas, for (int i = 0; i < cp->d_count; i++) { cp->dst[i].mt = mt; cp->dst[i].node = ma_mnode_ptr(mas_pop_node(mas)); - printk("i node = %p\n", cp->dst[i].node); } - printk("split = %u data %u d_count %u type %u\n", cp->split, cp->data, cp->d_count, mt); } @@ -3152,10 +3109,8 @@ void spanning_split_src_setup(struct maple_copy *cp, struct ma_state *mas, append_node_cp(cp, l_wr_mas->mas, 0, off); cp->src[cp->s_count - 1].max = cp->min - 1; - printk("Use pivot %p [%u]\n", l_wr_mas->node, off); } - printk("cp min is %lx\n", cp->min); spanning_init_cp_src(cp); /* Copy right either from offset or offset + 1 pending on r_max */ @@ -3163,22 +3118,8 @@ void spanning_split_src_setup(struct maple_copy *cp, struct ma_state *mas, append_node_cp(cp, r_wr_mas->mas, r_wr_mas->mas->offset + 1, r_wr_mas->mas->end); 
- if (sib->end) { - if (sib->min > r_wr_mas->mas->max) { - append_node_cp(cp, sib, 0, sib->end); - /* Copy sibling, if necessary */ - /* set sib to right */ - //r_wr_mas->mas = sib; - } else { - /* set sib to left*/ - //l_wr_mas->mas = sib; - } - } - - printk("\t\t\t\tSources are %u:", cp->s_count); - for (int i = 0; i < cp->s_count; i++) - printk(" %p", cp->src[i].node); - printk("\n"); + if (sib->end && sib->min > r_wr_mas->mas->max) + append_node_cp(cp, sib, 0, sib->end); } static inline @@ -3218,49 +3159,24 @@ void spanning_data_write(struct maple_copy *cp, struct ma_state *mas) debug++; BUG_ON(debug > 8); size = next_node - data_offset; - printk("try to use size %d\n", size); /* Fill the destination */ - //printk("%d: %u %u %u\n", __LINE__, size, next_node, data_offset); - - if (src_end - s_offset + 1 < size) { - printk("size too big for src %p end: %u %u\n", src, src_end, s_offset); + if (src_end - s_offset + 1 < size) size = src_end - s_offset + 1; - } - printk("%d: %u\n", __LINE__, size); - printk("split %u dst_offset %u size %u\n", split, dst_offset, size); - if (split - dst_offset + 1 < size) { + if (split - dst_offset + 1 < size) size = split - dst_offset + 1; - } - printk("%d: size %u\n", __LINE__, size); node_copy(mas, src, s_offset, size, s_max, s_mt, dst, dst_offset, d_mt); - //printk("%d: set dest max %lx\n", __LINE__, cp->dst[d].max); -#if 0 - { - unsigned long min = cp->min; - printk("\n\nCount is %u\n", cp->d_count); - for (int i = 0; i < cp->d_count; i++) { - printk("dump %p %lu - %lu\n", cp->dst[i].node, min, cp->dst[i].max); - mt_dump_node(mas->tree, mt_mk_node(cp->dst[i].node, cp->dst[i].mt), - min, cp->dst[i].max, 0, mt_dump_hex); - min = cp->dst[i].max + 1; - } - } -#endif - data_offset += size; dst_offset += size; s_offset += size; if (s_offset > src_end) { - printk("\t\tnext src %u / %u\n", s +1, cp->s_count); /* This source is exhausted */ cp->dst[d].max = s_max; s++; if (s >= cp->s_count) { - printk("Forced final\n"); node_finalise(dst, d_mt, dst_offset); return; } @@ -3271,30 +3187,22 @@ void spanning_data_write(struct maple_copy *cp, struct ma_state *mas) s_max = cp->src[s].max; s_mt = cp->src[s].mt; } - printk("\toffset is %u next node is %u\n", data_offset, next_node); } while (data_offset < next_node); next_node *= 2; - if (next_node > data + 1) { - printk("next node reduced to data %u", data); + if (next_node > data + 1) next_node = data + 1; - } split = cp->split; - printk("reset split %u\n", split); /* Handle null entries */ if (dst_offset <= mt_pivots[d_mt]) { cp->dst[d].max = ma_pivots(dst, d_mt)[dst_offset - 1]; - printk("%d: set dest max %lx\n", __LINE__, cp->dst[d].max); - printk("===================>slot is %p\n", ma_slots(dst,d_mt)[dst_offset - 1]); } else { cp->dst[d].max = s_max; - printk("%d: set dest max %lx\n", __LINE__, cp->dst[d].max); } + if (cp->dst[d].max != ULONG_MAX && !ma_slots(dst, d_mt)[dst_offset - 1]) { - printk("NULL entry end %u and max %lx\n", dst_offset - 1, cp->dst[d].max); - //BUG_ON(1); if (s_offset == cp->src[s].start) { s--; src = cp->src[s].node; @@ -3308,18 +3216,15 @@ void spanning_data_write(struct maple_copy *cp, struct ma_state *mas) /* Set dst max and clear pivot */ next_node++; split++; - printk("inc split %u\n", split); data_offset--; dst_offset--; cp->dst[d].max = ma_pivots(dst, d_mt)[dst_offset - 1]; } node_finalise(dst, d_mt, dst_offset); - //mt_dump_node(mas->tree, mt_mk_node(dst, d_mt), 0, ULONG_MAX, 1, mt_dump_hex); if (d >= cp->d_count) { WARN_ON(data_offset < data); return; } - printk("\t\tnext dst\n"); 
/* Reset local dst */ ++d; dst = cp->dst[d].node; @@ -3336,17 +3241,11 @@ static bool spanning_ascend(struct maple_copy *cp, struct ma_state *mas, unsigned char d; unsigned long min; - printk("%s: %d\n", __func__, cp->d_count); - if (sib->end) { - printk("sib is at %p\n", sib->node); - if (sib->max < l_wr_mas->mas->min) { + if (sib->max < l_wr_mas->mas->min) *l_wr_mas->mas = *sib; - printk("Shift left\n"); - } else { + else *r_wr_mas->mas = *sib; - printk("Shift Right: %p\n", r_wr_mas->mas->node); - } } min = l_wr_mas->mas->min; @@ -3358,7 +3257,6 @@ static bool spanning_ascend(struct maple_copy *cp, struct ma_state *mas, cp->slot[d] = mt_mk_node(mn, mt); cp->pivot[d] = max; - printk("%p %lx - %lx\n", cp->slot[d], min, max); if (mt_is_alloc(mas->tree)) { if (ma_is_leaf(mt)) { cp->gap[d] = ma_leaf_max_gap(mn, mt, min, max, @@ -3374,7 +3272,6 @@ static bool spanning_ascend(struct maple_copy *cp, struct ma_state *mas, cp->gap[d] = gaps[gap_slot]; } } - printk("gap %lx\n", cp->gap[d]); } min = max + 1; } @@ -3387,7 +3284,6 @@ static bool spanning_ascend(struct maple_copy *cp, struct ma_state *mas, if (cp->d_count != 1) { enum maple_type mt = maple_arange_64; - printk("New root\n"); if (!mt_is_alloc(mas->tree)) mt = maple_range_64; @@ -3402,27 +3298,25 @@ static bool spanning_ascend(struct maple_copy *cp, struct ma_state *mas, cp->height++; } - printk("CONVERGED d_count is %u\n", cp->d_count); WARN_ON_ONCE(cp->dst[0].node != mte_to_node(cp->slot[0])); cp->dst[0].node->parent = ma_parent_ptr(mas_tree_parent(mas)); while (!mte_is_root(mas->node)) mas_ascend(mas); - printk("parent %p vs %p\n", cp->dst[0].node->parent, mas_mn(mas)->parent); - printk("cp->dst %p cp->slot %p\n", cp->dst[0].node, mte_to_node(cp->slot[0])); return false; } else if (l_wr_mas->mas->node == r_wr_mas->mas->node) { /* Converged, but caused a cascading split. */ + if (cp->d_count != 1) { + mt_dump(mas->tree, mt_dump_hex); + printk("At %p\n", l_wr_mas->mas->node); + printk("Writing %lx -%lx => %p\n", mas->index, mas->last, l_wr_mas->entry); + } WARN_ON_ONCE(cp->d_count != 1); - printk("WARNING! CONVERGED d_count is %u\n", cp->d_count); //cp->dst[0].node->parent = mas_mn(mas)->parent; //return false; } - printk("more nodes.. 
%u\n", cp->end); wr_mas_ascend(l_wr_mas); wr_mas_ascend(r_wr_mas); - printk("At %p[%u] and %p[%u]\n", l_wr_mas->node, l_wr_mas->mas->offset, - r_wr_mas->node, r_wr_mas->mas->offset); /* * cp->slot[0] should go in l_wr_mas->offset * cp->slot[end] should go in r_wr_mas->offset @@ -3441,7 +3335,6 @@ static void mas_spanning_rebalance_loop(struct ma_state *mas, struct maple_enode *left = NULL, *middle = NULL, *right = NULL; struct maple_enode *old_enode; - bool debug = true; /* * Each level of the tree is examined and balanced, pushing data to the left or * right, or rebalancing against left or right nodes is employed to avoid @@ -3463,28 +3356,6 @@ static void mas_spanning_rebalance_loop(struct ma_state *mas, mast_cp_to_nodes(mast, left, middle, right, split, mid_split); new_height++; - if (debug) { - for (int i = 0; i < 3; i++) { - struct maple_enode *dst = NULL; - struct ma_state *state; - - if (i == 0) { - dst = left; - state = mast->l; - } else if (i == 1) { - dst = middle; - state = mast->m; - } else { - state = mast->r; - dst = right; - } - if (!dst) - continue; - - mt_dump_node(mas->tree, dst, state->min, state->max, count, mt_dump_hex); - } - debug = false; - } /* * Copy data from next level in the tree to mast->bn from next * iteration @@ -3614,7 +3485,6 @@ static void mas_wr_spanning_rebalance(struct ma_state *mas, struct maple_enode *old_enode; struct ma_state sib; struct maple_copy cp; - int debug = 0; /* * Spanning store is different in that the write is actually from @@ -3623,48 +3493,19 @@ static void mas_wr_spanning_rebalance(struct ma_state *mas, * being stored to the last slot of the left node. */ - mt_dump(mas->tree, mt_dump_hex); - - printk("\n\nSTARTING WRITE OF %lx - %lx => %p\n", mas->index, mas->last, l_wr_mas->entry); spanning_leaf_init(&cp, mas, l_wr_mas, r_wr_mas); do { - printk("\nlmas %p rmas %p\n", l_wr_mas->node, r_wr_mas->node); - cp.height++; - printk("%d: cp min %lx\n", __LINE__, cp.min); spanning_data_calc(&cp, mas, l_wr_mas, r_wr_mas, &sib); - printk("%d: cp min %lx\n", __LINE__, cp.min); spanning_split_dest_setup(&cp, mas, l_wr_mas->type); - printk("%d: cp min %lx\n", __LINE__, cp.min); spanning_split_src_setup(&cp, mas, l_wr_mas, r_wr_mas, &sib); - printk("%d: cp min %lx\n", __LINE__, cp.min); spanning_data_write(&cp, mas); - printk("%d: cp min %lx\n", __LINE__, cp.min); -#if 0 - if (debug < 2) - { - unsigned long min = l_wr_mas->mas->min; - printk("\n\nCount is %u\n", cp.d_count); - for (int i = 0; i < cp.d_count; i++) { - printk("dump %p %lx - %lx\n", cp.dst[i].node, min, cp.dst[i].max); - mt_dump_node(mas->tree, mt_mk_node(cp.dst[i].node, cp.dst[i].mt), - min, cp.dst[i].max, cp.height, mt_dump_hex); - min = cp.dst[i].max + 1; - } - } -#endif - printk ("NEXT LEVEL %d\n", debug++); - BUG_ON(debug > 4); - printk("%d: cp min %lx\n", __LINE__, cp.min); } while (spanning_ascend(&cp, mas, l_wr_mas, r_wr_mas, &sib)); old_enode = mas->node; mas->node = cp.slot[0]; - printk("Replace %p with %p\n", old_enode, mas->node); mas_wmb_replace(mas, old_enode, cp.height); mtree_range_walk(mas); - printk("range at %p[%u]\n", mas->node, mas->offset); - mt_dump(mas->tree, mt_dump_hex); } /* @@ -4319,12 +4160,8 @@ static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas, l_wr_mas->r_min = l_mas->index; } - if (!r_wr_mas->content) { - //if (r_mas->last < r_wr_mas->r_max) - // r_mas->offset++; + if (!r_wr_mas->content) r_mas->last = r_wr_mas->r_max; - printk("Extend end pivot\n"); - } } static inline void *mas_state_walk(struct ma_state *mas) @@ -4457,7 +4294,6 
@@ static void mas_wr_spanning_store(struct ma_wr_state *wr_mas) */ mas = wr_mas->mas; trace_ma_op(__func__, mas); - printk("init write is %lx - %lx => %p\n", mas->index, mas->last, wr_mas->entry); if (unlikely(!mas->index && mas->last == ULONG_MAX)) return mas_new_root(mas, wr_mas->entry); @@ -4479,7 +4315,6 @@ static void mas_wr_spanning_store(struct ma_wr_state *wr_mas) r_mas.index = r_mas.last; mas_wr_walk_index(&r_wr_mas); r_mas.last = r_mas.index = mas->last; - printk("r_mas is at %p\n", r_mas.node); BUG_ON(!r_mas.end); /* Set up left side. */ diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c index 7118eea67fa5..64267e397762 100644 --- a/tools/testing/radix-tree/maple.c +++ b/tools/testing/radix-tree/maple.c @@ -35895,14 +35895,12 @@ static noinline void __init check_spanning_write(struct maple_tree *mt) for (i = 0; i <= max; i++) mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); mtree_lock(mt); - mt_dump(mt, mt_dump_hex); /* Store a null across a boundary that ends in a null */ mas_set(&mas, 49835); MT_BUG_ON(mt, mas_walk(&mas) == NULL); MT_BUG_ON(mt, mas.end != mas.offset); MT_BUG_ON(mt, mas_next_range(&mas, ULONG_MAX) != NULL); mas_set_range(&mas, 49835, mas.last - 1); - printk("Storing %lx - %lx\n", 49835, mas.last - 1); mas_store_gfp(&mas, NULL, GFP_KERNEL); mt_validate(mt); @@ -36468,11 +36466,8 @@ static inline void check_spanning_store_height(struct maple_tree *mt) mas_store_gfp(&mas, xa_mk_value(index), GFP_KERNEL); mas_set(&mas, ++index); } - mt_dump(mt, mt_dump_dec); - printk("VS\n"); mas_set_range(&mas, 90, 140); mas_store_gfp(&mas, xa_mk_value(index), GFP_KERNEL); - mt_dump(mt, mt_dump_dec); MT_BUG_ON(mt, mas_mt_height(&mas) != 2); mas_unlock(&mas); } @@ -36585,7 +36580,6 @@ static inline int check_vma_modification(struct maple_tree *mt) __mas_set_range(&mas, 0x7ffde4ca2000, 0x7ffffffff000 - 1); mas_preallocate(&mas, NULL, GFP_KERNEL); mas_store_prealloc(&mas, NULL); - mt_dump(mt, mt_dump_hex); mas_destroy(&mas); mtree_unlock(mt);
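---

For reference, a minimal sketch of how the dumps deleted above could instead be kept behind a compile-time switch while the "broken somehow" regression is chased. This is illustrative only and not part of the patch: MT_SPAN_DEBUG and mt_span_dbg() are hypothetical names, and the sketch assumes it sits inside lib/maple_tree.c where mt_dump(), mt_dump_hex, struct ma_state and struct maple_copy are already visible (all of them appear in the removed lines).

#define MT_SPAN_DEBUG 0	/* hypothetical guard: set to 1 to re-enable the dumps */

#if MT_SPAN_DEBUG
#define mt_span_dbg(fmt, ...)	printk(fmt, ##__VA_ARGS__)
#else
#define mt_span_dbg(fmt, ...)	do { } while (0)
#endif

/* Example call site, mirroring the dump removed from spanning_ascend(): */
static inline void mt_span_dump(struct ma_state *mas, struct maple_copy *cp)
{
	if (!MT_SPAN_DEBUG)
		return;

	mt_dump(mas->tree, mt_dump_hex);
	mt_span_dbg("d_count is %u\n", cp->d_count);
}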