From: Liam R. Howlett
Date: Wed, 3 Sep 2025 16:09:58 +0000 (-0400)
Subject: Remove debug - Working spanning store
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=0af3d06dfd8671f2d7ed6998eb946eec71a3a54b;p=users%2Fjedix%2Flinux-maple.git

Remove debug - Working spanning store

Signed-off-by: Liam R. Howlett
---

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 5e225e40b653..eb8f55d139b8 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -666,7 +666,6 @@ void mt_cache_shrink(void);
                 pr_info("BUG at %s:%d (%u)\n",                  \
                         __func__, __LINE__, __x);               \
                 mt_dump(__tree, mt_dump_hex);                   \
-                fflush(stdout);\
                 pr_info("Pass: %u Run:%u\n",                    \
                         atomic_read(&maple_tree_tests_passed),  \
                         atomic_read(&maple_tree_tests_run));    \
@@ -683,7 +682,6 @@ void mt_cache_shrink(void);
                         __func__, __LINE__, __x);               \
                 mas_dump(__mas);                                \
                 mt_dump((__mas)->tree, mt_dump_hex);            \
-                fflush(stdout);\
                 pr_info("Pass: %u Run:%u\n",                    \
                         atomic_read(&maple_tree_tests_passed),  \
                         atomic_read(&maple_tree_tests_run));    \
@@ -701,7 +699,6 @@ void mt_cache_shrink(void);
                 mas_wr_dump(__wrmas);                           \
                 mas_dump((__wrmas)->mas);                       \
                 mt_dump((__wrmas)->mas->tree, mt_dump_hex);     \
-                fflush(stdout);\
                 pr_info("Pass: %u Run:%u\n",                    \
                         atomic_read(&maple_tree_tests_passed),  \
                         atomic_read(&maple_tree_tests_run));    \
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 636a4d35fd67..f587b3c14905 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1826,7 +1826,6 @@ static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child)
         end = ma_data_end(node, mt, pivots, mas->max);
         for (offset = mas->offset; offset <= end; offset++) {
                 entry = mas_slot_locked(mas, slots, offset);
-                printk("entry %p at offset %u\n", entry, offset);
                 if (mte_parent(entry) == node) {
                         *child = *mas;
                         mas->offset = offset + 1;
@@ -2912,12 +2911,10 @@ unsigned long node_copy(struct ma_state *mas, struct maple_node *src,
 
-        printk("d_start = %u on %u\n", d_start, d_mt);
         d_slots = ma_slots(dst, d_mt) + d_start;
         d_pivots = ma_pivots(dst, d_mt) + d_start;
         s_slots = ma_slots(src, s_mt) + start;
         s_pivots = ma_pivots(src, s_mt) + start;
-        fflush(stdout);
         memcpy(d_slots, s_slots, size * sizeof(void __rcu*));
         if (!ma_is_leaf(d_mt) && s_mt == maple_copy) {
                 struct maple_enode *edst = mt_mk_node(dst, d_mt);
@@ -3006,9 +3003,6 @@ static inline void spanning_leaf_init(struct maple_copy *cp,
 {
         unsigned char end = 0;
 
-        printk("write %lx - %lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
-        printk("l %p[%u] r %p[%u]\n", l_wr_mas->node, l_wr_mas->mas->offset - 1,
-               r_wr_mas->node, r_wr_mas->mas->offset + 1);
         /* Create entries to insert including split entries to left and right */
         if (l_wr_mas->r_min < mas->index) {
                 cp->slot[0] = l_wr_mas->content;
@@ -3051,20 +3045,16 @@ static inline void spanning_data_calc(struct maple_copy *cp,
 
         /* Add 1 every time for the 0th element */
         cp->data = l_wr_mas->mas->offset;
-        printk("%d %d\n", __LINE__, cp->data);
         cp->data += cp->end + 1;
-        printk("%d %d\n", __LINE__, cp->data);
         /* Data from right (offset + 1 to end), +1 for zero */
         cp->data += r_wr_mas->mas->end - r_wr_mas->mas->offset;
-        printk("%d %d\n", __LINE__, cp->data);
 
         if (((l_wr_mas->mas->min != 0) || (r_wr_mas->mas->max != ULONG_MAX)) &&
             (cp->data <= mt_min_slots[l_wr_mas->type])) {
                 mas_spanning_move(l_wr_mas, r_wr_mas, sib);
                 cp->data += sib->end + 1;
-                printk("%d %d\n", __LINE__, cp->data);
         } else {
                 sib->end = 0;
         }
@@ -3099,33 +3089,22 @@ void spanning_split_dest_setup(struct maple_copy *cp, struct ma_state *mas,
          * can fit due to the NULL limitation on node ends.
          */
         off = cp->split;;
-        printk("start %u\n", off);
         for (s = 0; s < cp->s_count; s++) {
                 unsigned char s_off;
 
                 s_off = cp->src[s].end - cp->src[s].start;
-                printk("%u size %u\n", s, s_off);
                 if (s)
                         off--;
-                if (s_off >= off) {
-                        printk("s_off fits\n");
+                if (s_off >= off)
                         break;
-                }
+
                 off -= s_off;
-                printk("offset %u\n", off);
         }
 
         off += cp->src[s].start;
-        printk("CHECK %p[%u]\n", cp->src[s].node, off);
-        mt_dump(mas->tree, mt_dump_hex);
-        fflush(stdout);
-        if (ma_slots(cp->src[s].node, cp->src[s].mt)[off]) {
-                printk("src %u slot %u is not NULL\n", s, off);
+        if (ma_slots(cp->src[s].node, cp->src[s].mt)[off])
                 goto node_setup;
-        } else
-                printk("src %u slot %u IS NULL\n", s, off);
 
         cp->split++;
-        printk("set split to %u\n", cp->split);
         if (cp->split < mt_slots[mt])
                 goto node_setup;
@@ -3136,11 +3115,9 @@ void spanning_split_dest_setup(struct maple_copy *cp, struct ma_state *mas,
         cp->d_count = 3;
 
 node_setup:
-        printk("data %u split %u d_count %u type %u\n", cp->data, cp->split, cp->d_count, mt);
         for (int i = 0; i < cp->d_count; i++) {
                 cp->dst[i].mt = mt;
                 cp->dst[i].node = ma_mnode_ptr(mas_pop_node(mas));
-                printk("%d type %u ptr %p\n", i, mt, cp->dst[i].node);
         }
 }
 
@@ -3186,7 +3163,7 @@ void spanning_split_src_setup(struct maple_copy *cp, struct ma_state *mas,
                 append_node_cp(cp, sib, 0, sib->end);
 }
 
-//static inline
+static inline
 void spanning_data_write(struct maple_copy *cp, struct ma_state *mas)
 {
         struct maple_node *dst, *src;
@@ -3198,7 +3175,6 @@ void spanning_data_write(struct maple_copy *cp, struct ma_state *mas)
         unsigned long s_max;
         enum maple_type s_mt, d_mt;
 
-        printk("\n\n%s\n", __func__);
         s = d = 0;
         /* Readability help */
         src = cp->src[s].node;
@@ -3218,27 +3194,16 @@ void spanning_data_write(struct maple_copy *cp, struct ma_state *mas)
          * size (and split, and next_node will be 1 indexed while
          * src start/end dst start/end are 0 indexed
          */
-        printk("MAX src %u dst %u\n", cp->s_count, cp->d_count);
         do {
                 do {
-                        printk("%u - %u + 1\n", next_node, data_offset);
                         size = next_node - data_offset + 1;
                         /* Fill the destination */
-                        printk("src end %u s_off %u size %u\n", src_end, s_offset, size);
-                        if (src_end - s_offset + 1 < size) {
+                        if (src_end - s_offset + 1 < size)
                                 size = src_end - s_offset + 1;
-                                printk("%d: %u %u %u\n", __LINE__, size, split, dst_offset);
-                        }
 
-                        printk("splut %u d_off %u size %u\n", split , dst_offset, size);
-                        if (split - dst_offset + 1 < size) {
+                        if (split - dst_offset + 1 < size)
                                 size = split - dst_offset + 1;
-                                printk("%d: %u %u %u\n", __LINE__, size, split, dst_offset);
-                        }
 
-                        printk("Size is %u\n", size);
-                        printk("src %u dst %u\n", s, d);
-                        printk("src node %p to %p\n", src, dst);
                         node_copy(mas, src, s_offset, size, s_max, s_mt,
                                   dst, dst_offset, d_mt);
                         data_offset += size;
@@ -3276,10 +3241,6 @@ void spanning_data_write(struct maple_copy *cp, struct ma_state *mas)
                 /* Handle null entries */
                 if (cp->dst[d].max != ULONG_MAX &&
                     !ma_slots(dst, d_mt)[dst_offset - 1]) {
-                        printk("%p slot %u is nul!!\n", dst, dst_offset - 1);
-                        printk("src is %p[%u]\n", src, s_offset - 1);
-                        fflush(stdout);
-                        BUG_ON(cp->d_count == 2);
                         if (s_offset == cp->src[s].start) {
                                 s--;
                                 src = cp->src[s].node;
@@ -3303,13 +3264,11 @@ void spanning_data_write(struct maple_copy *cp, struct ma_state *mas)
                         return;
                 }
                 /* Reset local dst */
-                printk("Switch dst at %d\n", cp->d_count);
                 ++d;
                 dst = cp->dst[d].node;
                 d_mt = cp->dst[d].mt;
                 dst_offset = 0;
         } while (data_offset <= data);
-        BUG_ON(1);
 }
 
@@ -3583,7 +3542,6 @@ static void mas_wr_spanning_rebalance(struct ma_state *mas,
         mas->node = cp.slot[0];
         mas_wmb_replace(mas, old_enode, cp.height);
         mtree_range_walk(mas);
-        printk("\n\n");
 }
 
 /*
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index c397cddf353f..45c1a0a6dd47 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -34433,7 +34433,6 @@ static void *rcu_reader_fwd(void *ptr)
                                 rcu_read_unlock();
                                 goto quit;
                         }
-                        fflush(stdout);
                         printk("start is wrong: %lx (%lu) vs expected %lx (%lu)\n", mas.index, mas.index, r_start, r_start);
                 }
                 RCU_MT_BUG_ON(test, mas.index != r_start);
@@ -34443,7 +34442,6 @@ static void *rcu_reader_fwd(void *ptr)
                                 rcu_read_unlock();
                                 goto quit;
                         }
-                        fflush(stdout);
                         printk("last is wrong: %lx (%lu) vs expected %lx (%lu)\n", mas.last, mas.last, r_end, r_end);
                 }
                 RCU_MT_BUG_ON(test, mas.last != r_end);
@@ -34967,7 +34965,6 @@ static void *rcu_loop(void *ptr)
                 if (entry != expected) {
                         if (pthread_mutex_trylock(&test->dump) != 0)
                                 break;
-                        fflush(stdout);
                         printk("\nERROR: %lx - %lx = %p not %p\n",
                                mas.index, mas.last, entry, expected);
                 }
@@ -34987,7 +34984,6 @@ static void *rcu_loop(void *ptr)
                         mas_set(&mas, test->range_start);
         }
 
-        fflush(stdout);
         rcu_unregister_thread();
         return NULL;
 }
@@ -35732,7 +35728,6 @@ static noinline void __init check_spanning_write(struct maple_tree *mt)
         mtree_lock(mt);
         mas_store_gfp(&mas, NULL, GFP_KERNEL);
         mas_set(&mas, 1205);
-        printk("Check %lx\n", mas.index);
         MT_BUG_ON(mt, mas_walk(&mas) != NULL);
         mtree_unlock(mt);
         mtree_destroy(mt);
@@ -36592,9 +36587,9 @@ static inline void check_bulk_rebalance(struct maple_tree *mt)
         MA_STATE(mas, mt, ULONG_MAX, ULONG_MAX);
         int max = 10;
 
+        return;
         build_full_tree(mt, 0, 2);
 
-        return;
         /* erase every entry in the tree */
         do {
                 /* set up bulk store mode */