www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
maple_tree: Clean up ma_copy and mas_coalesce
author Liam R. Howlett <Liam.Howlett@Oracle.com>
Fri, 2 Aug 2019 19:35:27 +0000 (15:35 -0400)
committer Liam R. Howlett <Liam.Howlett@Oracle.com>
Fri, 2 Aug 2019 19:35:27 +0000 (15:35 -0400)
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
lib/maple_tree.c
lib/test_maple_tree.c

index 59a3846e369c6d2646b0b2654c168e7b5bca4777..6cdc42f198e2e1db73d43680faefa8fe05cc3349 100644 (file)
@@ -463,6 +463,8 @@ static inline unsigned long ma_get_pivot(const struct maple_enode *mn,
 }
 
 /* ma_get_safe_pivot() - Return the pivot or the mas->max.
+ *
+ * Return: The pivot (including mas->max for the final slot)
  */
 static inline unsigned long ma_get_safe_pivot(const struct ma_state *mas,
                                         unsigned char slot)
@@ -1337,7 +1339,7 @@ static inline void ma_copy(struct ma_state *mas, struct ma_cp *cp)
        enum maple_type type = mt_node_type(cp->src);
        enum maple_type dtype;
        unsigned char pivot_cnt = mt_pivots[type];
-       unsigned long piv, prev_piv = cp->start_piv;
+       unsigned long piv, prev_piv = mas->min;
 
        if (!cp->dst) {
                /* Allocate a new node */
@@ -1360,26 +1362,30 @@ static inline void ma_copy(struct ma_state *mas, struct ma_cp *cp)
                if (prev_piv >= mas->max)
                        break;
 
-               if (sloc < pivot_cnt) {
+               if (sloc < pivot_cnt)
                        piv = ma_get_pivot(cp->src, sloc);
-                       if (sloc != 0 && piv == 0)
-                               break;
-                       if (piv < prev_piv) {
-                               sloc++;
-                               continue;
-                       }
-               } else {
+               else
                        piv = mas->max;
-               }
+
+               if (sloc && !piv)
+                       break;
+
+               if (piv < cp->start_piv)
+                       goto next_src_slot;
+
+               if (sloc && piv == prev_piv)
+                       goto next_src_slot;
 
                if (dloc < pivot_cnt)
                        ma_set_pivot(cp->dst, dloc, piv);
 
                if (dtype == maple_arange_64)
                        ma_cp_gap(cp->dst, dloc, cp->src, sloc);
-               ma_cp_rcu_slot(cp->dst, dloc++, cp->src, sloc++);
+               ma_cp_rcu_slot(cp->dst, dloc++, cp->src, sloc);
 
                prev_piv = piv;
+next_src_slot:
+               sloc++;
        }
 
        cp->dst_start = dloc;
@@ -2289,60 +2295,34 @@ retry:
  */
 static inline int mas_coalesce(struct ma_state *mas, unsigned char s_slot)
 {
-       struct maple_enode *src = mas->node;
-       struct maple_enode *dst = NULL;
-       unsigned char d_slot = 0;
-       unsigned long last = 0;
        int ret = 0;
-       enum maple_type type = mt_node_type(mas->node);
        unsigned char slot_cnt;
-       unsigned char pivot_cnt;
-       //bool check_prev = false;
-
-       slot_cnt = mt_slots[type];
-       pivot_cnt = mt_pivots[type];
+       unsigned long pivot, last = 0;
 
+       slot_cnt = mt_slot_count(mas->node);
        for (; s_slot < slot_cnt; s_slot++) {
-               if (s_slot < pivot_cnt) {
-                       unsigned long pivot = ma_get_pivot(src, s_slot);
-
-                       if (!pivot)
-                               goto done;
+               pivot = ma_get_safe_pivot(mas, s_slot);
+               if (s_slot && !pivot)
+                       break;
 
-                       if (last == pivot && !dst) {
-                               //if (s_slot == 1)
-                               //      check_prev = true;
-                               // First duplicate pivot.
-                               d_slot = s_slot;
-                               // Copy data to new node.
-                               mas_partial_copy(mas, s_slot - 1);
-                               if (mas_is_err(mas))
-                                       goto mas_error;
-
-                               dst = mas->node;
-                               continue;
-                       }
+               if (s_slot && last == pivot) {
+                       mas_partial_copy(mas, mt_slot_count(mas->node));
+                       if (mas_is_err(mas))
+                               goto mas_error;
 
-                       last = pivot;
-                       if (!dst)
-                               continue;
+                       ret = 1;
+                       goto done;
+               }
 
-                       ma_cp_pivot(dst, d_slot, src, s_slot);
-                       ma_cp_rcu_slot(dst, d_slot++, src, s_slot);
+               if (pivot == mas->max)
+                       break;
 
-               } else if (dst && ma_get_rcu_slot(src, s_slot)) {
-                       ma_set_pivot(dst, d_slot, mas->max);
-                       ma_cp_rcu_slot(dst, d_slot, src, s_slot);
-                       // Detect dedup and rebalance.
-               }
+               last = pivot;
        }
 
 done:
-       if (!dst)
-               return 0;
-
-       ret = s_slot - d_slot;
-       mt_replace(mas);
+       if (ret)
+               mt_replace(mas);
 
 mas_error: // Regardless of allocation, update gaps.
        if (mt_is_alloc(mas->tree))
index 4c5c4520c7527295496a16f67378f5db02c9337e..f2c2b41ba187c7467fbf5570164dafab6b9f94f4 100644 (file)
@@ -19,10 +19,10 @@ void mt_dump(const struct maple_tree *mt) { }
 #define MT_BUG_ON(tree, x) do {                                        \
        tests_run++;                                            \
        if (x) {                                                \
-               printk("BUG at %s:%d (%u)\n",                   \
+               pr_info("BUG at %s:%d (%u)\n",                  \
                __func__, __LINE__, x);                         \
                mt_dump(tree);                                  \
-               printk("Pass: %u Run:%u\n", tests_passed, tests_run); \
+               pr_info("Pass: %u Run:%u\n", tests_passed, tests_run); \
                dump_stack();                                   \
        } else {                                                \
                tests_passed++;                                 \
@@ -318,7 +318,7 @@ static noinline void check_seq(struct maple_tree *mt, unsigned long max,
        if (verbose) {
                rcu_barrier();
                mt_dump(mt);
-               printk(" seq test of 0-%lu used %luK in %d allocations\n",
+               pr_info(" seq test of 0-%lu used %luK in %d allocations\n",
                        max, mt_get_alloc_size()/1024, nr_allocated);
        }
 }
@@ -849,7 +849,10 @@ static int maple_tree_seed(void)
        check_load(&tree, set[3], &tree);
        mt_set_non_kernel(1);
        check_erase(&tree, set[1]);
+       check_load(&tree, set[0], ptr);
        check_load(&tree, set[1], NULL);
+       check_load(&tree, set[2], ptr);
+       check_load(&tree, set[3], &tree);
        check_insert(&tree, set[1], &tree);
        check_insert(&tree, set[4], ptr); // 1000 < Should split.
        check_load(&tree, set[0], ptr);
@@ -902,7 +905,7 @@ static int maple_tree_seed(void)
 
        rcu_barrier();
 
-       printk("maple_tree: %u of %u tests passed\n", tests_passed, tests_run);
+       pr_info("maple_tree: %u of %u tests passed\n", tests_passed, tests_run);
        return (tests_run == tests_passed) ? 0 : -EINVAL;
 }