}
/* ma_get_safe_pivot() - Return the pivot or the mas->max.
+ *
+ * Return: The pivot at @slot, or mas->max for the final slot.
 */
static inline unsigned long ma_get_safe_pivot(const struct ma_state *mas,
unsigned char slot)
enum maple_type type = mt_node_type(cp->src);
enum maple_type dtype;
unsigned char pivot_cnt = mt_pivots[type];
- unsigned long piv, prev_piv = cp->start_piv;
+ unsigned long piv, prev_piv = mas->min;
if (!cp->dst) {
/* Allocate a new node */
if (prev_piv >= mas->max)
break;
- if (sloc < pivot_cnt) {
+ if (sloc < pivot_cnt)
piv = ma_get_pivot(cp->src, sloc);
- if (sloc != 0 && piv == 0)
- break;
- if (piv < prev_piv) {
- sloc++;
- continue;
- }
- } else {
+ else
piv = mas->max;
- }
+
+ if (sloc && !piv)
+ break;
+
+ if (piv < cp->start_piv)
+ goto next_src_slot;
+
+ if (sloc && piv == prev_piv)
+ goto next_src_slot;
if (dloc < pivot_cnt)
ma_set_pivot(cp->dst, dloc, piv);
if (dtype == maple_arange_64)
ma_cp_gap(cp->dst, dloc, cp->src, sloc);
- ma_cp_rcu_slot(cp->dst, dloc++, cp->src, sloc++);
+ ma_cp_rcu_slot(cp->dst, dloc++, cp->src, sloc);
prev_piv = piv;
+next_src_slot:
+ sloc++;
}
cp->dst_start = dloc;
*/
static inline int mas_coalesce(struct ma_state *mas, unsigned char s_slot)
{
- struct maple_enode *src = mas->node;
- struct maple_enode *dst = NULL;
- unsigned char d_slot = 0;
- unsigned long last = 0;
int ret = 0;
- enum maple_type type = mt_node_type(mas->node);
unsigned char slot_cnt;
- unsigned char pivot_cnt;
- //bool check_prev = false;
-
- slot_cnt = mt_slots[type];
- pivot_cnt = mt_pivots[type];
+ unsigned long pivot, last = 0;
+ slot_cnt = mt_slot_count(mas->node);
for (; s_slot < slot_cnt; s_slot++) {
- if (s_slot < pivot_cnt) {
- unsigned long pivot = ma_get_pivot(src, s_slot);
-
- if (!pivot)
- goto done;
+ pivot = ma_get_safe_pivot(mas, s_slot);
+ if (s_slot && !pivot)
+ break;
- if (last == pivot && !dst) {
- //if (s_slot == 1)
- // check_prev = true;
- // First duplicate pivot.
- d_slot = s_slot;
- // Copy data to new node.
- mas_partial_copy(mas, s_slot - 1);
- if (mas_is_err(mas))
- goto mas_error;
-
- dst = mas->node;
- continue;
- }
+ if (s_slot && last == pivot) {
+ mas_partial_copy(mas, mt_slot_count(mas->node));
+ if (mas_is_err(mas))
+ goto mas_error;
- last = pivot;
- if (!dst)
- continue;
+ ret = 1;
+ goto done;
+ }
- ma_cp_pivot(dst, d_slot, src, s_slot);
- ma_cp_rcu_slot(dst, d_slot++, src, s_slot);
+ if (pivot == mas->max)
+ break;
- } else if (dst && ma_get_rcu_slot(src, s_slot)) {
- ma_set_pivot(dst, d_slot, mas->max);
- ma_cp_rcu_slot(dst, d_slot, src, s_slot);
- // Detect dedup and rebalance.
- }
+ last = pivot;
}
done:
- if (!dst)
- return 0;
-
- ret = s_slot - d_slot;
- mt_replace(mas);
+ if (ret)
+ mt_replace(mas);
mas_error: // Regardless of allocation, update gaps.
if (mt_is_alloc(mas->tree))
#define MT_BUG_ON(tree, x) do { \
tests_run++; \
if (x) { \
- printk("BUG at %s:%d (%u)\n", \
+ pr_info("BUG at %s:%d (%u)\n", \
__func__, __LINE__, x); \
mt_dump(tree); \
- printk("Pass: %u Run:%u\n", tests_passed, tests_run); \
+ pr_info("Pass: %u Run:%u\n", tests_passed, tests_run); \
dump_stack(); \
} else { \
tests_passed++; \
if (verbose) {
rcu_barrier();
mt_dump(mt);
- printk(" seq test of 0-%lu used %luK in %d allocations\n",
+ pr_info(" seq test of 0-%lu used %luK in %d allocations\n",
max, mt_get_alloc_size()/1024, nr_allocated);
}
}
check_load(&tree, set[3], &tree);
mt_set_non_kernel(1);
check_erase(&tree, set[1]);
+ check_load(&tree, set[0], ptr);
check_load(&tree, set[1], NULL);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
check_insert(&tree, set[1], &tree);
check_insert(&tree, set[4], ptr); // 1000 < Should split.
check_load(&tree, set[0], ptr);
rcu_barrier();
- printk("maple_tree: %u of %u tests passed\n", tests_passed, tests_run);
+ pr_info("maple_tree: %u of %u tests passed\n", tests_passed, tests_run);
return (tests_run == tests_passed) ? 0 : -EINVAL;
}