unsigned char min = mt_min_slots[b_node->type] - 1;
*mid_split = 0;
- if ((mas->mas_flags & MA_STATE_REBALANCE) &&
+ if ((mas->mas_flags & MA_STATE_BULK) &&
ma_is_leaf(b_node->type)) {
min = 2;
split = mt_slots[b_node->type] - min;
+ mas->mas_flags |= MA_STATE_REBALANCE;
}
	/* Avoid having a range less than the slot count unless it
	 * causes one node to be deficient.
	 */
}
}
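
Note: in isolation, the split-calculation hunk above biases the split point whenever a leaf is split during an in-order bulk insert: the left node is kept as full as the node type allows, and MA_STATE_REBALANCE is recorded so the trailing, possibly deficient node can be fixed up later in mas_destroy(). A minimal standalone sketch of that bias follows; the slot counts and the b_end / 2 default are placeholders of mine, not the kernel's mt_slots[]/mt_min_slots[] tables or the non-bulk path of the real helper.

/* Illustrative only: split-point bias for bulk leaf stores. */
#define SLOTS		16	/* stand-in for mt_slots[type] */
#define MIN_SLOTS	6	/* stand-in for mt_min_slots[type] */

static unsigned char calc_split(unsigned char b_end, int bulk_leaf)
{
	unsigned char min = MIN_SLOTS - 1;
	unsigned char split = b_end / 2;	/* assumed default: split roughly evenly */

	if (bulk_leaf) {
		min = 2;
		split = SLOTS - min;		/* keep the left node full, leave
						 * only a small tail behind */
	}
	return split;
}
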
-static inline void mas_advanced_may_rebalance(struct ma_state *mas)
+static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
+ enum maple_type mt)
{
if (!(mas->mas_flags & MA_STATE_BULK))
return;
if (mte_is_root(mas->node))
return;
- mas->mas_flags |= MA_STATE_REBALANCE;
+ if (end > mt_min_slots[mt]) {
+ mas->mas_flags &= ~MA_STATE_REBALANCE;
+ return;
+ }
}
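
For readers following the flag handling: together with the split hunk above and the mas_destroy() change further down, the excerpt suggests the lifecycle paraphrased below. This is a reading of the patch compressed into one illustrative function with local names, not kernel code.

/* Paraphrase of the MA_STATE_BULK / MA_STATE_REBALANCE flow in this excerpt. */
#define F_BULK		0x1
#define F_REBALANCE	0x2

static void bulk_flag_flow(unsigned int *flags, int is_root,
			   unsigned char end, unsigned char min_slots)
{
	if (!(*flags & F_BULK))		/* only bulk inserts are affected */
		return;

	/* Split path: a bulk split marks a possibly deficient tail node. */
	*flags |= F_REBALANCE;

	/* mas_bulk_rebalance(): a later store that leaves the end node with
	 * more than the minimum entries cancels the pending rebalance. */
	if (!is_root && end > min_slots)
		*flags &= ~F_REBALANCE;

	/* mas_destroy() (further down): rebalance once if still flagged,
	 * then drop the bulk state. */
	*flags &= ~(F_REBALANCE | F_BULK);
}
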
/*
* mas_store_b_node() - Store an @entry into the b_node while also copying the
piv = _mas_safe_pivot(mas, pivots, slot, b_node->type);
if (piv > mas->last) {
if (piv == ULONG_MAX)
- mas_advanced_may_rebalance(mas);
+ mas_bulk_rebalance(mas, b_node->b_end, b_node->type);
b_node->slot[++b_end] = contents;
if (!contents)
offset_end++;
} else if (mas->last < max) { // new range ends in this range.
if (max == ULONG_MAX)
- mas_advanced_may_rebalance(mas);
+ mas_bulk_rebalance(mas, end, mt);
new_end++;
offset_end = offset;
mas_update_gap(mas);
return true;
-
}
static inline bool mas_slot_store(struct ma_state *mas, void *entry,
mas_set_err(mas, -EEXIST);
return NULL; // spanning writes always overwrite something.
}
+
ret = mas_spanning_store(mas, entry);
goto spanning_store;
}
*/
void *mas_store(struct ma_state *mas, void *entry)
{
- mas->mas_flags |= MA_STATE_BULK;
if (mas->index <= mas->last)
return _mas_store(mas, entry, true);
int nr_nodes;
int ret;
+ // Optimize splitting for bulk insert in-order.
+ mas->mas_flags |= MA_STATE_BULK;
+
// Avoid overflow, assume a gap between each entry and a trailing null
// If this is wrong, it just means allocation can happen during
// insertion of entries.
}
mas->mas_flags &= ~MA_STATE_REBALANCE;
}
+ mas->mas_flags &= ~MA_STATE_BULK;
while (mas->alloc && !(((unsigned long)mas->alloc & 0x1))) {
node = mas->alloc;
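
The estimate that the "avoid overflow" comment above (in what appears to be mas_entry_count()) refers to is not visible in this excerpt. A rough, hypothetical version of the reasoning it describes, budgeting an entry plus a possible gap per range and one trailing NULL and then stacking parent levels on top, could look like the sketch below; the slot constants are illustrative and the overflow clamping the comment mentions is omitted.

/* Hypothetical worst-case node estimate; not the kernel's implementation. */
#define LEAF_SLOTS	16
#define NODE_SLOTS	10

static unsigned long estimate_nodes(unsigned long nr_entries)
{
	unsigned long slots = 2 * nr_entries + 1;	/* entry + gap each, + NULL */
	unsigned long nodes = (slots + LEAF_SLOTS - 1) / LEAF_SLOTS;	/* leaves */
	unsigned long level = nodes;

	while (level > 1) {				/* internal levels up to the root */
		level = (level + NODE_SLOTS - 1) / NODE_SLOTS;
		nodes += level;
	}
	return nodes;
}
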
{
struct maple_tree newmt;
- int i, max = 3000;
+ int i, nr_entries = 134;
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
- for (i = 0; i < max; i+=10)
- mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
mt_set_non_kernel(99999);
mtree_init(&newmt, MAPLE_ALLOC_RANGE);
mas_reset(&mas);
mas.index = 0;
mas.last = 0;
- if (mas_entry_count(&newmas, max/10)) {
+ if (mas_entry_count(&newmas, nr_entries)) {
printk("OOM!");
BUG_ON(1);
}
-
mas_for_each(&mas, val, ULONG_MAX) {
newmas.index = mas.index;
newmas.last = mas.last;
}
mas_destroy(&newmas);
mt_validate(&newmt);
- mas_reset(&mas);
- mas_reset(&newmas);
- mas_for_each(&mas, val, ULONG_MAX) {
- void *val2 = mas_find(&newmas, mas.last);
- MT_BUG_ON(&newmt, mas.index != newmas.index);
- MT_BUG_ON(&newmt, mas.last != newmas.last);
- MT_BUG_ON(&newmt, val != val2);
- }
mt_set_non_kernel(0);
mtree_destroy(&newmt);
}
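
For context, the pattern this test (and the fork-benchmark variant below) exercises, and which the MA_STATE_BULK path is meant to speed up, boils down to the following sketch. The mas_store() call inside the loop is implied by the full test rather than shown in this excerpt, and error handling is trimmed.

#include <linux/maple_tree.h>

/* Sketch of the bulk duplicate pattern used by the forking tests. */
static void bulk_copy(struct maple_tree *src, struct maple_tree *dst,
		      unsigned long nr_entries)
{
	void *val;
	MA_STATE(mas, src, 0, 0);
	MA_STATE(newmas, dst, 0, 0);

	if (mas_entry_count(&newmas, nr_entries))	/* pre-allocate nodes */
		return;

	mas_for_each(&mas, val, ULONG_MAX) {		/* walk src in order */
		newmas.index = mas.index;
		newmas.last = mas.last;
		mas_store(&newmas, val);		/* append into dst */
	}
	mas_destroy(&newmas);				/* final fix-up, free spares */
}
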
{
struct maple_tree newmt;
- int i, max = 300000, count = 100;
+ int i, nr_entries = 134, nr_fork = 60000;
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
-// for (i = max; i > 0; i-=10)
- for (i = 0; i < max; i+=10)
- mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
- for (i = 0; i < count; i++) {
+ for (i = 0; i < nr_fork; i++) {
mt_set_non_kernel(99999);
mtree_init(&newmt, MAPLE_ALLOC_RANGE);
newmas.tree = &newmt;
mas_reset(&mas);
mas.index = 0;
mas.last = 0;
- if (mas_entry_count(&newmas, max/10)) {
+ if (mas_entry_count(&newmas, nr_entries)) {
printk("OOM!");
- break;
+ BUG_ON(1);
}
mas_for_each(&mas, val, ULONG_MAX) {
newmas.index = mas.index;
pr_info("\nTEST STARTING\n\n");
-#if 1
+#if 0
mtree_init(&tree, MAPLE_ALLOC_RANGE);
bench_slot_store(&tree);
mtree_destroy(&tree);