return pivots[piv - 1] + 1;
}
+// Return the maximum value the pivot could represent.
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
unsigned char piv, enum maple_type type)
if (pivots[offset])
break;
} while (--offset);
+ if (pivots[offset] < mas->max)
+ offset++;
}
return offset;
piv = b_node->pivot[b_end - 1];
}
- // Handle range overlap start.
+ // Handle the new range starting after the old range's start.
if (piv + 1 < mas->index) {
b_node->slot[b_end] = contents;
if (!contents)
b_node->pivot[b_end++] = mas->index - 1;
}
- // Insert the data.
+ // Store the new entry.
mas_set_offset(mas, b_end);
b_node->slot[b_end] = entry;
b_node->pivot[b_end] = mas->last;
- // Handle range overlap end.
+ // Handle the new range ending before the old range ends.
piv = _mas_safe_pivot(mas, pivots, slot, b_node->type);
if (piv > mas->last) {
b_node->slot[++b_end] = contents;
if (l_slot)
range_min = mas_safe_pivot(l_mas, l_slot - 1) + 1;
+ // Expand NULL to start of the range.
if (!content)
l_mas->index = range_min;
return mas_spanning_rebalance(mas, &mast, height + 1);
}
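+// Store the new entry by rebuilding the node contents into a new node (or a
+// stack copy when not in RCU mode) and installing it in place of the old one.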
-static inline bool mas_fast_store(struct ma_state *mas, void *entry,
+static inline bool mas_node_store(struct ma_state *mas, void *entry,
unsigned long min, unsigned long max,
- unsigned char end, void *content)
+ unsigned char end, void *content,
+ unsigned char offset)
{
enum maple_type mt = mte_node_type(mas->node);
- struct maple_node *node = mte_to_node(mas->node);
+ struct maple_node *node = mas_mn(mas);
+ void **dst_slots, **slots = ma_slots(node, mt);
+ unsigned long *dst_pivots, *pivots = ma_pivots(node, mt);
+ unsigned char dst_offset, offset_end = offset, new_end = end;
+ struct maple_node reuse, *newnode;
+ unsigned char copy_size;
+
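+ // Work out offset_end (the first source slot to copy after the new entry)
+ // and new_end (the offset of the last slot in use after the store).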
+ if (mas->last == max) { // don't copy this offset
+ offset_end++;
+ } else if (mas->last < max) { // new range ends in this range.
+ new_end++;
+ offset_end = offset;
+ } else if (mas->last == mas->max) { // runs right to the end of the node.
+ new_end = offset;
+ offset_end = end + 1; // no data beyond this range.
+ } else {
+ unsigned long piv = 0;
+ new_end++;
+ do {
+ offset_end++;
+ new_end--;
+ piv = mas_logical_pivot(mas, pivots, offset_end, mt);
+ } while (piv <= mas->last);
+ }
+
+ if (min < mas->index) // new range starts within a range.
+ new_end++;
+
+ if (new_end >= mt_slots[mt]) // Not enough room.
+ return false;
+
+ if (new_end <= mt_min_slots[mt]) // Not enough data.
+ return false;
+
+ // Set up the replacement node: allocate one under RCU, otherwise reuse a stack copy.
+ if (mt_in_rcu(mas->tree)) {
+ mas_node_count(mas, 1);
+ if (mas_is_err(mas))
+ return false;
+
+ newnode = mas_next_alloc(mas);
+ } else {
+ memset(&reuse, 0, sizeof(struct maple_node));
+ newnode = &reuse;
+ }
+ newnode->parent = mas_mn(mas)->parent;
+ dst_pivots = ma_pivots(newnode, mt);
+ dst_slots = ma_slots(newnode, mt);
+ // Copy from start to insert point
+ memcpy(dst_pivots, pivots, sizeof(unsigned long) * (offset + 1));
+ memcpy(dst_slots, slots, sizeof(void*) * (offset + 1));
+ dst_offset = offset;
+
+ // The new range starts within an existing range; keep the old content in front of it.
+ if (min < mas->index) {
+ dst_slots[dst_offset] = content;
+ dst_pivots[dst_offset++] = mas->index - 1;
+ }
+
+ // Store the new entry and range end.
+ if (dst_offset < mt_pivots[mt])
+ dst_pivots[dst_offset] = mas->last;
+ dst_slots[dst_offset++] = entry;
+
+ if (offset_end > end) // this range wrote to the end of the node.
+ goto done;
+
+ // Copy to the end of the node if necessary.
+ copy_size = end - offset_end + 1;
+ memcpy(dst_slots + dst_offset, slots + offset_end,
+ sizeof(void *) * copy_size);
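+ // There is one fewer pivot than slots; clamp the pivot copy accordingly.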
+ if (dst_offset < mt_pivots[mt]) {
+ if (copy_size > mt_pivots[mt] - dst_offset)
+ copy_size = mt_pivots[mt] - dst_offset;
+ memcpy(dst_pivots + dst_offset, pivots + offset_end,
+ sizeof(unsigned long) * copy_size);
+ }
+done:
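+ // The old node was full, so its last pivot was implicit; make the new end
+ // pivot explicit.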
+ if ((end == mt_slots[mt] - 1) && (new_end < mt_slots[mt] - 1))
+ dst_pivots[new_end] = mas->max;
+
+ if (!mt_in_rcu(mas->tree)) {
+ memcpy(mas_mn(mas), newnode, sizeof(struct maple_node));
+ } else {
+ mas->node = mt_mk_node(newnode, mt);
+ mas_replace(mas, false);
+ }
+
+ mas_update_gap(mas);
+ return true;
+}
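+
+// Attempt to overwrite the slots in place; fall back to mas_node_store() when
+// the node layout has to change.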
+static inline bool mas_slot_store(struct ma_state *mas, void *entry,
+ unsigned long min, unsigned long max,
+ unsigned char end, void *content,
+ unsigned char offset)
+{
+ enum maple_type mt = mte_node_type(mas->node);
+ struct maple_node *node = mas_mn(mas);
void **slots = ma_slots(node, mt);
unsigned long *pivots = ma_pivots(node, mt);
- unsigned char offset = mas_offset(mas); //may have changed on extend null.
+ unsigned long lmax; // Logical max.
if (min == mas->index && max == mas->last) { // exact fit.
slots[offset] = entry;
if (offset + 1 >= mt_slots[mt]) // out of room.
return false;
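+ // lmax is the maximum value covered by the next slot.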
+ lmax = mas_logical_pivot(mas, pivots, offset + 1, mt);
if (max > mas->last) // going to split a single entry.
- return false;
+ return mas_node_store(mas, entry, min, max, end, content,
+ offset);
- max = mas_logical_pivot(mas, pivots, offset + 1, mt);
- if (max < mas->last) // going to overwrite too many slots.
- return false;
+ if (lmax < mas->last) // going to overwrite too many slots.
+ return mas_node_store(mas, entry, min, max, end, content,
+ offset);
if (min == mas->index) {
- if (max <= mas->last) // overwriting two slots with one.
- return false;
+ if (lmax <= mas->last) // overwriting two or more ranges with one.
+ return mas_node_store(mas, entry, min, max, end,
+ content, offset);
slots[offset] = entry;
pivots[offset] = mas->last;
goto done;
- } else if (min < mas->index) {
- if (max != mas->last)
- return false;
+ } else if (min < mas->index) { // split start
+ if (lmax != mas->last) // Doesn't end on the next range end.
+ return mas_node_store(mas, entry, min, max, end,
+ content, offset);
if (offset + 1 < mt_pivots[mt])
pivots[offset + 1] = mas->last;
mas_set_offset(&r_mas, offset);
r_mas.node = mas->node;
- mas_node_walk(&r_mas, mte_node_type(r_mas.node), &rmin, &rmax);
+ mas_node_walk(&r_mas, mte_node_type(r_mas.node), &rmin,
+ &rmax);
mas_extend_null(mas, &r_mas);
mas->last = r_mas.last;
+ offset = mas_offset(mas);
+ r_max = mas_safe_pivot(mas, offset);
}
end = mas_data_end(mas);
- if (mas_fast_store(mas, entry, r_min, r_max, end, content))
+ if (mas_slot_store(mas, entry, r_min, r_max, end, content, offset))
+ return content;
+
+ if (mas_is_err(mas))
return content;
/* Slow path. */
mtree_init(mt, MAPLE_ALLOC_RANGE);
mas_reset(&mas);
count = 0;
- check_rev_seq(mt, 1000, false);
+ check_rev_seq(mt, max, false);
do {
count++;
mas_dfs_preorder(&mas);
} while(!mas_is_none(&mas));
// 71 + MAS_START = 72
- printk("count %u\n", count);
+ //printk("count %lu\n", count);
+
MT_BUG_ON(mt, count != 72);
+ mtree_destroy(mt);
+
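+ // Refill a fresh tree with single entries using preallocated nodes.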
+ mtree_init(mt, MAPLE_ALLOC_RANGE);
+ mas_reset(&mas);
+ nr_tallocated = 0;
+ mt_set_non_kernel(100);
+ mas_entry_count(&mas, max);
+ for (count = 0; count <= max; count++) {
+ mas.index = mas.last = count;
+ mas_store(&mas, xa_mk_value(count));
+ MT_BUG_ON(mt, mas_is_err(&mas));
+ }
+ mas_empty_alloc(&mas);
+ rcu_barrier();
+ //mt_dump(mt);
+ //pr_info(" ->seq test of 0-%lu %luK in %d active (%d total)\n",
+ // max, mt_get_alloc_size()/1024, nr_allocated,
+ // nr_tallocated);
}
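+
+// Repeatedly overwrite ranges spanning existing entries to exercise the node
+// store path.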
+static noinline void check_node_store(struct maple_tree *mt)
+{
+ int i, overwrite = 76, max = 240, count = 20000000;
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mtree_store_range(mt, overwrite, overwrite + 15,
+ xa_mk_value(overwrite), GFP_KERNEL);
+
+ overwrite += 5;
+ if (overwrite >= 135)
+ overwrite = 76;
+ }
+}
static noinline void check_forking(struct maple_tree *mt)
{
struct maple_tree newmt;
- int i, max = 300000, count = 1000000;
+ int i, max = 300000, count = 100;
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
newmas.tree = &newmt;
mas_reset(&newmas);
mas_reset(&mas);
- mas_lock(&mas);
+ mas.index = 0;
+ mas.last = 0;
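+ // Preallocate enough nodes up front so the copy loop cannot fail mid-way.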
+ if (mas_entry_count(&newmas, max/10)) {
+ printk("OOM!");
+ BUG_ON(1);
+ }
mas_for_each(&mas, val, ULONG_MAX) {
newmas.index = mas.index;
newmas.last = mas.last;
mas_store(&newmas, val);
}
mas_empty_alloc(&newmas);
- mas_unlock(&mas);
-// mt_validate(&newmt);
+ mt_validate(&newmt);
mt_set_non_kernel(0);
mtree_destroy(&newmt);
}
void *ptr = &set;
pr_info("\nTEST STARTING\n\n");
+#if 0
+ mtree_init(&tree, MAPLE_ALLOC_RANGE);
+ check_node_store(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
#if 0
mtree_init(&tree, MAPLE_ALLOC_RANGE);
check_forking(&tree);