*/
static inline unsigned char mas_store_b_node(struct ma_state *mas,
struct maple_big_node *b_node,
- void *entry)
+ void *entry, unsigned char end)
{
unsigned char slot = mas_offset(mas);
- unsigned char end = mas_data_end(mas);
void *contents = mas_get_slot(mas, slot);
unsigned char b_end = 0;
// Possible underflow of piv will wrap back to 0 before use.
unsigned long piv = mas->min - 1;
+ unsigned long *pivots = ma_pivots(mas_mn(mas), b_node->type);
// Copy start data up to insert.
if (slot) {
b_node->pivot[b_end] = mas->last;
// Handle range overlap end.
- piv = mas_safe_pivot(mas, slot);
+ piv = _mas_safe_pivot(mas, pivots, slot, b_node->type);
if (piv > mas->last) {
b_node->slot[++b_end] = contents;
if (!contents)
// Handle range overwrites
do {
- piv = mas_safe_pivot(mas, ++slot);
+ piv = _mas_safe_pivot(mas, pivots, ++slot, b_node->type);
} while ((piv <= mas->last) && (slot <= end));
// Copy end data to the end of the node.
return mast->bn->b_end;
}
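mas_store_b_node() now reads pivots through the array fetched once with ma_pivots() and takes the node's end offset from its caller rather than recomputing it. A minimal sketch of the resulting calling pattern, using the same identifiers that appear later in this excerpt:

	unsigned char end = mas_data_end(mas);	/* last occupied offset */
	struct maple_big_node b_node;

	memset(&b_node, 0, sizeof(struct maple_big_node));
	b_node.type = mte_node_type(mas->node);
	b_node.b_end = mas_store_b_node(mas, &b_node, entry, end);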
-static inline int mas_cnt_positive(struct ma_state *mas)
-{
- if (mas->full_cnt < 0)
- return mas->full_cnt * -1;
- return mas->full_cnt;
-}
-
/*
* mas_rebalance() - Rebalance a given node.
*
static inline int mas_rebalance(struct ma_state *mas,
struct maple_big_node *b_node)
{
- char empty_cnt = mas_cnt_positive(mas);
+ char empty_cnt = mas_mt_height(mas);
struct maple_subtree_state mast;
unsigned char shift, b_end = ++b_node->b_end;
MA_TOPIARY(mat, mas->tree);
trace_mas_split(mas);
+ mas->depth = mas_mt_height(mas);
// Allocation failures will happen early.
- mas_node_cnt(mas, 1 + mas->full_cnt * 2);
+ mas_node_cnt(mas, 1 + mas->depth * 2);
if (mas_is_err(mas))
return 0;
mast.free = &mat;
mast.bn = b_node;
- mas->depth = mas_mt_height(mas);
- while (height++ <= mas->full_cnt) {
+ while (height++ <= mas->depth) {
if (mas_split_final_node(&mast, mas, height))
break;
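A worked note on the sizing above (the split/rebalance helpers themselves are elided here): reserving 1 + mas->depth * 2 nodes presumably covers one new left and one new right node per level plus a possible new root, so a depth-3 tree preallocates 1 + 3 * 2 = 7 nodes and the mas_node_cnt() check is the only failure point before anything in the tree is modified; the loop then climbs at most mas->depth levels, breaking out once mas_split_final_node() returns true.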
* @entry - the entry that is going to be written.
*
*/
-static inline bool mas_is_span_wr(struct ma_state *mas, unsigned long piv,
+bool mas_is_span_wr(struct ma_state *mas, unsigned long piv,
enum maple_type type, void *entry)
{
- if (mas->span_enode) // Already a spanning store.
- return true;
-
if (piv > mas->last) // Contained in this pivot
return false;
if (ma_is_leaf(type)) {
- trace_mas_is_span_wr(mas, piv, entry);
if (mas->last < mas->max) // Fits in the node, but may span slots.
return false;
if (entry && (mas->last == mas->max)) // Writes to the end of the node but not null.
return false;
}
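Reading the leaf-node checks together, and assuming the elided tail of the function reports a span once both are passed, the condition they encode reduces to the following (illustration only, not part of the patch):

	bool spans = (mas->last > mas->max) ||
		     (mas->last == mas->max && !entry);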
-static inline void mas_cnt_full(struct ma_state *mas)
-{
- if (mas->full_cnt < 0)
- mas->full_cnt = 1;
- else
- mas->full_cnt++;
-}
-
-static inline void mas_cnt_empty(struct ma_state *mas)
-{
- if (mas->full_cnt > 0)
- mas->full_cnt = -1;
- else
- mas->full_cnt--;
-}
-
/*
* mas_wr_walk() - Walk the tree for a write.
* @range_min - pointer that will be set to the minimum of the slot range
*
* Tracks extra information which is used in special cases of a write.
*/
-static inline bool mas_wr_walk(struct ma_state *mas, unsigned long *range_min,
+bool mas_wr_walk(struct ma_state *mas, unsigned long *range_min,
unsigned long *range_max, void *entry)
{
enum maple_type type;
- unsigned char end;
bool ret = false;
-
mas->span_enode = NULL;
- mas->full_cnt = 0;
mas->depth = 0;
-
while (true) {
type = mte_node_type(mas->node);
mas->depth++;
if (ma_is_leaf(type))
return true;
- end = mas_data_end(mas);
- if (end < mt_min_slots[type])
- mas_cnt_empty(mas);
- else if (end >= mt_slots[type] - 1)
- mas_cnt_full(mas);
- else
- mas->full_cnt = 0;
-
// Traverse.
mas->max = *range_max;
mas->min = *range_min;
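A short reading of the walk (the per-level descent and the span detection, presumably via mas_is_span_wr() above, are elided): each iteration narrows *range_min/*range_max to the slot covering mas->index, copies them into mas->min/mas->max, bumps mas->depth, and descends, returning true once a leaf is reached; a store that cannot be satisfied inside one leaf is expected to leave mas->span_enode set for the spanning path below.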
static inline int mas_spanning_store(struct ma_state *mas, void *entry)
{
unsigned long range_min, range_max;
- unsigned char count = 0;
struct maple_big_node b_node;
struct maple_subtree_state mast;
- int node_cnt = 0;
+ unsigned char height = mas_mt_height(mas);
+ int node_cnt = 1 + height * 3;
// Holds new left and right sub-tree
MA_STATE(l_mas, mas->tree, mas->index, mas->index);
MA_STATE(r_mas, mas->tree, mas->index, mas->index);
trace_mas_spanning_store(mas);
- // Leaf nodes
- node_cnt = mas_cnt_positive(mas); // For rebalance upwards.
/* Node rebalancing may occur due to this store, so there may be three new
* entries per level plus a new root.
*/
- node_cnt += 1 + mas_mt_height(mas) * 3;
mas_node_cnt(mas, node_cnt);
if (mas_is_err(mas))
return 0;
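As a worked example of the reservation above: at height 4 this asks for 1 + 4 * 3 = 13 nodes up front, so the store can only fail at the allocation check just above, before l_mas and r_mas are populated.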
// Copy l_mas and store the value in b_node.
- b_node.b_end = mas_store_b_node(&l_mas, &b_node, entry);
+ b_node.b_end = mas_store_b_node(&l_mas, &b_node, entry,
+ mas_data_end(&l_mas));
// Copy r_mas into b_node.
mas_mab_cp(&r_mas, mas_offset(&r_mas), mt_slot_count(r_mas.node),
&b_node, b_node.b_end + 1);
// Stop spanning searches by searching for just index.
l_mas.index = l_mas.last = mas->index;
- // Calc the number of iterations of combining and splitting that will
- // need to occur.
- count = mas_cnt_positive(mas) + mas_mt_height(mas) - mas->depth + 1;
// Combine l_mas and r_mas and split them up evenly again.
- return mas_spanning_rebalance(mas, &mast, count);
+ return mas_spanning_rebalance(mas, &mast, height + 1);
}
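A hedged illustration of when this path is reached (the dispatching caller is not part of this excerpt, and the identifiers and values below are made up): if the first leaf of a tree covers roughly 0-4095, a range store that crosses out of it cannot be satisfied in one leaf, so the walk records a spanning write and the store is redone here by combining l_mas and r_mas into b_node and handing the result to mas_spanning_rebalance().

	/* Hypothetical range crossing a leaf boundary: */
	mtree_store_range(mt, 100, 10000, ptr, GFP_KERNEL);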
static inline bool mas_can_append(struct ma_state *mas,
if (!entry)
mas_extend_null(mas, mas);
+ end = mas_data_end(mas);
memset(&b_node, 0, sizeof(struct maple_big_node));
- b_node.b_end = mas_store_b_node(mas, &b_node, entry);
- b_node.min = mas->min;
b_node.type = mte_node_type(mas->node);
+ b_node.b_end = mas_store_b_node(mas, &b_node, entry, end);
+ b_node.min = mas->min;
// Check if this is an append operation.
- end = mas_data_end(mas);
slot_cnt = mt_slot_count(mas->node);
if (mas_can_append(mas, &b_node, slot_cnt, end)) {
offset = b_node.b_end;
goto append;
}
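b_node.b_end now marks where the combined data ends, so mas_can_append() above can presumably weigh it against the node's slot count and the old end (its body is elided); when the new data still fits in the node and only extends past the old end, the write continues at offset b_node.b_end via the (elided) append label rather than committing a rebuilt node.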
- // count the node as full if it has not already been counted.
- if (b_node.b_end >= slot_cnt && end < slot_cnt)
- mas_cnt_full(mas);
- else if (b_node.b_end < mt_min_slot_cnt(mas->node))
- mas_cnt_empty(mas);
-
if (!mas_commit_b_node(mas, &b_node, end))
return NULL;