#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
-
static struct kmem_cache *maple_node_cache;
unsigned long mt_max[] = {
}
}
-static inline unsigned long _mas_safe_pivot(const struct ma_state *mas,
- unsigned long *pivots,
- unsigned char piv, enum maple_type type)
+static inline unsigned long
+_mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
+ unsigned char piv, enum maple_type type)
{
if (piv >= mt_pivots[type])
return mas->max;
*
* Return: The pivot (including mas->max for the final piv)
*/
-static inline unsigned long mas_safe_pivot(const struct ma_state *mas,
- unsigned char piv)
+static inline unsigned long
+mas_safe_pivot(const struct ma_state *mas, unsigned char piv)
{
enum maple_type type = mte_node_type(mas->node);
unsigned long *pivots = ma_pivots(mas_mn(mas), type);
* mas_safe_min() - Return the minimum for a given offset.
*
*/
-static inline unsigned long mas_safe_min(struct ma_state *mas,
- unsigned long *pivots,
- unsigned char piv)
+static inline unsigned long
+mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char piv)
{
if (!piv)
return mas->min;
return pivots[piv - 1] + 1;
}
+/*
+ * mas_logical_pivot() - Get the pivot for a slot, substituting the node
+ * maximum for the "end of data" marker.
+ *
+ * A pivot of 0 in any slot other than slot 0 means the slot is past the last
+ * entry; in that case the logical upper bound of the node is mas->max.
+ * Otherwise behaves exactly like _mas_safe_pivot().
+ */
+static inline unsigned long
+mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
+		  unsigned char piv, enum maple_type type)
+{
+	unsigned long lpiv = _mas_safe_pivot(mas, pivots, piv, type);
+	if (!lpiv && piv)
+		return mas->max;
+	return lpiv;
+}
static inline void ma_set_pivot(struct maple_node *mn, unsigned char piv,
enum maple_type type, unsigned long val)
{
*
* Returns: The zero indexed last slot with data (may be null).
*/
-static inline unsigned char mas_data_end(const struct ma_state *mas)
+static inline unsigned char mas_data_end(struct ma_state *mas)
{
enum maple_type type = mte_node_type(mas->node);
unsigned char offset = mt_min_slots[type];
if (pivots[offset]) {
while (offset < mt_slots[type]) {
- piv = _mas_safe_pivot(mas, pivots, offset, type);
- if ((!piv && offset) || piv >= mas->max)
+ piv = mas_logical_pivot(mas, pivots, offset, type);
+ if (piv >= mas->max)
break;
+
offset++;
}
} else {
{
enum maple_type mt = mte_node_type(mas->node);
unsigned long *pivots = ma_pivots(mas_mn(mas), mt);
+ unsigned long pstart, pend, gap = 0, max_gap = 0;
void **slots = ma_slots(mas_mn(mas), mt);
- unsigned long gap = 0, max_gap = 0;
- unsigned long pstart, pend;
- int i;
+ unsigned char i;
if (ma_is_dense(mt)) {
for (i = 0; i < mt_slot_count(mas->node); i++) {
}
if (gap > max_gap)
max_gap = gap;
- goto done;
+ return max_gap;
}
- pstart = mas->min;
- for (i = 0; i < mt_slots[mt]; i++) {
- pend = _mas_safe_pivot(mas, pivots, i, mt);
- if (!pend && i)
- pend = mas->max;
+ pstart = mas->min;
+ for (i = 0; i < mt_slots[mt]; i++) {
+ pend = mas_logical_pivot(mas, pivots, i, mt);
- if (mas_slot_protected(mas, slots, i))
- goto next;
+ if (slots[i])
+ goto next;
- gap = pend - pstart + 1;
- if (gap > max_gap)
- max_gap = gap;
+ gap = pend - pstart + 1;
+ if (gap > max_gap)
+ max_gap = gap;
next:
- if (pend >= mas->max)
- break;
-
- pstart = pend + 1;
- }
-done:
- return max_gap;
-
-}
+ if (pend >= mas->max)
+ break;
+ pstart = pend + 1;
+ }
+ return max_gap;
+ }
/*
* mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
*/
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
-	enum maple_type mt = mte_node_type(mas->node);
-	unsigned long *gaps = ma_gaps(mas_mn(mas), mt);
-	unsigned long *pivots = ma_pivots(mas_mn(mas), mt);
-	unsigned long max_gap = 0;
+	unsigned long *gaps, *pivots;
+	unsigned long max_gap;
+	enum maple_type mt;
	unsigned char i;
-	for (i = 0; i < mt_slots[mt]; i++) {
-		unsigned long gap;
-
-		gap = gaps[i];
-		if (gap > max_gap)
-			max_gap = gap;
+	mt = mte_node_type(mas->node);
+	gaps = ma_gaps(mas_mn(mas), mt);
+	pivots = ma_pivots(mas_mn(mas), mt);
+	/*
+	 * Scan the gap array backwards from the last slot.  The last slot has
+	 * no pivot of its own, so its gap is folded in unconditionally.
+	 * NOTE(review): this assumes gaps[] entries beyond the used slots are
+	 * zero so seeding from the last slot is harmless - TODO confirm.
+	 */
+	i = mt_slots[mt] - 1;
+	max_gap = gaps[i--];
+	do {
+		/*
+		 * A zero pivot marks an unused slot; skip its gap.
+		 * NOTE(review): pivot 0 in slot 0 could be a legitimate pivot
+		 * when the node minimum is 0 - confirm its gap may be skipped.
+		 */
+		if (!pivots[i])
+			continue;
-		if (i < mt_pivots[mt] && !pivots[i])
-			break;
-	}
+		if (gaps[i] > max_gap)
+			max_gap = gaps[i];
+	} while(i--);
	return max_gap;
}
+/*
+ * mas_tree_gap() - Get the largest gap of this node as recorded by its
+ * parent, falling back to a direct scan for the root.
+ *
+ * Non-root nodes have their maximum gap cached in the parent's gap array at
+ * this node's parent slot; reading it avoids rescanning the node.  The root
+ * has no parent, so compute the answer with mas_max_gap().
+ */
+static inline unsigned long mas_tree_gap(struct ma_state *mas)
+{
+	struct maple_node *pnode;
+	unsigned long *gaps;
+	enum maple_type mt;
+
+	if (!mte_is_root(mas->node)) {
+
+		pnode = mte_parent(mas->node);
+		mt = mas_parent_enum(mas, mas->node);
+		gaps = ma_gaps(pnode, mt);
+		return gaps[mte_parent_slot(mas->node)];
+
+	}
+	return mas_max_gap(mas);
+}
static inline unsigned long mas_find_gap(struct ma_state *mas)
{
static inline void mas_parent_gap(struct ma_state *mas, unsigned char slot,
unsigned long new)
{
- unsigned long old_max_gap;
+ unsigned long old_max_gap = 0;
/* Don't mess with mas state, use a new state */
MA_STATE(gaps, mas->tree, mas->index, mas->last);
/* Go to the parent node. */
gaps.node = mt_mk_node(mte_parent(gaps.node),
mas_parent_enum(&gaps, gaps.node));
- old_max_gap = mas_max_gap(&gaps);
+ //old_max_gap = mas_max_gap(&gaps);
+ if (!mte_is_root(gaps.node))
+ old_max_gap = mas_tree_gap(&gaps);
mte_set_gap(gaps.node, slot, new);
if (mte_is_root(gaps.node))
return;
if (mte_is_root(mas->node))
return;
- if (mte_is_leaf(mas->node))
- max_gap = mas_leaf_max_gap(mas);
- else
- max_gap = mas_max_gap(mas);
-
+ max_gap = mas_find_gap(mas);
/* Get the gap reported in the parent */
pslot = mte_parent_slot(mas->node);
p_gap = ma_gaps(mte_parent(mas->node),
{
unsigned long size = b_node->b_end * sizeof(unsigned long);
- memmove(b_node->pivot +shift, b_node->pivot, size);
- memmove(b_node->slot +shift, b_node->slot, size);
+ memmove(b_node->pivot + shift, b_node->pivot, size);
+ memmove(b_node->slot + shift, b_node->slot, size);
memmove(b_node->gap + shift, b_node->gap, size);
}
void **slots = ma_slots(node, mt);
unsigned long *pivots = ma_pivots(node, mt);
unsigned long *gaps = NULL;
- int i, j;
-
- if (!mte_is_leaf(mas->node) && mt_is_alloc(mas->tree))
- gaps = ma_gaps(node, mt);
+ int i = mas_start, j = mab_start;
for (i = mas_start, j = mab_start; i <= mas_end; i++, j++) {
- b_node->slot[j] = mas_slot_protected(mas, slots, i);
b_node->pivot[j] = _mas_safe_pivot(mas, pivots, i, mt);
- if (gaps)
- b_node->gap[j] = gaps[i];
-
if ((mas->max == b_node->pivot[j]) ||
(j && !b_node->pivot[j])) { // end of node.
j++;
break;
}
}
+
+ memcpy(b_node->slot + mab_start,
+ slots + mas_start,
+ sizeof(void*) * (j - mab_start));
+
+ if (!mte_is_leaf(mas->node) && mt_is_alloc(mas->tree)) {
+ gaps = ma_gaps(node, mt);
+ memcpy(b_node->gap + mab_start,
+ gaps + mas_start,
+ sizeof(unsigned long) * (j - mab_start));
+ }
b_node->b_end = j;
}
unsigned long *pivots = ma_pivots(node, mt);
unsigned long *gaps = NULL;
- if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree))
- gaps = ma_gaps(mas_mn(mas), mt);
-
for (i = mab_start; i <= mab_end; i++, j++) {
if (j && !b_node->pivot[i])
break;
- rcu_assign_pointer(slots[j], b_node->slot[i]);
if (j < mt_pivots[mt])
pivots[j] = b_node->pivot[i];
+ }
- if (gaps)
- gaps[j] = b_node->gap[i];
+ memcpy(slots, b_node->slot + mab_start,
+ sizeof(void*) * (i - mab_start));
+
+ if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
+ gaps = ma_gaps(mas_mn(mas), mt);
+ memcpy(gaps, b_node->gap + mab_start,
+ sizeof(unsigned long) * (i - mab_start));
}
mas->max = b_node->pivot[--i];
}
* Returns the number of elements in b_node during the last loop.
*/
static inline int mas_spanning_rebalance(struct ma_state *mas,
- struct maple_subtree_state *mast,
- unsigned char count)
+ struct maple_subtree_state *mast,
+ unsigned char count)
{
unsigned char split, mid_split;
unsigned char slot = 0;
{
unsigned long *pivots = ma_pivots(mas_mn(mas), type);
unsigned long min = mas->min, pivot = 0;
- unsigned char i;
+ unsigned char i = mas_offset(mas);
bool ret = true;
if (ma_is_dense(type)) {
goto dense;
}
- for (i = mas_offset(mas); i < mt_slots[type]; i++) {
+ while(i < mt_slots[type]) {
pivot = _mas_safe_pivot(mas, pivots, i, type);
if (!pivot && i) {
break;
min = pivot + 1;
+ i++;
}
dense:
unsigned long *range_max, void *entry)
{
enum maple_type type;
- bool ret = false;
mas->span_enode = NULL;
mas->depth = 0;
mas->node = mas_get_slot(mas, mas_offset(mas));
mas_set_offset(mas, 0);
}
- return ret;
+ return false;
}
static inline void mas_extend_null(struct ma_state *l_mas, struct ma_state *r_mas)
return mas_spanning_rebalance(mas, &mast, height + 1);
}
-static inline bool mas_can_append(struct ma_state *mas,
- struct maple_big_node *bn,
- unsigned char slot_cnt,
- unsigned char end)
+#if 0
+/*
+ * mas_append() - Append a new entry (and possibly the displaced old content)
+ * at the end of a leaf node.  DEAD CODE: compiled out under #if 0.
+ *
+ * @offset is the old end slot where the write lands; @slot_cnt is the slot
+ * index the new last entry will occupy.  If the write does not reach the old
+ * range's upper bound, the old content is re-inserted after the new entry.
+ * NOTE(review): the mt_dump()/printk() calls are debug scaffolding and must
+ * be removed before this path is enabled.
+ */
+static inline bool mas_append(struct ma_state *mas, void **slots,
+			      unsigned long *pivots, unsigned char offset,
+			      unsigned char slot_cnt, void *entry, void *content)
+{
+	/* Offset is where this data will go, aka the old end slot. */
+	unsigned long min = mas_safe_min(mas, pivots, offset);
+	unsigned long max = mas_logical_pivot(mas, pivots, offset,
+					      mte_node_type(mas->node));
+
+	mt_dump(mas->tree);
+	printk("%s Insert %lu-%lu\n", __func__, mas->index, mas->last);
+	printk("slot %u is %lu-%lu\n", offset, min, max);
+	printk("slot_cnt %u\n", slot_cnt);
+	/* Write ends inside the old range: keep the tail of the old content. */
+	if (max != mas->last) {
+		slots[slot_cnt] = content;
+		printk("Set slot %u to contents of slot %u\n", slot_cnt, offset);
+		pivots[slot_cnt] = pivots[offset];
+		slot_cnt--;
+	}
+
+	printk("Set slot %u to new value\n", slot_cnt);
+	slots[slot_cnt] = entry;
+	pivots[slot_cnt] = mas->last;
+
+	/* Write starts after the old range's minimum: split the front off. */
+	if (min != mas->index)
+		pivots[offset] = mas->index - 1;
+
+	mas_update_gap(mas);
+	return true;
+}
+
+/*
+ * mas_truncate() - Store an entry that consumes one or more existing slots,
+ * shifting the remaining data left.  DEAD CODE: compiled out under #if 0.
+ *
+ * @shift is negative here (the store removes -shift slots); data past the
+ * overwritten range is moved down and the vacated tail is zeroed.
+ * NOTE(review): the trailing memsets write (mt_slots[mt] - src) elements
+ * starting at index src + 1.  pivots[] only holds mt_pivots[mt] entries
+ * (one fewer than slots), so both memsets appear to run past the end of
+ * their arrays - confirm bounds before enabling this path.  The
+ * mt_dump()/printk() calls are debug scaffolding.
+ */
+static inline void
+mas_truncate(struct ma_state *mas, void **slots, unsigned long *pivots,
+	     unsigned char offset, char shift, void *entry, unsigned char end)
+{
+	enum maple_type mt = mte_node_type(mas->node);
+	unsigned char size;
+	unsigned long r_min;
+	int src;
+
+	mt_dump(mas->tree);
+	printk("%s Insert %lu-%lu\n", __func__, mas->index, mas->last);
+
+	r_min = mas_safe_min(mas, pivots, offset);
+	if (r_min != mas->index)
+		pivots[offset++] = mas->index - 1;
+
+	slots[offset] = entry;
+	pivots[offset] = mas->last;
+
+	/* Nothing left to copy after the new entry. */
+	if (offset > shift + end)
+		return;
+
+	// Now we are at offset, we have to skip -shift amount.
+	printk("src = %u - %d\n", offset, shift);
+	src = offset - shift;
+	size = end - offset;
+	printk("Insert cp offset %u => src %u size %u\n", offset, src, size);
+	memmove(pivots + offset, pivots + src, sizeof(unsigned long) * size);
+	memset(pivots+src+1, 0, sizeof(unsigned long) * (mt_slots[mt] - src));
+	memmove(slots + offset, slots + src, sizeof(void *) * size);
+	memset(slots + src + 1, 0, sizeof(void *) * (mt_slots[mt] - src));
+}
+
+/*
+ * mas_expand() - Store an entry that splits an existing range, shifting
+ * existing data right to make room.  DEAD CODE: compiled out under #if 0.
+ *
+ * This hunk interleaves with the removal of the old mas_can_append() (the
+ * '-' lines below).  @shift is the number of extra slots the store needs.
+ * NOTE(review): mt_dump()/printk() calls are debug scaffolding.
+ */
+static inline void
+mas_expand(struct ma_state *mas, void **slots, unsigned long *pivots,
+	   unsigned char offset, char shift, void *entry, unsigned char end)
{
-	if (bn->b_end >= slot_cnt)
-		return false; // no room.
+	enum maple_type mt = mte_node_type(mas->node);
+	unsigned char size = end + shift - offset;
+	unsigned long r_min, r_max;
+	int dst = offset + shift;
+	int src = offset;
+	mt_dump(mas->tree);
+	printk("%s Insert %lu-%lu\n", __func__, mas->index, mas->last);
+	printk("dst is %u + %u\n", offset, shift);
-	if (bn->b_end <= end)
-		return false; // truncated data.
+	/* Keep the tail of the split range: copy from one slot further on. */
+	r_max = mas_logical_pivot(mas, pivots, offset, mt);
+	if (r_max > mas->last)
+		src += 1;
-	if (!mas->last)
-		return false; // zero cannot be an append.
+	printk("Expand: dst %u src %u size %u\n", dst, src, size);
+	printk("Expand: shift %u src %u size %u\n", shift, src, size);
+	printk("r_max = %lu\n", r_max);
-	if (bn->pivot[bn->b_end] == mas->last)
-		return true; // last entry is the insert.
+	memmove(pivots + dst, pivots + src,
+		sizeof(unsigned long) * min(size, (unsigned char)(mt_pivots[mt] - 1)));
+	memmove(slots + dst, slots + src, sizeof(void *) * size);
-	if ((bn->pivot[bn->b_end - 1] == mas->last) && !bn->slot[bn->b_end])
-		return true; // inserting second last entry and last is NULL.
+	/* Split the front of the range off if the write starts later. */
+	r_min = mas_safe_min(mas, pivots, offset);
+	if (r_min != mas->index)
+		pivots[offset++] = mas->index - 1;
+
+	slots[offset] = entry;
+	pivots[offset] = mas->last;
+}
+
+/*
+ * _mas_med_path() - Attempt an in-node store without building a big node.
+ * DEAD CODE: compiled out under #if 0 and visibly work-in-progress.
+ *
+ * Counts how many slots the store would need (splitting at the front and/or
+ * back, collapsing fully-overwritten ranges) and bails out to the slow path
+ * when the node would overflow or underflow.
+ * NOTE(review): incomplete - the actual data movement is stubbed (empty
+ * "if (r_max > mas->last)" body, the append/truncate/expand dispatch is
+ * inside an inner #if 0), and 'shift', 'slot_max', 'r_min' and 'content'
+ * are set but never used on the live path.
+ */
+static inline bool _mas_med_path(struct ma_state *mas, void *entry,
+				 unsigned char end, void *content)
+{
+	enum maple_type mt = mte_node_type(mas->node);
+	struct maple_node *node = mte_to_node(mas->node);
+	unsigned char offset = mas_offset(mas); //may have changed on extend null.
+	unsigned char slot_max = mt_slots[mt];
+	unsigned char slot_cnt, new_end;
+	char shift;
+	unsigned long r_min, r_max, *pivots = ma_pivots(node, mt);
+	void **slots = ma_slots(node, mt);
+
+	/* Include the implicit final slot when it still holds data. */
+	if (end < mt_pivots[mt] - 1 && pivots[end] < mas->max)
+		end++;
+
+	slot_cnt = end;
+
+	if (offset >= slot_max)
+		offset = end;
+
+	/* Cannot use mas_safe_min due to piv + 1 below */
+	r_min = mas->min - 1;
+	if (offset)
+		r_min = pivots[offset - 1];
+
+	if (r_min + 1 < mas->index) // insert starts later than this range.
+		slot_cnt++;
+
+	r_min++;
+	r_max = mas_logical_pivot(mas, pivots, offset, mt);
+	if (r_max > mas->last) { // insert ends before this range.
+		slot_cnt++;
+	} else if (r_max < mas->last) { // insert overwrites a range of data.
+		unsigned char overwrite = offset;
+		unsigned long npiv;
+
+		/* Walk forward over every range fully covered by the write. */
+		do {
+			npiv = mas_logical_pivot(mas, pivots, ++overwrite, mt);
+		} while (npiv < mas->last);
+
+		if (npiv > mas->last)
+			overwrite--;
+
+		slot_cnt -= (overwrite - offset);
+	}
+	if (slot_cnt >= slot_max) // not enough room for new data.
+		return false;
+
+	if (slot_cnt < mt_min_slots[mt]) // Not enough data for a node.
+		return false;
+
+	// Can use fast path.
+	new_end = slot_cnt;
+	// copy data further out on shift right.
+	if (slot_cnt > end)
+		return false;
+	//mas_shift_right(mas, offset, slot_cnt, end);
+
+	/* NOTE(review): placeholder - front-split handling never written. */
+	if (r_max > mas->last) {
+
+	}
+
+
+	if (new_end < end) { // Zero new_end -> end.
+		if (end == mt_pivots[mt])
+			slots[end--] = NULL;
+
+		while(end > new_end) {
+			slots[end] = NULL;
+			pivots[end--] = 0;
+		}
+	}
+
+
+
+
+#if 0
+	printk("\n");
+	shift = slot_cnt - end;
+	printk("end is %u slot_cnt is %u shift %d\n", end, slot_cnt, shift);
+	// Check if this is an append operation.
+	if (offset == end)
+		return mas_append(mas, slots, pivots, offset, slot_cnt, entry, content);
+	if (shift < 0)
+		mas_truncate(mas, slots, pivots, offset, shift, entry, end);
+	if (shift >= 0)
+		mas_expand(mas, slots, pivots, offset, shift, entry, end);
+#endif
+	mas_update_gap(mas);
+	return true;
+}
+/*
+ * mas_medium_store() - Rebuild the node's slots/pivots in a stack-local copy
+ * with the new entry spliced in, then copy it back wholesale.
+ * DEAD CODE: compiled out under #if 0.
+ *
+ * Copies the prefix before @offset, inserts the new entry (splitting the
+ * front of the overwritten range if needed), re-adds surviving old content,
+ * skips ranges fully covered by the write, then appends the remaining tail.
+ * NOTE(review): new_node is memset to zero and only its slots/pivots are
+ * filled, yet the final memcpy overwrites the whole struct maple_node -
+ * this appears to clobber the live node's parent pointer (and any metadata)
+ * with zeroes.  Confirm before enabling.
+ */
+static inline bool mas_medium_store(struct ma_state *mas, void *entry,
+				    unsigned long min, unsigned char end,
+				    void *content)
+{
+	enum maple_type mt = mte_node_type(mas->node);
+	struct maple_node *node = mte_to_node(mas->node);
+	void **slots = ma_slots(node, mt);
+	unsigned long *pivots = ma_pivots(node, mt);
+	struct maple_node new_node;
+	void **nslots = ma_slots(&new_node, mt);
+	unsigned long *npivots = ma_pivots(&new_node, mt);
+	unsigned char offset = mas_offset(mas); //may have changed on extend null.
+	unsigned char size, noffset = offset;
+
+	memset(&new_node, 0, sizeof(struct maple_node));
+	/* Untouched prefix: copy verbatim. */
+	if (offset) {
+		memcpy(nslots, slots, sizeof(void*) * offset);
+		memcpy(npivots, pivots, sizeof(unsigned long) * offset);
+
+	}
+
+	/* Write starts inside the old range: front part survives. */
+	if (min != mas->index)
+		noffset++;
+
+	nslots[noffset] = entry;
+	npivots[noffset++] = mas->last;
+
+	/* Write ends inside the old range: tail of the old content survives. */
+	if (mas->last < pivots[offset]) {
+		nslots[noffset] = content;
+		npivots[noffset++] = pivots[offset];
+	}
+
+	/* Skip every old range the write fully covers. */
+	while (offset < mt_slots[mt] && pivots[offset] <= mas->last) {
+		offset++;
+	}
+
+	size = mt_slots[mt] - 1 - offset;
+	memcpy(nslots + noffset, slots + offset, sizeof(void*) * size);
+	size = min(size, (unsigned char) (mt_pivots[mt] - 1));
+	memcpy(npivots + noffset, pivots + offset, sizeof(unsigned long) * size);
+	memcpy(node, &new_node, sizeof(struct maple_node));
+	return true;
+}
+
+#endif
+/*
+ * mas_fast_store() - Try to complete a store by editing at most two slots of
+ * the current node in place, avoiding the big-node slow path.
+ *
+ * @min/@max: bounds of the range at the current offset.
+ * @end: last slot with data (currently unused here).
+ * @content: existing entry at the offset (currently unused here).
+ *
+ * Handles: exact-fit overwrite; a write sharing the range's minimum that
+ * shortens the range; and a write sharing the following range's maximum that
+ * splits off the front.  Everything else (splitting a range in the middle,
+ * spanning several slots, no room) returns false for the slow path.
+ */
+static inline bool mas_fast_store(struct ma_state *mas, void *entry,
+				  unsigned long min, unsigned long max,
+				  unsigned char end, void *content)
+{
+	enum maple_type mt = mte_node_type(mas->node);
+	struct maple_node *node = mte_to_node(mas->node);
+	void **slots = ma_slots(node, mt);
+	unsigned long *pivots = ma_pivots(node, mt);
+	unsigned char offset = mas_offset(mas); //may have changed on extend null.
+
+	if (min == mas->index && max == mas->last) { // exact fit.
+		slots[offset] = entry;
+		goto done;
+	}
+
+	if (offset + 1 >= mt_slots[mt]) // out of room.
+		return false;
+
+	if (max > mas->last) // going to split a single entry.
+		return false;
+
+	/* From here on, max is the logical pivot of the NEXT slot. */
+	max = mas_logical_pivot(mas, pivots, offset + 1, mt);
+	if (max < mas->last) // going to overwrite too many slots.
+		return false;
+
+	if (min == mas->index) {
+		if (max <= mas->last) // overwriting two slots with one.
+			return false;
+
+		/* Shorten this range; the next slot keeps the remainder. */
+		slots[offset] = entry;
+		pivots[offset] = mas->last;
+		goto done;
+	} else if (min < mas->index) {
+		if (max != mas->last)
+			return false;
+
+		/* Split the front off this range; new entry takes the next slot. */
+		if (offset + 1 < mt_pivots[mt])
+			pivots[offset + 1] = mas->last;
+		slots[offset + 1] = entry;
+		pivots[offset] = mas->index - 1;
+		goto done;
+	}
	return false;
+
+
+done:
+	mas_update_gap(mas);
+	return true;
}
static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite)
{
unsigned long r_max, r_min;
unsigned char end, offset;
- unsigned char slot_cnt;
void *content = NULL;
struct maple_big_node b_node;
mas_extend_null(mas, mas);
end = mas_data_end(mas);
+ if (mas_fast_store(mas, entry, r_min, r_max, end, content))
+ return content;
+
+ /* Slow path. */
memset(&b_node, 0, sizeof(struct maple_big_node));
b_node.type = mte_node_type(mas->node);
b_node.b_end = mas_store_b_node(mas, &b_node, entry, end);
b_node.min = mas->min;
- // Check if this is an append operation.
- slot_cnt = mt_slot_count(mas->node);
- if (mas_can_append(mas, &b_node, slot_cnt, end)) {
- offset = b_node.b_end;
- do {
- mte_set_slot(mas->node, offset, b_node.slot[offset]);
- if (offset < slot_cnt - 1)
- mte_set_pivot(mas->node, offset, b_node.pivot[offset]);
- } while(offset && offset-- >= end);
- mas_update_gap(mas);
- goto append;
- }
if (!mas_commit_b_node(mas, &b_node, end))
return NULL;
complete_at_root:
if (ret > 2)
return NULL;
-append:
spanning_store:
return content;
}
unsigned long p_end, p_start = mas->min;
unsigned char p_slot;
unsigned long *gaps = NULL;
+ unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
int i;
if (ma_is_dense(mte_node_type(mte))) {
for (i = 0; i < mt_slot_count(mte); i++) {
- p_end = mas_safe_pivot(mas, i);
- if (!p_end && i)
- p_end = mas->max;
+ p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
if (!gaps) {
if (mas_get_slot(mas, i)) {
{
int i;
unsigned long prev_piv = 0;
- void **slots = ma_get_slots(mte_to_node(mas->node),
- mte_node_type(mas->node));
+ void **slots = ma_slots(mte_to_node(mas->node),
+ mte_node_type(mas->node));
if (mte_is_root(mas->node))
return; // all limits are fine here.