break;
}
}
+
+/** Private
+ * ma_cp_rcu_slot() = Copy from one node to another. Upon seeing a retry,
+ * copies NULL.
+ */
static inline void ma_cp_rcu_slot(struct maple_enode *dst,
unsigned char dloc, struct maple_enode *src, unsigned long sloc)
{
- ma_set_rcu_slot(dst, dloc, ma_get_rcu_slot(src, sloc));
+ void *entry = ma_get_rcu_slot(src, sloc);
+
+ if (xa_is_retry(entry))
+ entry = NULL;
+
+ ma_set_rcu_slot(dst, dloc, entry);
}
+
static inline void ma_update_rcu_slot(const struct maple_enode *mn,
unsigned char slot, void *val)
{
}
static inline unsigned char ma_data_end(const struct ma_state *mas,
- const enum maple_type type, unsigned long *last_piv)
+ const enum maple_type type, unsigned long *last_piv,
+ unsigned char *coalesce)
{
unsigned char data_end = 0;
- unsigned long prev_piv = 0;
+ unsigned long prev_piv = mas->max;
struct maple_enode *mn = mas->node;
+ *coalesce = 0;
for (data_end = 0; data_end < mt_slot_count(mn) - 1; data_end++) {
*last_piv = _ma_get_pivot(mn, data_end, type);
*last_piv = prev_piv;
return data_end - 1;
}
+
+ if (prev_piv == *last_piv)
+ (*coalesce)++;
+
prev_piv = *last_piv;
}
if (!_ma_get_rcu_slot(mn, data_end, type))
- data_end--;
- else
- *last_piv = mas->max;
+ return data_end - 1;
+
return data_end;
}
unsigned long pivot_cnt = mt_pivots[type];
unsigned long half = mt_slots[type] / 2;
unsigned long last_pivot;
- unsigned char data_end = ma_data_end(mas, type, &last_pivot);
+ unsigned char coalesce;
+ unsigned char data_end = ma_data_end(mas, type, &last_pivot, &coalesce);
*left = (struct maple_enode *)ma_next_alloc(mas);
*left = mt_mk_node(ma_mnode_ptr(*left), type);
unsigned long pivot_cnt = mt_pivots[type];
unsigned long half = mt_slots[type] / 2;
unsigned long last_pivot;
- unsigned char data_end = ma_data_end(mas, type, &last_pivot);
+ unsigned char coalesce;
+ unsigned char data_end = ma_data_end(mas, type, &last_pivot, &coalesce);
if (ma_is_root(mas->node)) {
i = half;
unsigned long gap = 0;
unsigned long pstart, pend;
-
if (mt == maple_dense) {
for (int i = 0; i < mt_slot_count(mas->node); i++) {
if (ma_get_rcu_slot(mas->node, i)) {
if (pend) {
gap = pend - pstart;
- if (ma_get_rcu_slot(mas->node, i))
- goto next;
} else {
gap = mas->max - pstart;
}
- if (!pstart)
+ if (ma_get_rcu_slot(mas->node, i))
+ goto next;
+
+ if (!gap)
gap++;
if (gap > max_gap)
p_slot = 0;
} else {
unsigned long last_pivot;
+ unsigned char coalesce;
p_slot = mt_parent_slot(mas->node);
ma_encoded_parent(mas);
old_parent = mas->node;
ptype = mt_node_type(mas->node);
- p_end = ma_data_end(mas, ptype, &last_pivot);
- if (p_end >= mt_slots[ptype] - 1) {
+ p_end = ma_data_end(mas, ptype, &last_pivot, &coalesce);
+ if (p_end - coalesce >= mt_slots[ptype] - 1) {
/* Must split the parent */
split = ma_split(mas, p_slot, active);
if (mas_is_err(mas))
ptype = mt_node_type(mas->node);
p_max = mas->max;
mas_update_limits(mas, p_slot, ptype);
- p_end = ma_data_end(mas, ptype, &last_pivot);
+ p_end = ma_data_end(mas, ptype, &last_pivot, &coalesce);
mas->node = ma_get_rcu_slot(old_parent, p_slot);
}
return ret;
}
-static inline void mas_balance(struct ma_state *mas)
-{
- if (mt_is_alloc(mas->tree))
- ma_update_gap(mas);
-}
-
/* Private
*
* Insert entry into a node.
bool active)
{
struct maple_enode *prev_enode = NULL;
+ enum maple_type this_type = mt_node_type(mas->node);
unsigned long last_piv;
- int old_end = ma_data_end(mas, mt_node_type(mas->node),
- &last_piv);
- int new_end = old_end;
unsigned long max = mas->max;
unsigned long min = mas->min;
- int ret = 1;
- enum maple_type this_type = mt_node_type(mas->node);
unsigned char slot_cnt = mt_slots[this_type] - 1;
unsigned char pivot_cnt = mt_pivots[this_type];
+ unsigned char coalesce;
+ unsigned char slot = ma_get_slot(mas);
bool spans_range = false;
bool append = false;
bool null_entry = false;
- unsigned char slot = ma_get_slot(mas);
+ int old_end = ma_data_end(mas, this_type, &last_piv, &coalesce);
+ int new_end = old_end;
+ int ret = 1;
if (mas->last > mas->max) {
BUG_ON(!active);
}
}
/* Check for splitting */
- new_end += ret;
+ new_end += ret - coalesce;
if (new_end > slot_cnt) {
/* Not enough space in the node */
unsigned char split = ma_split(mas, slot, active);
struct maple_enode *mn;
unsigned long last_pivot;
unsigned long pivot = ma_get_safe_pivot(mas, slot);
+ unsigned char coalesce;
if (pivot < min)
goto no_entry;
level--;
mas->node = mn;
- slot = ma_data_end(mas, mt_node_type(mn), &last_pivot);
+ slot = ma_data_end(mas, mt_node_type(mn), &last_pivot,
+ &coalesce);
} while (slot-- > 0);
ascend:
if (!ma_is_leaf(type)) { //descend
struct maple_enode *next;
+ unsigned char coalesce;
next = _ma_get_rcu_slot(mas->node, i, type);
mas->min = min;
mas->max = max;
if (next) {
mas->node = next;
- i = ma_data_end(mas, mt_node_type(next), &max);
+ i = ma_data_end(mas, mt_node_type(next), &max,
+ &coalesce);
} else {
found = true; // this is a non-leaf hole.
}
if (ma_get_slot(&r_mas) != MAPLE_NODE_SLOTS) {
unsigned long piv;
+ unsigned char coalesce;
r_slot_cnt += ma_data_end(&r_mas, mt_node_type(r_mas.node),
- &piv);
+ &piv, &coalesce);
if (piv != ULONG_MAX) {
unsigned char p_slot = mt_parent_slot(r_mas.node);
{
struct maple_enode *last = NULL;
unsigned long last_piv;
- unsigned char slot;
+ unsigned char slot, coalesce;
mas->node = mas_start(mas);
- slot = ma_data_end(mas, mt_node_type(mas->node), &last_piv);
+ slot = ma_data_end(mas, mt_node_type(mas->node), &last_piv, &coalesce);
ma_set_slot(mas, slot);
// The root node should be replaced to avoid writing a busy slot.
MT_BUG_ON(mt, root_node == mt->ma_root);
+ check_load(mt, set[0], ptr);
+ check_load(mt, 5016, NULL);
+ check_load(mt, set[1], mt);
+ check_load(mt, 5013, NULL);
+ check_load(mt, set[2], ptr);
+ check_load(mt, 5018, NULL);
+ check_load(mt, set[3], mt);
+
+ check_erase(mt, set[2]); // erase 5017 to check append
+ check_erase(mt, set[0]); // erase 5015 to check append
+ check_insert(mt, set[4], ptr); // 1000 < Should NOT split
+
+ check_load(mt, set[0], NULL);
+ check_load(mt, 5016, NULL);
+ check_load(mt, set[1], mt);
+ check_load(mt, 5013, NULL);
+ check_load(mt, set[2], NULL);
+ check_load(mt, 5018, NULL);
+ check_load(mt, set[3], mt);
+ check_load(mt, 999, NULL);
+ check_load(mt, 1001, NULL);
+ check_load(mt, set[4], ptr);
+ // Should be a new node.
+ MT_BUG_ON(mt, root_node == mt->ma_root);
+ // Should not have split.
+ MT_BUG_ON(mt, !mt_is_leaf(mt->ma_root));
+
mtree_destroy(mt);
}