#define MA_ROOT_PARENT 1
#define ma_parent_ptr(x) ((struct maple_pnode*)(x))
#define ma_mnode_ptr(x) ((struct maple_node*)(x))
+#define ma_enode_ptr(x) ((struct maple_enode*)(x))
+
+
static struct kmem_cache *maple_node_cache;
unsigned long mt_max[] = {
[maple_arange_64] = MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mt_node_type(x)]
+
unsigned char mt_pivots[] = {
[maple_dense] = 0,
[maple_sparse_6] = 1,
[maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mt_node_type(x)]
+
+unsigned char mt_min_slots[] = {
+ [maple_dense] = MAPLE_NODE_SLOTS / 2,
+ [maple_sparse_6] = MAPLE_SPARSE6_SLOTS / 2,
+ [maple_sparse_9] = MAPLE_SPARSE9_SLOTS / 2,
+ [maple_sparse_16] = MAPLE_SPARSE16_SLOTS / 2,
+ [maple_sparse_21] = MAPLE_SPARSE21_SLOTS / 2,
+ [maple_sparse_32] = MAPLE_SPARSE32_SLOTS / 2,
+ [maple_sparse_64] = MAPLE_SPARSE64_SLOTS / 2,
+ [maple_leaf_16] = MAPLE_RANGE16_SLOTS / 2,
+ [maple_leaf_32] = MAPLE_RANGE32_SLOTS / 2,
+ [maple_leaf_64] = MAPLE_RANGE64_SLOTS / 2,
+ [maple_range_16] = MAPLE_RANGE16_SLOTS / 2,
+ [maple_range_32] = MAPLE_RANGE32_SLOTS / 2,
+ [maple_range_64] = MAPLE_RANGE64_SLOTS / 2,
+ [maple_arange_64] = (MAPLE_ARANGE64_SLOTS + 1) / 2,
+};
+#define mt_min_slot_cnt(x) mt_min_slots[mt_node_type(x)]
+
// Functions
static struct maple_node *mt_alloc_one(gfp_t gfp)
{
static inline enum maple_type mt_parent_enum(struct ma_state *mas,
struct maple_enode *node)
{
- unsigned long parent = (unsigned long) mt_to_node(node)->parent;
- unsigned long slot_shift = mt_parent_shift(parent);
+ unsigned long parent = 6;
+ unsigned long slot_shift;
+
+ if (!ma_is_root(mas->node)) {
+ parent = (unsigned long) mt_to_node(node)->parent;
+ slot_shift = mt_parent_shift(parent);
+		parent &= (1 << slot_shift) - 1;
+ }
- parent &= (1 << slot_shift ) - 1;
if (mt_is_alloc(mas->tree))
return mt_parent_alloc_enum(parent);
return mt_parent_range_enum(parent);
return (struct maple_node *)((unsigned long)ms->alloc & ~0x7F);
}
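+/* Private
+ * Count the pre-allocated nodes held by @node.  Allocated nodes are chained
+ * through the slots of other allocated nodes, so recurse into any slot whose
+ * node is itself populated.
+ */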
+static inline int ma_get_node_alloc_cnt(const struct maple_node *node)
+{
+ int ret = 1;
+ int slot = 0;
+ while (slot < MAPLE_NODE_SLOTS) {
+ if (!node->slot[slot])
+ return ret;
+
+ if (ma_mnode_ptr(node->slot[slot])->slot[0]) {
+ ret += ma_get_node_alloc_cnt(
+ ma_mnode_ptr(node->slot[slot]));
+ } else {
+ ret++;
+ }
+ slot++;
+ }
+ return ret;
+}
+
static inline int ma_get_alloc_cnt(const struct ma_state *ms)
{
struct maple_node *node = ma_get_alloc(ms);
if (!node)
return 0;
- if (!node->slot[0])
- return 1;
- if (!node->slot[1])
- return 2;
- return 3;
+
+ return ma_get_node_alloc_cnt(node);
}
static inline void ma_set_alloc_req(struct ma_state *ms, int count)
{
return _ma_get_pivot(mn, slot, mt_node_type(mn));
}
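+/* Private
+ * Return the pivot for @slot, or mas->max when @slot is at or beyond the
+ * last pivot of this node type (the implied upper limit of the node).
+ */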
+static inline unsigned long ma_get_safe_pivot(const struct ma_state *mas,
+ unsigned char slot)
+{
+ enum maple_type type = mt_node_type(mas->node);
+ if (slot >= mt_pivots[type])
+ return mas->max;
+ return _ma_get_pivot(mas->node, slot, type);
+}
static inline void ma_set_pivot(struct maple_enode *mn, unsigned char slot,
unsigned long val)
{
{
ma_set_pivot(dst, dloc, ma_get_pivot(src, sloc));
}
-static inline struct maple_enode *_ma_get_rcu_slot(const struct maple_enode *mn,
- unsigned char slot, enum maple_type type)
+static inline struct maple_enode *__ma_get_rcu_slot(
+ const struct maple_enode *mn, unsigned char slot,
+ enum maple_type type)
{
switch (type) {
case maple_range_64:
return rcu_dereference(mt_to_node(mn)->mr32.slot[slot]);
}
}
+
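+/* Private
+ * Fetch the contents of a slot, returning NULL if the node has been marked
+ * dead (a dead node's parent pointer points back at the node itself).
+ */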
+static inline struct maple_enode *_ma_get_rcu_slot(
+ const struct maple_enode *mn, unsigned char slot,
+ enum maple_type type)
+{
+ struct maple_enode *p = __ma_get_rcu_slot(mn, slot, type);
+
+	if ((void *)mt_parent(mn) == mn) // dead node.
+ return NULL;
+ return p;
+}
+
static inline struct maple_enode *ma_get_rcu_slot(const struct maple_enode *mn,
unsigned char slot)
{
static inline void ma_encoded_parent(struct ma_state *mas)
{
- void *parent, *gparent;
+ struct maple_enode *p_enode, *gp_enode;
+ enum maple_type ptype, gptype;
unsigned char slot;
+ ptype = mt_parent_enum(mas, mas->node);
+ p_enode = mt_mk_node(mt_parent(mas->node), ptype);
if (_ma_is_root(mt_parent(mas->node))) {
- mas->node = mt_safe_root(rcu_dereference(mas->tree->ma_root));
+ mas->node = p_enode;
+ ma_set_slot(mas, 0); // No slot.
mas->min = 0;
- mas->max = mt_node_max(mas->node);
+ mas->max = mt_max[ptype];
return;
}
- /* Go up 2 levels */
- parent = mt_parent(mas->node);
- gparent = mt_parent(parent);
- /* Get the parents slot in the grand parent */
- slot = mt_parent_slot(parent);
- mas->node = mt_mk_node(gparent, mt_parent_enum(mas, parent));
+ gptype = mt_parent_enum(mas, p_enode);
+ gp_enode = mt_mk_node(mt_parent(p_enode), gptype);
+ slot = mt_parent_slot(p_enode);
+ mas->node = gp_enode;
ma_set_slot(mas, slot);
- mas_update_limits(mas, slot, mt_parent_enum(mas, parent));
- mas->node = ma_get_rcu_slot(mas->node, slot);
- return;
+ mas_update_limits(mas, slot, gptype);
+ mas->node = p_enode;
}
static inline struct maple_node *ma_next_alloc(struct ma_state *ms)
{
cnt = ma_get_alloc_cnt(ms);
mn = ma_get_alloc(ms);
- cnt--;
- if (cnt == 0) {
+ if (cnt == 1) {
ms->alloc = NULL;
} else {
- smn = (struct maple_node *)mn->slot[cnt - 1];
- mn->slot[cnt - 1] = NULL;
+ unsigned char slot = cnt - 2;
+ struct maple_node *zero = mn;
+ if (cnt - 1 >= MAPLE_NODE_SLOTS) {
+ slot /= MAPLE_NODE_SLOTS;
+ slot--;
+ }
+ smn = ma_mnode_ptr(mn->slot[slot]);
+ if (cnt - 1 >= MAPLE_NODE_SLOTS) {
+ slot = ((cnt - 1) % MAPLE_NODE_SLOTS);
+ zero = smn;
+ smn = ma_mnode_ptr(smn->slot[slot]);
+ }
+ zero->slot[slot] = NULL;
mn = smn;
}
return mn;
}
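+/* Private
+ * Return a node that is no longer in use to the allocation list so that a
+ * later allocation request can reuse it.
+ */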
+static inline void ma_push_node(struct ma_state *mas, struct maple_enode *used)
+{
+ struct maple_node *reuse = mt_to_node(used);
+ struct maple_node *node = ma_get_alloc(mas);
+ int cnt;
+ memset(reuse, 0, sizeof(*reuse));
+ cnt = ma_get_alloc_cnt(mas);
+ if (cnt == 0) {
+ mas->alloc = reuse;
+ } else if (cnt <= 16) {
+ cnt--;
+ node->slot[cnt] = reuse;
+ } else {
+ struct maple_node *smn;
+ cnt--;
+ smn = node->slot[cnt/15];
+ smn->slot[cnt % 15] = reuse;
+ }
+}
static inline void ma_new_node(struct ma_state *ms, gfp_t gfp)
{
struct maple_node *mn, *smn;
allocated++;
}
- slot = allocated - 1;
+ ms->alloc = mn;
+ slot = (allocated - 1);
+ if (allocated - 1 >= MAPLE_NODE_SLOTS) {
+ slot /= MAPLE_NODE_SLOTS;
+ mn = mn->slot[slot - 1];
+ }
+
while (req > 0) {
smn = mt_alloc_one(gfp);
if (!smn)
goto slot_failed;
smn->parent = NULL;
- mn->slot[slot++] = smn;
+ mn->slot[slot] = smn;
req--;
allocated++;
+ slot++;
+ if (slot >= MAPLE_NODE_SLOTS) {
+ slot = (allocated - 1) / MAPLE_NODE_SLOTS;
+ mn = ms->alloc->slot[slot - 1];
+ slot = 0;
+ }
}
slot_failed:
- ms->alloc = mn;
ma_set_alloc_req(ms, req);
list_failed:
mas_set_err(ms, -ENOMEM);
}
+// Free the allocations.
+static inline void mas_free_alloc(struct maple_node *node)
+{
+ int alloc = 0;
+
+ while (alloc < MAPLE_NODE_SLOTS && node->slot[alloc]) {
+ if (ma_mnode_ptr(node->slot[alloc])->slot[0])
+ mas_free_alloc(node->slot[alloc]);
+ else
+ kfree(node->slot[alloc]);
+ alloc++;
+ }
+ kfree(node);
+}
+
/* Private
* Check if there was an error allocating and do the allocation if necessary
* If there are allocations, then free them.
if (ms->node != MA_ERROR(-ENOMEM)) {
struct maple_node *node = ma_get_alloc(ms);
- if (node) {
- kfree(node->slot[1]);
- kfree(node->slot[0]);
- kfree(node);
- }
+ if (node)
+ mas_free_alloc(node);
ms->alloc = NULL;
return false;
}
{
int allocated = ma_get_alloc_cnt(ms);
- BUG_ON(count > 3);
+ BUG_ON(count > 127);
if (allocated < count) {
ma_set_alloc_req(ms, count - allocated);
return mas->node;
}
-static inline unsigned char ma_data_end(const struct maple_enode *mn,
- const enum maple_type type, unsigned long *last)
+static inline unsigned char ma_data_end(const struct ma_state *mas,
+ const enum maple_type type, unsigned long *last_piv)
{
unsigned char data_end = 0;
+ unsigned long prev_piv = 0;
+ struct maple_enode *mn = mas->node;
for (data_end = 0; data_end < mt_slot_count(mn) - 1; data_end++) {
- *last = _ma_get_pivot(mn, data_end, type);
- if (*last == 0 && data_end > 0)
+ *last_piv = _ma_get_pivot(mn, data_end, type);
+ if (*last_piv == 0 && data_end > 0) {
+ *last_piv = prev_piv;
return data_end - 1;
+ }
+ prev_piv = *last_piv;
}
- if (_ma_get_rcu_slot(mn, data_end, type) == NULL)
+ if (!_ma_get_rcu_slot(mn, data_end, type)) {
data_end--;
+ } else {
+ *last_piv = mas->max;
+ }
+
return data_end;
}
unsigned long pivot_cnt = mt_pivots[type];
unsigned long half = mt_slots[type] / 2;
unsigned long last_pivot;
- unsigned char data_end = ma_data_end(mas->node, type, &last_pivot);
+ unsigned char data_end = ma_data_end(mas, type, &last_pivot);
*left = (struct maple_enode*)ma_next_alloc(mas);
*left = mt_mk_node(ma_mnode_ptr(*left), type);
unsigned long pivot_cnt = mt_pivots[type];
unsigned long half = mt_slots[type] / 2;
unsigned long last_pivot;
- unsigned char data_end = ma_data_end(mas->node, type, &last_pivot);
+ unsigned char data_end = ma_data_end(mas, type, &last_pivot);
if (ma_is_root(mas->node)) {
i = half;
if (p_gap != max_gap)
ma_parent_gap(mas, pslot, max_gap);
}
-
-static inline void ma_copy(struct ma_state *mas, struct ma_cp *cp)
+/*
+ * Find the previous non-null entry at the same level in the tree.  The
+ * previous value will be at mas->node[ma_get_slot(mas)], or mas->node is set
+ * to MAS_NONE if there is no previous entry.
+ *
+ * FIXME: Restart from top with dead node, mas_walk to mas->min - 1.
+ */
+static inline void mas_prev_node(struct ma_state *mas, unsigned long min)
+{
+ int level = 0;
+ unsigned char slot = ma_get_slot(mas);
+
+ if (ma_is_root(mas->node))
+ goto no_entry;
+
+ while (1) {
+ unsigned char count;
+
+ ma_encoded_parent(mas);
+ level++;
+
+ if (!slot)
+ goto ascend;
+
+ slot--;
+ count = mt_slot_count(mas->node);
+ do {
+ struct maple_enode *mn;
+ unsigned long pivot;
+ unsigned long last_pivot;
+
+ if (slot >= count - 1)
+ pivot = mas->max;
+ else
+ pivot = ma_get_pivot(mas->node, slot);
+
+ if (pivot < min)
+ goto no_entry;
+
+ if (slot != 0 && pivot == 0)
+ break;
+
+ mn = ma_get_rcu_slot(mas->node, slot);
+ if (!mn)
+ continue;
+
+ if (level == 1) {
+ ma_set_slot(mas, slot);
+ if (!mt_is_leaf(mas->node)) {
+ mas->node = mn;
+ }
+ return;
+ }
+
+ level--;
+ mas->node = mn;
+ slot = ma_data_end(mas, mt_node_type(mn), &last_pivot);
+ count = mt_slot_count(mas->node);
+		} while (slot-- > 0);
+
+ascend:
+ if (ma_is_root(mas->node))
+ goto no_entry;
+
+ slot = mt_parent_slot(mas->node);
+ }
+
+no_entry:
+ mas->node = MAS_NONE;
+}
+
+/*
+ * Find the next non-null entry at the same level in the tree.  The next
+ * value will be at mas->node[ma_get_slot(mas)], or mas->node is set to
+ * MAS_NONE if there is no next entry.
+ *
+ * FIXME: Restart from top with dead node, mas_walk to mas->max - 1.
+ */
+static inline unsigned long
+mas_next_node(struct ma_state *mas, unsigned long max)
+{
+ int level = 0;
+
+ while (1) {
+ unsigned char count, slot;
+ struct maple_enode *mn;
+ unsigned long prev_piv;
+
+ mn = mas->node;
+ slot = ma_get_slot(mas);
+ level++;
+ if (!ma_is_root(mas->node))
+ ma_encoded_parent(mas);
+
+ if (mas->node == mn) // Dead node.
+ goto restart;
+
+ count = mt_slot_count(mas->node);
+
+ prev_piv = ma_get_safe_pivot(mas, slot);
+
+ while (++slot < count) {
+ unsigned long pivot = mas->max;
+
+ if (slot < count - 1)
+ pivot = ma_get_pivot(mas->node, slot);
+
+ if (prev_piv > max)
+ goto no_entry;
+
+ if (slot != 0 && pivot == 0)
+ break;
+
+ mn = ma_get_rcu_slot(mas->node, slot);
+ if (!mn) {
+ prev_piv = pivot;
+ continue;
+ }
+
+ mas->min = prev_piv + 1;
+ mas->max = pivot;
+
+ if (level == 1) {
+ ma_set_slot(mas, slot);
+ mas->node = mn;
+ return pivot;
+ }
+
+ level--;
+ mas->node = mn;
+ slot = -1;
+ count = mt_slot_count(mas->node);
+ }
+
+ if (ma_is_root(mas->node))
+ goto no_entry;
+ }
+
+no_entry:
+ mas->node = MAS_NONE;
+ return mas->max;
+restart:
+ /* FIXME */
+ mas->node = MAS_NONE;
+ return mas->max;
+
+}
+
+// First non-null entry in mas->node
+static inline unsigned long
+mas_first_node(struct ma_state *mas, unsigned long max)
+{
+ unsigned char slot = ma_get_slot(mas) - 1;
+ unsigned char count = mt_slot_count(mas->node);
+
+	while (++slot < count) {
+ struct maple_enode *mn;
+ unsigned long pivot;
+
+ pivot = ma_get_safe_pivot(mas, slot);
+ if (pivot > max)
+ goto no_entry;
+
+ mn = ma_get_rcu_slot(mas->node, slot);
+
+ if (!mn)
+ continue;
+
+ if (!mt_is_leaf(mas->node))
+ mas->node = mn;
+ ma_set_slot(mas, slot);
+ return pivot;
+ }
+
+no_entry:
+ mas->node = MAS_NONE;
+ return mas->max;
+}
+
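+// Descend from mas->node to the first leaf entry with a pivot at or
+// below max.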
+static inline unsigned long
+mas_first_entry(struct ma_state *mas, unsigned long max)
+{
+ unsigned long pivot;
+
+	while (1) {
+ pivot = mas_first_node(mas, max);
+ if (mas->node == MAS_NONE)
+ return pivot;
+
+ if (mt_is_leaf(mas->node))
+ return pivot;
+
+ ma_set_slot(mas, 0);
+ }
+}
+
+// Find the next non-null entry within this node and return its pivot, or 0
+// if there is none.
+static inline unsigned long
+mas_next_nentry(struct ma_state *mas, unsigned long max)
+{
+ unsigned long pivot = 0;
+ unsigned char slot = ma_get_slot(mas);
+ unsigned char count = mt_slot_count(mas->node);
+ void *entry;
+
+ while (slot++ < count) {
+ pivot = mas->max;
+ if (slot < count - 1)
+ pivot = ma_get_pivot(mas->node, slot);
+
+ if (pivot > max)
+ goto no_entry;
+
+ if (slot != 0 && pivot == 0)
+ goto no_entry;
+
+ entry = ma_get_rcu_slot(mas->node, slot);
+ if (entry)
+ break;
+ }
+ ma_set_slot(mas, slot);
+ return pivot;
+
+no_entry:
+ return 0;
+}
+static inline unsigned long
+mas_next_entry(struct ma_state *mas, unsigned long max)
+{
+ unsigned long pivot = max;
+
+ if (mas->node == MAS_NONE)
+ return max;
+
+ if (!mt_is_leaf(mas->node))
+ pivot = mas_first_entry(mas, max);
+ else {
+ while (mas->node != MAS_NONE) {
+ unsigned char p_slot;
+ pivot = mas_next_nentry(mas, max);
+ if (pivot)
+ break;
+ p_slot = mt_parent_slot(mas->node);
+ ma_set_slot(mas, p_slot);
+ mas_next_node(mas, max);
+ }
+ }
+ return pivot;
+}
+
+static inline void mas_prev_entry(struct ma_state *mas)
+{
+}
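+/*
+ * Free @mn and, for the range node types, every node reachable below it.
+ * Used when destroying or replacing an entire tree.
+ */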
+void ma_destroy_walk(struct maple_enode *mn)
+{
+ struct maple_enode *node;
+ unsigned int type = mt_node_type(mn);
+ unsigned char slot_cnt = mt_slot_count(mn);
+ int i;
+
+ switch (type) {
+ case maple_range_16:
+ case maple_range_32:
+ case maple_range_64:
+ case maple_arange_64:
+ for (i = 0; i < slot_cnt; i++) {
+ node = ma_get_rcu_slot(mn, i);
+ if (node)
+ ma_destroy_walk(node);
+ }
+ break;
+ default:
+ break;
+ }
+ mt_free(mt_to_node(mn));
+
+}
+static inline void ma_copy(struct ma_state *mas, struct ma_cp *cp)
{
unsigned char sloc = cp->src_start; // src location
unsigned char dloc = cp->dst_start; // dst location
enum maple_type type = mt_node_type(cp->src);
enum maple_type dtype;
unsigned char pivot_cnt = mt_pivots[type];
+ unsigned long piv, prev_piv = cp->start_piv;
if (!cp->dst) {
/* Allocate a new node */
while (sloc <= cp->src_end &&
dloc <= cp->dst_end) {
- if (sloc != 0 && sloc < pivot_cnt &&
- ma_get_pivot(cp->src, sloc) == 0)
+ if (prev_piv >= mas->max)
break;
- if (dloc < pivot_cnt) {
- if (sloc < pivot_cnt)
- ma_cp_pivot(cp->dst, dloc, cp->src, sloc);
- else if (dloc > 0 &&
- ma_get_pivot(cp->dst, dloc - 1) != mas->max)
- ma_set_pivot(cp->dst, dloc, mas->max);
+ if (sloc < pivot_cnt) {
+ piv = ma_get_pivot(cp->src, sloc);
+ if (sloc != 0 && piv == 0)
+ break;
+ if (piv < prev_piv) {
+ sloc++;
+ continue;
+ }
+ } else {
+ piv = mas->max;
}
+ if (dloc < pivot_cnt)
+ ma_set_pivot(cp->dst, dloc, piv);
+
if (dtype == maple_arange_64)
ma_cp_gap(cp->dst, dloc, cp->src, sloc);
ma_cp_rcu_slot(cp->dst, dloc++, cp->src, sloc++);
+
+ prev_piv = piv;
}
+
cp->dst_start = dloc;
}
static inline int ma_split_data(struct ma_state *mas, struct maple_enode *left,
mt_set_parent(child, parent, slot);
}
}
+static inline void ma_recursive_adopt(struct maple_enode *parent)
+{
+}
/* Private
* Replace mn with mas->node in the tree
*/
-static inline void mt_replace(struct ma_state *mas)
+static inline void _mt_replace(struct ma_state *mas, bool free, bool push)
{
struct maple_node *mn = mt_to_node(mas->node);
struct maple_enode *parent = NULL;
ma_update_rcu_slot(parent, slot, mas->node);
}
- mt_free(mt_to_node(prev));
+ if (free) {
+ mt_free(mt_to_node(prev));
+ return;
+ }
+
+ if (push)
+ ma_push_node(mas, prev);
+
+}
+static inline void mt_replace(struct ma_state *mas)
+{
+ _mt_replace(mas, true, false);
}
+
/* Private
*
* This copies from the start to the specified end of a node into a new node.
* 8. set up ma_state for return.
*
*/
-static inline int ma_split(struct ma_state *mas, unsigned char slot)
+static inline int ma_split(struct ma_state *mas, unsigned char slot,
+ bool active)
{
- struct maple_enode *full = (mas->node);
+ struct maple_enode *full = mas->node;
unsigned char split, p_slot = 0, p_end = 0;
struct maple_enode *old_parent, *new_parent;
struct maple_enode *left = NULL, *right = NULL;
enum maple_type ptype; // parent type.
unsigned long pivot;
+ unsigned long p_max = 0;
if (ma_is_root(mas->node)) {
old_parent = full;
else
ptype = maple_range_64;
p_slot = 0;
- mas->max = mt_max[ptype];
} else {
unsigned long last_pivot;
p_slot = mt_parent_slot(mas->node);
ma_encoded_parent(mas);
old_parent = mas->node;
ptype = mt_node_type(mas->node);
- p_end = ma_data_end(mas->node, ptype, &last_pivot);
+ p_end = ma_data_end(mas, ptype, &last_pivot);
if (p_end >= mt_slots[ptype] - 1) {
/* Must split the parent */
- split = ma_split(mas, p_slot);
+ split = ma_split(mas, p_slot, active);
if (mas_is_err(mas))
return 0;
if (split < p_slot)
ma_set_slot(mas, p_slot);
}
ptype = mt_node_type(mas->node);
+ p_max = mas->max;
mas_update_limits(mas, p_slot, ptype);
- p_end = ma_data_end(mas->node, ptype, &last_pivot);
+ p_end = ma_data_end(mas, ptype, &last_pivot);
mas->node = ma_get_rcu_slot(old_parent, p_slot);
}
// Copy the parents information
if (!ma_is_root(full)) {
+ unsigned long c_max = mas->max;
+ mas->max = p_max;
// Copy the parent data except p_slot, which will later be
// replaced.
MA_CP(cp, old_parent, new_parent, 0, p_slot - 1);
cp.src_end = p_end;
ma_copy(mas, &cp);
}
+ mas->max = c_max;
}
// calculate the split location.
ma_split_data(mas, left, right, split);
if (right) {
pivot = ma_get_pivot(left, split);
- if (!pivot)
+ if (!pivot) // dense node
pivot = mas->min + split - 1;
} else {
pivot = mt_node_max(left);
mas->node = new_parent;
// Replace the parent node & free the old parent.
- mt_replace(mas);
+ _mt_replace(mas, active, true);
// Set up the ma_state for the return. Point to the correct node for
// the insert or subsequent split.
mas->min = pivot + 1;
}
- if (mas->index > mt_node_max(mas->node) + mas->min) {
+ if (mas->index > mt_node_max(mas->node) &&
+ mas->index > mt_node_max(mas->node) + mas->min) {
mas->node = new_parent;
split = p_slot;
}
// Free the full node.
- if (old_parent != full)
- mt_free(mt_to_node(full));
+ if (old_parent != full) {
+ if (!active)
+ ma_push_node(mas, full);
+ else
+ mt_free(mt_to_node(full));
+ }
return split;
}
return mt;
}
+
+
+static inline int ma_add(struct ma_state *mas, void *entry, bool overwrite,
+ bool active);
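+/* Private
+ * Add @entry to a dense node.  Dense nodes have no pivots: the slot offsets
+ * are the indices themselves, relative to mas->min.  If the range extends
+ * past this node, the remainder is handled by a further call to ma_add().
+ */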
+static inline int _ma_add_dense(struct ma_state *mas, void *entry,
+ unsigned char slot, bool overwrite, enum maple_type this_type,
+ bool active)
+{
+ int ret = 0;
+ unsigned long min = mas->index - mas->min;
+ unsigned long max = mas->last - mas->min;
+
+ if (max > mt_max[this_type])
+ max = mt_max[this_type];
+
+ // FIXME: Check entire range, not what we would insert this time.
+	if (!overwrite) {
+		unsigned long check = min;
+
+		/* Use a separate index so the write below still starts at min */
+		do
+			if (_ma_get_rcu_slot(mas->node, check++, this_type))
+				return 0;
+		while (check < max);
+	}
+
+ do
+ ma_update_rcu_slot(mas->node, min++, entry);
+ while (min < max);
+
+ if (max != mas->last - mas->min) {
+ mas->index = mas->min + max + 1;
+ ma_add(mas, entry, overwrite, active);
+ }
+
+ ret = max - min + 1;
+
+ return ret;
+}
+
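+/* Private
+ * Currently only the gap tracking of allocation trees needs updating when a
+ * node is rebalanced.
+ */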
+static inline void mas_balance(struct ma_state *mas)
+{
+ if (mt_is_alloc(mas->tree))
+ ma_update_gap(mas);
+}
+
/* Private
*
* Insert entry into a node.
* If this is not an append, a new node will be generated.
- * If this node is full, split the node & insert.
+ * If this node is full, split the node & insert or overwrite.
*
* This is done by:
* 1. Calculating the range of slot.
* 3. Copy the data over
* 4. Write the entry.
*
+ * Returns the number of slots used on success, the slot number on failure.
*/
-static inline void ma_insert(struct ma_state *mas, void *entry);
-static inline int _ma_insert(struct ma_state *mas, void *entry,
- unsigned char slot)
+static inline int mas_replace_tree(struct ma_state *mas, void *new_entry);
+static inline int _ma_add(struct ma_state *mas, void *entry, bool overwrite,
+ bool active)
{
- struct maple_enode *p_mn;
- unsigned long last_pivot;
- // Old end
- int o_end = ma_data_end(mas->node, mt_node_type(mas->node),
- &last_pivot);
- int n_end = o_end; // New end
+ struct maple_enode *prev_enode = NULL;
+ unsigned long last_piv;
+ int old_end = ma_data_end(mas, mt_node_type(mas->node),
+ &last_piv);
+ int new_end = old_end;
unsigned long max = mas->max;
unsigned long min = mas->min;
int ret = 1;
- enum maple_type type = mt_node_type(mas->node);
- unsigned char slot_cnt = mt_slots[type];
- unsigned char pivot_cnt = mt_pivots[type];
-
+ enum maple_type this_type = mt_node_type(mas->node);
+ unsigned char slot_cnt = mt_slots[this_type] - 1;
+ unsigned char pivot_cnt = mt_pivots[this_type];
+ bool spans_range = false;
+ bool append = false;
+ bool null_entry = false;
+ unsigned char slot = ma_get_slot(mas);
+
+ if (mas->last > mas->max) {
+ BUG_ON(!active);
+ /* Replace the current tree with a new tree with the requested
+ * value. This is sub-optimal, but *a lot* less work. The
+ * initial user of the tree will pretty much _never_ do this.
+ */
+ ret = mas_replace_tree(mas, entry);
+ goto update_gap;
+ }
/* Dense node type */
if (!pivot_cnt) {
- min = mas->index - mas->min;
- max = mas->last - mas->min;
- if (max >= mt_max[type])
- max = mt_max[type] - 1;
-
- do
- ma_update_rcu_slot(mas->node, min++, entry);
- while(min <= max);
-
- if (max != mas->last - mas->min) {
- mas->index = mas->min + max + 1;
- ma_insert(mas, entry);
- }
-
- ret = max - min + 1;
+		ret = _ma_add_dense(mas, entry, slot, overwrite, this_type,
+				    active);
+ if (!ret)
+ return ret;
goto update_gap;
}
- // If this is an append, set the slot to one beyond the end.
- if (slot == MAPLE_NODE_SLOTS) {
- slot = o_end;
- if (last_pivot != mt_max[type])
- slot++;
- }
+ if (slot > slot_cnt)
+ slot = old_end + 1;
/* Calculate the range of this slot */
- // Set the max
+ // Set the range max
if (slot < pivot_cnt && ma_get_pivot(mas->node, slot) != 0)
max = ma_get_pivot(mas->node, slot);
- // Set the min.
+ // Set the range min.
if (slot > 0)
min = ma_get_pivot(mas->node, slot - 1) + 1;
- /* Figure out how many slots are needed for the entry. */
- // A null for the end.
- if (max > mas->last)
- n_end++;
-
- // A new entry for the start.
- if (mas->index && min != mas->index)
- n_end++;
+ if (mas->last > max)
+ spans_range = true;
- // Check if we need to split.
- if (n_end > slot_cnt - 1 ||
- (n_end == slot_cnt - 1 && mas->max != mas->last)){
- unsigned char split = ma_split(mas, slot);
- if (mas_is_err(mas))
+ if (!overwrite) {
+ if (spans_range || ma_get_rcu_slot(mas->node, slot)) {
+ mas_set_err(mas, -EBUSY);
return 0;
+ }
+ }
- if (split <= slot)
- slot -= split;
+ if (last_piv == mas->max)
+ slot_cnt++;
- return _ma_insert(mas, entry, slot);
+ if (slot > old_end)
+ append = true;
+
+ if (mas->index && min != mas->index)
+ null_entry = true;
+
+ if (mas->index == min && mas->last == max) {
+ ma_set_rcu_slot(mas->node, slot, entry);
+ ret = 1;
+ goto complete;
}
- p_mn = mas->node;
+ /* Calculate how many new pivots we will need. */
+ // Possible pivot before the new range.
+ if (mas->index > min)
+ ret++;
- if (!mt_is_leaf(mas->node)) {
- struct maple_enode *leaf;
- enum maple_type mt;
- // Old limits.
- unsigned long o_min = mas->min;
- unsigned long o_max = mas->max;
-
- // Create a new node.
- mas_node_cnt(mas, 1);
- if (mas_is_err(mas))
- return 0;
-
- // Set the new leaf parent and type.
- mt = ma_determine_type(mas, min, slot);
- leaf = mt_mk_node(ma_next_alloc(mas), mt);
- mt_set_parent(leaf, mas->node, slot);
- // Put the new range in the new leaf.
- mas->node = leaf;
- mas->min = mas->index;
- mas->max = max;
- _ma_insert(mas, entry, 0);
- // Restore old values and continue inserting.
- mas->min = o_min;
- mas->max = o_max;
- mas->node = p_mn;
- entry = leaf;
- if (mt == maple_dense)
- mas->last = mas->index + mt_max[mt] - 1;
- }
-
- /* Figure out if this is an append or not.
- * Appending does not need to create a new node. */
- if (o_end == 0 || slot - 1 == o_end) {
- o_end = n_end; /* Appending */
- } else {
- /* Not appending */
- /* Creates a new node and copies until slot (inclusively) */
- mas_partial_copy(mas, slot);
-
- if (mas_is_err(mas))
- return 0;
-
- o_end = slot;
- }
-
- if (mas->index && min != mas->index) {
- /* When writing a NULL entry, the order must be reversed to
- * ensure readers don't get incorrect data on appends
- */
- /* Write the entry */
- ma_set_rcu_slot(mas->node, ++slot, entry);
- if (slot < pivot_cnt)
- ma_set_pivot(mas->node, slot, mas->last);
- /* Write NULL entry */
- ma_set_rcu_slot(mas->node, --slot, NULL);
- if (o_end == n_end + 1) // Append.
- wmb();
-
- ma_set_pivot(mas->node, slot, mas->index - 1);
- if (mt_is_alloc(mas->tree)) {
- unsigned long gap = mas->min;
- if (slot > 0)
- gap = ma_get_pivot(mas->node, slot - 1);
- gap = (mas->index - 1) - gap;
- ma_set_gap(mas->node, slot, gap);
- }
- slot += 2;
- ret = 2;
- } else {
- /* Write the entry */
- ma_set_rcu_slot(mas->node, slot, entry);
- if (o_end == n_end + 1) // Append.
- wmb();
-
- if (slot < pivot_cnt)
- ma_set_pivot(mas->node, slot++, mas->last);
- }
-
- /* Skip possible duplicate entry that contains a NULL */
- if (o_end != n_end && ma_get_pivot(p_mn, o_end) <= mas->last)
- o_end++;
-
- /* Copy remainder of node if this isn't an append */
- MA_CP(cp, p_mn, mas->node, o_end, slot_cnt - 1);
- cp.dst_start = slot;
- cp.dst_end = n_end;
- ma_copy(mas, &cp);
-
- //mas->max = mas->last;
-
- if (p_mn != mas->node)
- mt_replace(mas);
-
-update_gap:
-
- if (mt_is_alloc(mas->tree))
- ma_update_gap(mas);
-
- return ret;
-}
-static inline int old_ma_insert(struct ma_state *mas, void *entry,
- unsigned char slot)
-{
- struct maple_enode *p_mn;
- unsigned long last_pivot;
- // Old end
- int o_end = ma_data_end(mas->node, mt_node_type(mas->node),
- &last_pivot);
- int n_end = o_end; // New end
- unsigned long max = mas->max;
- unsigned long min = mas->min;
- int ret = 1;
- enum maple_type type = mt_node_type(mas->node);
- unsigned char slot_cnt = mt_slots[type];
- unsigned char pivot_cnt = mt_pivots[type];
+ // Possible pivot after the new range.
+ if (mas->last < max)
+ ret++;
- /* Dense node type */
- if (!pivot_cnt) {
- min = mas->index - mas->min;
- max = mas->last - mas->min;
- if (max >= mt_max[type])
- max = mt_max[type] - 1;
-
- do
- ma_update_rcu_slot(mas->node, min++, entry);
- while(min <= max);
-
- if (max != mas->last - mas->min) {
- mas->index = mas->min + max + 1;
- ma_insert(mas, entry);
+ // Detect duplicate null entries and don't copy them.
+ if (null_entry && slot > 0) {
+ struct maple_enode *slot_val = ma_get_rcu_slot(mas->node,
+ slot - 1);
+ if (!slot_val) {
+ slot--;
+ if (ret)
+ ret--;
}
-
- ret = max - min + 1;
- goto update_gap;
}
-
- /* Calculate the range of the slot */
- if (slot < pivot_cnt && ma_get_pivot(mas->node, slot) != 0)
- max = ma_get_pivot(mas->node, slot);
-
- if (slot == MAPLE_NODE_SLOTS)
- slot = o_end + 1; // Append.
-
- if (slot > 0)
- min = ma_get_pivot(mas->node, slot - 1) + 1;
-
- /* Figure out how many slots are needed for the entry. */
- if (max > mas->last)
- n_end++;
-
- if (mas->index && min != mas->index)
- n_end++;
-
- if (n_end > slot_cnt - 1 ||
- (n_end == slot_cnt - 1 && mas->max != mas->last)){
- unsigned char split = ma_split(mas, slot);
+ /* Check for splitting */
+ new_end += ret;
+	if (new_end > slot_cnt) {
+ /* Not enough space in the node */
+ unsigned char split = ma_split(mas, slot, active);
if (mas_is_err(mas))
return 0;
+ split++;
if (split <= slot)
- slot -= split;
+ slot = slot - split;
- return _ma_insert(mas, entry, slot);
+ ma_set_slot(mas, slot);
+ return _ma_add(mas, entry, overwrite, active);
}
- /* Save the node in case we are not appending. */
- p_mn = mas->node;
+ prev_enode = mas->node;
if (!mt_is_leaf(mas->node)) {
struct maple_enode *leaf;
enum maple_type mt;
+ // Old limits.
unsigned long o_min = mas->min;
unsigned long o_max = mas->max;
+ // Create a new node.
mas_node_cnt(mas, 1);
if (mas_is_err(mas))
return 0;
+ // Set the new leaf parent and type.
mt = ma_determine_type(mas, min, slot);
leaf = mt_mk_node(ma_next_alloc(mas), mt);
mt_set_parent(leaf, mas->node, slot);
+ // Put the new range in the new leaf.
mas->node = leaf;
mas->min = mas->index;
mas->max = max;
- _ma_insert(mas, entry, 0);
+ ma_set_slot(mas, 0);
+ _ma_add(mas, entry, overwrite, active);
+ // Restore old values and continue inserting.
mas->min = o_min;
mas->max = o_max;
- mas->node = p_mn;
+ mas->node = prev_enode;
entry = leaf;
if (mt == maple_dense)
mas->last = mas->index + mt_max[mt] - 1;
}
- /* Figure out if this is an append or not.
- * Appending does not need to create a new node. */
- if (o_end == 0 || slot - 1 == o_end) {
- o_end = n_end; /* Appending */
+
+ if (append) {
+ old_end = new_end;
} else {
- /* Not appending */
- /* Creates a new node and copies until slot (inclusively) */
mas_partial_copy(mas, slot);
-
if (mas_is_err(mas))
return 0;
-
- o_end = slot;
+ old_end = slot;
}
- if (mas->index && min != mas->index) {
+ if (null_entry) {
/* When writing a NULL entry, the order must be reversed to
* ensure readers don't get incorrect data on appends
*/
+
/* Write the entry */
ma_set_rcu_slot(mas->node, ++slot, entry);
if (slot < pivot_cnt)
ma_set_pivot(mas->node, slot, mas->last);
+
/* Write NULL entry */
ma_set_rcu_slot(mas->node, --slot, NULL);
- if (o_end == n_end + 1) // Append.
+ if (append)
wmb();
ma_set_pivot(mas->node, slot, mas->index - 1);
ma_set_gap(mas->node, slot, gap);
}
slot += 2;
- ret = 2;
} else {
/* Write the entry */
ma_set_rcu_slot(mas->node, slot, entry);
- if (o_end == n_end + 1) // Append.
+ if (old_end == new_end + 1) // Append.
wmb();
if (slot < pivot_cnt)
}
/* Skip possible duplicate entry that contains a NULL */
- if (o_end != n_end && ma_get_pivot(p_mn, o_end) <= mas->last)
- o_end++;
-
- /* Copy remainder of node if this isn't an append */
- MA_CP(cp, p_mn, mas->node, o_end, slot_cnt - 1);
- cp.dst_start = slot;
- cp.dst_end = n_end;
- ma_copy(mas, &cp);
-
- //mas->max = mas->last;
-
- if (p_mn != mas->node)
- mt_replace(mas);
+ if (!append) {
+ if (ma_get_pivot(prev_enode, old_end) <= mas->last)
+ old_end++;
+
+ /* Copy remainder of node if this isn't an append */
+ MA_CP(cp, prev_enode, mas->node, old_end, slot_cnt - 1);
+ cp.dst_start = slot;
+ cp.dst_end = mt_slot_count(cp.dst) - 1;
+ cp.start_piv = mas->last;
+ ma_copy(mas, &cp);
+ }
+complete:
+ if (prev_enode != mas->node)
+ _mt_replace(mas, active, true);
update_gap:
if (mt_is_alloc(mas->tree))
return ret;
}
+static inline int _ma_insert(struct ma_state *mas, void *entry,
+ unsigned char slot)
+{
+ ma_set_slot(mas, slot);
+ return _ma_add(mas, entry, false, true);
+}
+static inline int _ma_store(struct ma_state *mas, void *entry,
+ unsigned char slot)
+{
+ ma_set_slot(mas, slot);
+ return _ma_add(mas, entry, true, true);
+}
static inline void ma_root_expand(struct ma_state *ms, void *entry)
{
void *r_entry = rcu_dereference(ms->tree->ma_root); // root entry
struct maple_node *mn;
enum maple_type mt = ma_ptype_leaf(ms);
+ int slot = 0;
mas_node_cnt(ms, 1);
if (mas_is_err(ms))
mn->parent = ma_parent_ptr(
((unsigned long)ms->tree | MA_ROOT_PARENT));
- /* Assign the old entry to slot 0, or set it to null. */
- ma_set_rcu_slot(ms->node, 0, r_entry);
- if (!r_entry)
- ma_set_pivot(ms->node, 0, ms->index - 1);
+ if (ms->index != 0) {
+ slot++;
+ /* Assign the old entry to slot 0, or set it to null. */
+ ma_set_rcu_slot(ms->node, 0, r_entry);
+ if (!r_entry)
+ ma_set_pivot(ms->node, 0, ms->index - 1);
+ }
+	// FIXME: When TASK_SIZE / PAGE_SIZE - 1 works, check to ensure we are
+ // not inserting above this.
+ _ma_insert(ms, entry, slot);
- _ma_insert(ms, entry, 1);
if (mas_is_err(ms))
return;
if (mt_is_alloc(ms->tree)) {
-// ms->index = TASK_SIZE / PAGE_SIZE - 1;
+ // FIXME: ms->index = TASK_SIZE / PAGE_SIZE - 1;
ms->index = 0x2000000000000UL;
ms->last = mt_max[mt];
_ma_insert(ms, XA_ZERO_ENTRY, 2);
/* Private
* Combine nulls with the same pivot value
- * Note: The mas->node will most likely be changed, so keep track of the old
- * mas->node to free it.
+ *
+ * Allocation of a new node can occur in mas_partial_copy.
+ * If allocation fails, gaps must still be updated.
+ */
-static inline int mas_coalesce(struct ma_state *mas)
+static inline int mas_coalesce(struct ma_state *mas, unsigned char s_slot)
{
struct maple_enode *src = mas->node;
struct maple_enode *dst = NULL;
- unsigned char s_slot, d_slot = 0;
+ unsigned char d_slot = 0;
unsigned long last = 0;
int ret = 0;
enum maple_type type = mt_node_type(mas->node);
- unsigned char slot_cnt = mt_slots[type];
- unsigned char pivot_cnt = mt_pivots[type];
+ unsigned char slot_cnt;
+ unsigned char pivot_cnt;
+ //bool check_prev = false;
- if (!pivot_cnt)
- return ret;
+ if (ma_is_leaf(type))
+ return 0;
+ slot_cnt = mt_slots[type];
+ pivot_cnt = mt_pivots[type];
- for (s_slot = 0; s_slot < slot_cnt; s_slot++) {
+ for (; s_slot < slot_cnt; s_slot++) {
if (s_slot < pivot_cnt) {
- if (s_slot != 0 && last == ma_get_pivot(src, s_slot)) {
- if (!dst) {
- // Skip this duplicate slot
- d_slot = s_slot;
- mas_partial_copy(mas, s_slot - 1);
- if (mas_is_err(mas))
- goto mas_error;
-
- dst = mas->node;
- }
+ unsigned long pivot = ma_get_pivot(src, s_slot);
+ if (!s_slot) {
+ last = pivot;
+ if (mas->min == pivot)
+ printk("Should check coalescing of previous node\n");
continue;
}
- if (s_slot != 0 && ma_get_pivot(src, s_slot) == 0)
+ if (!pivot)
goto done;
- last = ma_get_pivot(src, s_slot);
+ if (last == pivot && !dst) {
+ //if (s_slot == 1)
+ // check_prev = true;
+ // First duplicate pivot.
+ d_slot = s_slot;
+ // Copy data to new node.
+ mas_partial_copy(mas, s_slot - 1);
+ if (mas_is_err(mas))
+ goto mas_error;
+
+ dst = mas->node;
+ }
+
+ last = pivot;
if (!dst)
continue;
ma_cp_pivot(dst, d_slot, src, s_slot);
ma_cp_rcu_slot(dst, d_slot++, src, s_slot);
- } else if (dst && ma_get_rcu_slot(src, s_slot) != NULL) {
+
+ } else if (dst && ma_get_rcu_slot(src, s_slot)) {
ma_set_pivot(dst, d_slot, mas->max);
- ma_cp_rcu_slot(dst, d_slot++, src, s_slot);
+ ma_cp_rcu_slot(dst, d_slot, src, s_slot);
+ // Detect dedup and rebalance.
}
}
+
done:
if (!dst)
- return ret;
+ return 0;
ret = s_slot - d_slot;
mt_replace(mas);
switch (type) {
case maple_leaf_64:
- pivot_cnt = mt_max[type];
+ pivot_cnt = mt_pivots[type];
if (i >= pivot_cnt - 1)
max = mas->max;
else
mas->max = max;
if (next) {
mas->node = next;
- i = ma_data_end(mas->node, mt_node_type(next), &max);
+ i = ma_data_end(mas, mt_node_type(next), &max);
} else {
found = true; // this is a non-leaf hole.
}
ma_set_slot(mas, i);
return found;
ascend:
- if (ma_is_root(mas->node)) {
+ if (ma_is_root(mas->node))
found = true;
- }
+
ma_set_slot(mas, i);
return found;
}
/*
* Private: Returns true if mas->node is a leaf
*/
-static inline bool _mas_walk(struct ma_state *mas)
+static inline bool __mas_walk(struct ma_state *mas)
{
enum maple_type type;
struct maple_enode *next;
switch (type) {
default:
- for (i = 0; i < pivot_cnt; i++) {
+ for (i = ma_get_slot(mas); i < pivot_cnt; i++) {
pivot = _ma_get_pivot(mas->node, i, type);
if (i != 0 && pivot == 0) {
i = MAPLE_NODE_SLOTS;
}
if ((i == pivot_cnt - 1) && (mas->index > pivot))
- i++;
+ i++;
if (ret)
goto done;
ma_set_slot(mas, i);
return ret;
}
+static inline bool _mas_walk(struct ma_state *mas)
+{
+ mas->node = mas_start(mas);
+ ma_set_slot(mas, 0);
+ return __mas_walk(mas);
+}
+static inline void ma_inactive_insert(struct ma_state *mas, void *entry);
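+/* Private
+ * Build a new tree containing everything to the left of mas->index, the new
+ * entry, and everything to the right of mas->last, then swap it in for the
+ * existing tree and free the old nodes.  This is the expensive fallback used
+ * when a store spans beyond the maximum of the current node.
+ */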
+static inline int mas_replace_tree(struct ma_state *mas, void *new_entry)
+{
+ long l_node_cnt = -1, r_node_cnt = 0;
+ unsigned int r_slot_cnt = 0, slot_cnt = 0;
+ long node_cnt = 0, nodes = 0;
+ MA_STATE(r_mas, mas->tree, mas->last + 1, mas->last + 1);
+ void *entry;
+ struct maple_enode *r_node = NULL, *last = NULL;
+ unsigned char r_slot = 0, slot;
+	DEFINE_MTREE(new_tree);
+	MA_STATE(new_mas, &new_tree, 0, 0);
+
+ printk("\n%s:\n", __func__);
+ slot_cnt = 1 + ma_get_slot(mas);
+ ma_set_slot(mas, mt_parent_slot(mas->node));
+ printk("%p %u Replace %ld-%ld\n", mas->node, slot_cnt, mas->index, mas->last);
+ while (mas->node != MAS_NONE) {
+ last = mas->node;
+ mas_prev_node(mas, 0);
+ printk("%p\n", mas->node);
+ l_node_cnt++;
+ }
+ mas->node = last;
+ printk("%s: left node count = %ld\n", __func__, l_node_cnt);
+ printk("mas->node = %p\n", mas->node);
+
+ _mas_walk(&r_mas);
+
+ printk("Starting right at %p[%u]\n", r_mas.node, ma_get_slot(&r_mas));
+ if (ma_get_slot(&r_mas) != MAPLE_NODE_SLOTS) {
+ unsigned long piv;
+ printk("Right node %p[%u]\n", r_node, r_slot);
+ r_slot_cnt += ma_data_end(&r_mas, mt_node_type(r_mas.node), &piv);
+ if (piv != ULONG_MAX) {
+ unsigned char p_slot = mt_parent_slot(r_mas.node);
+ r_node = r_mas.node;
+ r_slot = ma_get_slot(&r_mas);
+ r_slot_cnt -= r_slot;
+ slot_cnt += r_slot_cnt;
+ ma_set_slot(&r_mas, p_slot);
+ while (r_mas.node != MAS_NONE) {
+ last = r_mas.node;
+ mas_next_node(&r_mas, ULONG_MAX);
+ printk("%p\n", r_mas.node);
+ r_node_cnt++;
+ }
+ r_mas.node = last;
+ }
+ if (r_mas.max < mas->last)
+ slot_cnt++;
+ }
+ printk("%s: right node count = %ld\n", __func__, r_node_cnt);
+ printk("%s: right slots %u\n", __func__, slot_cnt);
+
+ if (slot_cnt > mt_slot_count(mas->node))
+ nodes++;
+
+ nodes = l_node_cnt + r_node_cnt + 2;
+
+ node_cnt = 1; // Root node.
+ while (nodes) {
+ printk("Adding %ld\n", nodes);
+ node_cnt += nodes;
+ nodes /= 8;
+ }
+
+ printk("%s: Requesting %ld nodes\n", __func__, node_cnt + 1);
+
+ mas_node_cnt(mas, node_cnt + 1);
+ if (mas_is_err(mas))
+ return 0;
+
+ printk("\n\n%s: Here we go! %lu-%lu -> %p\n", __func__, mas->index, mas->last, new_entry);
+	// Build the replacement tree.
+
+ new_mas.alloc = mas->alloc;
+ mas->alloc = NULL;
+
+ // Copy left side
+ ma_set_slot(mas, 0);
+ new_mas.last = mas_first_entry(mas, mas->index);
+ slot = ma_get_slot(mas);
+ do {
+ new_mas.index = mas->min;
+ new_mas.last = mas->min;
+ while (slot < mt_slot_count(mas->node)) {
+ if (new_mas.index == mas->index)
+ break;
+ printk("On %p[%u]\n", mas->node, slot);
+ new_mas.last = ma_get_safe_pivot(mas, slot);
+ if (!new_mas.last && slot)
+ break;
+
+ if (new_mas.last > mas->index)
+ new_mas.last = mas->index - 1;
+
+ entry = ma_get_rcu_slot(mas->node, slot);
+ if (entry) {
+ printk("Add %lu-%lu -> %p\n", new_mas.index, new_mas.last, entry);
+ ma_inactive_insert(&new_mas, entry);
+ if (mas_is_err(&new_mas))
+ BUG_ON(1);
+ }
+
+ if (new_mas.last == mas->index)
+ break;
+
+ new_mas.index = new_mas.last + 1;
+
+ slot++;
+ }
+ ma_set_slot(mas, mt_parent_slot(mas->node));
+ printk("Going to next from %p's parent[%u]\n", mas->node, mt_parent_slot(mas->node));
+ mas_next_node(mas, mas->index);
+ printk("Returned %p\n", mas->node);
+ slot = 0;
+ } while (mas->node != MAS_NONE);
+
+ printk("Done left\n");
+ // Insert the new value.
+ new_mas.index = mas->index;
+ new_mas.last = mas->last;
+ printk("Add %lu-%lu -> %p\n", new_mas.index, new_mas.last, new_entry);
+ ma_inactive_insert(&new_mas, new_entry);
+ if (mas_is_err(&new_mas))
+ BUG_ON(1);
+
+ /*
+ * We need to run through a few things:
+ * - new_mas.last goes beyond anything right now (no entries)
+ * - new_mas.last cuts a range
+ * - new_mas.last ends in a null
+ * - new_mas.last has a sequentially next value (48)
+ */
+ if (!r_node) // No entry beyond new_mas.last
+ goto skip_right;
+ printk("Now the Right\n");
+
+ new_mas.index = new_mas.last + 1;
+ new_mas.last = ma_get_pivot(r_node, r_slot);
+
+ ma_set_slot(mas, r_slot);
+ mas->node = r_node;
+ mas->min = new_mas.index;
+ printk("going from %p[%u] to the end\n", r_node, r_slot);
+ printk("Inserting %lu - %lu -> ?\n", new_mas.index, new_mas.last);
+ do {
+ new_mas.index = mas->min;
+ new_mas.last = mas->min;
+ slot = ma_get_slot(mas);
+ printk("do %p[%u]\n", mas->node, slot);
+ while (slot++ < mt_slot_count(mas->node)) {
+ new_mas.last = ma_get_safe_pivot(mas, slot);
+ if (!new_mas.last)
+ break;
+ printk("pivot %lu\n", new_mas.last);
+
+ entry = ma_get_rcu_slot(mas->node, slot);
+ if (entry) {
+ printk("inserting %lu-%lu->%p\n", new_mas.index, new_mas.last, entry);
+ ma_inactive_insert(&new_mas, entry);
+ if (mas_is_err(&new_mas)) {
+ printk("Error %p\n", new_mas.node);
+ BUG_ON(1);
+ }
+ }
+
+ new_mas.index = new_mas.last + 1;
+ }
+ mas_next_node(mas, mas->index);
+ ma_set_slot(mas, 0);
+ } while (mas->node != MAS_NONE);
+
+skip_right:
+
+ r_node = mas->tree->ma_root;
+ mas->node = new_tree.ma_root;
+ _mt_replace(mas, false, false);
+ mas->node = 0;
+ mas->alloc = new_mas.alloc;
+ printk("Destroy %p\n", r_node);
+ ma_destroy_walk(r_node);
+
+ // Copy right side
+ return node_cnt;
+}
static inline bool mas_rewind_node(struct ma_state *mas);
static inline void mas_rev_awalk(struct ma_state *mas, unsigned long size)
{
unsigned char slot;
mas->node = mas_start(mas);
- slot = ma_data_end(mas->node, mt_node_type(mas->node), &last_piv);
+ slot = ma_data_end(mas, mt_node_type(mas->node), &last_piv);
ma_set_slot(mas, slot);
}
return;
}
-static inline void ma_insert(struct ma_state *mas, void *entry)
-{
- unsigned char slot = MAPLE_NODE_SLOTS;
- bool leaf;
- mas->node = mas_start(mas);
- if (!xa_is_node(rcu_dereference(mas->tree->ma_root))) {
- if (mas->last != 0) {
- ma_root_expand(mas, entry);
- return;
- }
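+/* Private
+ * Handle a store into a tree that does not yet have any nodes: the entry
+ * either lives directly in ma_root or ma_root_expand() creates the first
+ * node.  Returns 1 if the store was handled here, 0 if the tree already has
+ * nodes or on error.
+ */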
+static inline int ma_root_ptr(struct ma_state *mas, void *entry,
+ bool overwrite)
+{
+ if (xa_is_node(mas->tree->ma_root))
+ return 0;
- if (mas->node != NULL)
+ if (!overwrite)
+ if (mas->tree->ma_root && mas->last == 0)
goto exists;
- if (((unsigned long) (entry) & 3) == 2)
- ma_root_expand(mas, entry);
- else
- rcu_assign_pointer(mas->tree->ma_root, entry);
+ if (mas->last != 0)
+ ma_root_expand(mas, entry);
+ else if (((unsigned long) (entry) & 3) == 2)
+ ma_root_expand(mas, entry);
+ else
+ rcu_assign_pointer(mas->tree->ma_root, entry);
+ return 1;
- return;
- }
+exists:
+ mas_set_err(mas, -EEXIST);
+ return 0;
+}
+
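+/* Private
+ * Common entry point for insert (overwrite == false) and store
+ * (overwrite == true).  Handle the node-less tree via ma_root_ptr(), walk to
+ * the leaf for mas->index, coalesce the node if necessary, then hand off to
+ * _ma_add().
+ */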
+static inline int ma_add(struct ma_state *mas, void *entry, bool overwrite,
+ bool active)
+{
+ unsigned char slot = MAPLE_NODE_SLOTS;
+ bool leaf;
+ int ret = 0;
+
+ ret = ma_root_ptr(mas, entry, overwrite);
+ if (mas_is_err(mas))
+ return 0;
+
+ if (ret)
+ return ret;
+retry:
leaf = _mas_walk(mas);
slot = ma_get_slot(mas);
+
+ // Clean this node
+ if (mas_coalesce(mas, 0)) {
+ if (mas_is_err(mas))
+ return 0;
+ goto retry;
+ }
+
if (leaf == true && slot != MAPLE_NODE_SLOTS) {
- if (ma_get_rcu_slot(mas->node, slot))
+ if (!overwrite && ma_get_rcu_slot(mas->node, slot))
goto exists;
}
- mas_coalesce(mas);
- if (mas_is_err(mas))
- return;
- /* Do the insert */
- _ma_insert(mas, entry, slot);
- return;
+ /* Do the add */
+ return _ma_add(mas, entry, overwrite, active);
exists:
mas_set_err(mas, -EEXIST);
- return;
+ return 0;
+}
+
+static inline void ma_inactive_insert(struct ma_state *mas, void *entry)
+{
+ ma_add(mas, entry, false, false);
+}
+static inline void ma_insert(struct ma_state *mas, void *entry)
+{
+ ma_add(mas, entry, false, true);
}
+
static int mas_fill_gap(struct ma_state *mas, void *entry, unsigned char slot,
unsigned long size, unsigned long *index)
{
if (mas_is_err(mas))
return xa_err(mas->node);
#endif
- if (mas->max < mas->last)
- mas->index = mas->max;
- else
+ // If the maximum is outside the window we are searching, then use the
+ // last location in the search.
+ if (mas->max > mas->last)
mas->index = mas->last;
+ else
+ mas->index = mas->max - size + 1;
+
+ mas->last = mas->index + size - 1;
return mas_fill_gap(mas, entry, slot, size, index);
return true;
}
/* Private
- * Find an entry and erase the entire range
+ * Find the range in which index resides and erase the entire range.
+ * Any previous pivots with no value will be set to the same pivot value.
+ * Returns the number of slots that have been erased.
*/
-int ma_erase(struct ma_state *mas)
+static inline int ma_erase(struct ma_state *mas)
{
enum maple_type type = mt_node_type(mas->node);
unsigned char slot_cnt = mt_slots[type];
unsigned char pivot_cnt = mt_pivots[type];
unsigned long piv_val;
- int ret = -EINVAL;
- int slot;
+ int slot, ret = 1;
- _mas_walk(mas);
slot = ma_get_slot(mas);
- if (slot == MAPLE_NODE_SLOTS)
- return ret;
-
ma_update_rcu_slot(mas->node, slot, NULL);
- ret = 0;
if ((slot >= slot_cnt - 1))
return ret;
+ // dense nodes only need to set a single value.
if (!pivot_cnt)
return ret;
- if ((slot < pivot_cnt) &&
- ((ma_get_pivot(mas->node, slot + 1) == 0) ||
- (ma_get_rcu_slot(mas->node, slot + 1) == NULL))) {
- piv_val = ma_get_pivot(mas->node, ++slot);
- } else {
- piv_val = ma_get_pivot(mas->node, slot);
+ piv_val = ma_get_pivot(mas->node, slot);
+	while (slot < pivot_cnt - 1) {
+ unsigned long this_pivot = ma_get_pivot(mas->node, slot + 1);
+
+ if (!this_pivot) // end of node.
+ break;
+
+ // There is data for this pivot.
+ if (ma_get_rcu_slot(mas->node, slot + 1))
+ break;
+
+ // empty slot above the erase.
+ piv_val = this_pivot;
+ slot++;
}
/* Walk down and set all the previous pivots with NULLs to piv_val */
- while(--slot >= 0 && ma_get_rcu_slot(mas->node, slot) == NULL)
+	while (--slot >= 0 && ma_get_rcu_slot(mas->node, slot) == NULL) {
ma_set_pivot(mas->node, slot, piv_val);
+ ret++;
+ }
+
+	/* The error on allocation failure can be ignored */
+	mas_coalesce(mas, ++slot);
- mas_coalesce(mas);
- /* Error may be returned, but it will be passed through anyways */
return ret;
}
-void ma_destroy_walk(struct maple_enode *mn)
-{
- unsigned int type = mt_node_type(mn);
- unsigned char slot_cnt = mt_slot_count(mn);
- int i;
-
- switch (type) {
- case maple_range_16:
- case maple_range_32:
- case maple_range_64:
- case maple_arange_64:
- for (i = 0; i < slot_cnt; i++) {
- if (i > 0 && i < slot_cnt - 1 &&
- ma_get_pivot(mn, i) == 0)
- break;
- if (ma_get_rcu_slot(mn, i))
- ma_destroy_walk(ma_get_rcu_slot(mn, i));
- }
- break;
- default:
- break;
- }
- mt_free(mt_to_node(mn));
-}
/* Interface */
void __init maple_tree_init(void)
return entry;
}
EXPORT_SYMBOL(mtree_load);
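+/*
+ * mtree_store_range() - Store @entry over the inclusive range @first to
+ * @last, overwriting any existing entries.  Returns 0 on success, -EINVAL on
+ * an invalid request, or the xa_err() of the failure.
+ */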
+int mtree_store_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ MA_STATE(mas, mt, first, last);
+
+ if (WARN_ON_ONCE(mt_is_reserved(entry)))
+ return -EINVAL;
+
+ if (first > last)
+ return -EINVAL;
+
+ mtree_lock(mas.tree);
+retry:
+ ma_add(&mas, entry, true, true);
+ if (mas_nomem(&mas, gfp))
+ goto retry;
+
+ mtree_unlock(mas.tree);
+ if (mas_is_err(&mas)) {
+ printk("Error %p\n", mas.node);
+ return xa_err(mas.node);
+ }
+
+ printk("Return 0\n");
+ return 0;
+}
+EXPORT_SYMBOL(mtree_store_range);
+int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
+ gfp_t gfp)
+{
+ return mtree_store_range(mt, index, index, entry, gfp);
+}
+EXPORT_SYMBOL(mtree_store);
+
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
unsigned long last, void *entry, gfp_t gfp)
{
int mtree_erase(struct maple_tree *mt, unsigned long index)
{
int ret = -EINVAL;
+ int slot;
MA_STATE(mas, mt, index, index);
mtree_lock(mt);
- ret = ma_erase(&mas);
+ _mas_walk(&mas);
+ slot = ma_get_slot(&mas);
+ if (slot != MAPLE_NODE_SLOTS)
+ ret = ma_erase(&mas);
+
mtree_unlock(mt);
return ret;
}
+EXPORT_SYMBOL(mtree_erase);
void mtree_destroy(struct maple_tree *mt)
{
+ struct maple_enode *destroyed;
+
mtree_lock(mt);
- struct maple_enode *destroyed = mt->ma_root;
+ destroyed = mt->ma_root;
+
- rcu_assign_pointer(mt->ma_root, NULL);
if (xa_is_node(destroyed)) {
- ma_destroy_walk(mt_safe_root(destroyed));
+ ma_destroy_walk(destroyed);
}
mt->ma_flags = 0;
+ rcu_assign_pointer(mt->ma_root, NULL);
mtree_unlock(mt);
}
EXPORT_SYMBOL(mtree_destroy);
else if (entry)
mt_dump_node(entry, 0, mt_max[mt_node_type(entry)], 0);
}
+
extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
void mt_set_non_kernel(unsigned int val)
{
{
return kmem_cache_get_alloc(maple_node_cache);
}
+
#endif