static struct kmem_cache *maple_node_cache;
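+/* mt_max[] - the maximum index that each maple node type can represent. */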
+unsigned long mt_max[] = {
+ [maple_dense] = MAPLE_NODE_SLOTS,
+ [maple_sparse_6] = (1UL << 6) - 1,
+ [maple_sparse_9] = (1UL << 9) - 1,
+ [maple_sparse_16] = (1UL << 16) - 1,
+ [maple_sparse_21] = (1UL << 21) - 1,
+ [maple_sparse_32] = UINT_MAX,
+ [maple_sparse_64] = ULONG_MAX,
+ [maple_leaf_16] = (1UL << 16) - 1,
+ [maple_leaf_32] = UINT_MAX,
+ [maple_leaf_64] = ULONG_MAX,
+ [maple_range_16] = (1UL << 16) - 1,
+ [maple_range_32] = UINT_MAX,
+ [maple_range_64] = ULONG_MAX,
+};
+#define mt_node_max(x) mt_max[mt_node_type(x)]
+
+
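+/* mt_slots[] - the number of slots available in each maple node type. */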
+unsigned char mt_slots[] = {
+ [maple_dense] = MAPLE_NODE_SLOTS,
+ [maple_sparse_6] = MAPLE_SPARSE6_SLOTS,
+ [maple_sparse_9] = MAPLE_SPARSE9_SLOTS,
+ [maple_sparse_16] = MAPLE_SPARSE16_SLOTS,
+ [maple_sparse_21] = MAPLE_SPARSE21_SLOTS,
+ [maple_sparse_32] = MAPLE_SPARSE32_SLOTS,
+ [maple_sparse_64] = MAPLE_SPARSE64_SLOTS,
+ [maple_leaf_16] = MAPLE_RANGE16_SLOTS,
+ [maple_leaf_32] = MAPLE_RANGE32_SLOTS,
+ [maple_leaf_64] = MAPLE_RANGE64_SLOTS,
+ [maple_range_16] = MAPLE_RANGE16_SLOTS,
+ [maple_range_32] = MAPLE_RANGE32_SLOTS,
+ [maple_range_64] = MAPLE_RANGE64_SLOTS,
+};
+#define mt_slot_count(x) mt_slots[mt_node_type(x)]
+// Functions
static struct maple_node *mt_alloc_one(gfp_t gfp)
{
return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO);
return false;
}
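+/* mt_parent_shift() - Shift of the slot bits in an encoded parent pointer. */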
+static inline unsigned int mt_parent_shift(unsigned long parent)
+{
+ if (!(parent & 2))
+ return 2; // maple_range_16
+ return 3;
+}
+
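+/* mt_parent_enum() - Return the maple_type encoded in a node's parent pointer. */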
+static inline enum maple_type mt_parent_enum(struct maple_node *node)
+{
+ unsigned long parent = (unsigned long) mt_to_node(node)->parent;
+ unsigned long slot_shift = mt_parent_shift(parent);
+
+ parent &= (1 << slot_shift) - 1;
+ switch (parent) {
+ case 6:
+ return maple_range_64;
+ case 4:
+ return maple_range_32;
+ case 0:
+ return maple_range_16;
+ }
+ return maple_dense;
+}
+
/* Private
*
* Type is encoded in the node->parent
mt_to_node(node)->parent = (struct maple_node *)val;
}
-static inline unsigned int mt_parent_shift(unsigned long parent)
-{
- if (!(parent & 2))
- return 2; // maple_range_16
- return 3;
-}
static inline unsigned int mt_parent_slot(struct maple_node *node)
{
unsigned long bitmask = 0x7C;
return (val & bitmask) >> slot_shift;
}
-static inline enum maple_type mt_parent_type(struct maple_node *node)
-{
- unsigned long parent = (unsigned long) mt_to_node(node)->parent;
- unsigned long slot_shift = mt_parent_shift(parent);
-
- parent &= (1 << slot_shift ) - 1;
- switch (parent) {
- case 6:
- return maple_range_64;
- case 4:
- return maple_range_32;
- case 0:
- return maple_range_16;
- }
- return maple_dense;
-}
static inline void *mt_parent(struct maple_node *node)
{
ma_set_alloc_req(ms, slot);
}
-static void mas_update_limits(struct ma_state *ms, unsigned char slot)
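+/* ma_get_pivot() - Get the pivot at @slot, dispatching on the node type. */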
+static inline unsigned long ma_get_pivot(const struct maple_node *mn,
+ unsigned char slot)
+{
+ enum maple_type type = mt_node_type(mn);
+
+ switch (type) {
+ default:
+ case maple_dense:
+ return 0;
+ case maple_sparse_6:
+ return mt_to_node(mn)->ms6.pivot;
+ case maple_sparse_9:
+ return mt_to_node(mn)->ms9.pivot[slot];
+ case maple_sparse_16:
+ return mt_to_node(mn)->ms16.pivot[slot];
+ case maple_sparse_21:
+ return mt_to_node(mn)->ms21.pivot[slot];
+ case maple_sparse_32:
+ return mt_to_node(mn)->ms32.pivot[slot];
+ case maple_sparse_64:
+ return mt_to_node(mn)->ms64.pivot[slot];
+ case maple_range_16:
+ case maple_leaf_16:
+ return mt_to_node(mn)->mr16.pivot[slot];
+ case maple_range_32:
+ case maple_leaf_32:
+ return mt_to_node(mn)->mr32.pivot[slot];
+ case maple_range_64:
+ case maple_leaf_64:
+ return mt_to_node(mn)->mr64.pivot[slot];
+ }
+}
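+/* ma_set_pivot() - Set the pivot at @slot to @val, dispatching on the node type. */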
+static inline void ma_set_pivot(struct maple_node *mn, unsigned char slot,
+ unsigned long val)
+{
+ enum maple_type type = mt_node_type(mn);
+
+ switch (type) {
+ default:
+ case maple_dense:
+ return;
+ case maple_sparse_6:
+ mt_to_node(mn)->ms6.pivot = val;
+ break;
+ case maple_sparse_9:
+ mt_to_node(mn)->ms9.pivot[slot] = val;
+ break;
+ case maple_sparse_16:
+ mt_to_node(mn)->ms16.pivot[slot] = val;
+ break;
+ case maple_sparse_21:
+ mt_to_node(mn)->ms21.pivot[slot] = val;
+ break;
+ case maple_sparse_32:
+ mt_to_node(mn)->ms32.pivot[slot] = val;
+ break;
+ case maple_sparse_64:
+ mt_to_node(mn)->ms64.pivot[slot] = val;
+ break;
+ case maple_range_16:
+ case maple_leaf_16:
+ mt_to_node(mn)->mr16.pivot[slot] = val;
+ break;
+ case maple_range_32:
+ case maple_leaf_32:
+ mt_to_node(mn)->mr32.pivot[slot] = val;
+ break;
+ case maple_range_64:
+ case maple_leaf_64:
+ mt_to_node(mn)->mr64.pivot[slot] = val;
+ break;
+ }
+ return;
+}
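+/* ma_cp_pivot() - Copy the pivot in @src slot @sloc to @dst slot @dloc. */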
+static inline void ma_cp_pivot(struct maple_node *dst,
+ unsigned char dloc, struct maple_node *src, unsigned long sloc)
+{
+ ma_set_pivot(dst, dloc, ma_get_pivot(src, sloc));
+}
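+/* ma_get_rcu_slot() - rcu_dereference() the slot pointer at @slot for any node type. */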
+static inline void __rcu *ma_get_rcu_slot(const struct maple_node *mn,
+ unsigned char slot)
{
- struct maple_range_64 *mr64;
+ enum maple_type type = mt_node_type(mn);
+ switch (type) {
+ default:
+ case maple_dense:
+ return rcu_dereference(mt_to_node(mn)->slot[slot]);
+ case maple_sparse_6:
+ return rcu_dereference(mt_to_node(mn)->ms6.slot[slot]);
+ case maple_sparse_9:
+ return rcu_dereference(mt_to_node(mn)->ms9.slot[slot]);
+ case maple_sparse_16:
+ return rcu_dereference(mt_to_node(mn)->ms16.slot[slot]);
+ case maple_sparse_21:
+ return rcu_dereference(mt_to_node(mn)->ms21.slot[slot]);
+ case maple_sparse_32:
+ return rcu_dereference(mt_to_node(mn)->ms32.slot[slot]);
+ case maple_sparse_64:
+ return rcu_dereference(mt_to_node(mn)->ms64.slot[slot]);
+ case maple_range_16:
+ case maple_leaf_16:
+ return rcu_dereference(mt_to_node(mn)->mr16.slot[slot]);
+ case maple_range_32:
+ case maple_leaf_32:
+ return rcu_dereference(mt_to_node(mn)->mr32.slot[slot]);
+ case maple_range_64:
+ case maple_leaf_64:
+ return rcu_dereference(mt_to_node(mn)->mr64.slot[slot]);
+ }
+}
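+/* ma_set_rcu_slot() - Set the slot at @slot to @val using RCU_INIT_POINTER(). */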
+static inline void ma_set_rcu_slot(const struct maple_node *mn,
+ unsigned char slot, void *val)
+{
+ enum maple_type type = mt_node_type(mn);
+
+ switch (type) {
+ default:
+ case maple_dense:
+ RCU_INIT_POINTER(mt_to_node(mn)->slot[slot], val);
+ break;
+ case maple_sparse_6:
+ RCU_INIT_POINTER(mt_to_node(mn)->ms6.slot[slot], val);
+ break;
+ case maple_sparse_9:
+ RCU_INIT_POINTER(mt_to_node(mn)->ms9.slot[slot], val);
+ break;
+ case maple_sparse_16:
+ RCU_INIT_POINTER(mt_to_node(mn)->ms16.slot[slot], val);
+ break;
+ case maple_sparse_21:
+ RCU_INIT_POINTER(mt_to_node(mn)->ms21.slot[slot], val);
+ break;
+ case maple_sparse_32:
+ RCU_INIT_POINTER(mt_to_node(mn)->ms32.slot[slot], val);
+ break;
+ case maple_sparse_64:
+ RCU_INIT_POINTER(mt_to_node(mn)->ms64.slot[slot], val);
+ break;
+ case maple_range_16:
+ case maple_leaf_16:
+ RCU_INIT_POINTER(mt_to_node(mn)->mr16.slot[slot], val);
+ break;
+ case maple_range_32:
+ case maple_leaf_32:
+ RCU_INIT_POINTER(mt_to_node(mn)->mr32.slot[slot], val);
+ break;
+ case maple_range_64:
+ case maple_leaf_64:
+ RCU_INIT_POINTER(mt_to_node(mn)->mr64.slot[slot], val);
+ break;
+ }
+}
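+/* ma_cp_rcu_slot() - Copy the slot pointer in @src slot @sloc to @dst slot @dloc. */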
+static inline void ma_cp_rcu_slot(struct maple_node *dst,
+ unsigned char dloc, struct maple_node *src, unsigned long sloc)
+{
+ ma_set_rcu_slot(dst, dloc, ma_get_rcu_slot(src, sloc));
+}
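+/* ma_update_rcu_slot() - Publish @val into @slot with rcu_assign_pointer(). */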
+static inline void ma_update_rcu_slot(const struct maple_node *mn,
+ unsigned char slot, void *val)
+{
+ enum maple_type type = mt_node_type(mn);
+
+ switch (type) {
+ default:
+ case maple_dense:
+ rcu_assign_pointer(mt_to_node(mn)->slot[slot], val);
+ break;
+ case maple_sparse_6:
+ rcu_assign_pointer(mt_to_node(mn)->ms6.slot[slot], val);
+ break;
+ case maple_sparse_9:
+ rcu_assign_pointer(mt_to_node(mn)->ms9.slot[slot], val);
+ break;
+ case maple_sparse_16:
+ rcu_assign_pointer(mt_to_node(mn)->ms16.slot[slot], val);
+ break;
+ case maple_sparse_21:
+ rcu_assign_pointer(mt_to_node(mn)->ms21.slot[slot], val);
+ break;
+ case maple_sparse_32:
+ rcu_assign_pointer(mt_to_node(mn)->ms32.slot[slot], val);
+ break;
+ case maple_sparse_64:
+ rcu_assign_pointer(mt_to_node(mn)->ms64.slot[slot], val);
+ break;
+ case maple_range_16:
+ case maple_leaf_16:
+ rcu_assign_pointer(mt_to_node(mn)->mr16.slot[slot], val);
+ break;
+ case maple_range_32:
+ case maple_leaf_32:
+ rcu_assign_pointer(mt_to_node(mn)->mr32.slot[slot], val);
+ break;
+ case maple_range_64:
+ case maple_leaf_64:
+ rcu_assign_pointer(mt_to_node(mn)->mr64.slot[slot], val);
+ break;
+ }
+}
+static void mas_update_limits(struct ma_state *ms, unsigned char slot)
+{
if (slot >= MAPLE_NODE_SLOTS)
return;
if (ma_is_root(ms->node)) {
- ms->max = ULONG_MAX;
+ ms->max = mt_node_max(ms->node);
ms->min = 0;
}
- mr64 = &mt_to_node(ms->node)->mr64;
if (slot > 0)
- ms->min = mr64->pivot[slot - 1] + 1;
+ ms->min = ma_get_pivot(ms->node, slot - 1);
- if (slot < MAPLE_RANGE64_SLOTS - 1)
- ms->max = mr64->pivot[slot];
+ if (slot < mt_slot_count(ms->node) - 1)
+ ms->max = ma_get_pivot(ms->node, slot);
}
static inline void ma_encoded_parent(struct ma_state *mas)
{
struct maple_node *parent, *gparent;
unsigned char slot;
+ parent = mt_to_node(mas->node)->parent;
if (ma_is_root(mt_to_node(mas->node)->parent)) {
mas->node = mt_safe_root(rcu_dereference(mas->tree->ma_root));
return;
}
- parent = mt_parent(mas->node);
gparent = mt_parent(parent);
slot = mt_parent_slot(parent);
- mas->node = gparent;
+ mas->node = mt_mk_node(gparent, mt_parent_enum(parent));
+ ma_set_slot(mas, slot);
mas_update_limits(mas, slot);
- mas->node = rcu_dereference(gparent->mr64.slot[slot]);
+ mas->node = ma_get_rcu_slot(gparent, slot);
return;
}
static inline struct maple_node *ma_next_alloc(struct ma_state *ms)
return entry;
}
-unsigned char ma_data_end_r64(const struct maple_range_64 *mr64,
- unsigned long last)
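+/* ma_data_end() - Find the last slot in @mn that holds data. */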
+unsigned char ma_data_end(const struct maple_node *mn)
{
unsigned char data_end = 0;
+ unsigned long last;
- for (data_end = 0; data_end < MAPLE_RANGE64_SLOTS - 1; data_end++) {
- last = mr64->pivot[data_end];
+ for (data_end = 0; data_end < mt_slot_count(mn) - 1; data_end++) {
+ last = ma_get_pivot(mn, data_end);
if (last == 0 && data_end > 0)
return data_end - 1;
- if (last == ULONG_MAX)
+ if (last == mt_node_max(mn))
return data_end - 1;
}
- if (mr64->slot[data_end] == NULL)
+ if (ma_get_rcu_slot(mn, data_end) == NULL)
data_end--;
return data_end;
static int ma_calc_split(struct ma_state *mas)
{
int i;
- struct maple_range_64 *full64 = &mt_to_node(mas->node)->mr64;
unsigned long min = mas->min;
unsigned long max = min;
- for (i = 3; i < MAPLE_RANGE64_SLOTS - 1; i++) {
- max = full64->pivot[i];
+ for (i = 3; i < mt_slot_count(mas->node) - 1; i++) {
+ max = ma_get_pivot(mas->node, i);
if ((max - min) >= 8)
break;
}
}
void ma_copy(struct ma_state *mas, struct ma_cp *cp)
{
- struct maple_range_64 *src64 = &cp->src->mr64;
- struct maple_range_64 *dst64;
unsigned char sloc = cp->src_start; // src location
unsigned char dloc = cp->dst_start; // dst location
+ unsigned char slot_cnt = mt_slot_count(mas->node);
if (!cp->dst) {
/* Allocate a new node */
if (mas_is_err(mas))
return;
cp->dst = ma_next_alloc(mas);
+ cp->dst = mt_mk_node(cp->dst, mt_node_type(cp->src));
}
- dst64 = &cp->dst->mr64;
while (sloc <= cp->src_end &&
dloc <= cp->dst_end) {
- if (sloc != 0 && sloc < MAPLE_RANGE64_SLOTS - 1 &&
- src64->pivot[sloc] == 0)
+ if (sloc != 0 && sloc < slot_cnt - 1 &&
+ ma_get_pivot(cp->src, sloc) == 0)
break;
- if (dloc < MAPLE_RANGE64_SLOTS - 1) {
- if (sloc < MAPLE_RANGE64_SLOTS - 1)
- dst64->pivot[dloc] = src64->pivot[sloc];
- else if (dloc > 0 && dst64->pivot[dloc - 1] != mas->max)
- dst64->pivot[dloc] = mas->max;
+ if (dloc < slot_cnt - 1) {
+ if (sloc < slot_cnt - 1)
+ ma_cp_pivot(cp->dst, dloc, cp->src, sloc);
+ else if (dloc > 0 &&
+ ma_get_pivot(cp->dst, dloc - 1) != mas->max)
+ ma_set_pivot(cp->dst, dloc, mas->max);
}
- RCU_INIT_POINTER(dst64->slot[dloc], src64->slot[sloc]);
-
- /* Increment counters */
- sloc++;
- dloc++;
+ ma_cp_rcu_slot(cp->dst, dloc++, cp->src, sloc++);
}
cp->dst_start = dloc;
}
-static int ma_cp_data_64(struct ma_state *mas, struct maple_node *left,
- struct maple_node *right, int off)
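+/* ma_split_data() - Copy the contents of the full node into @left and @right around the calculated split point. */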
+static int ma_split_data(struct ma_state *mas, struct maple_node *left,
+ struct maple_node *right)
{
int split = ma_calc_split(mas);
- MA_CP(cp, mt_to_node(mas->node), left, off, split);
+ MA_CP(cp, mas->node, left, 0, split);
ma_copy(mas, &cp);
cp.src_start = split + 1;
- cp.src_end = MAPLE_RANGE64_SLOTS - 1;
+ cp.src_end = mt_slot_count(mas->node) - 1;
cp.dst = right;
cp.dst_start = 0;
ma_copy(mas, &cp);
static void ma_adopt_children(struct maple_node *parent)
{
- struct maple_range_64 *p64 = &mt_to_node(parent)->mr64;
struct maple_node *child;
unsigned char slot;
+ unsigned char slot_cnt = mt_slot_count(parent);
- for (slot = 0; slot < MAPLE_RANGE64_SLOTS; slot++) {
- if (slot != 0 && slot < MAPLE_RANGE64_SLOTS - 1 &&
- p64->pivot[slot] == 0)
+ for (slot = 0; slot < slot_cnt; slot++) {
+ if (slot != 0 && slot < slot_cnt - 1 &&
+ ma_get_pivot(parent, slot) == 0)
break;
- child = p64->slot[slot];
+ child = ma_get_rcu_slot(parent, slot);
if (child)
mt_set_parent(child, parent, slot);
}
{
struct maple_node *mn = mt_to_node(mas->node);
struct maple_node *parent;
- struct maple_range_64 *p64;
unsigned char slot = 0;
struct maple_node *prev;
if (ma_is_root(mas->node)) {
prev = mas->tree->ma_root;
} else {
+ enum maple_type ptype = mt_parent_enum(mas->node);
parent = mt_parent(mas->node);
- p64 = &parent->mr64;
slot = mt_parent_slot(mas->node);
- prev = p64->slot[slot];
+ prev = ma_get_rcu_slot(mt_mk_node(parent, ptype), slot);
}
if (prev == mn)
* This isn't safe to use to copy the last slot as it doesn't check the
* pivot.
*/
-static struct maple_range_64 *mas_partial_copy(struct ma_state *mas,
+static void mas_partial_copy(struct ma_state *mas,
unsigned char end)
{
- MA_CP(cp, mt_to_node(mas->node), NULL, 0, end);
+ MA_CP(cp, mas->node, NULL, 0, end);
ma_copy(mas, &cp);
if (mas_is_err(mas))
- return NULL;
+ return;
- mas->node = mt_mk_node(cp.dst, mt_node_type(mas->node));
- cp.dst->parent = cp.src->parent;
- return &cp.dst->mr64;
+ mt_to_node(cp.dst)->parent = mt_to_node(cp.src)->parent;
+ mas->node = cp.dst;
+ return;
}
static void ma_link(struct maple_node *new, struct maple_node *parent,
unsigned char slot, unsigned long pivot, enum maple_type type)
{
- struct maple_range_64 *p64 = &parent->mr64;
struct maple_node *new_enc = mt_mk_node(new, type);
+ unsigned char slot_cnt = mt_slots[type];
mt_set_parent(new, parent, slot);
- if (slot < MAPLE_RANGE64_SLOTS - 1)
- p64->pivot[slot] = pivot;
- RCU_INIT_POINTER(p64->slot[slot], new_enc);
+ if (slot < slot_cnt - 1)
+ ma_set_pivot(parent, slot, pivot);
+
+ ma_set_rcu_slot(parent, slot, new_enc);
if (!mt_is_leaf(new_enc))
ma_adopt_children(new_enc);
}
static int ma_split(struct ma_state *mas, unsigned char slot)
{
- struct maple_node *full = mt_to_node(mas->node);
+ struct maple_node *full = mas->node;
unsigned char split, p_slot = 0, p_end = 0;
struct maple_node *old_parent, *new_parent, *left, *right;
enum maple_type ctype; // Child type.
enum maple_type ptype; // parent type.
unsigned long pivot;
+ unsigned char slot_cnt = mt_slot_count(mas->node);
if (ma_is_root(mas->node)) {
old_parent = full;
ptype = maple_range_64;
p_slot = 0;
- mas->max = ULONG_MAX;
+ mas->max = mt_max[ptype];
} else {
- old_parent = mt_parent(mas->node);
p_slot = mt_parent_slot(mas->node);
- p_end = ma_data_end_r64(&old_parent->mr64, UINT_MAX);
- ptype = mt_parent_type(mas->node);
- if (p_end >= MAPLE_RANGE64_SLOTS - 1) {
+ ma_encoded_parent(mas);
+ old_parent = mas->node;
+ p_end = ma_data_end(mas->node);
+ if (p_end >= slot_cnt - 1) {
/* Must split the parent */
- ma_encoded_parent(mas);
- mas_update_limits(mas, p_slot);
split = ma_split(mas, p_slot);
if (mas_is_err(mas))
return 0;
if (split < p_slot)
p_slot -= split + 1;
// Split will return the parent.
- old_parent = mt_to_node(mas->node);
- ptype = mt_node_type(mas->node);
- p_slot = mt_parent_slot(mas->node);
- mas_update_limits(mas, p_slot);
- mas->node = old_parent->mr64.slot[p_slot];
+ old_parent = mas->node;
+ ma_set_slot(mas, p_slot);
}
+ ptype = mt_node_type(mas->node);
+ mas_update_limits(mas, p_slot);
+ mas->node = ma_get_rcu_slot(old_parent, p_slot);
}
mas_node_cnt(mas, 3);
left = ma_next_alloc(mas);
new_parent = ma_next_alloc(mas);
- // Record the node type for the children types.
+ // Record the node type for the children.
ctype = mt_node_type(mas->node);
+
+ right = mt_mk_node(right, ctype);
+ left = mt_mk_node(left, ctype);
+ new_parent = mt_mk_node(new_parent, ptype);
+
// copy the data and calculate the split location.
- split = ma_cp_data_64(mas, left, right, 0);
+ split = ma_split_data(mas, left, right);
+
// Copy the parents information
if (!ma_is_root(full)) {
// Copy the parent data and leave a hole.
}
// Copy grand parent to the parent, including slot encoding.
- new_parent->parent = old_parent->parent;
+ mt_to_node(new_parent)->parent = mt_to_node(old_parent)->parent;
// Set up the link to the right in the new parent
if (ma_is_root(full))
- pivot = ULONG_MAX;
+ pivot = mt_node_max(mas->node);
else
- pivot = old_parent->mr64.pivot[p_slot];
+ pivot = ma_get_pivot(old_parent, p_slot);
ma_link(right, new_parent, p_slot + 1, pivot, ctype);
// Set up the link to the left in the new parent
- if (split < MAPLE_RANGE64_SLOTS - 1)
- pivot = full->mr64.pivot[split];
+ if (split < slot_cnt - 1)
+ pivot = ma_get_pivot(mas->node, split);
else
- pivot = left->mr64.pivot[split - 1];
+ pivot = ma_get_pivot(left, split - 1);
ma_link(left, new_parent, p_slot, pivot, ctype);
// Set up maple state to replace the parent node in the grandparent.
- mas->node = mt_mk_node(new_parent, ptype);
+ mas->node = new_parent;
// Replace the parent node & free the old parent.
mt_replace(mas);
// Set up the ma_state for the return. Point to the correct node for
// the insert or subsequent split.
if (mas->index <= pivot)
- mas->node = mt_mk_node(left, ctype);
+ mas->node = left;
else
- mas->node = mt_mk_node(right, ctype);
+ mas->node = right;
// Free the full node.
- mt_free(full);
+ mt_free(mt_to_node(full));
return split;
}
*/
static int _ma_insert(struct ma_state *mas, void *entry, unsigned char slot)
{
- struct maple_range_64 *mr64 = &mt_to_node(mas->node)->mr64;
struct maple_node *p_mn;
- struct maple_range_64 *p_mr64;
- int o_end = ma_data_end_r64(mr64, mas->max); // Old end
+ int o_end = ma_data_end(mas->node); // Old end
int n_end = o_end; // New end
unsigned long max = mas->max;
unsigned long min = mas->min;
int ret = 1;
+ unsigned char slot_cnt = mt_slot_count(mas->node);
/* Calculate the range of the slot */
- if (slot < MAPLE_RANGE64_SLOTS - 1 && mr64->pivot[slot] != 0)
- max = mr64->pivot[slot];
+ if (slot < slot_cnt - 1 && ma_get_pivot(mas->node, slot) != 0)
+ max = ma_get_pivot(mas->node, slot);
if (slot == MAPLE_NODE_SLOTS)
slot = o_end + 1; // Append.
if (slot > 0)
- min = mr64->pivot[slot - 1];
+ min = ma_get_pivot(mas->node, slot - 1);
/* Figure out how many slots are needed for the entry. */
if (max != mas->last)
if (mas->index && min != mas->index - 1)
n_end++;
- if (n_end > MAPLE_RANGE64_SLOTS - 1 ||
- (n_end == MAPLE_RANGE64_SLOTS - 1 && mas->max == ULONG_MAX)) {
+ if (n_end > slot_cnt - 1 ||
+ (n_end == slot_cnt - 1 && mas->max == mt_node_max(mas->node))) {
unsigned char split = ma_split(mas, slot);
if (mas_is_err(mas))
return 0;
slot -= split;
n_end -= split;
- mr64 = &mt_to_node(mas->node)->mr64;
- o_end = ma_data_end_r64(mr64, mas->max); // Old end is not so old now.
+ o_end = ma_data_end(mas->node); // Old end is not so old now.
}
/* Save the node in case we are not appending. */
- p_mr64 = mr64;
- p_mn = mt_to_node(mas->node);
+ p_mn = mas->node;
/* Figure out if this is an append or not.
* Appending does not need to create a new node. */
o_end = n_end; /* Appending */
} else {
/* Not appending */
- if (p_mr64 == mr64) {
- /* Creates a new node and copies until slot (inclusively) */
- mr64 = mas_partial_copy(mas, slot);
- if (mas_is_err(mas))
- return 0;
- }
+ /* Creates a new node and copies until slot (inclusively) */
+ mas_partial_copy(mas, slot);
+ if (mas_is_err(mas))
+ return 0;
o_end = slot;
}
* ensure readers don't get incorrect data on appends
*/
/* Write the entry */
- RCU_INIT_POINTER(mr64->slot[++slot], entry);
- mr64->pivot[slot] = mas->last;
+ ma_set_rcu_slot(mas->node, ++slot, entry);
+ ma_set_pivot(mas->node, slot, mas->last);
/* Write NULL entry */
- RCU_INIT_POINTER(mr64->slot[--slot], NULL);
+ ma_set_rcu_slot(mas->node, --slot, NULL);
if (o_end == n_end + 1) // Append.
wmb();
- mr64->pivot[slot] = mas->index - 1;
+ ma_set_pivot(mas->node, slot, mas->index - 1);
slot += 2;
ret = 2;
} else {
/* Write the entry */
- RCU_INIT_POINTER(mr64->slot[slot], entry);
+ ma_set_rcu_slot(mas->node, slot, entry);
if (o_end == n_end + 1) // Append.
wmb();
- if (slot < MAPLE_RANGE64_SLOTS - 1)
- mr64->pivot[slot++] = mas->last;
+ if (slot < mt_slot_count(mas->node) - 1)
+ ma_set_pivot(mas->node, slot++, mas->last);
}
/* Skip possible duplicate entry that contains a NULL */
- if (o_end != n_end && p_mr64->pivot[o_end] == mas->last)
+ if (o_end != n_end && ma_get_pivot(p_mn, o_end) == mas->last)
o_end++;
/* Copy remainder of node if this isn't an append */
- MA_CP(cp, p_mn, mt_to_node(mas->node), o_end, MAPLE_RANGE64_SLOTS - 1);
+ MA_CP(cp, p_mn, mas->node, o_end, mt_slot_count(p_mn) - 1);
cp.dst_start = slot;
cp.dst_end = n_end;
ma_copy(mas, &cp);
- if (p_mr64 != mr64)
+ if (p_mn != mas->node)
mt_replace(mas);
return ret;
((unsigned long)ms->tree | MA_ROOT_PARENT);
/* Assign the old entry to slot 0, or set it to null. */
- RCU_INIT_POINTER(mn->mr64.slot[0], r_entry);
+ ma_set_rcu_slot(mn, 0, r_entry);
if (!r_entry)
- mn->mr64.pivot[0] = ms->index - 1;
+ ma_set_pivot(mn, 0, ms->index - 1);
_ma_insert(ms, entry, 1);
if (mas_is_err(ms))
*/
static int mas_coalesce(struct ma_state *mas)
{
- struct maple_node *src_mn = mt_to_node(mas->node);
- struct maple_range_64 *src = &src_mn->mr64;
+ struct maple_node *src = mas->node;
+ struct maple_node *dst = NULL;
unsigned char s_slot, d_slot = 0;
unsigned long last = 0;
- struct maple_range_64 *dst = NULL;
int ret = 0;
+ unsigned char slot_cnt = mt_slot_count(mas->node);
- for (s_slot = 0; s_slot < MAPLE_RANGE64_SLOTS; s_slot++) {
- if (s_slot < MAPLE_RANGE64_SLOTS - 1) {
- if (s_slot != 0 && last == src->pivot[s_slot]) {
+ for (s_slot = 0; s_slot < slot_cnt; s_slot++) {
+ if (s_slot < slot_cnt - 1) {
+ if (s_slot != 0 && last == ma_get_pivot(src, s_slot)) {
if (!dst) {
// Skip this duplicate slot
d_slot = s_slot;
- dst = mas_partial_copy(mas, s_slot - 1);
+ mas_partial_copy(mas, s_slot - 1);
if (mas_is_err(mas))
return 0;
+
+ dst = mas->node;
}
continue;
}
- if (s_slot != 0 && src->pivot[s_slot] == 0)
+ if (s_slot != 0 && ma_get_pivot(src, s_slot) == 0)
goto done;
- last = src->pivot[s_slot];
+ last = ma_get_pivot(src, s_slot);
if (!dst)
continue;
- dst->pivot[d_slot] = src->pivot[s_slot];
- RCU_INIT_POINTER(dst->slot[d_slot++],
- src->slot[s_slot]);
- } else if (dst && src->slot[s_slot] != NULL) {
- dst->pivot[d_slot] = mas->max;
- RCU_INIT_POINTER(dst->slot[d_slot++],
- src->slot[s_slot]);
+ ma_cp_pivot(dst, d_slot, src, s_slot);
+ ma_cp_rcu_slot(dst, d_slot++, src, s_slot);
+ } else if (dst && ma_get_rcu_slot(src, s_slot) != NULL) {
+ ma_set_pivot(dst, d_slot, mas->max);
+ ma_cp_rcu_slot(dst, d_slot++, src, s_slot);
}
}
done:
static bool mas_search_slots(struct ma_state *ms, unsigned long val)
{
- struct maple_range_64 *mr64;
int i = 0;
bool ret = false;
+ unsigned char slot_cnt = mt_slot_count(ms->node);
+ unsigned long pivot = 0;
- mr64 = &mt_to_node(ms->node)->mr64;
- for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
- if (i != 0 && mr64->pivot[i] == 0) {
+ for (i = 0; i < slot_cnt - 1; i++) {
+ pivot = ma_get_pivot(ms->node, i);
+ if (i != 0 && pivot == 0) {
ma_set_slot(ms, MAPLE_NODE_SLOTS);
return ret;
}
- if (val <= mr64->pivot[i])
+ if (val <= pivot)
break;
}
- if (i == MAPLE_RANGE64_SLOTS - 2) {
- if (val > mr64->pivot[i])
+ if (i == slot_cnt - 2) {
+ if (val > pivot)
i++;
}
- if (mr64->slot[i])
+ if (ma_get_rcu_slot(ms->node, i))
ret = true;
ma_set_slot(ms, i);
mas_update_limits(mas, slot);
if (mt_is_leaf(mas->node))
return false;
- mas->node = mt_to_node(mas->node)->mr64.slot[slot];
+ mas->node = ma_get_rcu_slot(mas->node, slot);
return true;
}
void ma_insert(struct ma_state *mas, void *entry)
{
unsigned char slot = MAPLE_NODE_SLOTS;
- struct maple_range_64 *src;
bool leaf;
mas->node = mas_start(mas);
leaf = _mas_walk(mas);
slot = ma_get_slot(mas);
- src = &mt_to_node(mas->node)->mr64;
if (leaf == true && slot != MAPLE_NODE_SLOTS) {
- if (src->slot[slot])
+ if (ma_get_rcu_slot(mas->node, slot))
goto exists;
}
leaf = _mas_walk(mas);
slot = ma_get_slot(mas);
- if (leaf == true && slot != MAPLE_NODE_SLOTS) {
- struct maple_range_64 mr = mt_to_node(mas->node)->mr64;
+ if (leaf == true && slot != MAPLE_NODE_SLOTS)
+ entry = ma_get_rcu_slot(mas->node, slot);
- entry = mr.slot[slot];
- }
ma_set_slot(mas, 0);
return entry;
}
*/
int ma_erase(struct ma_state *mas)
{
- struct maple_range_64 *mr64;
+ unsigned char slot_cnt;
unsigned long piv_val;
int cnt = -EINVAL;
int slot;
_mas_walk(mas);
slot = ma_get_slot(mas);
- mr64 = &mt_to_node(mas->node)->mr64;
+ slot_cnt = mt_slot_count(mas->node);
if (slot == MAPLE_NODE_SLOTS)
return cnt;
- rcu_assign_pointer(mr64->slot[slot], NULL);
- if ((slot >= MAPLE_RANGE64_SLOTS - 1))
+
+ ma_update_rcu_slot(mas->node, slot, NULL);
+ if (slot >= slot_cnt - 1)
return 1;
- if ((slot < MAPLE_RANGE64_SLOTS - 2) &&
- ((mr64->pivot[slot + 1] == 0) ||
- (mr64->slot[slot + 1] == NULL))) {
- piv_val = mr64->pivot[++slot];
+ if ((slot < slot_cnt - 2) &&
+ ((ma_get_pivot(mas->node, slot + 1) == 0) ||
+ (ma_get_rcu_slot(mas->node, slot + 1) == NULL))) {
+ piv_val = ma_get_pivot(mas->node, ++slot);
} else {
- piv_val = mr64->pivot[slot];
+ piv_val = ma_get_pivot(mas->node, slot);
}
cnt = 1;
/* Walk down and set all the previous pivots with NULLs to piv_val */
- while(--slot >= 0 && mr64->slot[slot] == NULL) {
- mr64->pivot[slot] = piv_val;
+ while (--slot >= 0 && ma_get_rcu_slot(mas->node, slot) == NULL) {
+ ma_set_pivot(mas->node, slot, piv_val);
cnt++;
}
return cnt;
}
-void ma_destroy_walk(struct maple_node *enc_mn)
+void ma_destroy_walk(struct maple_node *mn)
{
- struct maple_node *mn = mt_to_node(enc_mn);
- unsigned int type = mt_node_type(enc_mn);
+ unsigned int type = mt_node_type(mn);
+ unsigned char slot_cnt = mt_slot_count(mn);
int i;
switch (type) {
+ case maple_range_16:
+ case maple_range_32:
case maple_range_64:
- for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
- if (i > 0 && i < MAPLE_RANGE64_SLOTS - 1 &&
- mn->mr64.pivot[i] == 0)
+ for (i = 0; i < slot_cnt; i++) {
+ if (i > 0 && i < slot_cnt - 1 &&
+ ma_get_pivot(mn, i) == 0)
break;
- if (rcu_dereference(mn->mr64.slot[i]))
- ma_destroy_walk(rcu_dereference(mn->mr64.slot[i]));
+ if (ma_get_rcu_slot(mn, i))
+ ma_destroy_walk(ma_get_rcu_slot(mn, i));
}
break;
- case maple_leaf_64:
default:
break;
}
- mt_free(mn);
+ mt_free(mt_to_node(mn));
}
/* Interface */
pr_info("dumped node %p\n", entry);
}
-unsigned long mt_max[] = {
- [maple_dense] = MAPLE_NODE_SLOTS,
- [maple_sparse_6] = (1UL << 6) - 1,
- [maple_sparse_9] = (1UL << 9) - 1,
- [maple_sparse_16] = (1UL << 16) - 1,
- [maple_sparse_21] = (1UL << 21) - 1,
- [maple_sparse_32] = UINT_MAX,
- [maple_sparse_64] = ULONG_MAX,
- [maple_leaf_16] = (1UL << 16) - 1,
- [maple_leaf_32] = UINT_MAX,
- [maple_leaf_64] = ULONG_MAX,
- [maple_range_16] = (1UL << 16) - 1,
- [maple_range_32] = UINT_MAX,
- [maple_range_64] = ULONG_MAX,
-};
-
void mt_dump(const struct maple_tree *mt)
{
void *entry = mt->ma_root;