#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/maple_tree.h>
+#include <linux/rcupdate.h>
/* Things we will probably eventually need */
}
-/*
- * Private
- * Split a ms->node into two.
- * Requires tree to be locked.
- */
-static void _maple_split(struct maple_state *ms)
-{
-}
#endif
/* Actual implementation. */
return (void *)((unsigned long)node | 2);
}
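+/*
+ * ms->alloc either points to a list of pre-allocated maple nodes or, before
+ * that list has been allocated, carries the number of requested nodes in
+ * its two low bits.  The helpers below encode and decode that tagged
+ * pointer.
+ */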
+static inline struct maple_node **ma_get_alloc(const struct maple_state *ms)
+{
+ return (struct maple_node **)((unsigned long)ms->alloc & ~3);
+}
+static inline struct maple_node **ma_set_alloc_cnt(struct maple_node **alloc,
+		int count)
+{
+ /* Tag the list pointer with the number of requested nodes. */
+ return (struct maple_node **)((unsigned long)alloc | count);
+}
+static inline int ma_req_alloc_cnt(const struct maple_state *ms)
+{
+ return (int)((unsigned long)ms->alloc & 3);
+}
+
+static inline bool ma_alloc_empty(struct maple_node **alloc)
+{
+ /* Ignore the count bits; only an actual list pointer is non-empty. */
+ if ((unsigned long)alloc & ~3)
+ return false;
+ return true;
+}
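+/* The allocation list for a split: slot 0 holds the new left node, slot 1
+ * the new right node, and slot 2 the new root when the node being split is
+ * the root.
+ */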
+static inline struct maple_node *ma_left_alloc(const struct maple_state *ms)
+{
+ return ma_get_alloc(ms)[0];
+}
+static inline struct maple_node *ma_right_alloc(const struct maple_state *ms)
+{
+ return ma_get_alloc(ms)[1];
+}
+static inline struct maple_node *ma_root_alloc(const struct maple_state *ms)
+{
+ return ma_get_alloc(ms)[2];
+}
+
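+/* Return true if @node is the node currently at the root of the tree. */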
+static inline bool maple_is_root(struct maple_state *ms,
+ struct maple_node *node)
+{
+ return ma_mk_node(node) == ms->tree->root;
+}
+
+
static inline bool _maple_is_node_4(struct maple_state *ms)
{
/* FIXME: This just means that our node could be contained within a
* node_4. This could happen when shrinking a node_64, for example.
*/
- if (ms->node == ms->tree->root)
+ /* node_4 handling is not implemented yet; always use a node_64. */
+ return false;
+ if (maple_is_root(ms, ms->node))
return false;
if ((ms->max - ms->min) <= (unsigned long)MAPLE_NODE4_MAX_SLOT)
return false;
}
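+/* Allocate an array of @count zeroed maple nodes.  Returns the kcalloc'd
+ * list on success, or NULL with nothing left allocated on failure.
+ */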
-static struct maple_node *_maple_new_node(gfp_t gfp)
+static struct maple_node **_maple_new_node(gfp_t gfp, int count)
{
+ struct maple_node **list;
struct maple_node *mn;
+ int i = 0;
- mn = kzalloc(sizeof(*mn), gfp);
- if (!mn)
- goto kmalloc_failed;
+ list = kcalloc(count, sizeof(struct maple_node *), gfp);
+ if (!list)
+ goto list_failed;
- mn->map64.parent = NULL;
+ for (i = 0; i < count; i++) {
+ mn = kzalloc(sizeof(*mn), gfp);
+ if (!mn)
+ goto kzalloc_failed;
+ mn->map64.parent = NULL;
+ list[i] = mn;
+ }
+ return list;
-kmalloc_failed:
- return mn;
+kzalloc_failed:
+ while (i-- > 0)
+ kfree(list[i]);
+ kfree(list);
+list_failed:
+ return NULL;
}
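+/* Allocate the number of nodes recorded in ms->alloc, dropping the tree
+ * lock around the allocation when the gfp flags allow sleeping.
+ */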
static bool __maple_nomem(struct maple_state *ms, gfp_t gfp)
{
if (gfpflags_allow_blocking(gfp)) {
spin_unlock(&ms->tree->lock);
- ms->alloc = _maple_new_node(gfp);
+ ms->alloc = ma_set_alloc_cnt(_maple_new_node(gfp,
+    ma_req_alloc_cnt(ms)), ma_req_alloc_cnt(ms));
spin_lock(&ms->tree->lock);
} else {
- ms->alloc = _maple_new_node(gfp);
+ ms->alloc = ma_set_alloc_cnt(_maple_new_node(gfp,
+    ma_req_alloc_cnt(ms)), ma_req_alloc_cnt(ms));
}
ms->node = MAS_START;
- if (!ms->alloc)
+ if (!ma_get_alloc(ms))
return false;
return true;
}
if (mn64.pivot[i] == 0)
break;
}
- /*FIXME: Is this right? */
- if (i == MAPLE_NODE64_MAX_PIVOT - 1 &&
- mn64.slot[i + 1] != NULL)
+
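+ /* Every pivot is in use; count the final slot if it is occupied. */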
+ if (i == MAPLE_NODE64_MAX_PIVOT && mn64.slot[i] != NULL)
i++;
+
+ if (maple_is_root(ms, ms->node))
+ i++;
+
return i;
}
/*
* Private
*/
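+/* Return a list of @count pre-allocated nodes: reuse ms->alloc when it
+ * already holds @count nodes, otherwise attempt a non-blocking allocation.
+ */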
-static struct maple_node *_maple_state_node(struct maple_state *ms)
+static struct maple_node **_maple_state_node(struct maple_state *ms, int count)
{
- struct maple_node *alt_mn;
+ struct maple_node **alt_mn;
+ int allocated = ma_req_alloc_cnt(ms);
- if (ms->alloc) {
+ BUG_ON(count > 3);
+
+ if (allocated != 0) {
+ if (allocated != count)
+ return NULL; // FIXME: Busy, retry.
alt_mn = ms->alloc;
- ms->alloc = NULL;
} else {
alt_mn = _maple_new_node(GFP_KERNEL | GFP_NOWAIT |
- __GFP_NOWARN | __GFP_ZERO);
+ __GFP_NOWARN | __GFP_ZERO, count);
+ alt_mn = ma_set_alloc_cnt(alt_mn, count);
}
return alt_mn;
}
static int _maple_root_expand(struct maple_state *ms)
{
void *r_entry = ms->tree->root; // root entry
- struct maple_node *mn;
+ struct maple_node **list, *mn;
- mn = _maple_state_node(ms);
- if (mn == NULL)
+ ms->alloc = _maple_state_node(ms, 1);
+ if (ma_alloc_empty(ms->alloc))
return -ENOMEM;
+ list = ma_get_alloc(ms);
+ mn = list[0];
+ ms->node = mn;
+ /* The node has been consumed; free only the list itself. */
+ kfree(list);
+ ms->alloc = NULL;
/*
* Insert the existing entry into the new node
rcu_assign_pointer(ms->tree->root, ma_mk_node(mn));
return 0;
}
+/*
+ * Private
+ *
+ * Copy the data from ms->node, starting at slot @off, into the two
+ * pre-allocated nodes: the first half to the left node and the rest to
+ * the right node.
+ */
+static void maple_split_data_64(struct maple_state *ms, int off)
+{
+ struct maple_node *left_mn = ma_left_alloc(ms);
+ struct maple_node *right_mn = ma_right_alloc(ms);
+
+ struct maple_node_64 *full_mn = &(ms->node->map64);
+ struct maple_node_64 *target = &(left_mn->map64);
+ int i, j;
+
+ /* Copy the first half of the data (starting at slot @off) into left_mn
+  * and the remainder into right_mn, each starting at slot 0.
+  */
+ for (i = 0, j = off; j < MAPLE_NODE64_MAX_SLOT; i++, j++) {
+ if (i == MAPLE_NODE64_MAX_SLOT / 2) {
+ target = &(right_mn->map64);
+ i = 0;
+ }
+ if (j < MAPLE_NODE64_MAX_SLOT - 1)
+ target->pivot[i] = full_mn->pivot[j];
+ else if (j == MAPLE_NODE64_MAX_SLOT - 1 && ms->max != ULONG_MAX)
+ target->pivot[i] = ms->max;
+ target->slot[i] = full_mn->slot[j];
+ }
+
+}
+
+/*
+ * Private
+ *
+ * Return the offset of the first unused pivot, which points to nil in a
+ * node that is not full.  A completely full node returns
+ * MAPLE_NODE64_MAX_PIVOT.
+ */
+int maple_data_end_64(const struct maple_node_64 *mn64)
+{
+ int p_end;
+
+ for (p_end = 0; p_end < MAPLE_NODE64_MAX_PIVOT; p_end++) {
+ if (mn64->pivot[p_end] == 0)
+ break;
+ }
+ if ((p_end == MAPLE_NODE64_MAX_PIVOT - 1) &&
+ (mn64->slot[p_end + 1] != NULL))
+ p_end++;
+ return p_end;
+}
+/*
+ * Private
+ *
+ * Shift the slots and pivots at @p_here and above one position towards
+ * the end of the node.  Writing new values from the tail forward ensures
+ * that a reader hits a valid entry before any partially written data.
+ */
+void maple_shift_64(struct maple_node_64 *mn64, int p_here)
+{
+ int p_end = maple_data_end_64(mn64);
+ int idx;
+
+ for (idx = p_end; idx >= p_here; idx--) {
+ /* The final slot has no pivot of its own. */
+ if (idx < MAPLE_NODE64_MAX_PIVOT - 1)
+ mn64->pivot[idx + 1] = mn64->pivot[idx];
+ rcu_assign_pointer(mn64->slot[idx + 1], mn64->slot[idx]);
+ }
+}
+
+/*
+ * Private
+ *
+ * Link the pre-allocated left and right nodes (and, when splitting the
+ * root, the new root node) into the tree in place of the full node, then
+ * orphan and free the full node.
+ *
+ * Note: currently only links node_64s.
+ */
+void maple_link_node(struct maple_state *ms)
+{
+ struct maple_node *full_mn = ms->node;
+ struct maple_node_64 *fmn64 = &(full_mn->map64);
+ struct maple_node *left_mn = ma_left_alloc(ms);
+ struct maple_node *right_mn = ma_right_alloc(ms);
+ struct maple_node_64 *lmn64 = &(left_mn->map64);
+ struct maple_node_64 *rmn64 = &(right_mn->map64);
+
+ if (maple_is_root(ms, ms->node)) {
+ struct maple_node *root_mn = ma_root_alloc(ms);
+ int l_end = maple_data_end_64(lmn64) - 1;
+ int r_end = maple_data_end_64(rmn64) - 1;
+ int idx = 0;
+ /* The full node is the root.
+ * We have to throw out the old root due to pesky readers.
+ * left and right have the data already, so link them in the
+ * correct place in the root.
+ */
+ lmn64->parent = ma_mk_node(root_mn);
+ rmn64->parent = ma_mk_node(root_mn);
+ /* Root will have two entries: left_mn, right_mn.
+ * The pivot will be the maximum pivot of each.
+ * pivot 0 will be the lowest pivot of left_mn.
+ */
+ root_mn->map64.pivot[idx] = fmn64->pivot[0];
+ idx++;
+ /* Left child */
+ root_mn->map64.pivot[idx] = lmn64->pivot[l_end];
+ RCU_INIT_POINTER(root_mn->map64.slot[idx], ma_mk_node(left_mn));
+ idx++;
+ /* Right child */
+ root_mn->map64.pivot[idx] = rmn64->pivot[r_end];
+ RCU_INIT_POINTER(root_mn->map64.slot[idx],
+ ma_mk_node(right_mn));
+ /* Swap root of tree */
+ rcu_assign_pointer(ms->tree->root, ma_mk_node(root_mn));
+ ms->node = root_mn;
+ } else {
+ struct maple_node_64 *target =
+ &(ma_to_node(fmn64->parent)->map64);
+ /*
+ * Shift in the parent, making room for the right node.
+ *
+ * Point the new slot to the right node
+ * Set the old pivot to max in the left node.
+ * Point the old slot to the left node
+ * Free the full node.
+ *
+ * ms->slot_idx is the slot location of the full node in the
+ * target.
+ */
+
+ lmn64->parent = fmn64->parent;
+ rmn64->parent = fmn64->parent;
+ /* Shift the data over */
+ maple_shift_64(target, ms->slot_idx);
+ rcu_assign_pointer(target->slot[ms->slot_idx + 1],
+    ma_mk_node(right_mn));
+ target->pivot[ms->slot_idx] =
+    lmn64->pivot[MAPLE_NODE64_MAX_SLOT / 2 - 1];
+ rcu_assign_pointer(target->slot[ms->slot_idx],
+    ma_mk_node(left_mn));
+ }
+
+ /* Orphan the full node: point its parent at itself so racing readers
+  * can tell it is no longer part of the tree.
+  */
+ fmn64->parent = full_mn;
+ _maple_free_node(full_mn);
+ kfree(ma_get_alloc(ms));
+}
/*
* Private
* Split a node and set ms->node to the correct one for an insert for
static int _maple_node_split(struct maple_state *ms)
{
/* Assume node64, as node4 cannot be split. */
- //struct maple_node_64 *mn64 = &ms->node->map64;
- //struct maple_node_64 *alt_mn64;
- struct maple_node *alt_mn;
+ int alloc_cnt = 2;
+ int offset = 0;
+
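+ /* Splitting the root needs a third node for the new root, and the copy
+  * starts at slot 1 because slot 0 of the old root only holds the lowest
+  * pivot.
+  */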
+ if (maple_is_root(ms, ms->node)) {
+ alloc_cnt++;
+ offset++;
+ }
+
- if (!xa_is_node(ms->tree->root))
- return _maple_root_expand(ms);
/*
* FIXME:
* If there is room in the previous or next node
- * then rebalance.
- * set up maple_state to point to the correct node.
+ * then don't create a new node - rebalance.
*/
/* Allocate new mn64 and split */
- alt_mn = _maple_state_node(ms);
- if (alt_mn == NULL)
+ ms->alloc = _maple_state_node(ms, alloc_cnt);
+ if (ma_alloc_empty(ms->alloc))
return -ENOMEM;
- /* Calculate even split by pivots */
- /* Set up maple_state for the side of the split we will fall on. */
-
+ /* copy 1/2 data from the end to the new node. */
+ maple_split_data_64(ms, offset);
+ maple_link_node(ms);
return 0;
}
return 0;
}
-/* Private
- *
- * Note: If the final slot is in use, this will return
- * MAPLE_NODE64_MAX_PIVOT - 1
- */
-static inline int _maple_data_end_64(const struct maple_node_64 *mn64)
-{
- int p_end;
- for (p_end = 0; p_end < MAPLE_NODE64_MAX_PIVOT; p_end++) {
- if (mn64->pivot[p_end] == 0)
- break;
- }
- if ( (p_end == MAPLE_NODE64_MAX_PIVOT - 1) &&
- (mn64->slot[p_end + 1] != NULL) )
- p_end++;
- return p_end;
-}
static inline int _maple_insert_64(struct maple_state *ms, void *entry)
{
struct maple_node_64 *mn64 = &ms->node->map64;
int p_here = ms->slot_idx; /* Location to place data */
- int p_end; /* End of data */
int shift = 0;
int idx = 0;
return 0;
}
- /* Find the end of pivots in this node */
- p_end = _maple_data_end_64(mn64); // Points to nil on non-full nodes.
-
/* Things to check for the null start:
*
* 1. Not using the parent pivot to get to slot 0.
* 2. The previous pivot isn't sequential with our value.
*/
- if ((p_here == 0 && ms->index + 1 > ms->min) ||
- (p_here > 0 && mn64->pivot[p_here-1] != ms->index)) {
+ if (ms->index != 0 &&
+ ((p_here == 0 && ms->index + 1 > ms->min) ||
+ (p_here > 0 && mn64->pivot[p_here-1] != ms->index))) {
shift++;
}
- // Sanity check.
- BUG_ON(p_end + shift >= MAPLE_NODE64_MAX_SLOT);
-
/* Shift everything over. */
- if (shift > 0) {
- /* Writing new values from the tail forward ensure that a valid entry
- * was hit prior to a partial write.
- */
- for (idx = p_end; idx >= p_here; idx--)
- {
- mn64->pivot[idx + shift] = mn64->pivot[idx];
- rcu_assign_pointer(mn64->slot[idx + shift],
- mn64->slot[idx]);
- }
- }
+ if (shift > 0)
+ maple_shift_64(mn64, p_here);
/* We now have made space at p_here to p_here + shift for the new
* data, which needs to be:
*/
idx = p_here + shift;
- mn64->pivot[idx] = ms->end + 1;
+ /* The final slot has no pivot; it runs to the node maximum. */
+ if (idx < MAPLE_NODE64_MAX_PIVOT)
+ mn64->pivot[idx] = ms->end + 1;
rcu_assign_pointer(mn64->slot[idx], entry);
if (shift > 0) {
return _maple_insert_64(ms, entry);
}
+
/* Private
*
* Must hold the spin lock.
* FIXME: What happens if start/end span nodes?
*
*/
-static void *_maple_insert_walk(struct maple_state *ms)
+static void *_maple_setup_insert(struct maple_state *ms)
{
void *entry = mas_start(ms);
return maple_retry(ms);
if (ret == -EINVAL) // FIXME: root expand on split may return an invalid insert.
return maple_retry(ms);
- return NULL;
}
entry = _maple_walk_64(ms, ms->index);
spin_lock(&ms.tree->lock);
retry:
- walked = _maple_insert_walk(&ms);
+ walked = _maple_setup_insert(&ms);
if (walked != NULL)
goto already_exists;
if (walked == maple_retry(&ms))
check_load(&tree, set[0], &tree); // See if 15 -> &tree
check_load(&tree, set[1], ptr); // See if 14 -> ptr
check_load(&tree, set[6], ptr); // See if 1002 -> ptr
- check_load(&tree, set[7], &tree); // See if 1003 -> &tree
+ check_load(&tree, set[7], &tree); // 1003 = &tree ? (test 20)
/* Clear out tree */
mtree_destroy(&tree);
/* Clear out the tree */
mtree_destroy(&tree);
+
+ mtree_init(&tree);
+/*
+ * set[] = {15, 14, 17, 25, 1000,
+ * 1001, 1002, 1003, 1005, 0,
+ * 3};
+ */
+
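+ /* Fill a node_64 and keep inserting to force a split, verifying that all
+  * earlier entries remain readable after each step.
+  */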
+ check_insert(&tree, set[0], ptr); // 15
+ check_insert(&tree, set[1], &tree); // 14 (test 30)
+ check_insert(&tree, set[2], ptr); // 17
+ check_insert(&tree, set[3], &tree); // 25.
+ check_insert(&tree, set[4], ptr); // 1000 <- should split the node.
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree); //25
+ check_load(&tree, set[4], ptr);
+ check_insert(&tree, set[5], &tree); // 1001
+ check_load(&tree, set[0], ptr); // test 40
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_load(&tree, set[4], ptr);
+ check_load(&tree, set[5], &tree);
+ check_insert(&tree, set[6], ptr);
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree); // test 50
+ check_load(&tree, set[4], ptr);
+ check_load(&tree, set[5], &tree);
+ check_load(&tree, set[6], ptr);
+ check_insert(&tree, set[7], &tree);
+ check_insert(&tree, set[8], ptr);
+ check_insert(&tree, set[9], &tree);
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree); // test 60
+ check_load(&tree, set[4], ptr);
+ check_load(&tree, set[5], &tree);
+ check_load(&tree, set[6], ptr);
+ check_load(&tree, set[9], &tree);
+ mtree_destroy(&tree);
+
printk("maple_tree: %u of %u tests passed\n", tests_passed, tests_run);
return (tests_run == tests_passed) ? 0 : -EINVAL;