*/
#include <linux/maple_tree.h>
+#include <linux/export.h>
#include <linux/slab.h>
+#define MA_ROOT_PARENT 1
+
static struct kmem_cache *maple_node_cache;
static struct maple_node *mt_alloc_one(gfp_t gfp)
return ((unsigned long)entry < 4096) && xa_is_internal(entry);
}
+static inline void mas_set_err(struct ma_state *mas, long err)
+{
+ mas->node = MA_ERROR(err);
+}
static inline bool mas_is_start(struct ma_state *ms)
{
return ms->node == MAS_START;
return (void *)((unsigned long)node | (type << 3) | 2);
}
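+
+/*
+ * A note on the tagging above (a sketch of the encoding as used here):
+ * nodes come from a slab cache aligned to sizeof(struct maple_node) (see
+ * maple_tree_init() below), so the low bits of a node pointer are free
+ * for tags.  Setting bit 1 (value 2) makes xa_is_internal() true, and a
+ * real node address is always above 4096, so xa_is_node() holds as well;
+ * the node type travels in bits 3 and up:
+ *
+ *	entry = mt_mk_node(node, maple_leaf_64);
+ *	xa_is_node(entry);		// true
+ *	node == mt_to_node(entry);	// recovers the bare pointer
+ */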
-void mtree_init(struct maple_tree *mt)
+static inline bool mas_is_err(struct ma_state *mas)
{
- spin_lock_init(&mt->ma_lock);
- mt->ma_flags = 0;
- mt->ma_root = NULL;
+ return xa_is_err(mas->node);
}
-void *mtree_load(struct maple_tree *mt, unsigned long index)
+static inline struct maple_node *ma_get_alloc(const struct ma_state *ms)
{
- return NULL;
+ return (struct maple_node *)((unsigned long)ms->alloc & ~0x7F);
}
-int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry, gfp_t gfp)
+static inline int ma_get_alloc_cnt(const struct ma_state *ms)
{
- return -EINVAL;
+ struct maple_node *node = ma_get_alloc(ms);
+
+ if (!node)
+ return 0;
+ if (!node->slot[0])
+ return 1;
+ if (!node->slot[1])
+ return 2;
+ return 3;
}
-int mtree_insert_range(struct maple_tree *mt, unsigned long first,
- unsigned long last, void *entry, gfp_t gfp)
+static inline void ma_set_alloc_req(struct ma_state *ms, int count)
{
- return -EINVAL;
+ ms->alloc = (struct maple_node *)((unsigned long)ms->alloc & ~0x03);
+ ms->alloc = (struct maple_node *)((unsigned long)ms->alloc | count);
}
-int mtree_erase(struct maple_tree *mt, unsigned long index)
+static inline int ma_get_alloc_req(const struct ma_state *ms)
{
- return -EINVAL;
+ return (int)(((unsigned long)ms->alloc & 0x03));
}
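+
+/*
+ * A sketch of the ms->alloc encoding used by the helpers above:
+ * ms->alloc points at the first pre-allocated node, and the count of
+ * nodes still requested rides in its low two bits (ma_set_alloc_req()
+ * masks with 0x03; ma_get_alloc() strips the low bits, which node
+ * alignment leaves unused).  Additional pre-allocated nodes are parked
+ * in slot[0] and slot[1] of the first node, which is why
+ * ma_get_alloc_cnt() counts by probing those slots: a head node with a
+ * spare in slot[0] counts as 2, for example.
+ */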
-void mtree_destroy(struct maple_tree *mt)
+static inline bool ma_is_root(struct maple_node *node)
+{
+	return ((unsigned long)node->parent & MA_ROOT_PARENT);
+}
+static inline struct maple_node *ma_next_alloc(struct ma_state *ms)
+{
+ int cnt;
+ struct maple_node *mn, *smn;
+
+ if (!ms->alloc)
+ return NULL;
+
+ cnt = ma_get_alloc_cnt(ms);
+ mn = ma_get_alloc(ms);
+ cnt--;
+ if (cnt == 0) {
+ ms->alloc = NULL;
+ } else {
+ smn = (struct maple_node *)mn->slot[cnt - 1];
+ mn->slot[cnt - 1] = NULL;
+ mn = smn;
+ }
+
+ return mn;
+}
+static void ma_new_node(struct ma_state *ms, gfp_t gfp)
+{
+ struct maple_node *mn, *smn;
+ int req = ma_get_alloc_req(ms);
+ int allocated = ma_get_alloc_cnt(ms);
+ int slot;
+
+ if (!req)
+ return;
+
+ mn = ma_get_alloc(ms);
+ if (!mn) {
+ mn = mt_alloc_one(gfp);
+ if (!mn)
+ goto list_failed;
+ printk("Node = %p\n", mn);
+ req--;
+ allocated++;
+ }
+
+ slot = allocated - 1;
+ while (req > 0) {
+ smn = mt_alloc_one(gfp);
+ if (!smn)
+ goto slot_failed;
+ smn->parent = NULL;
+ mn->slot[slot++] = smn;
+ req--;
+ allocated++;
+ }
+
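+	/*
+	 * Two failure points above: if the first allocation fails we land
+	 * on list_failed with nothing to store; if a later slot allocation
+	 * fails we land on slot_failed and keep the partial allocation for
+	 * mas_nomem() to retry or free.  Falling through with req == 0 is
+	 * the success path.
+	 */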
+slot_failed:
+ ms->alloc = mn;
+ ma_set_alloc_req(ms, req);
+
+list_failed:
+ if (req > 0)
+ mas_set_err(ms, -ENOMEM);
+}
+
+/* Private
+ * Check whether the operation failed for lack of memory.  If so, allocate
+ * the requested nodes (dropping the lock when blocking is allowed) so the
+ * operation can be retried; if not, free any nodes left over from
+ * pre-allocation.
+ */
+bool mas_nomem(struct ma_state *ms, gfp_t gfp)
+	__must_hold(ms->tree->ma_lock)
+{
+ if (ms->node != MA_ERROR(-ENOMEM)) {
+ struct maple_node *node = ma_get_alloc(ms);
+
+ if (node) {
+ kfree(node->slot[1]);
+ kfree(node->slot[0]);
+ kfree(node);
+ }
+ ms->alloc = NULL;
+ return false;
+ }
+
+ if (gfpflags_allow_blocking(gfp)) {
+ mtree_unlock(ms->tree);
+ ma_new_node(ms, gfp);
+ mtree_lock(ms->tree);
+ } else {
+ ma_new_node(ms, gfp);
+ }
+ if (!ma_get_alloc(ms))
+ return false;
+ ms->node = MAS_START;
+ return true;
+}
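+
+/*
+ * Typical use (this mirrors mtree_insert_range() below):
+ *
+ *	mtree_lock(ms.tree);
+ * retry:
+ *	ma_insert(&ms, entry);
+ *	if (mas_nomem(&ms, gfp))
+ *		goto retry;
+ *	mtree_unlock(ms.tree);
+ */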
+static struct maple_node *mas_node_cnt(struct ma_state *ms, int count)
+{
+ int allocated = ma_get_alloc_cnt(ms);
+
+ BUG_ON(count > 3);
+
+ if (allocated < count) {
+ ma_set_alloc_req(ms, count - allocated);
+ ma_new_node(ms, GFP_NOWAIT | __GFP_NOWARN);
+ }
+	return ma_get_alloc(ms);
+}
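+
+/*
+ * Sketch of the pre-allocation pattern used by the insert path: reserve
+ * nodes first, check for -ENOMEM, then consume them one at a time:
+ *
+ *	mas_node_cnt(ms, 1);		// request one node
+ *	if (mas_is_err(ms))
+ *		return;			// caller retries via mas_nomem()
+ *	mn = ma_next_alloc(ms);		// take a pre-allocated node
+ */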
+
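+/*
+ * mas_start() - Set up the walk and return the root entry.
+ *
+ * Returns either a node-tagged pointer (xa_is_node() is true, keep
+ * walking) or the single root entry itself.  Returns NULL on a recorded
+ * error, or when looking up a non-zero index in a tree whose root is not
+ * a node.
+ */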
+static void *mas_start(struct ma_state *ms)
+{
+ void *entry;
+
+ if (mas_is_err(ms))
+ return NULL;
+
+ if (ms->node == MAS_START) {
+ entry = ms->tree->ma_root;
+ if (!xa_is_node(entry)) {
+ if (ms->index > 0)
+ return NULL;
+ ms->node = MAS_ROOT;
+ }
+ } else {
+ entry = mt_mk_node(ms->node, maple_range_64);
+ }
+
+ return entry;
+}
+
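+/*
+ * ma_data_end_r64() - Find the index of the last occupied slot.
+ *
+ * E.g. a leaf with entries in slot[0] and slot[1] where pivot[1] ==
+ * ms->max reports 1.  Note the convention that a pivot of 0 marks the
+ * end of data: an entry legitimately covering only index 0 cannot be
+ * distinguished from an empty slot by this helper alone.
+ */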
+static short ma_data_end_r64(const struct ma_state *ms)
+{
+ struct maple_range_64 *mr64 = &ms->node->mr64;
+ short data_end = 0;
+
+ for (data_end = 0; data_end < MAPLE_RANGE64_SLOTS - 1; data_end++) {
+ if (mr64->pivot[data_end] == 0 ||
+ mr64->pivot[data_end] == ms->max)
+ return data_end;
+ }
+
+	/*
+	 * All pivots are in use; the right-most slot has no pivot of its
+	 * own, so probe it directly (slot[data_end + 1] would read past
+	 * the end of the node).
+	 */
+	if (mr64->slot[data_end] == NULL)
+		data_end--;
+
+ return data_end;
+}
+static void ma_append(struct ma_state *ms, void *entry)
+{
+ short idx = ma_data_end_r64(ms) + 2;
+ struct maple_range_64 *mr64 = &ms->node->mr64;
+
+	/* Reserve an extra slot if a NULL gap must precede the new entry */
+	if ((ms->index - 1 != mr64->pivot[idx - 1]) ||
+	    (ms->index != ms->last))
+		idx++;
+
+	/*
+	 * TODO: node splitting is not implemented yet; bail out instead of
+	 * writing past the end of the node.
+	 */
+	if (idx >= MAPLE_RANGE64_SLOTS - 1) {
+		pr_warn("%s: node needs to be split\n", __func__);
+		return;
+	}
+
+ /* zero the new end */
+ mr64->pivot[idx] = 0;
+ RCU_INIT_POINTER(mr64->slot[idx], NULL);
+ idx--;
+ /* Set the entry value */
+ RCU_INIT_POINTER(mr64->slot[idx], entry);
+ mr64->pivot[idx] = ms->last;
+ idx--;
+
+ /* Create a NULL gap, if necessary */
+	if (idx > 0 && ((ms->index - 1 != mr64->pivot[idx - 1]) ||
+			(ms->index != ms->last))) {
+ RCU_INIT_POINTER(mr64->slot[idx], NULL);
+ mr64->pivot[idx] = ms->index - 1;
+ }
+}
+static void ma_root_expand(struct ma_state *ms, void *entry)
+{
+ void *r_entry = ms->tree->ma_root; // root entry
+ struct maple_node *mn;
+
+ mas_node_cnt(ms, 1);
+ if (mas_is_err(ms))
+ return;
+
+ mn = ma_next_alloc(ms);
+ ms->node = mn;
+	mn->parent = (struct maple_node *)
+		((unsigned long)ms->tree | MA_ROOT_PARENT);
+
+	/*
+	 * Insert the existing entry into the new node.
+	 * rcu_assign_pointer() is not necessary as readers cannot access
+	 * this node yet.
+	 */
+ if (r_entry != NULL) {
+ RCU_INIT_POINTER(mn->mr64.slot[0], r_entry);
+ mn->mr64.pivot[0] = 0;
+ }
+ ma_append(ms, entry);
+
+	/* Swap the new root into the tree */
+ rcu_assign_pointer(ms->tree->ma_root, mt_mk_node(mn, maple_leaf_64));
+}
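+
+/*
+ * Worked example (assuming freshly allocated nodes are zeroed): expanding
+ * a root that held a single entry at index 0 while inserting a new entry
+ * at index == last == 3 yields a leaf with:
+ *
+ *	slot[0] = old root entry,	pivot[0] = 0
+ *	slot[1] = NULL (gap),		pivot[1] = 2
+ *	slot[2] = new entry,		pivot[2] = 3
+ *	slot[3] = NULL,			pivot[3] = 0	(end of data)
+ *
+ * which is what the 0/3/2 checks in the test code below rely on.
+ */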
+
+static void ms_update_limits(struct ma_state *ms, unsigned char slot)
+{
+	struct maple_range_64 *mr64;
+
+	if (slot >= MAPLE_NODE_SLOTS)
+ return;
+
+ if (mas_is_start(ms))
+ return;
+
+ mr64 = &ms->node->mr64;
+
+ if (slot > 0)
+ ms->min = mr64->pivot[slot - 1] + 1;
+
+ if (slot < MAPLE_RANGE64_SLOTS - 1)
+ ms->max = mr64->pivot[slot];
+}
+static unsigned char mas_walk_r64(struct ma_state *ms, unsigned long val)
+{
+ struct maple_range_64 *mr64 = &ms->node->mr64;
+	int i = 0;
+
+	/*
+	 * Stop at the first pivot that covers val; walking off the end of
+	 * the pivot array selects the right-most child.
+	 */
+	while (i < MAPLE_RANGE64_SLOTS - 1) {
+		if (val <= mr64->pivot[i])
+			break;
+		i++;
+	}
+
+ ms_update_limits(ms, i);
+ ms->node = rcu_dereference(mr64->slot[i]);
+ return i;
+}
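+
+/*
+ * Example (sketch): walking val == 15 through a node with pivots
+ * {10, 20, ...} stops at i == 1 and descends into slot[1], narrowing
+ * ms->min/ms->max to 11 and 20 via ms_update_limits().  A val above
+ * every pivot falls through to the right-most slot.
+ */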
+void *ma_insert(struct ma_state *ms, void *entry)
+{
+	void *e_entry = mas_start(ms);	// Existing entry.
+
+ if (!xa_is_node(e_entry)) {
+ if (ms->last == 0) {
+ if (e_entry != NULL)
+ goto exists;
+ rcu_assign_pointer(ms->tree->ma_root, entry);
+ return NULL;
+ }
+ ma_root_expand(ms, entry);
+ return NULL;
+ }
+
+	/* Walk down to the correct leaf node */
+	do {
+		ms->node = mt_to_node(e_entry);
+		mas_walk_r64(ms, ms->index);
+		e_entry = ms->node;
+	} while (xa_is_node(e_entry));
+
+ if (e_entry)
+ goto exists;
+
+	/* Do the insert.  TODO: leaf insertion is not implemented yet. */
+	pr_info("Inserting %lu\n", ms->index);
+
+ return NULL;
+exists:
+ mas_set_err(ms, -EEXIST);
+ return NULL;
+}
+
+/*
+ * Private
+ *
+ * Must be called under rcu_read_lock() or with the tree locked.
+ *
+ * Find where ms->index is located and return the entry.
+ * ms->node will point to the node containing the entry.
+ */
+void *mas_walk(struct ma_state *ms)
{
+ void *entry = mas_start(ms);
+
+	/* Outside this node's range, the entry doesn't exist. */
+	if (ms->min > ms->index ||
+	    ms->max < ms->index)
+ return NULL; // FIXME: Retry?
+
+ while (xa_is_node(entry)) {
+ ms->node = mt_to_node(entry);
+ mas_walk_r64(ms, ms->index);
+ entry = ms->node;
+ }
+ return entry;
}
+/* Interface */
void __init maple_tree_init(void)
{
maple_node_cache = kmem_cache_create("maple node",
sizeof(struct maple_node), sizeof(struct maple_node),
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, NULL);
}
+void mtree_init(struct maple_tree *mt)
+{
+ spin_lock_init(&mt->ma_lock);
+ mt->ma_flags = 0;
+ mt->ma_root = NULL;
+}
+EXPORT_SYMBOL(mtree_init);
+
+void *mtree_load(struct maple_tree *mt, unsigned long index)
+{
+ void *entry;
+	MA_STATE(ms, mt, index, index);
+
+	rcu_read_lock();
+ entry = mas_walk(&ms);
+ rcu_read_unlock();
+ return entry;
+}
+EXPORT_SYMBOL(mtree_load);
+
+int mtree_insert_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ int ret = 0;
+
+ MA_STATE(ms, mt, first, last);
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return -EINVAL;
+
+ if (first > last)
+ return -EINVAL;
+
+ mtree_lock(ms.tree);
+retry:
+ ma_insert(&ms, entry);
+ if (mas_nomem(&ms, gfp))
+ goto retry;
+
+ mtree_unlock(ms.tree);
+ if (mas_is_err(&ms))
+ return xa_err(ms.node);
+
+ return ret;
+}
+EXPORT_SYMBOL(mtree_insert_range);
+
+int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
+ gfp_t gfp)
+{
+ return mtree_insert_range(mt, index, index, entry, gfp);
+}
+EXPORT_SYMBOL(mtree_insert);
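+
+/*
+ * Example use of the interface so far (a sketch; my_ptr is an arbitrary
+ * non-internal pointer):
+ *
+ *	struct maple_tree tree;
+ *	void *entry;
+ *
+ *	mtree_init(&tree);
+ *	if (!mtree_insert(&tree, 5, my_ptr, GFP_KERNEL))
+ *		entry = mtree_load(&tree, 5);	// entry == my_ptr
+ */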
+int mtree_erase(struct maple_tree *mt, unsigned long index)
+{
+	/* TODO: erase is not implemented yet. */
+	return -EINVAL;
+}
+
+void mtree_destroy(struct maple_tree *mt)
+{
+	/* TODO: walk the tree and free the nodes; nothing is freed yet. */
+}
+
#ifdef MT_DEBUG
void mt_dump_range(unsigned long min, unsigned long max)
last = node->pivot[i];
if (last == 0 && i > 0)
break;
-
if (leaf)
mt_dump_entry(node->slot[i], first, last);
else
default:
pr_cont(" UNKNOWN TYPE\n");
}
+ pr_info("dumped node %p\n", entry);
}
unsigned long mt_max[] = {
{
unsigned long set[] = {15, 14, 17, 25, 1000,
1001, 1002, 1003, 1005, 0,
- 3};
+ 3, 2};
unsigned long r[] = {10, 15, 20, 25, 22}; // For range testing
void *ptr = &set;
mtree_init(&tree);
- check_new_node(&tree);
- check_double_insert_leaf(&tree);
+ //check_new_node(&tree);
+ //check_double_insert_leaf(&tree);
check_load(&tree, set[0], NULL); // See if 15 -> NULL
check_insert(&tree, set[9], &tree); // Insert 0
check_insert(&tree, set[10], ptr); // Insert 3
check_load(&tree, set[9], &tree); // See if 0 -> &tree
+ check_load(&tree, set[11], NULL); // See if 2 -> NULL
+
check_load(&tree, set[10], ptr); // See if 3 -> ptr
/* Clear out the tree */
* a second value, then loads the value again
*/
check_load(&tree, set[1], NULL); // See if 14 -> NULL
+ printk("%s: %d\n", __func__, __LINE__);
+ mt_dump(&tree);
check_insert(&tree, set[1], ptr); // insert 14 -> ptr
check_load(&tree, set[1], ptr); // See if 14 -> ptr
+ printk("%s: %d\n", __func__, __LINE__);
check_load(&tree, set[0], &tree); // See if 15 -> &tree
+ printk("%s: %d\n", __func__, __LINE__);
/* Tree currently contains:
* p[0]: 14 -> (nil) p[1]: 15 -> ptr p[2]: 16 -> &tree p[3]: 0 -> (nil)