};
// Functions
-static struct maple_node *mt_alloc_one(gfp_t gfp)
+static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO);
}
+static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
+{
+ return kmem_cache_alloc_bulk(maple_node_cache, gfp | __GFP_ZERO, size,
+ nodes);
+}
+
+static inline void mt_free_bulk(size_t size, void **nodes)
+{
+ kmem_cache_free_bulk(maple_node_cache, size, nodes);
+}
+
static void mt_free_rcu(struct rcu_head *head)
{
struct maple_node *node = container_of(head, struct maple_node, rcu);
}
/*
- * mas_get_alloc() - Get the first allocated node.
- * @mas: The maple state.
- *
- * The first allocated node may be used for accounting of many other nodes.
- * Please see mas_alloc_count() and mas_next_alloc() for complete use.
- *
- * Returns: The first allocated node.
- *
- */
-static inline struct maple_node *mas_get_alloc(const struct ma_state *mas)
-{
- return (struct maple_node *)
- ((unsigned long)mas->alloc & ~MAPLE_NODE_MASK);
-}
-
-/*
- * ma_node_alloc_count() - Get the number of allocations stored in this node.
- * @node: The maple node
- *
- * Used to calculate the total allocated nodes in a maple state. See
- * mas_alloc_count().
- *
- * Returns: The count of the allocated nodes stored in this nodes slots.
- *
- */
-static inline int ma_node_alloc_count(const struct maple_node *node)
-{
- int slot = 0;
-
- while (slot < MAPLE_NODE_SLOTS) {
- if (!node->slot[slot])
- break;
- slot++;
- }
- return slot;
-}
-
-/*
- * mas_alloc_count() - Get the number of nodes allocated in a maple state.
+ * mas_allocated() - Get the number of nodes allocated in a maple state.
* @mas: The maple state
*
* Walks through the allocated nodes and returns the number allocated.
* Returns: The total number of nodes allocated
*
*/
-static inline int mas_alloc_count(const struct ma_state *mas)
+static inline unsigned mas_allocated(const struct ma_state *mas)
{
- struct maple_node *node = mas_get_alloc(mas);
- int ret = 1;
- int slot = 0;
-
- if (!node)
+ if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
return 0;
- slot = ma_node_alloc_count(node);
- ret += slot;
- while (--slot >= 0) {
- if (ma_mnode_ptr(node->slot[slot])->slot[0])
- ret += ma_node_alloc_count(node->slot[slot]);
- }
- return ret;
+ return mas->alloc->total;
}
/*
* mas_set_alloc_req() - Set the requested number of allocations.
- * @mas - the maple state
- * @count - the number of allocations.
+ * @mas: the maple state
+ * @count: the number of allocations.
*/
-static inline void mas_set_alloc_req(struct ma_state *mas, int count)
+static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
- mas->alloc = (struct maple_node *)
- (((unsigned long)mas->alloc & ~MAPLE_NODE_MASK) | count);
+ if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
+ if (!count)
+ mas->alloc = NULL;
+ else
+ mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
+ return;
+ }
+
+ mas->alloc->request_count = count;
}
/*
* mas_alloc_req() - get the requested number of allocations.
+ * @mas: The maple state.
+ *
* Returns: The allocation request count.
*/
-static inline int mas_alloc_req(const struct ma_state *mas)
+static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
- return (int)(((unsigned long)mas->alloc & MAPLE_NODE_MASK));
+ if ((unsigned long)mas->alloc & 0x1)
+ return (unsigned long)(mas->alloc) >> 1;
+ else if (mas->alloc)
+ return mas->alloc->request_count;
+ return 0;
}
/*
*
* Returns: The offset into the @mas node of interest.
*/
-static inline int mas_offset(const struct ma_state *mas)
+static inline unsigned char mas_offset(const struct ma_state *mas)
{
- return mas_alloc_req(mas);
+ return mas->offset;
}
/*
* @offset: The offset
*
*/
-static inline void mas_set_offset(struct ma_state *mas, int offset)
+static inline void mas_set_offset(struct ma_state *mas, unsigned char offset)
{
- mas_set_alloc_req(mas, offset);
+ mas->offset = offset;
}
/*
return pivots[piv - 1] + 1;
}
+
// Check what the maximum value the pivot could represent.
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
dst->node = src->node;
dst->max = src->max;
dst->min = src->min;
- mas_set_offset(dst, mas_offset(src));
+ dst->offset = src->offset;
}
/*
mas->node = p_enode;
}
-static inline struct maple_node *mas_next_alloc(struct ma_state *ms)
+static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
- int count;
- struct maple_node *mn, *smn;
+ struct maple_alloc *ret, *node = mas->alloc;
+ unsigned total = mas_allocated(mas);
+
- if (!ms->alloc)
+ if (!total) // nothing or a request pending.
return NULL;
- count = mas_alloc_count(ms);
- mn = mas_get_alloc(ms);
- if (count == 1) {
- ms->alloc = NULL;
- } else if (count <= MAPLE_NODE_SLOTS + 1) {
- count -= 2;
- smn = mn->slot[count];
- mn->slot[count] = NULL;
- mn = smn;
- } else if (count > MAPLE_NODE_SLOTS + 1) {
- count -= 2;
- smn = mn->slot[(count / MAPLE_NODE_SLOTS) - 1];
- mn = smn->slot[(count % MAPLE_NODE_SLOTS)];
- smn->slot[count % MAPLE_NODE_SLOTS] = NULL;
+ if (total == 1) { // single allocation in this ma_state
+ mas->alloc = NULL;
+ ret = node;
+ goto single_node;
}
- return mn;
-}
+ if (!node->node_count) { // Single allocation in this node.
+ BUG_ON(!node->slot[0]);
+ mas->alloc = node->slot[0];
+ mas->alloc->total = node->total - 1;
+ ret = node;
+ goto new_head;
+ }
+ node->total--;
+ ret = node->slot[node->node_count];
+ node->slot[node->node_count--] = NULL;
+
+single_node:
+new_head:
+ ret->total = 0;
+ ret->node_count = 0;
+ if (ret->request_count)
+ mas_set_alloc_req(mas, ret->request_count + 1);
+ ret->request_count = 0;
+ return (struct maple_node *)ret;
+}
static inline void mas_push_node(struct ma_state *mas, struct maple_enode *used)
{
- struct maple_node *reuse = mte_to_node(used);
- struct maple_node *node = mas_get_alloc(mas);
- int count;
+ struct maple_alloc *reuse = (struct maple_alloc *)mte_to_node(used);
+ struct maple_alloc *head = mas->alloc;
+ unsigned count;
+ unsigned int requested = mas_alloc_req(mas);
memset(reuse, 0, sizeof(*reuse));
- count = mas_alloc_count(mas);
- if (count == 0) {
- mas->alloc = reuse;
- } else if (count <= MAPLE_NODE_SLOTS) {
- count--;
- node->slot[count] = reuse;
- } else {
- struct maple_node *smn;
+ count = mas_allocated(mas);
- count--;
- smn = node->slot[(count/MAPLE_NODE_SLOTS) - 1];
- smn->slot[count % MAPLE_NODE_SLOTS] = reuse;
- }
- count = mas_alloc_count(mas);
+ if (count && (head->node_count < MAPLE_NODE_SLOTS)) {
+ if (head->slot[0])
+ head->node_count++;
+ head->slot[head->node_count] = reuse;
- BUG_ON(!mas_alloc_count(mas));
-}
+ head->total++;
+ goto done;
+ }
-static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
-{
- if (mt_in_rcu(mas->tree))
- mte_free(used);
- else {
- mas_push_node(mas, used);
+ reuse->total = 1;
+ if ((head) && !(((unsigned long)head & 0x1))) {
+ reuse->slot[0] = head;
+ reuse->total += head->total;
}
+
+ reuse->node_count = 0;
+ mas->alloc = reuse;
+done:
+ if (requested)
+ mas_set_alloc_req(mas, requested - 1);
}
-static inline void mas_node_node(struct ma_state *ms, gfp_t gfp)
+static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
- struct maple_node *mn, *smn;
- int req = mas_alloc_req(ms);
- int allocated = mas_alloc_count(ms);
- int slot;
+ struct maple_alloc *node;
+ struct maple_alloc **nodep = &mas->alloc;
+ unsigned int allocated = mas_allocated(mas);
+ unsigned int requested = mas_alloc_req(mas);
+ unsigned int count, success = 0;
- if (!req)
+ if (!requested)
return;
- mn = mas_get_alloc(ms);
- if (!mn) {
- mn = mt_alloc_one(gfp);
- if (!mn)
- goto list_failed;
- req--;
- allocated++;
- }
-
- ms->alloc = mn;
- slot = (allocated - 1);
- if (allocated - 1 >= MAPLE_NODE_SLOTS) {
- slot /= MAPLE_NODE_SLOTS;
- mn = mn->slot[slot - 1];
- }
-
- while (req > 0) {
- smn = mt_alloc_one(gfp);
- if (!smn)
- goto slot_failed;
- smn->parent = NULL;
- mn->slot[slot] = smn;
- req--;
- allocated++;
- slot++;
- if (slot >= MAPLE_NODE_SLOTS) {
- slot = (allocated - 1) / MAPLE_NODE_SLOTS;
- mn = ms->alloc->slot[slot - 1];
- slot = 0;
+ mas_set_alloc_req(mas, 0);
+ if (!mas_allocated(mas) ||
+ mas->alloc->node_count == MAPLE_NODE_SLOTS - 1) {
+ node = (struct maple_alloc *)mt_alloc_one(gfp);
+ if (!node)
+ goto nomem;
+
+ if (mas_allocated(mas)) {
+ node->total = mas->alloc->total;
+ node->slot[0] = mas->alloc;
}
+ node->total++;
+ mas->alloc = node;
+ requested--;
}
-slot_failed:
- mas_set_alloc_req(ms, req);
+ node = mas->alloc;
+ success = allocated = node->total;
+ while (requested) {
+ void **slots = (void**)&node->slot;
+ unsigned int max = MAPLE_NODE_SLOTS - 1;
+
+ if (node->slot[0]) {
+ slots = (void**)&node->slot[node->node_count + 1];
+ max -= node->node_count;
+ }
+
+ count = min(requested, max);
+ count = mt_alloc_bulk(gfp, count, slots);
+ if (!count)
+ goto nomem;
+
+ if (slots == (void**)&node->slot)
+ node->node_count = count - 1; // zero indexed.
+ else
+ node->node_count += count;
+
+ success += count;
+ nodep = &node->slot[0];
+ node = *nodep;
+ // decrement.
+ requested -= count;
+ }
+ mas->alloc->total = success;
+ return;
+nomem:
+ mas_set_alloc_req(mas, requested);
+ if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
+ mas->alloc->total = success;
+ mas_set_err(mas, -ENOMEM);
+ return;
-list_failed:
- if (req > 0)
- mas_set_err(ms, -ENOMEM);
+}
+
+static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
+{
+ if (mt_in_rcu(mas->tree))
+ mte_free(used);
+ else {
+ mas_push_node(mas, used);
+ }
}
// Free the allocations.
void mas_empty_alloc(struct ma_state *mas)
{
- struct maple_node *node;
+ struct maple_alloc *node;
- while (mas_get_alloc(mas)) {
- node = mas_next_alloc(mas);
+ while (mas->alloc && !(((unsigned long)mas->alloc & 0x1))) {
+ node = mas->alloc;
+ mas->alloc = mas->alloc->slot[0];
+ if (node->node_count > 0)
+ mt_free_bulk(node->node_count, (void**)&node->slot[1]);
kmem_cache_free(maple_node_cache, node);
}
+ mas->alloc = NULL;
}
/*
if (gfpflags_allow_blocking(gfp)) {
mtree_unlock(mas->tree);
- mas_node_node(mas, gfp);
+ mas_alloc_nodes(mas, gfp);
mtree_lock(mas->tree);
} else {
- mas_node_node(mas, gfp);
+ mas_alloc_nodes(mas, gfp);
}
- if (!mas_get_alloc(mas))
+
+ if (!mas_allocated(mas))
return false;
+
mas->node = MAS_START;
return true;
}
-static inline struct maple_node *mas_node_count(struct ma_state *mas, int count)
+static void mas_node_count(struct ma_state *mas, int count)
{
- int allocated = mas_alloc_count(mas);
+ unsigned allocated = mas_allocated(mas);
- //BUG_ON(count > 127);
if (allocated < count) {
mas_set_alloc_req(mas, count - allocated);
- mas_node_node(mas, GFP_NOWAIT | __GFP_NOWARN);
+ mas_alloc_nodes(mas, GFP_NOWAIT | __GFP_NOWARN);
}
- return mas->alloc;
}
int mas_entry_count(struct ma_state *mas, unsigned long nr_entries)
struct maple_enode *entry;
void **slots = ma_slots(mte_to_node(mas->node), mt);
- for (offset= mas_offset(mas); offset < mt_slots[mt]; offset++) {
+ for (offset = mas_offset(mas); offset < mt_slots[mt]; offset++) {
entry = slots[offset];
if (!entry) // end of node data.
break;
static inline struct maple_enode
*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
{
- return mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), b_node->type);
+ return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
}
/*
if (!count)
count++;
}
- l_mas.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
+ l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
mte_node_type(mast->orig_l->node));
mast->orig_l->depth++;
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas);
if (mas_is_err(mas))
return 0;
- new_node = mt_mk_node(mas_next_alloc(mas), mte_node_type(mas->node));
+ new_node = mt_mk_node(mas_pop_node(mas), mte_node_type(mas->node));
mte_to_node(new_node)->parent = mas_mn(mas)->parent;
mas->node = new_node;
if (mas_is_err(mas))
return 0;
- node = mas_next_alloc(mas);
+ node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
mas->node = mt_mk_node(node, type);
if (mas_is_err(mas))
return false;
- newnode = mas_next_alloc(mas);
+ newnode = mas_pop_node(mas);
} else {
memset(&reuse, 0, sizeof(struct maple_node));
newnode = &reuse;
enum maple_type type;
end = mas_dead_leaves(&mas, slots);
- kmem_cache_free_bulk(maple_node_cache, end, slots);
+ mt_free_bulk(end, slots);
if (mas.node == start)
break;
}
free_leaf:
- kmem_cache_free(maple_node_cache, node);
+ ma_free_rcu(node);
}
void mte_destroy_walk(struct maple_enode *enode, struct maple_tree *mt)
static noinline void check_new_node(struct maple_tree *mt)
{
- struct maple_node *mn, *smn;
+ struct maple_node *mn;
+ struct maple_alloc *smn;
int i, j, total, full_slots, count = 0;
MA_STATE(mas, mt, 0, 0);
MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_alloc_count(&mas) != 3);
- mn = mas_get_alloc(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 3);
+ mn = mas_pop_node(&mas);
MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, mn->slot[0] == NULL);
- MT_BUG_ON(mt, mn->slot[1] == NULL);
+ MT_BUG_ON(mt, mas.alloc == NULL);
+ MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
+ mas_push_node(&mas, mn);
mas_nomem(&mas, GFP_KERNEL); // free;
mtree_unlock(mt);
// Validate allocation request.
MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
// Eat the requested node.
- mn = mas_next_alloc(&mas);
+ mn = mas_pop_node(&mas);
MT_BUG_ON(mt, mn == NULL);
MT_BUG_ON(mt, mn->slot[0] != NULL);
MT_BUG_ON(mt, mn->slot[1] != NULL);
- MT_BUG_ON(mt, mas_alloc_count(&mas) != 0);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
ma_free_rcu(mn);
mas.node = MAS_START;
// Drop the lock and allocate 3 nodes.
mas_nomem(&mas, GFP_KERNEL);
// Ensure 3 are allocated.
- MT_BUG_ON(mt, mas_alloc_count(&mas) != 3);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 3);
// Allocation request of 0.
MT_BUG_ON(mt, mas_alloc_req(&mas) != 0);
- mn = mas.alloc;
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, mn->slot[0] == NULL);
- MT_BUG_ON(mt, mn->slot[1] == NULL);
+ MT_BUG_ON(mt, mas.alloc == NULL);
+ MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
+ MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
// Ensure we counted 3.
- MT_BUG_ON(mt, mas_alloc_count(&mas) != 3);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 3);
// Free.
mas_nomem(&mas, GFP_KERNEL);
// Set allocation request to 127.
total = 127;
- full_slots = (total - MAPLE_NODE_SLOTS) / MAPLE_NODE_SLOTS;
mas_node_count(&mas, total);
// Drop the lock and allocate 127 nodes.
mas_nomem(&mas, GFP_KERNEL);
- mn = mas_get_alloc(&mas);
- MT_BUG_ON(mt, mn == NULL);
- count++;
- for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
- j = 0;
- smn = mn->slot[i];
- MT_BUG_ON(mt, smn == NULL);
- count++;
- while ((i < full_slots) && (j < MAPLE_NODE_SLOTS)) {
- MT_BUG_ON(mt, smn->slot[j] == NULL);
- count++;
- j++;
+ MT_BUG_ON(mt, !mas.alloc);
+ i = 1;
+ smn = mas.alloc;
+ while (i < total) {
+ for (j = 0; j < MAPLE_NODE_SLOTS - 1; j++) {
+ i++;
+ MT_BUG_ON(mt, !smn->slot[j]);
+ if (i == total)
+ break;
}
+ smn = smn->slot[0]; // next.
}
- MT_BUG_ON(mt, mas_alloc_count(&mas) != 127);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 127);
mas_nomem(&mas, GFP_KERNEL); // Free.
- MT_BUG_ON(mt, mas_alloc_count(&mas) != 0);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
for (i = 1; i < 128; i++) {
mas_node_count(&mas, i); // Request
mas_nomem(&mas, GFP_KERNEL); // Fill request
- MT_BUG_ON(mt, mas_alloc_count(&mas) != i); // check request filled
+ MT_BUG_ON(mt, mas_allocated(&mas) != i); // check request filled
for (j = i; j > 0; j--) { //Free the requests
- mn = mas_next_alloc(&mas); // get the next node.
+ mn = mas_pop_node(&mas); // get the next node.
MT_BUG_ON(mt, mn == NULL);
ma_free_rcu(mn);
}
- MT_BUG_ON(mt, mas_alloc_count(&mas) != 0);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
}
for (i = 1; i < MAPLE_NODE_MASK + 1; i++) {
MA_STATE(mas2, mt, 0, 0);
mas_node_count(&mas, i); // Request
mas_nomem(&mas, GFP_KERNEL); // Fill request
- MT_BUG_ON(mt, mas_alloc_count(&mas) != i); // check request filled
+ MT_BUG_ON(mt, mas_allocated(&mas) != i); // check request filled
for (j = 1; j <= i; j++) { // Move the allocations to mas2
- mn = mas_next_alloc(&mas); // get the next node.
+ mn = mas_pop_node(&mas); // get the next node.
MT_BUG_ON(mt, mn == NULL);
mas_push_node(&mas2, (struct maple_enode *)mn);
- MT_BUG_ON(mt, mas_alloc_count(&mas2) != j);
+ MT_BUG_ON(mt, mas_allocated(&mas2) != j);
}
- MT_BUG_ON(mt, mas_alloc_count(&mas) != 0);
- MT_BUG_ON(mt, mas_alloc_count(&mas2) != i);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ MT_BUG_ON(mt, mas_allocated(&mas2) != i);
for (j = i; j > 0; j--) { //Free the requests
- MT_BUG_ON(mt, mas_alloc_count(&mas2) != j);
- mn = mas_next_alloc(&mas2); // get the next node.
+ MT_BUG_ON(mt, mas_allocated(&mas2) != j);
+ mn = mas_pop_node(&mas2); // get the next node.
MT_BUG_ON(mt, mn == NULL);
ma_free_rcu(mn);
}
- MT_BUG_ON(mt, mas_alloc_count(&mas2) != 0);
+ MT_BUG_ON(mt, mas_allocated(&mas2) != 0);
}
mtree_unlock(mt);