/*
* mte_dead_node() - check if the @enode is dead.
+ * @enode: The encoded maple node
+ *
* Return: true if dead, false otherwise.
*/
static inline bool mte_dead_node(const struct maple_enode *enode)
* of @mas->alloc->total nodes allocated.
*
* Return: The total number of nodes allocated
- *
*/
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
/*
* ma_set_slot() - Set a node's rcu slot.
- *
* @mn - the maple node for the operation
* @slot - the slot number to set
* @type - the maple node type
}
/*
- *
* mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
* @mas: The starting maple state
* @mast: The maple_subtree_state, keeps track of 4 maple states.
/*
* mas_rebalance() - Rebalance a given node.
- *
* @mas: The maple state
* @b_node: The big maple node.
*
}
/*
- *
* __mas_walk() - Locates a value and sets the mas->node and slot accordingly.
* range_min and range_max are set to the range in which the entry is valid.
* @mas: The maple state
}
/*
- *
* mas_spanning_store() - Create a subtree with the store operation completed
* and new nodes where necessary, then place the sub-tree in the actual tree.
* Note that mas is expected to point to the node which caused the store to
return NULL;
}
-/*
- * mas_prev() - Get the previous entry
- * @mas: The maple state
- * @min: The minimum value to check.
- *
- * Must hold rcu_read_lock or the write lock.
- * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
- * searchable nodes. If mas->node is MAS_START, it will first look up the
- * index, then get the previous entry.
- *
- * Return: the previous value or %NULL.
- */
-void *mas_prev(struct ma_state *mas, unsigned long min)
-{
- void *entry;
-
- if (!mas->index) {// Nothing comes before 0.
- mas->last = 0;
- return NULL;
- }
-
- if (mas_is_none(mas))
- mas->node = MAS_START;
-
- if (!mas_searchable(mas))
- return NULL;
-
-
- if (mas_is_start(mas)) {
- mas_start(mas);
- mas_walk(mas);
- }
-
- do {
- entry = _mas_prev(mas, min);
- } while (!mas_is_none(mas) && !entry);
-
- return entry;
-}
-EXPORT_SYMBOL_GPL(mas_prev);
-
/*
* _mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
* highest gap address of a given size in a given node and descend.
return true;
}
-/*
- * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
- *
- * Some users need to pause a walk and drop the lock they're holding in
- * order to yield to a higher priority thread or carry out an operation
- * on an entry. Those users should call this function before they drop
- * the lock. It resets the @mas to be suitable for the next iteration
- * of the loop after the user has reacquired the lock. If most entries
- * found during a walk require you to call mas_pause(), the mt_for_each()
- * iterator may be more appropriate.
- *
- */
-void mas_pause(struct ma_state *mas)
-{
- // Overflow protection.
- if (mas->last == ULONG_MAX) {
- mas->node = MAS_NONE;
- return;
- }
-
- mas_reset(mas);
- mas->last++;
- mas->index = mas->last;
-}
-EXPORT_SYMBOL_GPL(mas_pause);
-
-
static inline bool mas_rewind_node(struct ma_state *mas)
{
unsigned char slot;
* range.
* @mas: The maple state
* @limit: The maximum value to check.
- *
- * Internal function.
*
* Return: The next entry or %NULL
*
return entry;
}
-/*
- * mas_find: If mas->node == MAS_START, find the first
- * non-NULL entry >= mas->index.
- * Otherwise, find the first non-NULL entry > mas->index
- * @mas: The maple state
- * @max: The maximum value to check.
- *
- * Must hold rcu_read_lock or the write lock.
- * If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
- *
- * Return: The entry or %NULL.
- */
-void *mas_find(struct ma_state *mas, unsigned long max)
-{
- void *entry = NULL;
- bool first = false;
-
- if (mas_is_start(mas))
- first = true;
-
-retry:
- while (mas_search_cont(mas, mas->index, max, entry)) {
- entry = _mas_next(mas, max);
-
- if (unlikely(mas_dead_node(mas, mas->index))) {
- if (first)
- mas->node = MAS_START;
-
- goto retry;
- }
- first = false;
- }
-
- return entry;
-}
-EXPORT_SYMBOL_GPL(mas_find);
-
/*
* _mt_find() - Search from start up until an entry is found.
* @mt: The maple tree
* @*index: Pointer which contains the start location of the search
* @max: The maximum value to check
* @start: If this is the first time being called or not.
- *
- * Internal function. Does not return the zero entry. Handles locking.
+ * Does not return the zero entry. Handles locking.
* Return: the entry or %NULL
*/
void *_mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max,
return entry;
}
-/*
- * mt_find() - Search from the start up until an entry is found.
- * @mt: The maple tree
- * @*index: Pointer which contains the start location of the search
- * @max: The maximum value to check
- *
- * Handles locking.
- *
- * Return: The entry at or after the @*index or %NULL
- */
-void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
-{
- return _mt_find(mt, index, max, true);
-}
-EXPORT_SYMBOL(mt_find);
-
-/*
- * mas_next() - Get the next entry.
- * @mas: The maple state
- * @max: The maximum index to check.
- *
- * Must hold rcu_read_lock or the write lock.
- * Can return the zero entry.
- *
- * Return: The next entry or %NULL
- */
-void *mas_next(struct ma_state *mas, unsigned long max)
-{
- if (mas_is_none(mas))
- mas->node = MAS_START;
-
- return _mas_next(mas, max);
-}
-EXPORT_SYMBOL_GPL(mas_next);
-
-/*
- * mas_erase() - Find the range in which index resides and erase the entire
- * range.
- * @mas: The maple state
- *
- * Must hold the write lock.
- * Searches for @mas->index, sets @mas->index and @mas->last to the range and
- * erases that range.
- *
- * Return: the entry that was erased, @mas->index and @mas->last are updated.
- */
-static inline void *mas_erase(struct ma_state *mas)
-{
- unsigned long r_max, r_min;
- void *entry = NULL;
-
- entry = mas_range_load(mas, &r_min, &r_max);
-retry:
- mas->node = MAS_START;
- mas->index = r_min;
- mas->last = r_max;
- _mas_store(mas, NULL, true);
- if (mas_nomem(mas, GFP_KERNEL))
- goto retry;
-
- return entry;
-}
-
/*
* mas_dead_leaves() - Mark all leaves of a node as dead.
* @mas: The maple state
* @*slots: Pointer to the slot array
- *
- * Internal function.
* Must hold the write lock.
*
* Return: The number of leaves marked as dead.
/* Interface */
-void __init maple_tree_init(void)
-{
- maple_node_cache = kmem_cache_create("maple_node",
- sizeof(struct maple_node), sizeof(struct maple_node),
- SLAB_PANIC, NULL);
-}
-
-/*
- * mtree_init() - Initialize a maple tree.
- * @mt: The maple tree
- * @ma_flags: The flags to use for the tree.
- */
-void mtree_init(struct maple_tree *mt, unsigned int ma_flags)
-{
- spin_lock_init(&mt->ma_lock);
- mt->ma_flags = ma_flags;
- rcu_assign_pointer(mt->ma_root, NULL);
-}
-EXPORT_SYMBOL(mtree_init);
-
/*
- * mtree_load() - Load a value stored in a maple tree
- * @mt: The maple tree
- * @index: The index to load
+ * mas_store() - Store an @entry.
+ * @mas: The maple state.
+ * @entry: The entry to store.
*
- * Return: the entry of %NULL
+ * The @mas->index and @mas->last are used to set the range for the @entry.
+ * Note: The @mas should have pre-allocated entries to ensure there is memory to
+ * store the entry. Please see mas_entry_count()/mas_destroy() for more details.
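+ *
+ * A minimal usage sketch (illustrative; assumes the caller holds the tree
+ * lock and has pre-allocated nodes, see mas_entry_count()):
+ *
+ *	MA_STATE(mas, &tree, 5, 10);
+ *
+ *	mas_store(&mas, entry);		// entry now spans indices 5 through 10
+ *	mas_destroy(&mas);		// free any unused pre-allocations
+ *
+ * Return: the entry that previously occupied the range, or %NULL.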
*/
-void *mtree_load(struct maple_tree *mt, unsigned long index)
+void *mas_store(struct ma_state *mas, void *entry)
{
- void *entry;
+	void *existing = NULL;
+
- MA_STATE(mas, mt, index, index);
- trace_mtree_load(&mas);
- rcu_read_lock();
- entry = mas_load(&mas);
- rcu_read_unlock();
- if (xa_is_zero(entry))
+ if (mas->index > mas->last) {
+ mas_set_err(mas, -EINVAL);
return NULL;
+ }
- return entry;
+ existing = _mas_store(mas, entry, true);
+ if (unlikely(mas_is_err(mas)))
+ return existing;
+
+ if (unlikely(!mte_is_leaf(mas->node))) // spanning store occurred
+ mas->node = MAS_START;
+
+ return existing;
}
-EXPORT_SYMBOL(mtree_load);
/*
- * mtree_store_range() - Store an entry at a given range.
- * @mt: The maple tree
- * @index: The start of the range
- * @last: The end of the range
+ * mas_store_gfp() - Store a value into the tree.
+ * @mas: The maple state
* @entry: The entry to store
- * @gfp: The GFP_FLAGS to use for allocations
+ * @gfp: The GFP_FLAGS to use for allocations if necessary.
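+ *
+ * A usage sketch (illustrative; mas_nomem() may drop and retake the tree
+ * lock internally while allocating):
+ *
+ *	MA_STATE(mas, &tree, index, last);
+ *
+ *	mas_lock(&mas);
+ *	ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
+ *	mas_unlock(&mas);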
*
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
* be allocated.
*/
-int mtree_store_range(struct maple_tree *mt, unsigned long index,
- unsigned long last, void *entry, gfp_t gfp)
+int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
- MA_STATE(mas, mt, index, last);
-
- trace_mtree_store_range(&mas, entry);
- if (WARN_ON_ONCE(xa_is_advanced(entry)))
- return -EINVAL;
- if (index > last)
- return -EINVAL;
+ // Detect spanning store.
+ if ((mas->last == mas->max && !entry) ||
+ (mas->last > mas->max))
+ mas->node = MAS_START;
- mas_lock(&mas);
retry:
- _mas_store(&mas, entry, true);
+ _mas_store(mas, entry, true);
+ if (unlikely(mas_nomem(mas, gfp)))
+ goto retry;
+
+ if (unlikely(mas_is_err(mas)))
+ return xa_err(mas->node);
+
+ if (unlikely(!mte_is_leaf(mas->node))) // spanning store occurred
+ mas->node = MAS_START;
+
+ return 0;
+}
+
+/*
+ * mas_entry_count() - Set the expected number of entries that will be inserted.
+ * @mas: The maple state
+ * @nr_entries: The number of expected entries.
+ *
+ * This will attempt to pre-allocate enough nodes to store the expected number
+ * of entries. The allocations will occur using the bulk allocator interface
+ * for speed. Please call mas_destroy() on the @mas after inserting the entries
+ * to ensure any unused nodes are freed.
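+ *
+ * A typical bulk-insert pattern (a sketch; index[] and ptr[] are placeholder
+ * arrays, assumed sorted in ascending order, with the write lock held):
+ *
+ *	MA_STATE(mas, &tree, 0, 0);
+ *
+ *	if (mas_entry_count(&mas, nr))
+ *		return -ENOMEM;
+ *	for (i = 0; i < nr; i++) {
+ *		mas.index = mas.last = index[i];
+ *		mas_store(&mas, ptr[i]);
+ *	}
+ *	mas_destroy(&mas);	// free any unused pre-allocated nodes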
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated.
+ */
+int mas_entry_count(struct ma_state *mas, unsigned long nr_entries)
+{
+ int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
+ struct maple_enode *enode = mas->node;
+ int nr_nodes;
+ int ret;
+
+ // Optimize splitting for bulk insert in-order.
+ mas->mas_flags |= MA_STATE_BULK;
+
+	// Avoid overflow: assume a gap between each entry and a trailing null.
+	// If this assumption is wrong, it just means allocation can happen
+	// during the insertion of entries.
+ nr_nodes = max(nr_entries, nr_entries * 2 + 1);
+
+ if (!mt_is_alloc(mas->tree))
+ nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
+
+ // Leaves
+ nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 1);
+ // Internal nodes.
+ nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
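+	// e.g. assuming the 64-bit defaults of 16 slots in a range64 node and
+	// 10 in an arange64 node: nr_entries = 100 gives nr_nodes = 201, so
+	// 14 leaves plus DIV_ROUND_UP(14, 8) = 2 internal nodes are requested.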
+ mas_node_count(mas, nr_nodes);
+
+ if (!mas_is_err(mas))
+ return 0;
+
+ ret = xa_err(mas->node);
+ mas->node = enode;
+ return ret;
+}
+
+/*
+ * mas_destroy() - destroy a maple state.
+ * @mas: The maple state
+ *
+ * Frees any allocated nodes associated with this maple state.
+ */
+void mas_destroy(struct ma_state *mas)
+{
+ struct maple_alloc *node;
+
+ // When using mas_for_each() to insert an expected number of elements,
+ // it is possible that the number inserted is less than the expected
+ // number. To fix an invalid final node, a check is performed here to
+ // rebalance the previous node with the final node.
+ if (mas->mas_flags & MA_STATE_REBALANCE) {
+ unsigned char end;
+ unsigned long range_min, range_max;
+
+ if (mas_is_start(mas))
+ mas_start(mas);
+
+ __mas_walk(mas, &range_min, &range_max);
+ end = mas_data_end(mas) + 1;
+ if (end < mt_min_slot_count(mas->node) - 1)
+ mas_destroy_rebalance(mas, end);
+
+ mas->mas_flags &= ~MA_STATE_REBALANCE;
+ }
+ mas->mas_flags &= ~MA_STATE_BULK;
+
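+	// The low bit set on mas->alloc means it encodes a request count
+	// rather than a node pointer (see mas_allocated()), so there is
+	// nothing to free.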
+ while (mas->alloc && !(((unsigned long)mas->alloc & 0x1))) {
+ node = mas->alloc;
+ mas->alloc = mas->alloc->slot[0];
+ if (node->node_count > 0)
+ mt_free_bulk(node->node_count, (void **)&node->slot[1]);
+ kmem_cache_free(maple_node_cache, node);
+ }
+ mas->alloc = NULL;
+}
+
+/*
+ * mas_next() - Get the next entry.
+ * @mas: The maple state
+ * @max: The maximum index to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * Can return the zero entry.
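+ *
+ * An iteration sketch (illustrative; the caller holds rcu_read_lock()):
+ *
+ *	MA_STATE(mas, &tree, 0, 0);
+ *
+ *	while ((entry = mas_next(&mas, ULONG_MAX)) != NULL)
+ *		nr++;	// the entry spans mas.index to mas.last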
+ *
+ * Return: The next entry or %NULL
+ */
+void *mas_next(struct ma_state *mas, unsigned long max)
+{
+ if (mas_is_none(mas))
+ mas->node = MAS_START;
+
+ return _mas_next(mas, max);
+}
+EXPORT_SYMBOL_GPL(mas_next);
+
+/*
+ * mas_prev() - Get the previous entry
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on nodes
+ * that are not searchable. If mas->node is MAS_START, it will first look up
+ * the index, then get the previous entry.
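+ *
+ * A reverse-iteration sketch (illustrative; the caller holds
+ * rcu_read_lock()):
+ *
+ *	MA_STATE(mas, &tree, ULONG_MAX, ULONG_MAX);
+ *
+ *	while ((entry = mas_prev(&mas, 0)) != NULL)
+ *		nr++;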
+ *
+ * Return: the previous value or %NULL.
+ */
+void *mas_prev(struct ma_state *mas, unsigned long min)
+{
+ void *entry;
+
+	if (!mas->index) { // Nothing comes before 0.
+ mas->last = 0;
+ return NULL;
+ }
+
+ if (mas_is_none(mas))
+ mas->node = MAS_START;
+
+ if (!mas_searchable(mas))
+ return NULL;
+
+ if (mas_is_start(mas)) {
+ mas_start(mas);
+ mas_walk(mas);
+ }
+
+ do {
+ entry = _mas_prev(mas, min);
+ } while (!mas_is_none(mas) && !entry);
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mas_prev);
+
+/*
+ * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
+ * @mas: The maple state
+ *
+ * Some users need to pause a walk and drop the lock they're holding in
+ * order to yield to a higher priority thread or carry out an operation
+ * on an entry. Those users should call this function before they drop
+ * the lock. It resets the @mas to be suitable for the next iteration
+ * of the loop after the user has reacquired the lock. If most entries
+ * found during a walk require you to call mas_pause(), the mt_for_each()
+ * iterator may be more appropriate.
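+ *
+ * A sketch of the intended pattern (illustrative):
+ *
+ *	rcu_read_lock();
+ *	mas_for_each(&mas, entry, ULONG_MAX) {
+ *		if (need_resched()) {
+ *			mas_pause(&mas);
+ *			rcu_read_unlock();
+ *			cond_resched();
+ *			rcu_read_lock();
+ *		}
+ *	}
+ *	rcu_read_unlock();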
+ *
+ */
+void mas_pause(struct ma_state *mas)
+{
+ // Overflow protection.
+ if (mas->last == ULONG_MAX) {
+ mas->node = MAS_NONE;
+ return;
+ }
+
+ mas_reset(mas);
+ mas->last++;
+ mas->index = mas->last;
+}
+EXPORT_SYMBOL_GPL(mas_pause);
+
+/*
+ * mas_find() - If mas->node == MAS_START, find the first non-NULL entry
+ * >= mas->index. Otherwise, find the first non-NULL entry > mas->index.
+ * @mas: The maple state
+ * @max: The maximum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->node to MAS_NONE.
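+ *
+ * An iteration sketch (illustrative; roughly what a mas_for_each() loop
+ * expands to):
+ *
+ *	MA_STATE(mas, &tree, 0, 0);
+ *
+ *	rcu_read_lock();
+ *	while ((entry = mas_find(&mas, ULONG_MAX)) != NULL)
+ *		nr++;
+ *	rcu_read_unlock();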
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find(struct ma_state *mas, unsigned long max)
+{
+ void *entry = NULL;
+ bool first = false;
+
+ if (mas_is_start(mas))
+ first = true;
+
+retry:
+ while (mas_search_cont(mas, mas->index, max, entry)) {
+ entry = _mas_next(mas, max);
+
+ if (unlikely(mas_dead_node(mas, mas->index))) {
+ if (first)
+ mas->node = MAS_START;
+
+ goto retry;
+ }
+ first = false;
+ }
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mas_find);
+
+/*
+ * mas_erase() - Find the range in which index resides and erase the entire
+ * range.
+ * @mas: The maple state
+ *
+ * Must hold the write lock.
+ * Searches for @mas->index, sets @mas->index and @mas->last to the range and
+ * erases that range.
+ *
+ * Return: the entry that was erased, @mas->index and @mas->last are updated.
+ */
+static inline void *mas_erase(struct ma_state *mas)
+{
+ unsigned long r_max, r_min;
+ void *entry = NULL;
+
+ entry = mas_range_load(mas, &r_min, &r_max);
+retry:
+ mas->node = MAS_START;
+ mas->index = r_min;
+ mas->last = r_max;
+ _mas_store(mas, NULL, true);
+ if (mas_nomem(mas, GFP_KERNEL))
+ goto retry;
+
+ return entry;
+}
+
+/*
+ * mas_nomem() - Check if there was an error allocating and do the allocation
+ * if necessary.  If there are allocations, then free them.
+ * @mas: The maple state
+ * @gfp: The GFP_FLAGS to use for allocations
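+ *
+ * The usual retry idiom, as used by mas_erase() and mtree_store_range() in
+ * this file:
+ *
+ *	retry:
+ *	_mas_store(&mas, entry, true);
+ *	if (mas_nomem(&mas, GFP_KERNEL))
+ *		goto retry;
+ *
+ * Return: true if an allocation occurred and the operation should be retried,
+ * false otherwise.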
+ */
+bool mas_nomem(struct ma_state *mas, gfp_t gfp)
+ __must_hold(mas->tree->lock)
+{
+ if (likely(mas->node != MA_ERROR(-ENOMEM))) {
+ mas_destroy(mas);
+ return false;
+ }
+
+ if (gfpflags_allow_blocking(gfp)) {
+ mtree_unlock(mas->tree);
+ mas_alloc_nodes(mas, gfp);
+ mtree_lock(mas->tree);
+ } else {
+ mas_alloc_nodes(mas, gfp);
+ }
+
+ if (!mas_allocated(mas))
+ return false;
+
+ mas->node = MAS_START;
+ return true;
+}
+
+void __init maple_tree_init(void)
+{
+ maple_node_cache = kmem_cache_create("maple_node",
+ sizeof(struct maple_node), sizeof(struct maple_node),
+ SLAB_PANIC, NULL);
+}
+
+/*
+ * mtree_init() - Initialize a maple tree.
+ * @mt: The maple tree
+ * @ma_flags: The flags to use for the tree.
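+ *
+ * A sketch:
+ *
+ *	struct maple_tree tree;
+ *
+ *	mtree_init(&tree, 0);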
+ */
+void mtree_init(struct maple_tree *mt, unsigned int ma_flags)
+{
+ spin_lock_init(&mt->ma_lock);
+ mt->ma_flags = ma_flags;
+ rcu_assign_pointer(mt->ma_root, NULL);
+}
+EXPORT_SYMBOL(mtree_init);
+
+/*
+ * mtree_load() - Load a value stored in a maple tree
+ * @mt: The maple tree
+ * @index: The index to load
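+ *
+ * A sketch (mtree_load() takes the RCU read lock itself, so no external
+ * locking is needed):
+ *
+ *	entry = mtree_load(&tree, 12);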
+ *
+ * Return: the entry or %NULL
+ */
+void *mtree_load(struct maple_tree *mt, unsigned long index)
+{
+ void *entry;
+
+ MA_STATE(mas, mt, index, index);
+ trace_mtree_load(&mas);
+ rcu_read_lock();
+ entry = mas_load(&mas);
+ rcu_read_unlock();
+ if (xa_is_zero(entry))
+ return NULL;
+
+ return entry;
+}
+EXPORT_SYMBOL(mtree_load);
+
+/*
+ * mtree_store_range() - Store an entry at a given range.
+ * @mt: The maple tree
+ * @index: The start of the range
+ * @last: The end of the range
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations
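+ *
+ * A usage sketch (handles its own locking):
+ *
+ *	ret = mtree_store_range(&tree, 5, 10, entry, GFP_KERNEL);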
+ *
+ * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
+ * be allocated.
+ */
+int mtree_store_range(struct maple_tree *mt, unsigned long index,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ MA_STATE(mas, mt, index, last);
+
+ trace_mtree_store_range(&mas, entry);
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return -EINVAL;
+
+ if (index > last)
+ return -EINVAL;
+
+ mas_lock(&mas);
+retry:
+ _mas_store(&mas, entry, true);
if (mas_nomem(&mas, gfp))
goto retry;
EXPORT_SYMBOL(mtree_destroy);
/*
- * mas_store() - Store an @entry.
- * @mas: The maple state.
- * @entry: The entry to store.
- *
- * The @mas->index and @mas->last is used to set the range for the @entry.
- * Note: The @mas should have pre-allocated entries to ensure there is memory to
- * store the entry. Please see mas_entry_count()/mas_destroy() for more details.
- */
-void *mas_store(struct ma_state *mas, void *entry)
-{
- void *existing = NULL;
-
- if (mas->index > mas->last) {
- mas_set_err(mas, -EINVAL);
- return NULL;
- }
-
- existing = _mas_store(mas, entry, true);
- if (unlikely(mas_is_err(mas)))
- return existing;
-
- if (unlikely(!mte_is_leaf(mas->node))) // spanning store occurred
- mas->node = MAS_START;
-
- return existing;
-}
-
-/*
- * mas_store_gfp() - Store a value into the tree.
- * @mas: The maple state
- * @entry: The entry to store
- * @gfp: The GFP_FLAGS to use for allocations if necessary.
- *
- * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
- * be allocated.
- */
-int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
-{
-
- // Detect spanning store.
- if ((mas->last == mas->max && !entry) ||
- (mas->last > mas->max))
- mas->node = MAS_START;
-
-retry:
- _mas_store(mas, entry, true);
- if (unlikely(mas_nomem(mas, gfp)))
- goto retry;
-
- if (unlikely(mas_is_err(mas)))
- return xa_err(mas->node);
-
- if (unlikely(!mte_is_leaf(mas->node))) // spanning store occurred
- mas->node = MAS_START;
-
- return 0;
-
-}
-
-/*
- * mas_entry_count() - Set the expected number of entries that will be inserted.
- * @mas: The maple state
- * @nr_entries: The number of expected entries.
- *
- * This will attempt to pre-allocate enough nodes to store the expected number
- * of entries. The allocations will occur using the bulk allocator interface
- * for speed. Please call mas_destroy() on the @mas after inserting the entries
- * to ensure any unused nodes are freed.
- *
- * Return: 0 on success, -ENOMEM if memory could not be allocated.
- */
-int mas_entry_count(struct ma_state *mas, unsigned long nr_entries)
-{
- int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
- struct maple_enode *enode = mas->node;
- int nr_nodes;
- int ret;
-
- // Optimize splitting for bulk insert in-order.
- mas->mas_flags |= MA_STATE_BULK;
-
- // Avoid overflow, assume a gap between each entry and a trailing null
- // If this is wrong, it just means allocation can happen during
- // insertion of entries.
- nr_nodes = max(nr_entries, nr_entries * 2 + 1);
-
- if (!mt_is_alloc(mas->tree))
- nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
-
- // Leaves
- nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 1);
- // Internal nodes.
- nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
- mas_node_count(mas, nr_nodes);
-
- if (!mas_is_err(mas))
- return 0;
-
- ret = xa_err(mas->node);
- mas->node = enode;
- return ret;
-
-}
-
-/*
- * mas_destroy() - destroy a maple state.
- * @mas: The maple state
+ * mt_find() - Search from the start up until an entry is found.
+ * @mt: The maple tree
+ * @*index: Pointer which contains the start location of the search
+ * @max: The maximum value to check
*
- * Frees any allocated nodes associated with this maple state.
- */
-void mas_destroy(struct ma_state *mas)
-{
- struct maple_alloc *node;
-
- // When using mas_for_each() to insert an expected number of elements,
- // it is possible that the number inserted is less than the expected
- // number. To fix an invalid final node, a check is performed here to
- // rebalance the previous node with the final node.
- if (mas->mas_flags & MA_STATE_REBALANCE) {
- unsigned char end;
- unsigned long range_min, range_max;
-
- if (mas_is_start(mas))
- mas_start(mas);
-
- __mas_walk(mas, &range_min, &range_max);
- end = mas_data_end(mas) + 1;
- if (end < mt_min_slot_count(mas->node) - 1)
- mas_destroy_rebalance(mas, end);
-
- mas->mas_flags &= ~MA_STATE_REBALANCE;
- }
- mas->mas_flags &= ~MA_STATE_BULK;
-
- while (mas->alloc && !(((unsigned long)mas->alloc & 0x1))) {
- node = mas->alloc;
- mas->alloc = mas->alloc->slot[0];
- if (node->node_count > 0)
- mt_free_bulk(node->node_count, (void **)&node->slot[1]);
- kmem_cache_free(maple_node_cache, node);
- }
- mas->alloc = NULL;
-}
-
-/*
- * mas_nomem() - * Check if there was an error allocating and do the allocation
- * if necessary If there are allocations, then free them.
- * @mas: The maple state
- * @gfp: The GFP_FALGS to use for allocations
+ * Handles locking.
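+ *
+ * A sketch (illustrative):
+ *
+ *	unsigned long index = 0;
+ *
+ *	entry = mt_find(&tree, &index, ULONG_MAX);	// first entry at/after 0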
*
- * Internal function
+ * Return: The entry at or after the @*index or %NULL
*/
-bool mas_nomem(struct ma_state *mas, gfp_t gfp)
- __must_hold(mas->tree->lock)
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
{
- if (likely(mas->node != MA_ERROR(-ENOMEM))) {
- mas_destroy(mas);
- return false;
- }
-
- if (gfpflags_allow_blocking(gfp)) {
- mtree_unlock(mas->tree);
- mas_alloc_nodes(mas, gfp);
- mtree_lock(mas->tree);
- } else {
- mas_alloc_nodes(mas, gfp);
- }
-
- if (!mas_allocated(mas))
- return false;
-
- mas->node = MAS_START;
- return true;
+ return _mt_find(mt, index, max, true);
}
+EXPORT_SYMBOL(mt_find);
#ifdef CONFIG_DEBUG_MAPLE_TREE
unsigned int maple_tree_tests_run;