return ma_is_leaf(mte_node_type(entry));
}
-static inline enum maple_type mt_node_hole(const void *entry)
-{
- return (unsigned long)entry & 4;
-
-}
/* Private
* We also reserve values with the bottom two bits set to '10' which are
* below 4096
{
mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
}
-static void mte_free(struct maple_enode *enode)
+static inline void mte_free(struct maple_enode *enode)
{
ma_free(mte_to_node(enode));
}
{
return (void *)((unsigned long)node & ~2);
}
-static inline
-void *mte_is_full(const struct maple_enode *node)
-{
- return (void *)((unsigned long)node & ~4);
-}
static inline
void mte_set_full(const struct maple_enode *node)
node = (void *)((unsigned long)node | 4);
}
-static inline unsigned int mte_slot_mask(const struct maple_enode *node)
-{
- unsigned int bitmask = 0x78; // Bits 3-6
-
- if (mte_node_type(node) == MAPLE_RANGE16_SLOTS)
- bitmask |= 0x04; // Set bit 2.
- return bitmask;
-}
-static inline bool _ma_is_root(struct maple_node *node)
+static inline bool ma_is_root(struct maple_node *node)
{
return ((unsigned long)node->parent & MA_ROOT_PARENT);
}
static inline bool mte_is_root(const struct maple_enode *node)
{
- return _ma_is_root(mte_to_node(node));
+ return ma_is_root(mte_to_node(node));
}
static inline bool mt_is_alloc(struct maple_tree *mt)
return ma_get_gap(mte_to_node(mn), gap, mte_node_type(mn));
}
-static inline void ma_set_gap(struct maple_node *mn, unsigned char gap,
-		enum maple_type type, unsigned long val)
+/*
+ * mte_set_gap() - Set the tracked gap value at offset @gap of an encoded node.
+ * @mn: the encoded maple node
+ * @gap: the offset of the gap to set
+ * @val: the new gap value
+ *
+ * Only maple_arange_64 nodes track gaps; for every other node type this is
+ * a no-op (the switch falls through to default).
+ */
+static inline void mte_set_gap(const struct maple_enode *mn,
+		unsigned char gap, unsigned long val)
{
-	switch (type) {
+	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
-		mn->ma64.gap[gap] = val;
+		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}
-static inline void mte_set_gap(const struct maple_enode *mn,
- unsigned char gap, unsigned long val)
-{
- ma_set_gap(mte_to_node(mn), gap, mte_node_type(mn), val);
-}
-static inline void mte_cp_gap(struct maple_enode *dst,
- unsigned char dloc, struct maple_enode *src, unsigned long sloc)
-{
- mte_set_gap(dst, dloc, mte_get_gap(src, sloc));
-}
-
-static inline void mas_update_limits(struct ma_state *mas, unsigned char slot,
- enum maple_type type)
-{
- if (slot > 0)
- mas->min = _mte_get_pivot(mas->node, slot - 1, type) + 1;
-
- if (slot < mt_pivots[type])
- mas->max = _mte_get_pivot(mas->node, slot, type);
-}
-
-/**
- * mas_retry() - Retry the operation if appropriate.
- * @mas: Maple Tree operation state.
- * @entry: Entry from tree.
- *
- * The advanced functions may sometimes return an internal entry, such as
- * a retry entry or a zero entry. This function sets up the @mas to restart
- * the walk from the head of the array if needed.
- *
- * Context: Any context.
- * Return: true if the operation needs to be retried.
- */
-bool mas_retry(struct ma_state *mas, const void *entry)
-{
- if (xa_is_deleted(entry))
- return true;
- if (xa_is_zero(entry))
- return true;
- if (!xa_is_retry(entry))
- return false;
- mas_reset(mas);
- return true;
-}
static inline void mas_ascend(struct ma_state *mas)
{
unsigned long max = 0, min = ULONG_MAX;
bool set_max = false, set_min = false;
- if (_ma_is_root(a_node))
+ if (ma_is_root(a_node))
goto no_parent;
p_enode = mt_mk_node(mte_parent(mas->node),
}
no_parent:
- if (_ma_is_root(a_node)) {
+ if (ma_is_root(a_node)) {
if (!set_min)
min = 0;
if (!set_max)
mas->node = p_enode;
}
-static inline bool mas_touches_null(struct ma_state *mas)
-{
- unsigned char slot = mas_get_slot(mas);
- if (slot && !mas_get_rcu_slot(mas, slot - 1))
- return true;
- if ((slot < mt_slot_count(mas->node) - 1) &&
- !mas_get_rcu_slot(mas, slot + 1))
- return true;
-
- return false;
-}
-
-static inline void mas_set_safe_pivot(struct ma_state *mas, unsigned char slot,
- unsigned long val)
-{
- MA_STATE(safe_mas, mas->tree, mas->index, mas->last);
- mas_dup_state(&safe_mas, mas);
-
-restart:
- if (slot >= mt_pivot_count(safe_mas.node)) {
- if (mte_is_root(safe_mas.node))
- return;
-
- slot = mte_parent_slot(safe_mas.node);
- mas_ascend(&safe_mas);
- goto restart;
- }
- mte_set_pivot(safe_mas.node, slot, val);
-}
-
static inline struct maple_node *mas_next_alloc(struct ma_state *ms)
{
int cnt;
/* Private
* mas_data_end() - Find the end of the data (slot). Sets the value of the
- * last pivot to @last_piv, sets @coalesce to the number of slots that can be
- * removed by coalescing.
+ * last pivot to @last_piv.
*
- * Note: XA_RETRY entries are considered past the end, so this is not fully safe
- * to check the space a node has when coalescing and rebalancing.
*/
static inline unsigned char _mas_data_end(const struct ma_state *mas,
const enum maple_type type, unsigned long *last_piv)
}
}
/* Private
- * _mas_replace() - Replace a maple node in the tree with mas->node. Uses the
+ * mas_replace() - Replace a maple node in the tree with mas->node. Uses the
* parent encoding to locate the maple node in the tree.
- * @free: Free the old node
- * @push: push the old node onto the allocated nodes in mas->alloc
+ *
+ * @mas: the ma_state holding the new node in mas->node.
+ * @advanced: if false, free the replaced node and adopt its children; if
+ *	      true, leave the replaced node untouched for the caller.
*
*/
-static inline void _mas_replace(struct ma_state *mas, bool free, bool push,
- bool adopt)
+static inline void mas_replace(struct ma_state *mas, bool advanced)
{
struct maple_node *mn = mas_mn(mas);
struct maple_enode *parent = NULL;
if (mte_to_node(prev) == mn)
return;
- if (adopt && !mte_is_leaf(mas->node))
+ if (!advanced && !mte_is_leaf(mas->node))
mas_adopt_children(mas, mas->node);
if (mte_is_root(mas->node)) {
mte_update_rcu_slot(parent, slot, mas->node);
}
- if (free) {
+ if (!advanced) {
mte_free(prev);
return;
}
-
- if (push)
- mas_push_node(mas, prev);
-
-}
-static inline void mas_replace(struct ma_state *mas)
-{
- _mas_replace(mas, true, false, true);
-}
-
-static inline enum maple_type mas_ptype_leaf(struct ma_state *mas)
-{
- enum maple_type pt = mte_node_type(mas->node);
-
- switch (pt) {
- case maple_arange_64:
- case maple_range_64:
- default:
- return maple_leaf_64;
- }
}
#define MAPLE_BIG_NODE_SLOTS (MAPLE_NODE_SLOTS * 2 + 1)
enum maple_type type;
};
-struct maple_clean_list {
- struct maple_enode *enode;
- struct list_head list;
-};
-
static inline struct maple_enode *mas_check_split_parent(struct ma_state *mas,
unsigned char slot)
{
}
static inline void mab_shift_right(struct maple_big_node *b_node,
- unsigned char b_end,
unsigned char shift, bool alloc)
{
+ unsigned char b_end = b_node->b_end - 1;
do {
b_node->pivot[b_end + shift] = b_node->pivot[b_end];
b_node->slot[b_end + shift] = b_node->slot[b_end];
return i;
}
-static inline unsigned long mas_next_node(struct ma_state *mas,
- unsigned long max);
-static inline void mas_prev_node(struct ma_state *mas, unsigned long limit);
static inline void mas_descend_adopt(struct ma_state *mas)
{
*
* Returns the number of elements in b_node during the last loop.
*/
+static inline void mas_prev_node(struct ma_state *mas, unsigned long limit);
+static inline unsigned long mas_next_node(struct ma_state *mas,
+ unsigned long max);
static inline int mas_combine_separate(struct ma_state *mas,
struct ma_state *orig_l_mas,
struct ma_state *orig_r_mas,
} else if (mas_prev_sibling(orig_l_mas)) {
end = mas_data_end(orig_l_mas);
// shift b_node by prev size
- mab_shift_right(b_node, b_node->b_end - 1,
- end + 1,
+ mab_shift_right(b_node, end + 1,
(mt_is_alloc(mas->tree) ? true : false));
// copy in prev.
mas_mab_cp(orig_l_mas, 0, end, b_node, 0);
l_slot = 0;
end = mas_data_end(orig_l_mas);
// shift b_node by prev size
- mab_shift_right(b_node, b_node->b_end - 1, end + 1,
- (mt_is_alloc(mas->tree) ? true : false));
+ mab_shift_right(b_node, end + 1,
+ (mt_is_alloc(mas->tree) ? true : false));
// copy in prev.
mas_mab_cp(orig_l_mas, 0, end, b_node, 0);
l_mas.min = orig_l_mas->min;
smp_wmb();
// Insert new sub-tree
- _mas_replace(mas, false, false, false);
+ mas_replace(mas, true);
if (!mte_is_leaf(mas->node))
mas_descend_adopt(mas);
if (mas_is_err(mas))
return 0;
-
mas_dup_state(&l_mas, mas);
mas_dup_state(&r_mas, mas);
-
-
+ b_node->b_end++;
if (mas_next_sibling(&r_mas)) {
b_node->b_end = mas_mab_cp(&r_mas, 0, mas_data_end(&r_mas),
- b_node, b_node->b_end + 1);
+ b_node, b_node->b_end);
r_mas.last = r_mas.index = r_mas.max;
} else {
unsigned char shift;
mas_prev_sibling(&l_mas);
shift = mas_data_end(&l_mas) + 1;
- mab_shift_right(b_node, b_node->b_end, shift,
+ mab_shift_right(b_node, shift,
(mt_is_alloc(mas->tree) ? true : false));
mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
- b_node->b_end += shift + 1;
+ b_node->b_end += shift;
l_mas.index = l_mas.last = l_mas.min;
}
}
static inline int mas_split(struct ma_state *mas,
- struct maple_big_node *b_node,
- unsigned char new_end)
+ struct maple_big_node *b_node)
{
struct maple_enode *ancestor = MAS_NONE;
continue;
}
- b_node->b_end = new_end;
b_node->min = mas->min;
b_node->type = type;
l = ma_mnode_ptr(mas_next_alloc(mas));
j = mab_mas_cp(b_node, 0, slot_cnt, &l_mas, 0);
mte_set_pivot(r_mas.node, 0, r_mas.max);
- mab_mas_cp(b_node, j, new_end, &r_mas, 0);
+ mab_mas_cp(b_node, j, b_node->b_end, &r_mas, 0);
mas_set_slot(&l_mas, mte_parent_slot(mas->node));
r_mas.min = l_mas.max + 1;
smp_wmb();
// Insert the new data in the tree
- _mas_replace(mas, false, false, false);
+ mas_replace(mas, true);
mas_descend_adopt(mas);
do {
return mas_rebalance(mas, b_node);
if (b_node->b_end >= mt_slot_count(mas->node))
- return mas_split(mas, b_node, b_node->b_end);
+ return mas_split(mas, b_node);
mas_node_cnt(mas, 1);
if (mas_is_err(mas))
mas->node = new_node;
mab_mas_cp(b_node, 0, b_node->b_end, mas, 0);
- _mas_replace(mas, true, false, true);
+ mas_replace(mas, false);
if (mt_is_alloc(mas->tree))
mas_update_gap(mas, false);
return 2;
{
void *contents = rcu_dereference_protected(mas->tree->ma_root,
lockdep_is_held(&mas->tree->ma_lock));
- enum maple_type mt = mas_ptype_leaf(mas);
+ enum maple_type type = maple_leaf_64;
int slot = 0;
if (mas_is_err(mas))
return 0;
- mas->node = mt_mk_node(mas_next_alloc(mas), mt);
+ mas->node = mt_mk_node(mas_next_alloc(mas), type);
mas_mn(mas)->parent = ma_parent_ptr(
((unsigned long)mas->tree | MA_ROOT_PARENT));
if (mas->index < mmap_end - 1)
mte_set_pivot(mas->node, slot++, mmap_end - 1);
mte_set_rcu_slot(mas->node, slot, XA_ZERO_ENTRY);
- mte_set_pivot(mas->node, slot++, mt_max[mt]);
+ mte_set_pivot(mas->node, slot++, mt_max[type]);
}
/* swap the new root into the tree */
return ret;
}
-static inline void mas_zero_to_end(struct ma_state *mas, unsigned char slot)
-{
- for(; slot < mt_slot_count(mas->node); slot++) {
- if (slot < mt_pivot_count(mas->node))
- mte_set_pivot(mas->node, slot, 0);
- mte_set_rcu_slot(mas->node, slot, NULL);
- if (mt_is_alloc(mas->tree)) {
- mte_set_gap(mas->node, slot, 0);
- }
- }
-}
-/* Private
- *
- * mas_remove_slot() - Remove the contents of a node by shifting slots over.
- * @mas - the ma_state, note the node cannot be a leaf due to the gap use.
- * @slot - Slot that contains the contents to be removed.
- *
- * Note: Not active node safe, not leaf node safe.
- */
-static inline void mas_move_slot(struct ma_state *mas, unsigned char slot,
- unsigned char shift)
-{
- unsigned char end = mas_data_end(mas);
- unsigned char old_slot = slot + shift;
- unsigned long piv;
-
- for(; old_slot < end; old_slot++, slot++) {
-
- if (old_slot < mt_pivot_count(mas->node))
- piv = mte_get_pivot(mas->node, old_slot);
- else
- piv = mas->max;
- mte_set_pivot(mas->node, piv, slot);
- mte_set_rcu_slot(mas->node, slot,
- mte_get_rcu_slot(mas->node, old_slot, mas->tree));
- if (mt_is_alloc(mas->tree))
- mte_set_gap(mas->node, slot,
- mte_get_gap(mas->node, old_slot));
- }
-
- // Zero the rest.
- mas_zero_to_end(mas, slot);
-}
-
static inline unsigned char mas_extend_null( struct ma_state *l_mas,
struct ma_state *r_mas)
{
return r_slot;
}
+/*
+ * Private
+ * __mas_walk(): Locates a value and sets the mas->node and slot accordingly.
+ * range_min and range_max are set to the range in which the entry is valid.
+ * Returns true if mas->node is a leaf.
+ *
+ * Will not point to a skip entry.
+ * May point to a deleted or retry entry.
+ *
+ */
static inline bool __mas_walk(struct ma_state *mas, unsigned long *range_min,
-		unsigned long *range_max);
+		unsigned long *range_max)
+{
+	struct maple_enode *next;
+	enum maple_type type;
+	bool ret = false;
+
+	while (true) {
+		type = mte_node_type(mas->node);
+
+		if (unlikely(!mas_node_walk(mas, type, range_min, range_max)))
+			return false;
+
+		if (ma_is_leaf(type)) // Leaf.
+			return true;
+
+		next = mas_get_rcu_slot(mas, mas_get_slot(mas));
+
+		// Traverse.
+		mas->max = *range_max;
+		mas->min = *range_min;
+		// An empty slot terminates the walk without reaching a leaf.
+		if (unlikely(mt_is_empty(next)))
+			return false;
+
+		mas->node = next;
+		mas_set_slot(mas, 0);
+	}
+	// NOTE(review): unreachable — every path in the loop above returns.
+	return ret;
+}
/* Private
*
* mas_spanning_store() - Create a subtree with the store operation completed
done:
if (mas_is_err(mas))
- return NULL;
+ return NULL;
if (ret > 2)
return NULL;
}
+
+
+
/** Private
* prev node entry
*/
return found;
}
-/*
- * Private
- * __mas_walk(): Locates a value and sets the mas->node and slot accordingly.
- * range_min and range_max are set to the range which the entry is valid.
- * Returns true if mas->node is a leaf.
- *
- * Will not point to a skip entry.
- * May point to a deleted or retry entry.
- *
- */
-static inline bool __mas_walk(struct ma_state *mas, unsigned long *range_min,
- unsigned long *range_max)
-{
- struct maple_enode *next;
- enum maple_type type;
- bool ret = false;
-
- while (true) {
- type = mte_node_type(mas->node);
-
- if (unlikely(!mas_node_walk(mas, type, range_min, range_max)))
- return false;
-
- if (ma_is_leaf(type)) // Leaf.
- return true;
-
- next = mas_get_rcu_slot(mas, mas_get_slot(mas));
-
- // Traverse.
- mas->max = *range_max;
- mas->min = *range_min;
- if (unlikely(mt_is_empty(next)))
- return false;
-
- mas->node = next;
- mas_set_slot(mas, 0);
- }
- return ret;
-}
/** Private
* _mas_range_walk(): A walk that supports returning the range in which an
* index is located.
EXPORT_SYMBOL_GPL(mas_pause);
-static inline bool mas_rewind_node(struct ma_state *mas);
+/*
+ * mas_rewind_node() - Ascend to the nearest ancestor with a slot before the
+ * current position and step back one slot.
+ * @mas: the maple state
+ *
+ * Returns: true on success.  Sets -EBUSY on @mas and returns false when
+ * slot 0 of the root is reached (nothing left to rewind to).
+ */
+static inline bool mas_rewind_node(struct ma_state *mas)
+{
+	unsigned char slot;
+
+	do {
+		if (mte_is_root(mas->node)) {
+			slot = mas_get_slot(mas);
+			if (!slot) {
+				mas_set_err(mas, -EBUSY);
+				return false;
+			}
+		} else {
+			slot = mte_parent_slot(mas->node);
+			mas_ascend(mas);
+		}
+	} while (!slot);
+
+	mas_set_slot(mas, --slot);
+	return true;
+}
static inline void mas_rev_awalk(struct ma_state *mas, unsigned long size)
{
struct maple_enode *last = NULL;
last = mas->node;
}
}
-static inline bool mas_skip_node(struct ma_state *mas);
+/*
+ * mas_skip_node() - Skip this slot in the parent: ascend to the nearest
+ * ancestor with a following slot, advance to it, and refresh mas->min and
+ * mas->max from the surrounding pivots.
+ * @mas: the maple state
+ *
+ * Returns: true on success.  Sets -EBUSY on @mas and returns false when the
+ * walk runs off the end of the root node.
+ */
+static inline bool mas_skip_node(struct ma_state *mas)
+{
+	unsigned char slot;
+
+	do {
+		if (mte_is_root(mas->node)) {
+			slot = mas_get_slot(mas);
+			if (slot > mt_slot_count(mas->node) - 1) {
+				mas_set_err(mas, -EBUSY);
+				return false;
+			}
+		} else {
+			slot = mte_parent_slot(mas->node);
+			mas_ascend(mas);
+		}
+	} while (slot > mt_slot_count(mas->node) - 1);
+
+	mas_set_slot(mas, ++slot);
+	// NOTE(review): slot was just incremented, so slot > 0 is always true
+	// unless the unsigned char wrapped — confirm intent.
+	if (slot > 0)
+		mas->min = mte_get_pivot(mas->node, slot - 1) + 1;
+
+	if (slot < mt_pivot_count(mas->node))
+		mas->max = mte_get_pivot(mas->node, slot);
+	return true;
+}
static inline void mas_awalk(struct ma_state *mas, unsigned long size)
{
struct maple_enode *last = NULL;
return _mas_next(mas, max, &index);
}
EXPORT_SYMBOL_GPL(mas_next);
-static inline bool mas_rewind_node(struct ma_state *mas)
-{
- unsigned char slot;
-
- do {
- if (mte_is_root(mas->node)) {
- slot = mas_get_slot(mas);
- if (!slot) {
- mas_set_err(mas, -EBUSY);
- return false;
- }
- } else {
- slot = mte_parent_slot(mas->node);
- mas_ascend(mas);
- }
- } while (!slot);
-
- mas_set_slot(mas, --slot);
- return true;
-}
-/* Skip this slot in the parent. */
-static inline bool mas_skip_node(struct ma_state *mas)
-{
- unsigned char slot;
-
- do {
- if (mte_is_root(mas->node)) {
- slot = mas_get_slot(mas);
- if (slot > mt_slot_count(mas->node) - 1) {
- mas_set_err(mas, -EBUSY);
- return false;
- }
- } else {
- slot = mte_parent_slot(mas->node);
- mas_ascend(mas);
- }
- } while (slot > mt_slot_count(mas->node) - 1);
-
- mas_set_slot(mas, ++slot);
- mas_update_limits(mas, slot, mte_node_type(mas->node));
- return true;
-}
/* Private
* mas_erase() - Find the range in which index resides and erase the entire
* range.