From 4cf6ad608f89d6a6e84a4a1660bd1e1c3cb20ffe Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Mon, 2 Mar 2020 11:22:53 -0500
Subject: [PATCH] maple_tree: Fix RCU reads to use rcu_dereference_check and
 writes to use rcu_assign_pointer

rcu_dereference() requires the RCU read lock, but most writers access
the slots while holding the tree lock.  Pass the maple_tree through the
slot accessors and use rcu_dereference_check() with lockdep_is_held()
on the tree lock so that both access patterns are valid.  Use
rcu_assign_pointer() instead of RCU_INIT_POINTER() when writing slots
so that concurrent readers cannot observe a partially initialised
entry.  Accessors that now need the tree are converted to take an
ma_state and renamed mas_*() to match.

Signed-off-by: Liam R. Howlett
---
 lib/maple_tree.c | 241 ++++++++++++++++++++++++-----------------------
 1 file changed, 125 insertions(+), 116 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index f55495005132..d448bcb69bce 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -553,59 +553,77 @@ static inline void mte_set_pivot(struct maple_enode *mn, unsigned char slot,
 }
 
 static inline struct maple_enode *ma_get_rcu_slot(
 		const struct maple_node *mn, unsigned char slot,
-		enum maple_type type)
+		enum maple_type type, struct maple_tree *mtree)
 {
 	switch (type) {
 	case maple_range_64:
 	case maple_leaf_64:
-		return rcu_dereference(mn->mr64.slot[slot]);
+		return rcu_dereference_check(mn->mr64.slot[slot],
+				lockdep_is_held(&mtree->ma_lock));
 	default:
 	case maple_dense:
-		return rcu_dereference(mn->slot[slot]);
+		return rcu_dereference_check(mn->slot[slot],
+				lockdep_is_held(&mtree->ma_lock));
 	case maple_arange_64:
-		return rcu_dereference(mn->ma64.slot[slot]);
+		return rcu_dereference_check(mn->ma64.slot[slot],
+				lockdep_is_held(&mtree->ma_lock));
 	case maple_sparse_6:
-		return rcu_dereference(mn->ms6.slot[slot]);
+		return rcu_dereference_check(mn->ms6.slot[slot],
+				lockdep_is_held(&mtree->ma_lock));
 	case maple_sparse_9:
-		return rcu_dereference(mn->ms9.slot[slot]);
+		return rcu_dereference_check(mn->ms9.slot[slot],
+				lockdep_is_held(&mtree->ma_lock));
 	case maple_sparse_16:
-		return rcu_dereference(mn->ms16.slot[slot]);
+		return rcu_dereference_check(mn->ms16.slot[slot],
+				lockdep_is_held(&mtree->ma_lock));
 	case maple_sparse_21:
-		return rcu_dereference(mn->ms21.slot[slot]);
+		return rcu_dereference_check(mn->ms21.slot[slot],
+				lockdep_is_held(&mtree->ma_lock));
 	case maple_sparse_32:
-		return rcu_dereference(mn->ms32.slot[slot]);
+		return rcu_dereference_check(mn->ms32.slot[slot],
+				lockdep_is_held(&mtree->ma_lock));
 	case maple_sparse_64:
-		return rcu_dereference(mn->ms64.slot[slot]);
+		return rcu_dereference_check(mn->ms64.slot[slot],
+				lockdep_is_held(&mtree->ma_lock));
 	case maple_range_16:
 	case maple_leaf_16:
-		return rcu_dereference(mn->mr16.slot[slot]);
+		return rcu_dereference_check(mn->mr16.slot[slot],
+				lockdep_is_held(&mtree->ma_lock));
 	case maple_range_32:
 	case maple_leaf_32:
-		return rcu_dereference(mn->mr32.slot[slot]);
+		return rcu_dereference_check(mn->mr32.slot[slot],
+				lockdep_is_held(&mtree->ma_lock));
 	}
 }
 
 static inline struct maple_enode *_mte_get_rcu_slot(
 		const struct maple_enode *mn, unsigned char slot,
-		enum maple_type type)
+		enum maple_type type, struct maple_tree *mtree)
 {
-	return ma_get_rcu_slot(mte_to_node(mn), slot, type);
+	return ma_get_rcu_slot(mte_to_node(mn), slot, type, mtree);
 }
 
 static inline struct maple_enode *mte_get_rcu_slot(const struct maple_enode *mn,
-		unsigned char slot)
+		unsigned char slot, struct maple_tree *mtree)
 {
-	return _mte_get_rcu_slot(mn, slot, mte_node_type(mn));
+	return _mte_get_rcu_slot(mn, slot, mte_node_type(mn), mtree);
 }
 
-static inline struct maple_enode *mte_get_rcu_sanitized(
-		const struct maple_enode *mn, unsigned char slot)
+
+static inline struct maple_enode *mas_get_rcu_slot(const struct ma_state *mas,
+		unsigned char slot)
 {
-	void *entry = mte_get_rcu_slot(mn, slot);
+	return mte_get_rcu_slot(mas->node, slot, mas->tree);
+}
+
+static inline struct maple_enode *mas_get_rcu_sanitized(
+		struct ma_state *mas, unsigned char slot)
+{
+	void *entry = mte_get_rcu_slot(mas->node, slot, mas->tree);
 	if (mt_will_coalesce(entry))
 		return NULL;
 
 	return entry;
 }
+
 static inline void ma_set_rcu_slot(struct maple_node *mn,
 		unsigned char slot, enum maple_type type, void *val)
 {
@@ -613,42 +631,42 @@ static inline void ma_set_rcu_slot(struct maple_node *mn,
 	switch (type) {
 	default:
 	case maple_dense:
-		RCU_INIT_POINTER(mn->slot[slot], val);
+		rcu_assign_pointer(mn->slot[slot], val);
 		break;
 	case maple_sparse_6:
-		RCU_INIT_POINTER(mn->ms6.slot[slot], val);
+		rcu_assign_pointer(mn->ms6.slot[slot], val);
 		break;
 	case maple_sparse_9:
-		RCU_INIT_POINTER(mn->ms9.slot[slot], val);
+		rcu_assign_pointer(mn->ms9.slot[slot], val);
 		break;
 	case maple_sparse_16:
-		RCU_INIT_POINTER(mn->ms16.slot[slot], val);
+		rcu_assign_pointer(mn->ms16.slot[slot], val);
 		break;
 	case maple_sparse_21:
-		RCU_INIT_POINTER(mn->ms21.slot[slot], val);
+		rcu_assign_pointer(mn->ms21.slot[slot], val);
 		break;
 	case maple_sparse_32:
-		RCU_INIT_POINTER(mn->ms32.slot[slot], val);
+		rcu_assign_pointer(mn->ms32.slot[slot], val);
 		break;
 	case maple_sparse_64:
-		RCU_INIT_POINTER(mn->ms64.slot[slot], val);
+		rcu_assign_pointer(mn->ms64.slot[slot], val);
 		break;
 	case maple_range_16:
 	case maple_leaf_16:
-		RCU_INIT_POINTER(mn->mr16.slot[slot], val);
+		rcu_assign_pointer(mn->mr16.slot[slot], val);
 		break;
 	case maple_range_32:
 	case maple_leaf_32:
-		RCU_INIT_POINTER(mn->mr32.slot[slot], val);
+		rcu_assign_pointer(mn->mr32.slot[slot], val);
 		break;
 	case maple_range_64:
 	case maple_leaf_64:
 		BUG_ON(slot >= 8);
-		RCU_INIT_POINTER(mn->mr64.slot[slot], val);
+		rcu_assign_pointer(mn->mr64.slot[slot], val);
 		break;
 	case maple_arange_64:
 		BUG_ON(slot >= 5);
-		RCU_INIT_POINTER(mn->ma64.slot[slot], val);
+		rcu_assign_pointer(mn->ma64.slot[slot], val);
 		break;
 	}
 }
@@ -675,21 +693,7 @@ static inline void mas_descend(struct ma_state *mas)
 	if (slot)
 		mas->min = mas_get_safe_pivot(mas, slot - 1) + 1;
 	mas->max = mas_get_safe_pivot(mas, slot);
-	mas->node = mte_get_rcu_slot(mas->node, mas_get_slot(mas));
-}
-/** Private
- * mte_cp_rcu_slot() = Copy from one node to anther.  Upon seeing a retry,
- * copies NULL.
- */
-static inline void mte_cp_rcu_slot(struct maple_enode *dst,
-		unsigned char dloc, struct maple_enode *src, unsigned long sloc)
-{
-	void *entry = mte_get_rcu_slot(src, sloc);
-
-	if (mt_is_empty(entry) || xa_is_retry(entry))
-		entry = NULL;
-
-	mte_set_rcu_slot(dst, dloc, entry);
+	mas->node = mas_get_rcu_slot(mas, mas_get_slot(mas));
 }
 
 static inline void mte_update_rcu_slot(const struct maple_enode *mn,
@@ -1162,7 +1166,7 @@ static inline unsigned char mas_data_end(const struct ma_state *mas,
 			break;
 	}
 
-	entry = _mte_get_rcu_slot(mn, slot, type);
+	entry = _mte_get_rcu_slot(mn, slot, type, mas->tree);
 	if (mt_will_coalesce(entry)) {
 		if (piv == prev_piv || !slot)
 			(*coalesce)++;
@@ -1229,7 +1233,7 @@ static inline unsigned char mas_append_entry(struct ma_state *mas, void *entry)
 	unsigned long wr_pivot = mas->min ? mas->min - 1 : 0;
 	unsigned char coalesce, dst_slot = mas_get_slot(mas);
 
-	if (!mte_get_rcu_slot(mas->node, 0) && !mte_get_pivot(mas->node, 0))
+	if (!mas_get_rcu_slot(mas, 0) && !mte_get_pivot(mas->node, 0))
 		dst_slot = 0; // empty node.
 	else if (dst_slot > mt_slot_count(mas->node)) { // Should not happen.
 		dst_slot = mas_data_end(mas, mte_node_type(mas->node),
@@ -1240,13 +1244,13 @@ static inline unsigned char mas_append_entry(struct ma_state *mas, void *entry)
 	if (dst_slot && mas->index <= wr_pivot) {
 		mas_set_safe_pivot(mas, dst_slot - 1, mas->index - 1);
 	} else if (entry && mas->index && (mas->index - 1 != wr_pivot)) {
-		if (dst_slot && !mte_get_rcu_slot(mas->node, dst_slot - 1))
+		if (dst_slot && !mas_get_rcu_slot(mas, dst_slot - 1))
 			dst_slot--;
 
 		mte_set_rcu_slot(mas->node, dst_slot, NULL);
 		mas_set_safe_pivot(mas, dst_slot++, mas->index - 1);
 	} else if (!entry) { // appending NULL value.
-		if (mte_get_rcu_slot(mas->node, dst_slot)) {
+		if (mas_get_rcu_slot(mas, dst_slot)) {
 			mas_set_safe_pivot(mas, dst_slot, mas->index - 1);
 			dst_slot++;
 		}
@@ -1284,7 +1288,7 @@ static inline unsigned char _mas_append(struct ma_state *mas,
 			this_piv = ma_get_pivot(smn, src_end, stype);
 	}
 
-	src_data = mte_get_rcu_slot(mas->node, dst_slot);
+	src_data = mas_get_rcu_slot(mas, dst_slot);
 	if (!src_data) {
 		if (!this_piv)
 			break;
@@ -1299,14 +1303,14 @@ static inline unsigned char _mas_append(struct ma_state *mas,
 	}
 
 	// Append data from src.
-	src_data = ma_get_rcu_slot(smn, src_start, stype);
+	src_data = ma_get_rcu_slot(smn, src_start, stype, mas->tree);
 	for (src_slot = src_start; src_slot <= src_end; src_slot++) {
 		bool next_dst = true;
 
 		if (dst_slot >= mt_slot_count(mas->node))
 			return dst_slot;
 
-		src_data = ma_get_rcu_slot(smn, src_slot, stype);
+		src_data = ma_get_rcu_slot(smn, src_slot, stype, mas->tree);
 		if (src_slot >= mt_pivots[stype])
 			src_piv = src_max;
 		else
@@ -1439,7 +1443,7 @@ static unsigned char mas_append_split_data(struct ma_state *left,
 	unsigned char split, unsigned char start, unsigned char end,
 	unsigned char slot, void *entry)
 {
-	void *existing_entry = mte_get_rcu_sanitized(src->node, slot);
+	void *existing_entry = mas_get_rcu_sanitized(src, slot);
 	struct ma_state *dst = left;
 	unsigned char dst_slot = slot;
 	unsigned long slot_min, slot_max;
@@ -1484,7 +1488,7 @@ static unsigned char mas_append_split_data(struct ma_state *left,
 		goto done;
 
 	mas_get_range(src, slot, &slot_min, &slot_max);
-	existing_entry = mte_get_rcu_sanitized(src->node, slot);
+	existing_entry = mas_get_rcu_sanitized(src, slot);
 
 	if (slot_min <= src->last && slot_max > src->last) {
 		mte_set_rcu_slot(dst->node, dst_slot, existing_entry);
@@ -1632,7 +1636,7 @@ static inline unsigned long mas_leaf_max_gap(struct ma_state *mas)
 
 	if (ma_is_dense(mt)) {
 		for (i = 0; i < mt_slot_count(mas->node); i++) {
-			entry = mte_get_rcu_slot(mas->node, i);
+			entry = mas_get_rcu_slot(mas, i);
 			if (!mt_is_empty(entry) || xa_is_retry(entry)) {
 				if (gap > max_gap)
 					max_gap = gap;
@@ -1653,7 +1657,7 @@ static inline unsigned long mas_leaf_max_gap(struct ma_state *mas)
 			pend = mas->max;
 
 		gap = pend - pstart + 1;
-		entry = mte_get_rcu_slot(mas->node, i);
+		entry = mas_get_rcu_slot(mas, i);
 
 		if (!mt_is_empty(entry) || xa_is_retry(entry)) {
 			prev_gap = 0;
@@ -1773,7 +1777,7 @@ static inline unsigned long mas_first_node(struct ma_state *mas,
 		if (pivot > limit)
 			goto no_entry;
 
-		mn = mte_get_rcu_slot(mas->node, slot);
+		mn = mas_get_rcu_slot(mas, slot);
 		if (mt_is_empty(mn)) {
 			min = pivot + 1;
@@ -1829,7 +1833,7 @@ static inline unsigned long mas_first_entry(struct ma_state *mas,
 
 /* Private
  * mte_destroy_walk: Free the sub-tree from @mn and below.
 */
-void mte_destroy_walk(struct maple_enode *mn)
+void mte_destroy_walk(struct maple_enode *mn, struct maple_tree *mtree)
 {
 	struct maple_enode *node;
 	unsigned int type = mte_node_type(mn);
@@ -1842,9 +1846,9 @@ void mte_destroy_walk(struct maple_enode *mn)
 	case maple_range_64:
 	case maple_arange_64:
 		for (i = 0; i < slot_cnt; i++) {
-			node = mte_get_rcu_slot(mn, i);
+			node = mte_get_rcu_slot(mn, i, mtree);
 			if (!mt_is_empty(node) && !xa_is_retry(node))
-				mte_destroy_walk(node);
+				mte_destroy_walk(node, mtree);
 		}
 		break;
 	default:
@@ -1854,7 +1858,8 @@ void mte_destroy_walk(struct maple_enode *mn)
 }
 
-static inline void mte_adopt_children(struct maple_enode *parent)
+static inline void mas_adopt_children(struct ma_state *mas,
+		struct maple_enode *parent)
 {
 	enum maple_type type = mte_node_type(parent);
@@ -1867,7 +1872,7 @@ static inline void mte_adopt_children(struct maple_enode *parent)
 		    _mte_get_pivot(parent, slot, type) == 0)
 			break;
 
-		child = _mte_get_rcu_slot(parent, slot, type);
+		child = _mte_get_rcu_slot(parent, slot, type, mas->tree);
 		if (!mt_is_empty(child))
 			mte_set_parent(child, parent, slot);
 	}
@@ -1893,14 +1898,14 @@ static inline void _mas_replace(struct ma_state *mas, bool free, bool push)
 		parent = mt_mk_node(mte_parent(mas->node), ptype);
 		slot = mte_parent_slot(mas->node);
-		prev = mte_get_rcu_slot(parent, slot);
+		prev = mte_get_rcu_slot(parent, slot, mas->tree);
 	}
 
 	if (mte_to_node(prev) == mn)
 		return;
 
 	if (!mte_is_leaf(mas->node))
-		mte_adopt_children(mas->node);
+		mas_adopt_children(mas, mas->node);
 
 	if (mte_is_root(mas->node)) {
 		mn->parent = ma_parent_ptr(
@@ -1943,8 +1948,9 @@ static inline void mas_gap_link(struct ma_state *mas, struct maple_enode *parent
 	mte_set_gap(parent, slot, gap);
 	mas->max = max;
 }
-static inline void mte_link(struct maple_enode *new, struct maple_enode *parent,
-		unsigned char slot, unsigned long pivot, enum maple_type type)
+static inline void mas_link(struct ma_state *mas, struct maple_enode *new,
+		struct maple_enode *parent, unsigned char slot,
+		unsigned long pivot, enum maple_type type)
 {
 	unsigned char pivot_cnt = mt_pivots[type];
@@ -1954,7 +1960,7 @@ static inline void mte_link(struct maple_enode *new, struct maple_enode *parent,
 	mte_set_rcu_slot(parent, slot, new);
 
 	if (!mte_is_leaf(new))
-		mte_adopt_children(new);
+		mas_adopt_children(mas, new);
 }
 
 static inline enum maple_type mas_ptype_leaf(struct ma_state *mas)
@@ -2066,11 +2072,11 @@ static inline int mas_split(struct ma_state *mas, unsigned char slot,
 	right.max = mas->max;
 
 	// left will be placed in link, not p_slot as coalescing may occur.
-	mte_link(left.node, new_p_mas.node, link, left.max, ptype);
+	mas_link(mas, left.node, new_p_mas.node, link, left.max, ptype);
 
 	// right (if it exists, will be placed in link + 1;
 	if (right.node)
-		mte_link(right.node, new_p_mas.node, link + 1,
+		mas_link(mas, right.node, new_p_mas.node, link + 1,
 				right.max, ptype);
 
 	// Append data from p_slot + 1 to the end.
 	mas_append(&new_p_mas, &parent, p_slot + 1, p_end);
 
 	// Update encoded slots in children
-	mte_adopt_children(new_p_mas.node);
+	mas_adopt_children(&new_p_mas, new_p_mas.node);
 
 	mas_dup_state(mas, &new_p_mas);
 	// Replace the parent node & free the old parent.
@@ -2131,7 +2137,7 @@ static inline enum maple_type mas_determine_type(struct ma_state *mas,
 		sibling_slot -= 1;
 	else
 		sibling_slot += 1;
-	sibling = mte_get_rcu_slot(mas->node, sibling_slot);
+	sibling = mas_get_rcu_slot(mas, sibling_slot);
 	if (!sibling)
 		return mt;
@@ -2160,7 +2166,8 @@ static inline int _mas_add_dense(struct ma_state *mas, void *entry,
 	// FIXME: Check entire range, not what we would insert this time.
 	if (!overwrite) {
 		do {
-			if (_mte_get_rcu_slot(mas->node, min++, this_type))
+			if (_mte_get_rcu_slot(mas->node, min++, this_type,
+						mas->tree))
 				return 0;
 		} while (min < max);
 	}
@@ -2199,7 +2206,7 @@ static inline int __mas_add_slot_cnt(struct ma_state *mas,
 		if (this_piv < prev_piv)
 			goto skip_slot;
 
-		data = mte_get_rcu_slot(mas->node, this_slot);
+		data = mas_get_rcu_slot(mas, this_slot);
 		if (!data || mt_will_coalesce(data)) {
 			if (prev_null)
 				goto skip_slot;
@@ -2238,7 +2245,7 @@ static inline int _mas_add_slot_cnt(struct ma_state *mas,
 	slot_cnt++; // (2?)
 	if (max > mas->last) { // ends before this_slot.
-		void *prev_val = mte_get_rcu_slot(mas->node, slot);
+		void *prev_val = mas_get_rcu_slot(mas, slot);
 		slot_cnt++; // (2 or 3?)
 		prev_piv = max;
 		if (!prev_val || mt_will_coalesce(prev_val))
@@ -2318,7 +2325,7 @@ static inline int __mas_add(struct ma_state *mas, void *entry,
 		mas_get_range(mas, slot, &prev_piv, &piv);
 
-		existing_entry = mte_get_rcu_sanitized(mas->node, slot);
+		existing_entry = mas_get_rcu_sanitized(mas, slot);
 		if (prev_piv <= mas->last && piv > mas->last) {
 			mte_set_rcu_slot(cp.node, end_slot, existing_entry);
 			mas_set_safe_pivot(&cp, end_slot++, piv);
@@ -2534,7 +2541,7 @@ static inline int _mas_add(struct ma_state *mas, void *entry, bool overwrite,
 	max = mas->max;
 
 	if (slot <= old_end)
-		contents = mte_get_rcu_slot(mas->node, slot);
+		contents = mas_get_rcu_slot(mas, slot);
 
 	// Check early failures.
@@ -2714,7 +2721,7 @@ walk_down:
 		if (slot)
 			mas->min = mas_get_safe_pivot(mas, slot - 1) + 1;
 		mas->max = mas_get_safe_pivot(mas, slot);
-		entry = mte_get_rcu_slot(mas->node, slot);
+		entry = mas_get_rcu_slot(mas, slot);
 		if (xa_is_skip(entry)) {
 			if (mas->max >= max) {
 				goto no_entry;
@@ -2782,7 +2789,7 @@ walk_down:
 		if (slot)
 			mas->min = mas_get_safe_pivot(mas, slot - 1);
 		mas->max = mas_get_safe_pivot(mas, slot);
-		mas->node = mte_get_rcu_slot(mas->node, slot);
+		mas->node = mas_get_rcu_slot(mas, slot);
 		if (mt_is_empty(mas->node))
 			goto done;
@@ -2851,7 +2858,7 @@ restart_prev_node:
 			if (slot != 0 && pivot == 0)
 				break;
 
-			mn = mte_get_rcu_slot(mas->node, slot);
+			mn = mas_get_rcu_slot(mas, slot);
 			if (mt_is_empty(mn) || xa_is_retry(mn))
 				continue;
@@ -2927,7 +2934,7 @@ restart_next_node:
 			if (slot != 0 && pivot == 0)
 				break;
 
-			mn = mte_get_rcu_slot(mas->node, slot);
+			mn = mas_get_rcu_slot(mas, slot);
 			if (mt_is_empty(mn) || xa_is_retry(mn)) {
 				prev_piv = pivot;
 				continue;
 			}
@@ -2981,7 +2988,7 @@ static inline bool mas_prev_nentry(struct ma_state *mas, unsigned long limit,
 		if (pivot < limit)
 			goto no_entry;
 
-		entry = mte_get_rcu_slot(mas->node, slot);
+		entry = mas_get_rcu_slot(mas, slot);
 		if (!mt_is_empty(entry))
 			goto found;
 	} while (slot--);
@@ -3021,7 +3028,7 @@ static inline bool mas_next_nentry(struct ma_state *mas, unsigned long max,
 		if (r_start > max)
 			goto no_entry;
 
-		entry = mte_get_rcu_slot(mas->node, slot);
+		entry = mas_get_rcu_slot(mas, slot);
 		if (!mt_is_empty(entry))
 			goto found;
@@ -3064,7 +3071,7 @@ static inline void* mas_last_entry(struct ma_state *mas,
 	while (range_start < limit) {
 		mas_set_slot(mas, slot);
 		if (!mas_next_nentry(mas, limit, &range_start)) {
-			void *entry = mte_get_rcu_slot(mas->node, slot - 1);
+			void *entry = mas_get_rcu_slot(mas, slot - 1);
 			if (mte_is_leaf(mas->node)) {
 				mas->index = range_start - 1;
 				mas->index = mte_get_pivot(mas->node, slot - 1);
@@ -3137,7 +3144,7 @@ next_node:
 	if (mas_is_none(mas))
 		return NULL;
 
-	entry = mte_get_rcu_slot(mas->node, mas_get_slot(mas));
+	entry = mas_get_rcu_slot(mas, mas_get_slot(mas));
 	if (mas_dead_node(mas, index))
 		goto retry;
@@ -3215,7 +3222,7 @@ static inline void* _mas_prev(struct ma_state *mas, unsigned long limit)
 	else
 		mas->index = mas->min;
 
-	return mte_get_rcu_slot(mas->node, mas_get_slot(mas));
+	return mas_get_rcu_slot(mas, mas_get_slot(mas));
 }
 
 /*
@@ -3273,7 +3280,7 @@ static inline void mas_coalesce_root(struct ma_state *mas)
 	 */
 	if (!hard_data ||
 	    (end + 1 == coalesce) ||
-	    (end == 1 && !mte_get_rcu_slot(this_enode, 1))) {
+	    (end == 1 && !mte_get_rcu_slot(this_enode, 1, mas->tree))) {
 		unsigned long piv;
 
 		min = mas->min;
@@ -3283,7 +3290,7 @@ static inline void mas_coalesce_root(struct ma_state *mas)
 		if (mte_is_leaf(this_enode)) {
 			if (!piv) {
 				void *entry = mte_get_rcu_slot(this_enode,
-						mas_get_slot(mas));
+						mas_get_slot(mas), mas->tree);
 
 				rcu_assign_pointer(mas->tree->ma_root, entry);
 				mte_free(this_enode);
@@ -3375,7 +3382,7 @@ use_left:
 	mas_append(mas, r_mas, 0, r_end_slot);
 
 	if (!mte_is_leaf(mas->node))
-		mte_adopt_children(mas->node);
+		mas_adopt_children(mas, mas->node);
 
 	mte_set_pivot(p_mas->node, mte_parent_slot(mas->node), r_mas->max);
 	mte_set_rcu_slot(p_mas->node, mte_parent_slot(r_mas->node),
@@ -3529,7 +3536,7 @@ start:
 
 done:
 	if (!mte_is_leaf(mas->node))
-		mte_adopt_children(mas->node);
+		mas_adopt_children(mas, mas->node);
 
 	if (free)
 		mas_replace(mas);
@@ -3613,7 +3620,7 @@ static inline bool _mas_rev_awalk(struct ma_state *mas, unsigned long size)
 		}
 
 		/* check if this slot is full */
-		entry = _mte_get_rcu_slot(mas->node, i, type);
+		entry = mas_get_rcu_slot(mas, i);
 		if (entry && !xa_is_deleted(entry)) {
 			this_gap = 0;
 			goto next_slot;
 		}
@@ -3699,7 +3706,7 @@ next:
 		struct maple_enode *next;
 		unsigned char coalesce;
 
-		next = _mte_get_rcu_slot(mas->node, i, type);
+		next = mas_get_rcu_slot(mas, i);
 		mas->min = min;
 		mas->max = max;
 		if (!mt_is_empty(next)) {
@@ -3753,7 +3760,7 @@ static inline bool _mas_awalk(struct ma_state *mas, unsigned long size)
 		if (mas->index > pivot)
 			goto next;
 
-		entry = _mte_get_rcu_slot(mas->node, i, type);
+		entry = mas_get_rcu_slot(mas, i);
 		if (unlikely(xa_is_skip(entry)))
 			goto next;
@@ -3823,7 +3830,7 @@ descend:
 	if (!ma_is_leaf(type)) { //descend
 		struct maple_enode *next;
 
-		next = _mte_get_rcu_slot(mas->node, i, type);
+		next = mas_get_rcu_slot(mas, i);
 		mas->min = min;
 		mas->max = max;
 		if (!mt_is_empty(next)) {
@@ -3905,7 +3912,7 @@ skip_entry:
 		goto done;
 	}
 
-	next = _mte_get_rcu_slot(mas->node, i, type);
+	next = mas_get_rcu_slot(mas, i);
 	if (unlikely(xa_is_skip(next))) {
 		if (unlikely(i == mt_slots[type] - 1)) {
 			i = MAPLE_NODE_SLOTS;
@@ -3979,7 +3986,7 @@ static inline int mas_safe_slot(struct ma_state *mas, int *slot,
 	if (!mas_get_safe_pivot(mas, (*slot) + delta))
 		return false;
 
-	entry = mte_get_rcu_slot(mas->node, (*slot) + delta);
+	entry = mas_get_rcu_slot(mas, (*slot) + delta);
 	if (!mt_is_empty(entry) && !xa_is_retry(entry))
 		return true;
 	*slot += delta;
@@ -4093,7 +4100,7 @@ void *_mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max,
 	leaf = _mas_range_walk(&mas, &range_start, &range_end);
 	slot = mas_get_slot(&mas);
 	if (leaf == true && slot != MAPLE_NODE_SLOTS)
-		entry = mte_get_rcu_slot(mas.node, slot);
+		entry = mas_get_rcu_slot(&mas, slot);
 
 	mas.last = range_end;
 	if (mt_is_empty(entry) || xa_is_zero(entry) || xa_is_retry(entry))
@@ -4188,7 +4195,7 @@ static inline int mas_build_replacement(struct ma_state *mas, void *new_entry,
 
 	if (mte_is_leaf(mas->node)) {
-		entry = mte_get_rcu_slot(mas->node, mas_get_slot(mas));
+		entry = mas_get_rcu_slot(mas, mas_get_slot(mas));
 		if (!mt_is_empty(entry)) {
 			new_mas.index = r_index;
@@ -4219,13 +4226,13 @@ skip_right:
 	mas_update_gap(mas, false);
 	mas->node = MAS_START;
 	mas->alloc = new_mas.alloc;
-	mte_destroy_walk(last);
+	mte_destroy_walk(last, mas->tree);
 
 	return node_cnt;
 
 error:
 	if (new_mas.tree)
-		mte_destroy_walk(new_mas.tree->ma_root);
+		mte_destroy_walk(new_mas.tree->ma_root, new_mas.tree);
 	return 0;
 }
@@ -4396,7 +4403,7 @@ static inline int mas_add(struct ma_state *mas, void *entry, bool overwrite,
 		if (mas->index == 0 && !overwrite)
 			goto exists;
 	} else if (!overwrite) {
-		void *entry = mte_get_rcu_slot(mas->node, slot);
+		void *entry = mas_get_rcu_slot(mas, slot);
 
 		if (!mt_is_empty(entry))
 			goto exists;
@@ -4652,7 +4659,7 @@ retry:
 		if (slot >= MAPLE_NODE_SLOTS)
 			return NULL;
 
-		entry = mte_get_rcu_slot(mas->node, slot);
+		entry = mas_get_rcu_slot(mas, slot);
 		if (mte_dead_node(mas->node))
 			goto retry;
 	}
@@ -4740,7 +4747,7 @@ static inline void *mas_erase(struct ma_state *mas)
 	if (slot == MAPLE_NODE_SLOTS)
 		return NULL;
 
-	entry = mte_get_rcu_slot(mas->node, slot);
+	entry = mas_get_rcu_slot(mas, slot);
 	mte_update_rcu_slot(mas->node, slot, XA_DELETED_ENTRY);
 
 	// dense nodes only need to set a single value.
@@ -4943,7 +4950,7 @@ void mtree_destroy(struct maple_tree *mt)
 	mtree_lock(mt);
 	destroyed = mt->ma_root;
 	if (xa_is_node(destroyed))
-		mte_destroy_walk(destroyed);
+		mte_destroy_walk(destroyed, mt);
 
 	mt->ma_flags = 0;
 	rcu_assign_pointer(mt->ma_root, NULL);
@@ -5155,7 +5162,7 @@ void mas_validate_gaps(struct ma_state *mas)
 
 	if (mte_is_dense(mte)) {
 		for (i = 0; i < mt_slot_count(mte); i++) {
-			if (!mt_is_empty(mte_get_rcu_slot(mas->node, i))) {
+			if (!mt_is_empty(mas_get_rcu_slot(mas, i))) {
 				if (gap > max_gap)
 					max_gap = gap;
 				gap = 0;
@@ -5172,13 +5179,13 @@ void mas_validate_gaps(struct ma_state *mas)
 			p_end = mas->max;
 
 		if (mte_is_leaf(mte)) {
-			if (!mt_is_empty(mte_get_rcu_slot(mas->node, i))) {
+			if (!mt_is_empty(mas_get_rcu_slot(mas, i))) {
 				gap = 0;
 				goto not_empty;
 			}
 			gap += p_end - p_start + 1;
 		} else {
-			void *entry = mte_get_rcu_slot(mas->node, i);
+			void *entry = mas_get_rcu_slot(mas, i);
 			gap = mte_get_gap(mte, i);
 
 			if (mt_is_empty(entry) || xa_is_retry(entry)) {
 				if (gap != p_end - p_start + 1) {
@@ -5187,8 +5194,8 @@ void mas_validate_gaps(struct ma_state *mas)
 					pr_err(MA_PTR"[%u] -> "MA_PTR" %lu != %lu - %lu + 1\n",
 						mas_mn(mas), i,
-						mte_get_rcu_slot(mas->node, i),
-						gap, p_end, p_start);
+						mas_get_rcu_slot(mas, i), gap,
+						p_end, p_start);
 					MT_BUG_ON(mas->tree,
 						gap != p_end - p_start + 1);
@@ -5209,8 +5216,7 @@ void mas_validate_gaps(struct ma_state *mas)
 not_empty:
 		p_start = p_end + 1;
 		if (p_end >= mas->max)
-			break;
-	}
+			break; }
 counted:
 	if (mte_is_root(mte))
@@ -5243,14 +5249,17 @@ void mas_validate_parent_slot(struct ma_state *mas)
 
 	// Check prev/next parent slot for duplicate node entry
 	for (i = 0; i < mt_slots[p_type]; i++) {
-		if (i == p_slot)
+		if (i == p_slot) {
 			MT_BUG_ON(mas->tree,
-				ma_get_rcu_slot(parent, i, p_type) != mas->node);
-		else if (ma_get_rcu_slot(parent, i, p_type) == mas->node) {
+				ma_get_rcu_slot(parent, i, p_type, mas->tree) !=
+				mas->node);
+		} else if (ma_get_rcu_slot(parent, i, p_type, mas->tree) ==
+				mas->node) {
 			pr_err("parent contains invalid child at "MA_PTR"[%u] "
 				MA_PTR"\n", parent, i, mas_mn(mas));
 			MT_BUG_ON(mas->tree,
-				ma_get_rcu_slot(parent, i, p_type) == mas->node);
+				ma_get_rcu_slot(parent, i, p_type, mas->tree) ==
+				mas->node);
 		}
 	}
 }
@@ -5272,7 +5281,7 @@ void mas_validate_limits(struct ma_state *mas)
 			break;
 
 		if (prev_piv > piv) {
-			void *entry = mte_get_rcu_slot(mas->node, i);
+			void *entry = mas_get_rcu_slot(mas, i);
 
 			if (!mt_will_coalesce(entry)) {
 				pr_err(MA_PTR"[%u] %lu < %lu\n", mas_mn(mas),
 					i, piv, prev_piv);
@@ -5282,7 +5291,7 @@ void mas_validate_limits(struct ma_state *mas)
 		}
 
 		if (piv < mas->min) {
-			void *entry = mte_get_rcu_slot(mas->node, i);
+			void *entry = mas_get_rcu_slot(mas, i);
 
 			if (!mt_will_coalesce(entry)) {
 				if (piv < mas->min)
-- 
2.50.1
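
For reference, the read/write pairing this conversion applies is the
standard one for an RCU-protected pointer that may also be read under
the writer's lock.  The sketch below is illustrative only; the demo_*
names are not part of this patch or of the maple tree:

	#include <linux/lockdep.h>
	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct demo {
		int value;
	};

	static DEFINE_SPINLOCK(demo_lock);	/* write-side lock */
	static struct demo __rcu *demo_root;	/* RCU-protected pointer */

	/*
	 * Readers may run under rcu_read_lock() or with demo_lock held;
	 * rcu_dereference_check() satisfies lockdep in either case, as
	 * ma_get_rcu_slot() now does with the tree lock.
	 */
	static struct demo *demo_read(void)
	{
		return rcu_dereference_check(demo_root,
				lockdep_is_held(&demo_lock));
	}

	/*
	 * Writers publish with rcu_assign_pointer() so the store is
	 * ordered after the new object is initialised; RCU_INIT_POINTER()
	 * provides no such ordering and is only safe while readers cannot
	 * yet reach the pointer.
	 */
	static void demo_publish(struct demo *new)
	{
		spin_lock(&demo_lock);
		rcu_assign_pointer(demo_root, new);
		spin_unlock(&demo_lock);
	}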