From 4dd06e502c1309c7a3e006eda74873e9a11fde6f Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 20 Oct 2021 08:39:45 -0400
Subject: [PATCH] maple tree: More support for external locking

Documentation, and don't try to drop the lock to allocate memory if
we're using an external lock.

Signed-off-by: Matthew Wilcox (Oracle)
---
 Documentation/core-api/maple-tree.rst | 49 +++++----------------------
 lib/maple_tree.c                      | 12 ++++---
 2 files changed, 17 insertions(+), 44 deletions(-)

diff --git a/Documentation/core-api/maple-tree.rst b/Documentation/core-api/maple-tree.rst
index 9079aa44e441..d0e8bf1ebb20 100644
--- a/Documentation/core-api/maple-tree.rst
+++ b/Documentation/core-api/maple-tree.rst
@@ -86,15 +86,7 @@ Locking
 -------
 
 When using the Normal API, you do not have to worry about locking.
-The Maple Tree uses RCU and internal spinlock to synchronise access:
-
-No lock needed:
- * mas_destroy()
- * mas_entry_count()
- * mas_is_none()
- * mas_reset()
- * mas_set_range()
- * mas_set()
+The Maple Tree uses RCU and an internal spinlock to synchronise access:
 
 Takes RCU read lock:
  * mtree_load()
@@ -113,48 +105,25 @@ Takes ma_lock internally:
  * mt_set_in_rcu()
  * mt_clear_in_rcu()
 
-
-Assume RCU read lock held on entry:
- * mas_walk()
- * mas_next()
- * mas_prev()
- * mas_find()
- * mas_pause()
-
-Assume ma_lock held on entry:
- * mas_store()
- * mas_store_gfp()
- * mas_nomem()
-
-Assumes ma_lock or RCU read lock is held on entry:
- * mas_for_each()
-
-
 If you want to take advantage of the lock to protect the data structures
-that you are storing in the Maple Tree, you can call mas_lock() before
-calling mas_walk(), then take a reference count on the object you have
-found before calling the mas_unlock().  This will prevent stores from
+that you are storing in the Maple Tree, you can call mtree_lock() before
+calling mtree_load(), then take a reference count on the object you have
+found before calling mtree_unlock().  This will prevent stores from
 removing the object from the tree between looking up the object and
 incrementing the refcount.  You can also use RCU to avoid dereferencing
 freed memory, but an explanation of that is beyond the scope of this
 document.
 
-If it is necessary to drop the locks during an iteration, then mas_pause()
-provides this functionality.  It is worth noting that dropping the locks
-allows for other tasks to alter the tree so your code may get a split view
-of the past and the current state of a tree in this scenario.  Note that
-the next call using the maple state will re-walk the tree from the root.
-
 Advanced API
 ============
 
 The advanced API offers more flexibility and better performance at the
 cost of an interface which can be harder to use and has fewer safeguards.
-No locking is done for you by the advanced API, and you are required
-to use the mas_lock while modifying the tree.  You can choose whether
-to use the mas_lock or the RCU lock while doing read-only operations on
-the tree.  You can mix advanced and normal operations on the same array
-and the normal API is implemented in terms of the advanced API.
+You must take care of your own locking while using the advanced API.
+You can use the ma_lock, RCU or an external lock for protection.
+You can mix advanced and normal operations on the same array, as long
+as the locking is compatible.  The normal API is implemented in terms
+of the advanced API.
 
 The advanced API is based around the ma_state, this is where the 'mas'
 prefix originates.
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index d104f2a34458..c9dd1ceb5c67 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -747,10 +747,14 @@ static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
 	}
 }
 
+static inline bool mt_external_lock(const struct maple_tree *mt)
+{
+	return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
+}
+
 static inline bool mt_locked(const struct maple_tree *mt)
 {
-	return ((mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN) ||
-		lockdep_is_held(&mt->ma_lock);
+	return mt_external_lock(mt) || lockdep_is_held(&mt->ma_lock);
 }
 
 static inline void *mt_slot(const struct maple_tree *mt,
@@ -5939,7 +5943,7 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp)
 		return false;
 	}
 
-	if (gfpflags_allow_blocking(gfp)) {
+	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
 		mtree_unlock(mas->tree);
 		mas_alloc_nodes(mas, gfp);
 		mtree_lock(mas->tree);
@@ -5968,8 +5972,8 @@ void __init maple_tree_init(void)
  */
 void mtree_init(struct maple_tree *mt, unsigned int ma_flags)
 {
-	spin_lock_init(&mt->ma_lock);
 	mt->ma_flags = ma_flags;
+	spin_lock_init(&mt->ma_lock);
 	rcu_assign_pointer(mt->ma_root, NULL);
 }
 EXPORT_SYMBOL(mtree_init);
-- 
2.50.1
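
A minimal sketch of the lookup-then-refcount pattern the documentation
hunk describes.  struct my_obj and my_obj_get() are hypothetical names
used only for illustration; mtree_lock(), mtree_load() and
mtree_unlock() are the normal-API calls named above:

#include <linux/maple_tree.h>
#include <linux/refcount.h>

struct my_obj {
	refcount_t ref;
	/* ... payload ... */
};

/* Look up and pin an object; the caller later drops the ref taken here. */
static struct my_obj *my_obj_get(struct maple_tree *mt, unsigned long index)
{
	struct my_obj *obj;

	mtree_lock(mt);			/* excludes concurrent stores */
	obj = mtree_load(mt, index);
	if (obj)
		refcount_inc(&obj->ref); /* pin before the lock is dropped */
	mtree_unlock(mt);

	return obj;
}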
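And a sketch, under stated assumptions, of the external-locking case
the mas_nomem() change targets: my_lock, my_tree, my_setup() and
my_store() are hypothetical, while MT_FLAGS_LOCK_EXTERN, MA_STATE() and
mas_store_gfp() are existing interfaces.  Because the tree is marked as
externally locked, mas_nomem() no longer drops and retakes ma_lock
(which the caller never took) around a blocking allocation:

#include <linux/gfp.h>
#include <linux/maple_tree.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);		/* the external lock */
static struct maple_tree my_tree;

static void my_setup(void)
{
	/* Tell the tree that ma_lock is not used for protection. */
	mtree_init(&my_tree, MT_FLAGS_LOCK_EXTERN);
}

static int my_store(unsigned long index, void *entry)
{
	MA_STATE(mas, &my_tree, index, index);
	int ret;

	mutex_lock(&my_lock);
	ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
	mutex_unlock(&my_lock);

	return ret;
}

Since the external lock here is a mutex, a GFP_KERNEL allocation may
safely block while it is held; with the internal spinlock, that is
exactly why mas_nomem() had to drop the lock before allocating.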