From: Liam R. Howlett
Date: Fri, 6 Dec 2024 20:08:19 +0000 (-0500)
Subject: maple_tree: Add single node allocation support to maple state
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=624c061087e31ba30ec79ec356f4a5782d771d06;p=users%2Fjedix%2Flinux-maple.git

maple_tree: Add single node allocation support to maple state

The fast path through a write will require replacing a single node in
the tree.  Using a sheaf (32 nodes) is too heavy for the fast path, so
special-case the node store operation by allocating just one node in
the maple state.

Signed-off-by: Liam R. Howlett
---

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 19aa3f2881a0..10917f19de0c 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -443,6 +443,7 @@ struct ma_state {
 	unsigned long min;		/* The minimum index of this node - implied pivot min */
 	unsigned long max;		/* The maximum index of this node - implied pivot max */
 	struct slab_sheaf *sheaf;	/* Allocated nodes for this operation */
+	struct maple_node *alloc;	/* allocated nodes */
 	unsigned long node_request;
 	enum maple_status status;	/* The status of the state (active, start, none, etc) */
 	unsigned char depth;		/* depth of tree descent during write */
@@ -489,8 +490,9 @@ struct ma_wr_state {
 		.status = ma_start,					\
 		.min = 0,						\
 		.max = ULONG_MAX,					\
-		.node_request= 0,					\
 		.sheaf = NULL,						\
+		.alloc = NULL,						\
+		.node_request= 0,					\
 		.mas_flags = 0,						\
 		.store_type = wr_invalid,				\
 	}
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index b7372ebe2410..519dbe416296 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1088,16 +1088,23 @@ static int mas_ascend(struct ma_state *mas)
  *
  * Return: A pointer to a maple node.
  */
-static inline struct maple_node *mas_pop_node(struct ma_state *mas)
+static __always_inline struct maple_node *mas_pop_node(struct ma_state *mas)
 {
 	struct maple_node *ret;
 
+	if (mas->alloc) {
+		ret = mas->alloc;
+		mas->alloc = NULL;
+		goto out;
+	}
+
 	if (WARN_ON_ONCE(!mas->sheaf))
 		return NULL;
 
 	ret = kmem_cache_alloc_from_sheaf(maple_node_cache, GFP_NOWAIT, mas->sheaf);
-	memset(ret, 0, sizeof(*ret));
 
+out:
+	memset(ret, 0, sizeof(*ret));
 	return ret;
 }
 
@@ -1108,9 +1115,34 @@ static inline struct maple_node *mas_pop_node(struct ma_state *mas)
  */
 static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
 {
-	if (unlikely(mas->sheaf)) {
-		unsigned long refill = mas->node_request;
+	if (!mas->node_request)
+		return;
+
+	if (mas->node_request == 1) {
+		if (mas->sheaf)
+			goto use_sheaf;
+
+		if (mas->alloc)
+			return;
+		mas->alloc = mt_alloc_one(gfp);
+		if (!mas->alloc)
+			goto error;
+
+		mas->node_request = 0;
+		return;
+	}
+
+use_sheaf:
+	if (unlikely(mas->alloc)) {
+		mt_free_one(mas->alloc);
+		mas->alloc = NULL;
+	}
+
+	if (mas->sheaf) {
+		unsigned long refill;
+
+		refill = mas->node_request;
 		if(kmem_cache_sheaf_count(mas->sheaf) >= refill) {
 			mas->node_request = 0;
 			return;
 		}
@@ -5383,7 +5415,11 @@ void mas_destroy(struct ma_state *mas)
 
 	if (mas->sheaf)
 		kmem_cache_return_sheaf(maple_node_cache, GFP_KERNEL, mas->sheaf);
-
+	if (mas->alloc) {
+		mt_free_one(mas->alloc);
+		mas->alloc = NULL;
+	}
+
 	mas->sheaf = NULL;
 }
 EXPORT_SYMBOL_GPL(mas_destroy);
@@ -6071,7 +6107,7 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp)
 		mas_alloc_nodes(mas, gfp);
 	}
 
-	if (!mas->sheaf)
+	if (!mas->sheaf && !mas->alloc)
 		return false;
 
 	mas->status = ma_start;
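
A stand-alone sketch (not part of the commit) of the policy this patch introduces: a request for exactly one node bypasses the sheaf and is held in mas->alloc, and mas_pop_node() hands that node out before touching the sheaf.  The slab sheaf and maple_node_cache are modelled here with plain malloc()/free() stand-ins, and every fake_* name is hypothetical.

/* Illustrative only: kmem_cache and sheaf calls are replaced by malloc()/free(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SHEAF_CAPACITY 32

struct fake_node { unsigned long slot[16]; };

struct fake_sheaf {
	struct fake_node *nodes[SHEAF_CAPACITY];
	unsigned long count;
};

struct fake_mas {
	struct fake_sheaf *sheaf;	/* bulk allocations (slow path) */
	struct fake_node *alloc;	/* single preallocated node (fast path) */
	unsigned long node_request;
};

/* Mirrors mas_alloc_nodes(): a single requested node never touches the sheaf. */
static int fake_alloc_nodes(struct fake_mas *mas)
{
	if (!mas->node_request)
		return 0;

	if (mas->node_request == 1 && !mas->sheaf) {
		if (!mas->alloc)
			mas->alloc = malloc(sizeof(*mas->alloc));
		if (!mas->alloc)
			return -1;
		mas->node_request = 0;
		return 0;
	}

	/* Multi-node request: drop any stray single node and refill the sheaf. */
	if (mas->alloc) {
		free(mas->alloc);
		mas->alloc = NULL;
	}
	if (!mas->sheaf)
		mas->sheaf = calloc(1, sizeof(*mas->sheaf));
	if (!mas->sheaf || mas->node_request > SHEAF_CAPACITY)
		return -1;
	while (mas->sheaf->count < mas->node_request) {
		struct fake_node *n = malloc(sizeof(*n));

		if (!n)
			return -1;
		mas->sheaf->nodes[mas->sheaf->count++] = n;
	}
	mas->node_request = 0;
	return 0;
}

/* Mirrors mas_pop_node(): the single node is consumed before the sheaf. */
static struct fake_node *fake_pop_node(struct fake_mas *mas)
{
	struct fake_node *ret;

	if (mas->alloc) {
		ret = mas->alloc;
		mas->alloc = NULL;
	} else if (mas->sheaf && mas->sheaf->count) {
		ret = mas->sheaf->nodes[--mas->sheaf->count];
	} else {
		return NULL;
	}
	memset(ret, 0, sizeof(*ret));
	return ret;
}

int main(void)
{
	struct fake_mas mas = { 0 };
	struct fake_node *node;

	mas.node_request = 1;		/* fast path: a single-node store */
	if (fake_alloc_nodes(&mas))
		return 1;
	node = fake_pop_node(&mas);
	printf("fast path node: %p (sheaf never allocated: %d)\n",
	       (void *)node, mas.sheaf == NULL);
	free(node);
	return 0;
}

The point of the special case is that a plain single-node store never pays the cost of filling a 32-node sheaf; the sheaf path is kept for writes that may need several replacement nodes.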