From: Matthew Wilcox (Oracle)
Date: Fri, 1 Oct 2021 22:09:46 +0000 (-0400)
Subject: mm/slub: Convert full slab management to struct slab
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=678e398461c66717bc74d9f8b82d7875643f8bd1;p=users%2Fwilly%2Flinux.git

mm/slub: Convert full slab management to struct slab

Pass struct slab to add_full() and remove_full(). Improves type safety.

Signed-off-by: Matthew Wilcox (Oracle)
---

diff --git a/mm/slub.c b/mm/slub.c
index 6d81e54e61df..32a1bd4c8a88 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1185,22 +1185,22 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Tracking of fully allocated slabs for debugging purposes.
  */
 static void add_full(struct kmem_cache *s,
-	struct kmem_cache_node *n, struct page *page)
+	struct kmem_cache_node *n, struct slab *slab)
 {
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
 	lockdep_assert_held(&n->list_lock);
-	list_add(&page->slab_list, &n->full);
+	list_add(&slab->slab_list, &n->full);
 }
 
-static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
 {
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
 	lockdep_assert_held(&n->list_lock);
-	list_del(&page->slab_list);
+	list_del(&slab->slab_list);
 }
 
 /* Tracking of the number of slabs for debugging purposes */
@@ -1616,9 +1616,9 @@ static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
-					struct page *page) {}
+					struct slab *slab) {}
 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
-					struct page *page) {}
+					struct slab *slab) {}
 slab_flags_t kmem_cache_flags(unsigned int object_size,
 	slab_flags_t flags, const char *name)
 {
@@ -2402,12 +2402,12 @@ redo:
 		if (l == M_PARTIAL)
 			remove_partial(n, slab);
 		else if (l == M_FULL)
-			remove_full(s, n, slab_page(slab));
+			remove_full(s, n, slab);
 
 		if (m == M_PARTIAL)
 			add_partial(n, slab, tail);
 		else if (m == M_FULL)
-			add_full(s, n, slab_page(slab));
+			add_full(s, n, slab);
 	}
 
 	l = m;
@@ -3361,7 +3361,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 		 * then add it.
 		 */
 		if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
-			remove_full(s, n, slab_page(slab));
+			remove_full(s, n, slab);
 			add_partial(n, slab, DEACTIVATE_TO_TAIL);
 			stat(s, FREE_ADD_PARTIAL);
 		}
@@ -3377,7 +3377,7 @@ slab_empty:
 		stat(s, FREE_REMOVE_PARTIAL);
 	} else {
 		/* Slab must be on the full list */
-		remove_full(s, n, slab_page(slab));
+		remove_full(s, n, slab);
 	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
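
The type-safety win is a compile-time one: add_full() and remove_full() still walk the same list_head, but callers can no longer hand them an arbitrary struct page, and the slab_page() conversions at the call sites go away. A minimal userspace sketch of the idea follows; the struct definitions and toy_add_full() helper below are stand-ins for illustration, not the kernel's definitions.

	#include <stdio.h>

	/* Toy stand-ins for the kernel types; not the real layouts. */
	struct page { unsigned long flags; };
	struct slab { struct slab *next; unsigned int inuse; };

	/* Taking struct slab * means the compiler rejects a struct page *. */
	static void toy_add_full(struct slab *slab)
	{
		printf("tracking slab %p as full\n", (void *)slab);
	}

	int main(void)
	{
		struct slab s = { 0 };
		struct page p = { 0 };

		toy_add_full(&s);	/* fine: the caller proves it has a slab */
		/* toy_add_full(&p); */	/* would fail to compile: wrong pointer type */
		(void)p;
		return 0;
	}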