slub: Allocate frozen pages
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Tue, 31 May 2022 13:36:14 +0000 (09:36 -0400)
committer  Matthew Wilcox (Oracle) <willy@infradead.org>
           Tue, 3 Jan 2023 14:00:32 +0000 (09:00 -0500)
Since slub does not use the page refcount, it can allocate and
free frozen pages, saving one atomic operation per free.
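
To make the saving concrete, here is a small userspace sketch (not
kernel code): a refcounted free pays for an atomic decrement-and-test
before it can release the page, while a frozen page, whose refcount is
never used, can be handed straight back. refcounted_free() and
frozen_free() are hypothetical stand-ins for __free_pages() and
free_frozen_pages().

#include <stdatomic.h>
#include <stdlib.h>

struct fake_page {
	atomic_int refcount;	/* unused (kept at zero) when frozen */
	void *mem;
};

/* refcounted path: one atomic read-modify-write per free */
static void refcounted_free(struct fake_page *p)
{
	if (atomic_fetch_sub(&p->refcount, 1) == 1) {	/* dropped to zero */
		free(p->mem);
		free(p);
	}
}

/* frozen path: the refcount is never touched, so no atomic is needed */
static void frozen_free(struct fake_page *p)
{
	free(p->mem);
	free(p);
}

int main(void)
{
	struct fake_page *a = malloc(sizeof(*a));
	atomic_init(&a->refcount, 1);
	a->mem = malloc(4096);
	refcounted_free(a);	/* pays for the atomic op */

	struct fake_page *b = malloc(sizeof(*b));
	atomic_init(&b->refcount, 0);
	b->mem = malloc(4096);
	frozen_free(b);		/* skips it */
	return 0;
}

That atomic read-modify-write on every free is the operation the
commit title refers to saving.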

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
mm/slab_common.c
mm/slub.c

index 1cba98acc486251705096ae84238156355a4d374..32b3b8c2d2a0a3bf6f0e2a23cf5d5d3a9b460b05 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -940,7 +940,7 @@ void free_large_kmalloc(struct folio *folio, void *object)
        kasan_kfree_large(object);
        kmsan_kfree_large(object);
 
-       mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
+       lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
                              -(PAGE_SIZE << order));
        __free_pages(folio_page(folio, 0), order);
 }
index 13459c69095a25b6a1db8e24472654dca3b9d0d0..2b90d15c44cbc5a12a7e67eb751272cb18986b3a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1843,23 +1843,23 @@ static void *setup_object(struct kmem_cache *s, void *object)
 static inline struct slab *alloc_slab_page(gfp_t flags, int node,
                struct kmem_cache_order_objects oo)
 {
-       struct folio *folio;
+       struct page *page;
        struct slab *slab;
        unsigned int order = oo_order(oo);
 
        if (node == NUMA_NO_NODE)
-               folio = (struct folio *)alloc_pages(flags, order);
+               page = alloc_frozen_pages(flags, order);
        else
-               folio = (struct folio *)__alloc_pages_node(node, flags, order);
+               page = __alloc_frozen_pages(flags, order, node, NULL);
 
-       if (!folio)
+       if (!page)
                return NULL;
 
-       slab = folio_slab(folio);
-       __folio_set_slab(folio);
+       slab = (struct slab *)page;
+       __SetPageSlab(page);
        /* Make the flag visible before any changes to folio->mapping */
        smp_wmb();
-       if (page_is_pfmemalloc(folio_page(folio, 0)))
+       if (page_is_pfmemalloc(page))
                slab_set_pfmemalloc(slab);
 
        return slab;
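
The direct (struct slab *) cast above works because struct slab is
laid out as an overlay of struct page, so no conversion is needed,
only a reinterpretation of the same memory. A minimal userspace sketch
of that overlay pattern, with illustrative field names rather than the
kernel's:

#include <assert.h>
#include <stddef.h>

/*
 * struct mini_slab deliberately overlays struct mini_page, the way
 * struct slab overlays struct page, so converting one to the other is
 * a cast rather than a copy.  (The kernel builds with
 * -fno-strict-aliasing, which makes this kind of type punning
 * well-defined there.)
 */
struct mini_page {
	unsigned long flags;
	void *mapping;
};

struct mini_slab {
	unsigned long flags;	/* must line up with mini_page.flags */
	void *slab_cache;	/* reuses mini_page.mapping's slot */
};

/* mirrors the kernel's compile-time layout checks (SLAB_MATCH) */
_Static_assert(offsetof(struct mini_page, flags) ==
	       offsetof(struct mini_slab, flags),
	       "overlaid fields must share an offset");
_Static_assert(sizeof(struct mini_slab) <= sizeof(struct mini_page),
	       "the overlay must fit inside the page struct");

int main(void)
{
	struct mini_page page = { .flags = 0, .mapping = NULL };
	struct mini_slab *slab = (struct mini_slab *)&page;

	slab->flags = 1;	/* same storage, viewed as a slab */
	assert(page.flags == 1);
	return 0;
}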
@@ -2054,19 +2054,19 @@ static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 static void __free_slab(struct kmem_cache *s, struct slab *slab)
 {
-       struct folio *folio = slab_folio(slab);
-       int order = folio_order(folio);
+       struct page *page = (struct page *)slab;
+       int order = compound_order(page);
        int pages = 1 << order;
 
        __slab_clear_pfmemalloc(slab);
-       folio->mapping = NULL;
+       page->mapping = NULL;
        /* Make the mapping reset visible before clearing the flag */
        smp_wmb();
-       __folio_clear_slab(folio);
+       __ClearPageSlab(page);
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += pages;
        unaccount_slab(slab, order, s);
-       __free_pages(folio_page(folio, 0), order);
+       free_frozen_pages(page, order);
 }
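
The smp_wmb() here pairs with the one in alloc_slab_page() above: the
allocation side makes the slab flag visible before it publishes a
mapping, and the free side makes the mapping reset visible before it
clears the flag. A rough userspace sketch of that ordering, using C11
fences as a stand-in for smp_wmb()/smp_rmb(); all names below are
illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static _Atomic bool slab_flag;		/* stands in for PG_slab */
static _Atomic(void *) mapping;		/* stands in for page->mapping */

/* alloc side: the flag must be visible before any mapping change */
static void alloc_side(void *m)
{
	atomic_store_explicit(&slab_flag, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&mapping, m, memory_order_relaxed);
}

/* free side: the mapping reset must be visible before the flag clears */
static void free_side(void)
{
	atomic_store_explicit(&mapping, NULL, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&slab_flag, false, memory_order_relaxed);
}

/*
 * A reader pairing with the alloc side: if it observes the published
 * mapping, the acquire fence guarantees it also observes the flag set,
 * so "mapping set but flag clear" is never visible.
 */
static bool reader_consistent(void)
{
	void *m = atomic_load_explicit(&mapping, memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	bool flag = atomic_load_explicit(&slab_flag, memory_order_relaxed);
	return m == NULL || flag;
}

int main(void)
{
	int dummy;
	alloc_side(&dummy);
	(void)reader_consistent();
	free_side();
	return 0;
}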
 
 static void rcu_free_slab(struct rcu_head *h)