mm/slub: Convert detached_freelist to use a struct slab
author Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 1 Oct 2021 15:59:10 +0000 (11:59 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 4 Oct 2021 12:33:50 +0000 (08:33 -0400)
This gives us a little bit of extra type safety, as we know that
nobody called virt_to_page() instead of virt_to_head_page().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
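
To illustrate the type-safety point, here is a minimal user-space C sketch (an assumption for illustration only: the struct layouts, lookup helpers and names below are simplified stand-ins, not the actual mm APIs). Once detached_freelist holds a struct slab *, a stray page lookup no longer type-checks, whereas with struct page * both lookups were interchangeable.

#include <stdio.h>

struct page { unsigned long flags; };
struct slab { struct page page; };      /* distinct wrapper type */

static struct page fake_page;
static struct slab fake_slab;

/* simplified stand-ins for virt_to_page() / virt_to_slab() */
static struct page *lookup_page(void *addr) { (void)addr; return &fake_page; }
static struct slab *lookup_slab(void *addr) { (void)addr; return &fake_slab; }

struct detached_freelist { struct slab *slab; };

int main(void)
{
        int object;
        struct page *page = lookup_page(&object);
        struct detached_freelist df;

        df.slab = lookup_slab(&object);  /* ok */
        /* df.slab = page; */            /* incompatible pointer types: caught */
        printf("page %p slab %p\n", (void *)page, (void *)df.slab);
        return 0;
}

The commented-out assignment is rejected by the compiler with an incompatible-pointer-types error, which is exactly the class of mistake the conversion is meant to catch.
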
mm/slub.c

index f5aadbccdab4793dc5f8e2668b8ff5e64da31295..050a0610b3ef6cdd97d1feef541aa3b15602dac8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3502,7 +3502,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 EXPORT_SYMBOL(kmem_cache_free);
 
 struct detached_freelist {
-       struct page *page;
+       struct slab *slab;
        void *tail;
        void *freelist;
        int cnt;
@@ -3522,8 +3522,8 @@ static inline void free_nonslab_page(struct page *page, void *object)
 /*
  * This function progressively scans the array with free objects (with
  * a limited look ahead) and extracts objects belonging to the same
- * page.  It builds a detached freelist directly within the given
- * page/objects.  This can happen without any need for
+ * slab.  It builds a detached freelist directly within the given
+ * slab/objects.  This can happen without any need for
  * synchronization, because the objects are owned by the running process.
  * The freelist is built up as a singly linked list in the objects.
  * The idea is that this detached freelist can then be bulk
@@ -3538,10 +3538,10 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
        size_t first_skipped_index = 0;
        int lookahead = 3;
        void *object;
-       struct page *page;
+       struct slab *slab;
 
        /* Always re-init detached_freelist */
-       df->page = NULL;
+       df->slab = NULL;
 
        do {
                object = p[--size];
@@ -3551,16 +3551,16 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
        if (!object)
                return 0;
 
-       page = virt_to_head_page(object);
+       slab = virt_to_slab(object);
        if (!s) {
                /* Handle kalloc'ed objects */
-               if (unlikely(!PageSlab(page))) {
-                       free_nonslab_page(page, object);
+               if (unlikely(!slab_test_cache(slab))) {
+                       free_nonslab_page(slab_page(slab), object);
                        p[size] = NULL; /* mark object processed */
                        return size;
                }
                /* Derive kmem_cache from object */
-               df->s = page->slab_cache;
+               df->s = slab->slab_cache;
        } else {
                df->s = cache_from_obj(s, object); /* Support for memcg */
        }
@@ -3573,7 +3573,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
        }
 
        /* Start new detached freelist */
-       df->page = page;
+       df->slab = slab;
        set_freepointer(df->s, object, NULL);
        df->tail = object;
        df->freelist = object;
@@ -3585,8 +3585,8 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
                if (!object)
                        continue; /* Skip processed objects */
 
-               /* df->page is always set at this point */
-               if (df->page == virt_to_head_page(object)) {
+               /* df->slab is always set at this point */
+               if (df->slab == virt_to_slab(object)) {
                        /* Opportunity build freelist */
                        set_freepointer(df->s, object, df->freelist);
                        df->freelist = object;
@@ -3618,10 +3618,10 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
                struct detached_freelist df;
 
                size = build_detached_freelist(s, size, p, &df);
-               if (!df.page)
+               if (!df.slab)
                        continue;
 
-               slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
+               slab_free(df.s, slab_page(df.slab), df.freelist, df.tail, df.cnt, _RET_IP_);
        } while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
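
For readers unfamiliar with the scheme the diff touches, the following user-space sketch (an assumption for illustration only: integer slab ids stand in for virt_to_slab(), and the helper names are made up, not the kernel's) mirrors the comment above build_detached_freelist(): take the last unprocessed object, treat its slab as the one to match, thread every other object from that slab onto a singly linked list stored inside the objects themselves, NULL out processed slots, and let the caller free each list in one batch, as kmem_cache_free_bulk() does.

#include <stdio.h>
#include <stddef.h>

struct object {
        int slab_id;             /* stand-in for the owning struct slab */
        struct object *free;     /* freelist pointer kept inside the object */
};

struct detached_freelist {
        int slab_id;
        struct object *freelist;
        int cnt;
};

/* Build one per-slab freelist; returns how many array slots remain to scan. */
static size_t build_freelist(size_t size, struct object **p,
                             struct detached_freelist *df)
{
        struct object *obj;

        df->freelist = NULL;
        df->cnt = 0;

        /* Find the last unprocessed object; it chooses the slab to match. */
        do {
                if (!size)
                        return 0;
                obj = p[--size];
        } while (!obj);

        df->slab_id = obj->slab_id;
        obj->free = NULL;
        df->freelist = obj;
        df->cnt = 1;
        p[size] = NULL;                 /* mark object processed */

        /* Thread every remaining object from the same slab onto the list. */
        for (size_t i = 0; i < size; i++) {
                if (!p[i] || p[i]->slab_id != df->slab_id)
                        continue;
                p[i]->free = df->freelist;
                df->freelist = p[i];
                df->cnt++;
                p[i] = NULL;            /* mark object processed */
        }
        return size;
}

int main(void)
{
        struct object objs[] = { {1}, {2}, {1}, {2}, {1} };
        struct object *p[] = { &objs[0], &objs[1], &objs[2], &objs[3], &objs[4] };
        struct detached_freelist df;
        size_t size = 5;

        /* Consumer loop, shaped like kmem_cache_free_bulk(). */
        do {
                size = build_freelist(size, p, &df);
                if (!df.freelist)
                        continue;
                printf("slab %d: freeing %d object(s) as one batch\n",
                       df.slab_id, df.cnt);
        } while (size);
        return 0;
}

The kernel version additionally limits the look-ahead while picking the starting object; the sketch scans the whole remaining array for brevity.
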