Merge branch 'slab/for-6.12/rcu_barriers' into slab/for-next
author Vlastimil Babka <vbabka@suse.cz>
Fri, 13 Sep 2024 09:08:27 +0000 (11:08 +0200)
committer Vlastimil Babka <vbabka@suse.cz>
Fri, 13 Sep 2024 09:08:27 +0000 (11:08 +0200)
Merge most of SLUB feature work for 6.12:

- Barrier for pending kfree_rcu() in kmem_cache_destroy() and associated
  refactoring of the destroy path (Vlastimil Babka); see the sketch after
  this list
- CONFIG_SLUB_RCU_DEBUG to allow KASAN to catch use-after-free bugs in
  SLAB_TYPESAFE_BY_RCU caches (Jann Horn)
- kmem_cache_charge() for delayed kmemcg charging (Shakeel Butt)
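
For context on the first item above: until now, owners of caches whose
objects are freed with kfree_rcu() had to flush pending RCU callbacks
themselves before destroying the cache. A minimal sketch of that pattern,
using a hypothetical cache foo_cache and object type struct foo (neither
is part of this merge):

	struct foo {
		int data;
		struct rcu_head rcu;	/* for kfree_rcu() */
	};

	static struct kmem_cache *foo_cache;	/* hypothetical */

	static void foo_release(struct foo *f)
	{
		/* Returns f to foo_cache after an RCU grace period. */
		kfree_rcu(f, rcu);
	}

	static void foo_exit(void)
	{
		/*
		 * An explicit rcu_barrier() used to be required here so
		 * that no kfree_rcu() callback could still reference the
		 * cache. With this merge, kmem_cache_destroy() performs
		 * the equivalent wait internally.
		 */
		kmem_cache_destroy(foo_cache);
	}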

mm/slab_common.c (simple merge)
mm/slub.c

diff --cc mm/slub.c
index d52c88f29f69ae39833adf94394244680646e931,aa512de974e7459e097c1d2f462f2aa250e4c701..81cea762d0944636a350c418292c2964b5bd2bbc
--- a/mm/slub.c
+++ b/mm/slub.c
@@@ -2247,15 -2334,9 +2334,15 @@@ bool slab_free_hook(struct kmem_cache *
                rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
                memset((char *)kasan_reset_tag(x) + inuse, 0,
                       s->size - inuse - rsize);
 +              /*
 +               * Restore orig_size; otherwise the kmalloc redzone would
 +               * be falsely reported as overwritten.
 +               */
 +              set_orig_size(s, x, orig_size);
 +
        }
        /* KASAN might put x into memory quarantine, delaying its reuse. */
-       return !kasan_slab_free(s, x, init);
+       return !kasan_slab_free(s, x, init, still_accessible);
  }
  
  static __fastpath_inline
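
A note on the hunk above: the new still_accessible argument to
kasan_slab_free() comes from the CONFIG_SLUB_RCU_DEBUG work. In a
SLAB_TYPESAFE_BY_RCU cache, a reader may legitimately touch an object
after it has been freed, as long as it holds rcu_read_lock() and
revalidates the object before trusting it, so KASAN must not poison such
objects immediately. A rough sketch of the access pattern these caches
permit (foo, foo_cache and the gen field are hypothetical, not from this
merge):

	struct foo {
		spinlock_t lock;
		int gen;	/* bumped on reuse so readers can revalidate */
	};

	static struct kmem_cache *foo_cache;	/* hypothetical */

	static int __init foo_init(void)
	{
		/* Freed objects stay type-stable and readable until an
		 * RCU grace period has elapsed. */
		foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
					      SLAB_TYPESAFE_BY_RCU, NULL);
		return foo_cache ? 0 : -ENOMEM;
	}

	/* May race with kmem_cache_free() of @f: legal for this cache,
	 * but @f must be revalidated before use. With CONFIG_SLUB_RCU_DEBUG,
	 * KASAN can now flag accesses that happen after the grace period. */
	static bool foo_still_current(struct foo *f, int expected_gen)
	{
		bool ok;

		rcu_read_lock();
		spin_lock(&f->lock);
		ok = (f->gen == expected_gen);
		spin_unlock(&f->lock);
		rcu_read_unlock();
		return ok;
	}

In real code the spinlock would be initialized once in a cache
constructor so that it remains valid across object reuse; that detail is
omitted here.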