static void test_leak_destroy(struct kunit *test)
{
- struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
64, SLAB_NO_MERGE);
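+	/* allocate one object and deliberately leak it */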
kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_destroy(s);
- KUNIT_EXPECT_EQ(test, 1, slab_errors);
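+	/* the leak above is now counted in slab_errors instead of triggering a WARN */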
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
}
static int test_init(struct kunit *test)

	return false;
}
+#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
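+/* implemented in mm/slub.c; true while a slab kunit test case is running */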
+bool slab_in_kunit_test(void);
+#else
+static inline bool slab_in_kunit_test(void) { return false; }
+#endif
+
#ifdef CONFIG_SLAB_OBJ_EXT
/*
kasan_cache_shutdown(s);
err = __kmem_cache_shutdown(s);
- WARN(err, "%s %s: Slab cache still has objects when called from %pS",
- __func__, s->name, (void *)_RET_IP_);
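+	/* the slab kunit tests leak objects on purpose and count them via slab_errors */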
+ if (!slab_in_kunit_test())
+ WARN(err, "%s %s: Slab cache still has objects when called from %pS",
+ __func__, s->name, (void *)_RET_IP_);
list_del(&s->list);
return true;
}
-static bool slab_in_kunit_test(void)
+bool slab_in_kunit_test(void)
{
struct kunit_resource *resource;
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
-static inline bool slab_in_kunit_test(void) { return false; }
#endif
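
The hunk above only drops the static from slab_in_kunit_test() and elides the rest of its body. As a rough sketch, assuming the helper mirrors slab_add_kunit_errors() and looks up the "slab_errors" resource that the slub kunit tests register (the real body may differ):

bool slab_in_kunit_test(void)
{
	struct kunit_resource *resource;

	/* no kunit test is running at all */
	if (!kunit_get_current_test())
		return false;

	/* only the slab kunit tests register a "slab_errors" named resource */
	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	kunit_put_resource(resource);
	return true;
}

kunit_find_named_resource() takes a reference on the resource, hence the kunit_put_resource() before returning.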
static inline unsigned int size_from_object(struct kmem_cache *s)

	for_each_object(p, s, addr, slab->objects) {
if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
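+			/* in a kunit test, count the stray object instead of dumping it */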
+ if (slab_add_kunit_errors())
+ continue;
pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
print_tracking(s, p);
}