kasan: apply write-only mode in kasan kunit testcases
author Yeoreum Yun <yeoreum.yun@arm.com>
Mon, 1 Sep 2025 10:46:23 +0000 (11:46 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:25:39 +0000 (17:25 -0700)
When KASAN is configured in write-only mode, fetch/load operations do not
trigger tag check faults.

As a result, some test cases may produce a different outcome than when
KASAN is configured without write-only mode.

Therefore, modify the pre-existing test cases to check that, in write-only
mode, a write performed to "allocated memory" with an invalid tag (e.g. the
redzone write in the atomic_set() test case) raises a tag check fault (TCF),
while an invalid fetch/read does not generate a TCF.
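
A minimal sketch (for illustration only, not part of the patch) of how a
modified test pairs the two checks so that it passes both with and without
write-only mode; the function name and size below are made up for the
example:

	static void write_only_example(struct kunit *test)
	{
		char *ptr;
		size_t size = 64;

		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

		/* An out-of-bounds write must fault in every KASAN mode. */
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

		/*
		 * An out-of-bounds read is only expected to fault when
		 * write-only mode is disabled: KUNIT_EXPECT_KASAN_FAIL_READ
		 * (added below) expands to KUNIT_EXPECT_KASAN_RESULT() with
		 * fail = !kasan_write_only_enabled().
		 */
		KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[size]);

		kfree(ptr);
	}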

Also, skip some test cases whose result depends on the initial (garbage)
value of the invalid memory.  For example, the atomic_try_cmpxchg() test
case that is passed a valid atomic_t address and an invalid oldval address
may or may not fault in write-only mode: the read of the invalid oldval
never faults, and a write to the invalid memory only happens when the
compare fails, so the outcome depends on whatever value it happens to hold.
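
The compare-and-exchange helpers behave roughly like the simplified model
below (illustrative only, not the kernel implementation), which shows why
the outcome varies with the garbage value:

	/* Simplified model of atomic_try_cmpxchg(); illustrative only. */
	static bool try_cmpxchg_model(int *atomic, int *oldval, int new)
	{
		int cur = *atomic;

		if (cur == *oldval) {	/* read of *oldval: never faults in write-only mode */
			*atomic = new;	/* success: the write goes to the valid atomic */
			return true;
		}
		*oldval = cur;		/* failure: the write-back to the invalid oldval faults */
		return false;
	}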

Link: https://lkml.kernel.org/r/20250901104623.402172-3-yeoreum.yun@arm.com
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Breno Leitao <leitao@debian.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: D Scott Phillips <scott@os.amperecomputing.com>
Cc: Hardevsinh Palaniya <hardevsinh.palaniya@siliconsignals.io>
Cc: James Morse <james.morse@arm.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Oliver Upton <oliver.upton@linux.dev>
Cc: Pankaj Gupta <pankaj.gupta@amd.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <yang@os.amperecomputing.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/kasan/kasan_test_c.c

index 4cf2b5f8d6c1459d8d2d0a2ea09574d0a92dbf84..5d9c9c41f48838e4752b0c8521403b84cebd94b1 100644
@@ -94,11 +94,13 @@ static void kasan_test_exit(struct kunit *test)
 }
 
 /**
- * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
- * KASAN report; causes a KUnit test failure otherwise.
+ * KUNIT_EXPECT_KASAN_RESULT - check that the executed expression
+ * causes a KUnit test failure when the result is different from @fail.
  *
  * @test: Currently executing KUnit test.
- * @expression: Expression that must produce a KASAN report.
+ * @expr: Expression to be tested.
+ * @expr_str: Expression to be tested encoded as a string.
+ * @fail: Whether expression should produce a KASAN report.
  *
  * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
  * checking is auto-disabled. When this happens, this test handler reenables
@@ -110,25 +112,29 @@ static void kasan_test_exit(struct kunit *test)
  * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
  * expression to prevent that.
  *
- * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
+ * In between KUNIT_EXPECT_KASAN_RESULT checks, test_status.report_found is kept
  * as false. This allows detecting KASAN reports that happen outside of the
  * checks by asserting !test_status.report_found at the start of
- * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
+ * KUNIT_EXPECT_KASAN_RESULT and in kasan_test_exit.
  */
-#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {                 \
+#define KUNIT_EXPECT_KASAN_RESULT(test, expr, expr_str, fail)          \
+do {                                                                   \
        if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&                         \
            kasan_sync_fault_possible())                                \
                migrate_disable();                                      \
        KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));  \
        barrier();                                                      \
-       expression;                                                     \
+       expr;                                                           \
        barrier();                                                      \
        if (kasan_async_fault_possible())                               \
                kasan_force_async_fault();                              \
-       if (!READ_ONCE(test_status.report_found)) {                     \
-               KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "  \
-                               "expected in \"" #expression            \
-                                "\", but none occurred");              \
+       if (READ_ONCE(test_status.report_found) != fail) {              \
+               KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure"   \
+                               "%sexpected in \"" expr_str             \
+                                "\", but %soccurred",                  \
+                               (fail ? " " : " not "),         \
+                               (test_status.report_found ?             \
+                                "" : "none "));                        \
        }                                                               \
        if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&                         \
            kasan_sync_fault_possible()) {                              \
@@ -141,6 +147,34 @@ static void kasan_test_exit(struct kunit *test)
        WRITE_ONCE(test_status.async_fault, false);                     \
 } while (0)
 
+/*
+ * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
+ * KASAN report; causes a KUnit test failure otherwise.
+ *
+ * @test: Currently executing KUnit test.
+ * @expr: Expression that must produce a KASAN report.
+ */
+#define KUNIT_EXPECT_KASAN_FAIL(test, expr)                    \
+       KUNIT_EXPECT_KASAN_RESULT(test, expr, #expr, true)
+
+/*
+ * KUNIT_EXPECT_KASAN_FAIL_READ - check that the executed expression
+ * produces a KASAN report when the write-only mode is not enabled;
+ * causes a KUnit test failure otherwise.
+ *
+ * Note: At the moment, this macro does not check whether the produced
+ * KASAN report is a report about a bad read access. It is only intended
+ * for checking the write-only KASAN mode functionality without failing
+ * KASAN tests.
+ *
+ * @test: Currently executing KUnit test.
+ * @expr: Expression that must only produce a KASAN report
+ *        when the write-only mode is not enabled.
+ */
+#define KUNIT_EXPECT_KASAN_FAIL_READ(test, expr)                       \
+       KUNIT_EXPECT_KASAN_RESULT(test, expr, #expr,                    \
+                       !kasan_write_only_enabled())                    \
+
 #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {                  \
        if (!IS_ENABLED(config))                                        \
                kunit_skip((test), "Test requires " #config "=y");      \
@@ -183,8 +217,8 @@ static void kmalloc_oob_right(struct kunit *test)
        KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');
 
        /* Out-of-bounds access past the aligned kmalloc object. */
-       KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
-                                       ptr[size + KASAN_GRANULE_SIZE + 5]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ptr[0] =
+                       ptr[size + KASAN_GRANULE_SIZE + 5]);
 
        kfree(ptr);
 }
@@ -198,7 +232,7 @@ static void kmalloc_oob_left(struct kunit *test)
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
        OPTIMIZER_HIDE_VAR(ptr);
-       KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, *ptr = *(ptr - 1));
        kfree(ptr);
 }
 
@@ -211,7 +245,7 @@ static void kmalloc_node_oob_right(struct kunit *test)
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
        OPTIMIZER_HIDE_VAR(ptr);
-       KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ptr[0] = ptr[size]);
        kfree(ptr);
 }
 
@@ -291,7 +325,7 @@ static void kmalloc_large_uaf(struct kunit *test)
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
        kfree(ptr);
 
-       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[0]);
 }
 
 static void kmalloc_large_invalid_free(struct kunit *test)
@@ -323,7 +357,7 @@ static void page_alloc_oob_right(struct kunit *test)
        ptr = page_address(pages);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
-       KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ptr[0] = ptr[size]);
        free_pages((unsigned long)ptr, order);
 }
 
@@ -338,7 +372,7 @@ static void page_alloc_uaf(struct kunit *test)
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
        free_pages((unsigned long)ptr, order);
 
-       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[0]);
 }
 
 static void krealloc_more_oob_helper(struct kunit *test,
@@ -458,7 +492,7 @@ static void krealloc_uaf(struct kunit *test)
 
        KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
        KUNIT_ASSERT_NULL(test, ptr2);
-       KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, *(volatile char *)ptr1);
 }
 
 static void kmalloc_oob_16(struct kunit *test)
@@ -501,7 +535,7 @@ static void kmalloc_uaf_16(struct kunit *test)
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
        kfree(ptr2);
 
-       KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, *ptr1 = *ptr2);
        kfree(ptr1);
 }
 
@@ -640,8 +674,8 @@ static void kmalloc_memmove_invalid_size(struct kunit *test)
        memset((char *)ptr, 0, 64);
        OPTIMIZER_HIDE_VAR(ptr);
        OPTIMIZER_HIDE_VAR(invalid_size);
-       KUNIT_EXPECT_KASAN_FAIL(test,
-               memmove((char *)ptr, (char *)ptr + 4, invalid_size));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test,
+                       memmove((char *)ptr, (char *)ptr + 4, invalid_size));
        kfree(ptr);
 }
 
@@ -654,7 +688,7 @@ static void kmalloc_uaf(struct kunit *test)
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
        kfree(ptr);
-       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[8]);
 }
 
 static void kmalloc_uaf_memset(struct kunit *test)
@@ -701,7 +735,7 @@ again:
                goto again;
        }
 
-       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr1)[40]);
        KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
 
        kfree(ptr2);
@@ -727,19 +761,19 @@ static void kmalloc_uaf3(struct kunit *test)
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
        kfree(ptr2);
 
-       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr1)[8]);
 }
 
 static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
 {
        int *i_unsafe = unsafe;
 
-       KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, READ_ONCE(*i_unsafe));
        KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
-       KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, smp_load_acquire(i_unsafe));
        KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));
 
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, atomic_read(unsafe));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
@@ -752,18 +786,31 @@ static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
+       /*
+        * The result of the test below may vary due to garbage values of
+        * unsafe in write-only mode.
+        * Therefore, skip this test when KASAN is configured in write-only mode.
+        */
+       if (!kasan_write_only_enabled())
+               KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
+       /*
+        * The result of the test below may vary due to garbage values of
+        * unsafe in write-only mode.
+        * Therefore, skip this test when KASAN is configured in write-only mode.
+        */
+       if (!kasan_write_only_enabled()) {
+               KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
+               KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
+               KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
+               KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
+               KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
+       }
 
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, atomic_long_read(unsafe));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
@@ -776,16 +823,29 @@ static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
+       /*
+        * The result of the test below may vary due to garbage values of
+        * unsafe in write-only mode.
+        * Therefore, skip this test when KASAN is configured in write-only mode.
+        */
+       if (!kasan_write_only_enabled())
+               KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
        KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
-       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
+       /*
+        * The result of the test below may vary due to garbage values of
+        * unsafe in write-only mode.
+        * Therefore, skip this test when KASAN is configured in write-only mode.
+        */
+       if (!kasan_write_only_enabled()) {
+               KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
+               KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
+               KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
+               KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
+               KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
+       }
 }
 
 static void kasan_atomics(struct kunit *test)
@@ -842,8 +902,8 @@ static void ksize_unpoisons_memory(struct kunit *test)
        /* These must trigger a KASAN report. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
-       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
-       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[size + 5]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[real_size - 1]);
 
        kfree(ptr);
 }
@@ -863,8 +923,8 @@ static void ksize_uaf(struct kunit *test)
 
        OPTIMIZER_HIDE_VAR(ptr);
        KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
-       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
-       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[0]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[size]);
 }
 
 /*
@@ -899,9 +959,9 @@ static void rcu_uaf(struct kunit *test)
        global_rcu_ptr = rcu_dereference_protected(
                                (struct kasan_rcu_info __rcu *)ptr, NULL);
 
-       KUNIT_EXPECT_KASAN_FAIL(test,
-               call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
-               rcu_barrier());
+       KUNIT_EXPECT_KASAN_FAIL_READ(test,
+                       call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
+                       rcu_barrier());
 }
 
 static void workqueue_uaf_work(struct work_struct *work)
@@ -924,8 +984,8 @@ static void workqueue_uaf(struct kunit *test)
        queue_work(workqueue, work);
        destroy_workqueue(workqueue);
 
-       KUNIT_EXPECT_KASAN_FAIL(test,
-               ((volatile struct work_struct *)work)->data);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test,
+                       ((volatile struct work_struct *)work)->data);
 }
 
 static void kfree_via_page(struct kunit *test)
@@ -972,7 +1032,7 @@ static void kmem_cache_oob(struct kunit *test)
                return;
        }
 
-       KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, *p = p[size + OOB_TAG_OFF]);
 
        kmem_cache_free(cache, p);
        kmem_cache_destroy(cache);
@@ -1068,7 +1128,7 @@ static void kmem_cache_rcu_uaf(struct kunit *test)
         */
        rcu_barrier();
 
-       KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, READ_ONCE(*p));
 
        kmem_cache_destroy(cache);
 }
@@ -1246,7 +1306,7 @@ static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t
                KUNIT_EXPECT_KASAN_FAIL(test,
                        ((volatile char *)&elem[size])[0]);
        else
-               KUNIT_EXPECT_KASAN_FAIL(test,
+               KUNIT_EXPECT_KASAN_FAIL_READ(test,
                        ((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);
 
        mempool_free(elem, pool);
@@ -1312,7 +1372,7 @@ static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
        mempool_free(elem, pool);
 
        ptr = page ? page_address((struct page *)elem) : elem;
-       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[0]);
 }
 
 static void mempool_kmalloc_uaf(struct kunit *test)
@@ -1571,7 +1631,7 @@ static void kasan_memchr(struct kunit *test)
 
        OPTIMIZER_HIDE_VAR(ptr);
        OPTIMIZER_HIDE_VAR(size);
-       KUNIT_EXPECT_KASAN_FAIL(test,
+       KUNIT_EXPECT_KASAN_FAIL_READ(test,
                kasan_ptr_result = memchr(ptr, '1', size + 1));
 
        kfree(ptr);
@@ -1598,7 +1658,7 @@ static void kasan_memcmp(struct kunit *test)
 
        OPTIMIZER_HIDE_VAR(ptr);
        OPTIMIZER_HIDE_VAR(size);
-       KUNIT_EXPECT_KASAN_FAIL(test,
+       KUNIT_EXPECT_KASAN_FAIL_READ(test,
                kasan_int_result = memcmp(ptr, arr, size+1));
        kfree(ptr);
 }
@@ -1635,7 +1695,7 @@ static void kasan_strings(struct kunit *test)
                        strscpy(ptr, src + 1, KASAN_GRANULE_SIZE));
 
        /* strscpy should fail if the first byte is unreadable. */
-       KUNIT_EXPECT_KASAN_FAIL(test, strscpy(ptr, src + KASAN_GRANULE_SIZE,
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, strscpy(ptr, src + KASAN_GRANULE_SIZE,
                                              KASAN_GRANULE_SIZE));
 
        kfree(src);
@@ -1648,17 +1708,17 @@ static void kasan_strings(struct kunit *test)
         * will likely point to zeroed byte.
         */
        ptr += 16;
-       KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_ptr_result = strchr(ptr, '1'));
 
-       KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_ptr_result = strrchr(ptr, '1'));
 
-       KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = strcmp(ptr, "2"));
 
-       KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = strncmp(ptr, "2", 1));
 
-       KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = strlen(ptr));
 
-       KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = strnlen(ptr, 1));
 }
 
 static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
@@ -1677,12 +1737,18 @@ static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
 {
        KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
        KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
-       KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
+       /*
+        * When KASAN is running in write-only mode,
+        * a fault won't occur when the bit is set.
+        * Therefore, skip the test_and_set_bit_lock test in write-only mode.
+        */
+       if (!kasan_write_only_enabled())
+               KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
        KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
        KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
        KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
        KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
-       KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = test_bit(nr, addr));
        if (nr < 7)
                KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
                                xor_unlock_is_negative_byte(1 << nr, addr));
@@ -1806,7 +1872,7 @@ static void vmalloc_oob(struct kunit *test)
                KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
 
        /* An aligned access into the first out-of-bounds granule. */
-       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);
+       KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)v_ptr)[size + 5]);
 
        /* Check that in-bounds accesses to the physical page are valid. */
        page = vmalloc_to_page(v_ptr);
@@ -2083,15 +2149,15 @@ static void copy_user_test_oob(struct kunit *test)
 
        KUNIT_EXPECT_KASAN_FAIL(test,
                unused = copy_from_user(kmem, usermem, size + 1));
-       KUNIT_EXPECT_KASAN_FAIL(test,
+       KUNIT_EXPECT_KASAN_FAIL_READ(test,
                unused = copy_to_user(usermem, kmem, size + 1));
        KUNIT_EXPECT_KASAN_FAIL(test,
                unused = __copy_from_user(kmem, usermem, size + 1));
-       KUNIT_EXPECT_KASAN_FAIL(test,
+       KUNIT_EXPECT_KASAN_FAIL_READ(test,
                unused = __copy_to_user(usermem, kmem, size + 1));
        KUNIT_EXPECT_KASAN_FAIL(test,
                unused = __copy_from_user_inatomic(kmem, usermem, size + 1));
-       KUNIT_EXPECT_KASAN_FAIL(test,
+       KUNIT_EXPECT_KASAN_FAIL_READ(test,
                unused = __copy_to_user_inatomic(usermem, kmem, size + 1));
 
        /*