        KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
 }
 
+static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
+{
+       int *i_unsafe = unsafe;
+
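+       /* Plain and acquire/release accesses through the unsafe pointer. */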
+       KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
+       KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));
+
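+       /* The atomic_t API applied to the unsafe address. */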
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
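+       /* try_cmpxchg() also accesses the memory behind its old-value pointer. */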
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
+
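+       /* The same coverage for the atomic_long_t API. */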
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
+       KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
+}
+
+static void kasan_atomics(struct kunit *test)
+{
+       void *a1, *a2;
+
+       /*
+        * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
+        * that the following 16 bytes will make up the redzone.
+        */
+       a1 = kzalloc(48, GFP_KERNEL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
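+       /* a2 is accessed as atomic_long_t below, so allocate enough for that. */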
+       a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);
+
+       /*
+        * Use atomics to access the redzone: the unsafe pointer points just
+        * past the a1 allocation, and a2 serves as the valid 'safe' location.
+        */
+       kasan_atomics_helper(test, a1 + 48, a2);
+
+       kfree(a1);
+       kfree(a2);
+}
+
 static void kmalloc_double_kzfree(struct kunit *test)
 {
        char *ptr;
        KUNIT_CASE(kasan_strings),
        KUNIT_CASE(kasan_bitops_generic),
        KUNIT_CASE(kasan_bitops_tags),
+       KUNIT_CASE(kasan_atomics),
        KUNIT_CASE(vmalloc_helpers_tags),
        KUNIT_CASE(vmalloc_oob),
        KUNIT_CASE(vmap_tags),