#include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/compiler.h>
+#include <linux/kcsan.h>
 #include <asm/processor.h>
 
+/*
+ * The seqlock interface does not prescribe a precise sequence of read
+ * begin/retry/end. Readers typically pair a call to read_seqcount_begin()
+ * with a call to read_seqcount_retry(); however, there are more esoteric
+ * cases which do not follow this pattern.
+ *
+ * As a consequence, we take the following best-effort approach for raw usage
+ * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
+ * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
+ * atomic; if there is a matching read_seqcount_retry() call, no following
+ * memory operations are considered atomic. Usage of seqlocks via the
+ * seqlock_t interface is not affected.
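+ *
+ * For reference, a sketch of the typical reader pattern (assuming a
+ * seqcount_t 's' protecting some shared data):
+ *
+ *	unsigned seq;
+ *	do {
+ *		seq = read_seqcount_begin(&s);
+ *		... read the protected data ...
+ *	} while (read_seqcount_retry(&s, seq));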
+ */
+#define KCSAN_SEQLOCK_REGION_MAX 1000
+
 /*
  * Version using sequence counter only.
  * This can be used when code has its own mutex protecting the
                cpu_relax();
                goto repeat;
        }
+       kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
        return ret;
 }
 
 {
        unsigned ret = READ_ONCE(s->sequence);
        smp_rmb();
+       kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
        return ret;
 }
 
 {
        unsigned ret = READ_ONCE(s->sequence);
        smp_rmb();
+       kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
        return ret & ~1;
 }
 
  */
 static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
-       return unlikely(s->sequence != start);
+       kcsan_atomic_next(0);
+       return unlikely(READ_ONCE(s->sequence) != start);
 }
 
 /**
 
 static inline void raw_write_seqcount_begin(seqcount_t *s)
 {
+       kcsan_nestable_atomic_begin();
        s->sequence++;
        smp_wmb();
 }
 {
        smp_wmb();
        s->sequence++;
+       kcsan_nestable_atomic_end();
 }
 
 /**
  */
 static inline void raw_write_seqcount_barrier(seqcount_t *s)
 {
+       kcsan_nestable_atomic_begin();
        s->sequence++;
        smp_wmb();
        s->sequence++;
+       kcsan_nestable_atomic_end();
 }
 
 static inline int raw_read_seqcount_latch(seqcount_t *s)
 static inline void write_seqcount_invalidate(seqcount_t *s)
 {
        smp_wmb();
+       kcsan_nestable_atomic_begin();
        s->sequence+=2;
+       kcsan_nestable_atomic_end();
 }
 
 typedef struct {
  */
 static inline unsigned read_seqbegin(const seqlock_t *sl)
 {
-       return read_seqcount_begin(&sl->seqcount);
+       unsigned ret = read_seqcount_begin(&sl->seqcount);
+
+       kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry */
+       kcsan_flat_atomic_begin();
+       return ret;
 }
 
 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 {
+       /*
+        * Assume not nested: read_seqretry() may be called multiple times
+        * when completing a read critical section.
+        */
+       kcsan_flat_atomic_end();
+
        return read_seqcount_retry(&sl->seqcount, start);
 }
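+
+/*
+ * Example (a sketch of typical non-raw usage, assuming a seqlock_t 'sl'
+ * protecting some shared data): the reader loops until the sequence is
+ * unchanged; under KCSAN, accesses between read_seqbegin() and
+ * read_seqretry() fall within the flat atomic region marked above.
+ *
+ *	unsigned seq;
+ *	do {
+ *		seq = read_seqbegin(&sl);
+ *		... read the protected data ...
+ *	} while (read_seqretry(&sl, seq));
+ */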