While working on a syzbot report, I found that KCSAN would
probably complain that reading q->head or q->tail without
proper annotations could lead to invalid results.

Add the corresponding READ_ONCE() and WRITE_ONCE() to avoid
load-store tearing.

Fixes: d94ba80ebbea ("ptp: Added a brand new class driver for ptp clocks.")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Richard Cochran <richardcochran@gmail.com>
Link: https://lore.kernel.org/r/20231109174859.3995880-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
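
For readers unfamiliar with the pattern, below is a minimal, self-contained
userspace sketch of the idea, not the driver's actual code: the queue indices
are updated under a lock on the writer side, while a lockless reader computes
the element count, so both sides mark their accesses to keep the compiler from
tearing or refetching them. The struct and function names and the simplified
READ_ONCE()/WRITE_ONCE() stand-ins are illustrative assumptions only; the
sketch runs single-threaded and merely shows the shape of the annotation.

#include <pthread.h>
#include <stdio.h>

#define QUEUE_SIZE 128

/* Simplified stand-ins for the kernel macros: the volatile access forces
 * a single, untorn load or store of the marked variable. */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

/* Hypothetical queue mirroring the shape of the code being patched. */
struct ts_queue {
	int head;
	int tail;
	pthread_mutex_t lock;
};

/* Writer side: the index is updated under the lock, but still written
 * with WRITE_ONCE() because a lockless reader may observe it. */
static void producer_advance(struct ts_queue *q)
{
	pthread_mutex_lock(&q->lock);
	WRITE_ONCE(q->tail, (q->tail + 1) % QUEUE_SIZE);
	pthread_mutex_unlock(&q->lock);
}

/* Reader side: may run without the lock, so both loads are marked
 * to avoid load tearing. */
static int queue_cnt(const struct ts_queue *q)
{
	int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);

	return cnt < 0 ? QUEUE_SIZE + cnt : cnt;
}

int main(void)
{
	struct ts_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	producer_advance(&q);
	printf("pending events: %d\n", queue_cnt(&q));
	return 0;
}

The patch below applies exactly this marking to the PTP timestamp event
queue: every writer of queue->head/queue->tail gains a WRITE_ONCE(), and
queue_cnt() reads both indices with READ_ONCE().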
 
 
        for (i = 0; i < cnt; i++) {
                event[i] = queue->buf[queue->head];
-               queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
+               /* Paired with READ_ONCE() in queue_cnt() */
+               WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
        }
 
        spin_unlock_irqrestore(&queue->lock, flags);
 
        dst->t.sec = seconds;
        dst->t.nsec = remainder;
 
+       /* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
        if (!queue_free(queue))
-               queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
+               WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
 
-       queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
+       WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);
 
        spin_unlock_irqrestore(&queue->lock, flags);
 }
 
  * that a writer might concurrently increment the tail does not
  * matter, since the queue remains nonempty nonetheless.
  */
-static inline int queue_cnt(struct timestamp_event_queue *q)
+static inline int queue_cnt(const struct timestamp_event_queue *q)
 {
-       int cnt = q->tail - q->head;
+       /*
+        * Paired with WRITE_ONCE() in enqueue_external_timestamp(),
+        * ptp_read(), extts_fifo_show().
+        */
+       int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);
        return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
 }
 
 
        qcnt = queue_cnt(queue);
        if (qcnt) {
                event = queue->buf[queue->head];
-               queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
+               /* Paired with READ_ONCE() in queue_cnt() */
+               WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
        }
        spin_unlock_irqrestore(&queue->lock, flags);