sk->sk_ack_backlog can be read without any lock being held.
Use READ_ONCE()/WRITE_ONCE() on these accesses to avoid load/store
tearing and potential KCSAN warnings.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
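
For context, the pattern being applied is sketched below as a small
userspace program. It is illustrative only: the READ_ONCE()/WRITE_ONCE()
macros here are simplified volatile-cast approximations of the kernel
helpers, and struct sock_stub, acceptq_added() and acceptq_len() are
made-up stand-ins for the real socket code. The idea is that the writer
(which still runs under the listener lock) and the lockless reader both
annotate the shared field so the compiler cannot tear, fuse or re-load
the access.

  #include <pthread.h>
  #include <stdio.h>

  /* Simplified approximations of the kernel macros, for illustration only. */
  #define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))
  #define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

  /* Hypothetical stand-in for the relevant part of struct sock. */
  struct sock_stub {
          pthread_mutex_t lock;           /* held by writers, not by readers */
          unsigned int    sk_ack_backlog;
  };

  /* Writer: runs under the lock, but the store is still annotated because
   * lockless readers may observe it concurrently.
   */
  static void acceptq_added(struct sock_stub *sk)
  {
          pthread_mutex_lock(&sk->lock);
          WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
          pthread_mutex_unlock(&sk->lock);
  }

  /* Lockless reader: the annotated load is a single access the compiler
   * may not split or re-read.
   */
  static unsigned int acceptq_len(struct sock_stub *sk)
  {
          return READ_ONCE(sk->sk_ack_backlog);
  }

  int main(void)
  {
          struct sock_stub sk = {
                  .lock = PTHREAD_MUTEX_INITIALIZER,
                  .sk_ack_backlog = 0,
          };

          acceptq_added(&sk);
          printf("ack backlog: %u\n", acceptq_len(&sk));
          return 0;
  }

Built with "cc -pthread", this prints "ack backlog: 1"; the annotations do
not add ordering or atomic RMW semantics, they only keep each individual
load and store whole, which is all this patch needs.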
 
 static inline void sk_acceptq_removed(struct sock *sk)
 {
-       sk->sk_ack_backlog--;
+       WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
 }
 
 static inline void sk_acceptq_added(struct sock *sk)
 {
-       sk->sk_ack_backlog++;
+       WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
 }
 
 static inline bool sk_acceptq_is_full(const struct sock *sk)
 {
-       return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
+       return READ_ONCE(sk->sk_ack_backlog) > sk->sk_max_ack_backlog;
 }
 
 /*
 
                 * tcpi_unacked -> Number of children ready for accept()
                 * tcpi_sacked  -> max backlog
                 */
-               info->tcpi_unacked = sk->sk_ack_backlog;
+               info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
                info->tcpi_sacked = sk->sk_max_ack_backlog;
                return;
        }
 
        struct tcp_info *info = _info;
 
        if (inet_sk_state_load(sk) == TCP_LISTEN) {
-               r->idiag_rqueue = sk->sk_ack_backlog;
+               r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
                r->idiag_wqueue = sk->sk_max_ack_backlog;
        } else if (sk->sk_type == SOCK_STREAM) {
                const struct tcp_sock *tp = tcp_sk(sk);
 
 
        state = inet_sk_state_load(sk);
        if (state == TCP_LISTEN)
-               rx_queue = sk->sk_ack_backlog;
+               rx_queue = READ_ONCE(sk->sk_ack_backlog);
        else
                /* Because we don't lock the socket,
                 * we might find a transient negative value.
 
 
        state = inet_sk_state_load(sp);
        if (state == TCP_LISTEN)
-               rx_queue = sp->sk_ack_backlog;
+               rx_queue = READ_ONCE(sp->sk_ack_backlog);
        else
                /* Because we don't lock the socket,
                 * we might find a transient negative value.
 
                *err = -1;
                return;
        }
-       dst->value = sk->sk_ack_backlog;
+       dst->value = READ_ONCE(sk->sk_ack_backlog);
 }
 
 META_COLLECTOR(int_sk_max_ack_bl)
 
                r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
                r->idiag_wqueue = infox->asoc->sndbuf_used;
        } else {
-               r->idiag_rqueue = sk->sk_ack_backlog;
+               r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
                r->idiag_wqueue = sk->sk_max_ack_backlog;
        }
        if (infox->sctpinfo)