 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-       u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
-       u32 tail_gso_size, tail_gso_segs;
+       u32 limit, tail_gso_size, tail_gso_segs;
        struct skb_shared_info *shinfo;
        const struct tcphdr *th;
        struct tcphdr *thtail;
@@ ... @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
        /* Only socket owner can try to collapse/prune rx queues
         * to reduce memory overhead, so add a little headroom here.
         * Few sockets backlog are possibly concurrently non empty.
         */
-       limit += 64*1024;
+       limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf) + 64*1024;
 
        if (unlikely(sk_add_backlog(sk, skb, limit))) {
                bh_unlock_sock(sk);