                reuse = rcu_dereference(sk->sk_reuseport_cb);
                if (likely(reuse)) {
                        last_overflow = READ_ONCE(reuse->synq_overflow_ts);
-                       return time_after32(now, last_overflow +
-                                           TCP_SYNCOOKIE_VALID);
+                       return !time_between32(now, last_overflow - HZ,
+                                              last_overflow +
+                                              TCP_SYNCOOKIE_VALID);
                }
        }
 
        last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
-       return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
+
+       /* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
+        * then we're under synflood. However, we have to use
+        * 'last_overflow - HZ' as lower bound. That's because a concurrent
+        * tcp_synq_overflow() could update .ts_recent_stamp after we read
+        * jiffies but before we store .ts_recent_stamp into last_overflow,
+        * which could lead to rejecting a valid syncookie.
+        */
+       return !time_between32(now, last_overflow - HZ,
+                              last_overflow + TCP_SYNCOOKIE_VALID);
 }
 
 static inline u32 tcp_cookie_time(void)
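
For reference, a minimal user-space sketch of the wrap-safe window check the hunks above rely on. It is not part of the patch: time_between32() is modelled after the helper this change uses ("l <= t <= h" evaluated with unsigned 32-bit wrap-around), and the HZ and TCP_SYNCOOKIE_VALID values are assumptions chosen for illustration.

/* Sketch only: models the wrap-safe interval test and the '-HZ' slack
 * described in the comment added by this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define HZ                   1000u           /* assumed tick rate */
#define TCP_SYNCOOKIE_VALID  (60u * HZ)      /* assumed 60s validity window */

/* True when l <= t <= h, computed modulo 2^32, so a jiffies wrap does not
 * break the comparison the way time_after32() can once the two timestamps
 * drift about 2^31 apart.
 */
static int time_between32(uint32_t t, uint32_t l, uint32_t h)
{
	return (uint32_t)(h - l) >= (uint32_t)(t - l);
}

int main(void)
{
	/* Racy case from the comment above: another CPU runs
	 * tcp_synq_overflow() and stores a fresher jiffies into
	 * .ts_recent_stamp after we sampled 'now', so last_overflow ends up
	 * slightly ahead of 'now'.  The '- HZ' lower bound keeps 'now'
	 * inside the window, so the check still reports a recent overflow
	 * and the valid syncookie is not rejected.
	 */
	uint32_t now = 4000000000u;        /* near the u32 wrap point */
	uint32_t last_overflow = now + 3;  /* updated just after we read jiffies */

	printf("no recent overflow? %d\n",
	       !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID));
	return 0;
}

Running the sketch prints "no recent overflow? 0", i.e. the overflow is still considered recent; with the old time_after32()-based check a stale or slightly-ahead timestamp could flip that answer and cause a valid cookie to be dropped.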