 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
+#include <linux/poll.h>
 
 #include <asm/atomic.h>
 #include <net/dst.h>
        return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
 }
 
+/**
+ * sk_has_sleeper - check if there are any waiting processes
+ * @sk: socket
+ *
+ * Returns true if the socket has waiting processes
+ *
+ * The purpose of sk_has_sleeper and sock_poll_wait is to wrap the memory
+ * barrier call. They were added because of a race found in the tcp code.
+ *
+ * Consider the following tcp code paths:
+ *
+ * CPU1                  CPU2
+ *
+ * sys_select            receive packet
+ *   ...                 ...
+ *   __add_wait_queue    update tp->rcv_nxt
+ *   ...                 ...
+ *   tp->rcv_nxt check   sock_def_readable
+ *   ...                 {
+ *   schedule               ...
+ *                          if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ *                              wake_up_interruptible(sk->sk_sleep)
+ *                          ...
+ *                       }
+ *
+ * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
+ * in its cache, and so does the tp->rcv_nxt update on CPU2's side.  CPU1
+ * could then end up calling schedule and sleeping forever if no more data
+ * arrives on the socket.
+ */
+static inline int sk_has_sleeper(struct sock *sk)
+{
+       /*
+        * We need to be sure we are in sync with the
+        * add_wait_queue modifications to the wait queue.
+        *
+        * This memory barrier pairs with the one in sock_poll_wait.
+        */
+       smp_mb();
+       return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
+}
+
+/**
+ * sock_poll_wait - place memory barrier behind the poll_wait call
+ * @filp:           file
+ * @wait_address:   socket wait queue
+ * @p:              poll_table
+ *
+ * See the comments in the sk_has_sleeper function.
+ */
+static inline void sock_poll_wait(struct file *filp,
+               wait_queue_head_t *wait_address, poll_table *p)
+{
+       if (p && wait_address) {
+               poll_wait(filp, wait_address, p);
+               /*
+                * We need to be sure we are in sync with the
+                * socket flags modification.
+                *
+                * This memory barrier pairs with the one in sk_has_sleeper.
+                */
+               smp_mb();
+       }
+}
+
 /*
  *     Queue a received datagram if it will fit. Stream and sequenced
  *     protocols can't normally use this as they need to fit buffers in
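
The two comments above describe the classic store-buffering pattern: each side
does a store, a full barrier, then a load of the other side's flag, and the
paired barriers guarantee that at least one side observes the other's store.
As a rough userspace analogue (a sketch only, not part of this patch:
waiter_queued, data_ready, woken, producer and consumer are illustrative
names, and C11 seq_cst fences stand in for smp_mb()):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* waiter_queued stands in for "the wait queue is non-empty",
     * data_ready for "tp->rcv_nxt has moved". */
    static atomic_bool waiter_queued, data_ready, woken;

    static void *producer(void *arg)        /* CPU2: the receive path */
    {
            (void)arg;
            atomic_store_explicit(&data_ready, true, memory_order_relaxed);
            /* the smp_mb() in sk_has_sleeper */
            atomic_thread_fence(memory_order_seq_cst);
            if (atomic_load_explicit(&waiter_queued, memory_order_relaxed))
                    atomic_store(&woken, true);   /* wake_up_interruptible() */
            return NULL;
    }

    static void *consumer(void *arg)        /* CPU1: the poll/select path */
    {
            (void)arg;
            atomic_store_explicit(&waiter_queued, true, memory_order_relaxed);
            /* the smp_mb() in sock_poll_wait */
            atomic_thread_fence(memory_order_seq_cst);
            if (!atomic_load_explicit(&data_ready, memory_order_relaxed))
                    /* "schedule": safe, since the producer is now
                     * guaranteed to have seen waiter_queued */
                    while (!atomic_load(&woken))
                            usleep(1000);
            return NULL;
    }

    int main(void)
    {
            pthread_t p, c;
            pthread_create(&c, NULL, consumer, NULL);
            pthread_create(&p, NULL, producer, NULL);
            pthread_join(p, NULL);
            pthread_join(c, NULL);
            puts("no lost wakeup");
            return 0;
    }

Drop the two fences and both stores may still sit in the CPUs' store buffers
when the opposite loads execute; the consumer then blocks forever, which is
exactly the schedule-and-sleep-forever outcome described in the comment.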
 
 static void vcc_def_wakeup(struct sock *sk)
 {
        read_lock(&sk->sk_callback_lock);
-       if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+       if (sk_has_sleeper(sk))
                wake_up(sk->sk_sleep);
        read_unlock(&sk->sk_callback_lock);
 }
        read_lock(&sk->sk_callback_lock);
 
        if (vcc_writable(sk)) {
-               if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+               if (sk_has_sleeper(sk))
                        wake_up_interruptible(sk->sk_sleep);
 
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        struct atm_vcc *vcc;
        unsigned int mask;
 
-       poll_wait(file, sk->sk_sleep, wait);
+       sock_poll_wait(file, sk->sk_sleep, wait);
        mask = 0;
 
        vcc = ATM_SD(sock);
 
        struct sock *sk = sock->sk;
        unsigned int mask;
 
-       poll_wait(file, sk->sk_sleep, wait);
+       sock_poll_wait(file, sk->sk_sleep, wait);
        mask = 0;
 
        /* exceptional events? */
 
 static void sock_def_wakeup(struct sock *sk)
 {
        read_lock(&sk->sk_callback_lock);
-       if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+       if (sk_has_sleeper(sk))
                wake_up_interruptible_all(sk->sk_sleep);
        read_unlock(&sk->sk_callback_lock);
 }
 static void sock_def_error_report(struct sock *sk)
 {
        read_lock(&sk->sk_callback_lock);
-       if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+       if (sk_has_sleeper(sk))
                wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
        sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
        read_unlock(&sk->sk_callback_lock);
 static void sock_def_readable(struct sock *sk, int len)
 {
        read_lock(&sk->sk_callback_lock);
-       if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+       if (sk_has_sleeper(sk))
                wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
                                                POLLRDNORM | POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
         * progress.  --DaveM
         */
        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
-               if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+               if (sk_has_sleeper(sk))
                        wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
                                                POLLWRNORM | POLLWRBAND);
 
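(For the arithmetic above: (wmem_alloc << 1) <= sndbuf is equivalent to
wmem_alloc <= sndbuf / 2, so writers are woken only once at least half of the
send buffer is free again; with, say, a 64 KB sk_sndbuf the wakeup fires when
allocated write memory has dropped to 32 KB or less.)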
 
 {
        read_lock(&sk->sk_callback_lock);
 
-       if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+       if (sk_has_sleeper(sk))
                wake_up_interruptible(sk->sk_sleep);
        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
 
        unsigned int mask;
        struct sock *sk = sock->sk;
 
-       poll_wait(file, sk->sk_sleep, wait);
+       sock_poll_wait(file, sk->sk_sleep, wait);
        if (sk->sk_state == DCCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
 
        struct sock *sk = sock->sk;
        struct tcp_sock *tp = tcp_sk(sk);
 
-       poll_wait(file, sk->sk_sleep, wait);
+       sock_poll_wait(file, sk->sk_sleep, wait);
        if (sk->sk_state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
 
 static void iucv_sock_wake_msglim(struct sock *sk)
 {
        read_lock(&sk->sk_callback_lock);
-       if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+       if (sk_has_sleeper(sk))
                wake_up_interruptible_all(sk->sk_sleep);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        read_unlock(&sk->sk_callback_lock);
        struct sock *sk = sock->sk;
        unsigned int mask = 0;
 
-       poll_wait(file, sk->sk_sleep, wait);
+       sock_poll_wait(file, sk->sk_sleep, wait);
 
        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);
 
        _enter("%p", sk);
        read_lock(&sk->sk_callback_lock);
        if (rxrpc_writable(sk)) {
-               if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+               if (sk_has_sleeper(sk))
                        wake_up_interruptible(sk->sk_sleep);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        unsigned int mask;
        struct sock *sk = sock->sk;
 
-       poll_wait(file, sk->sk_sleep, wait);
+       sock_poll_wait(file, sk->sk_sleep, wait);
        mask = 0;
 
        /* the socket is readable if there are any messages waiting on the Rx
 
 {
        read_lock(&sk->sk_callback_lock);
        if (unix_writable(sk)) {
-               if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+               if (sk_has_sleeper(sk))
                        wake_up_interruptible_sync(sk->sk_sleep);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        struct sock *sk = sock->sk;
        unsigned int mask;
 
-       poll_wait(file, sk->sk_sleep, wait);
+       sock_poll_wait(file, sk->sk_sleep, wait);
        mask = 0;
 
        /* exceptional events? */
        struct sock *sk = sock->sk, *other;
        unsigned int mask, writable;
 
-       poll_wait(file, sk->sk_sleep, wait);
+       sock_poll_wait(file, sk->sk_sleep, wait);
        mask = 0;
 
        /* exceptional events? */
                other = unix_peer_get(sk);
                if (other) {
                        if (unix_peer(other) != sk) {
-                               poll_wait(file, &unix_sk(other)->peer_wait,
-                                         wait);
+                               sock_poll_wait(file, &unix_sk(other)->peer_wait,
+                                              wait);
                                if (unix_recvq_full(other))
                                        writable = 0;