  * Do not take into account this skb truesize,
  * to allow even a single big packet to come.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
+                                    unsigned int limit)
 {
        unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-       return qsize > sk->sk_rcvbuf;
+       return qsize > limit;
 }
 
 /* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+                                             unsigned int limit)
 {
-       if (sk_rcvqueues_full(sk, skb))
+       if (sk_rcvqueues_full(sk, skb, limit))
                return -ENOBUFS;
 
        __sk_add_backlog(sk, skb);
 
 
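Both helpers now take an explicit limit instead of always comparing against sk->sk_rcvbuf, so each call site decides how much queued memory it will tolerate before refusing a packet. A minimal userspace sketch of that comparison follows; the struct and field names are stand-ins that mimic the fields the helpers read, not kernel types, and the headroom value is purely hypothetical.

/* Standalone sketch, not kernel code: illustrates only the arithmetic
 * of the new caller-supplied limit. */
#include <stdbool.h>
#include <stdio.h>

struct mock_sock {
	unsigned int backlog_len;   /* stand-in for sk->sk_backlog.len   */
	unsigned int rmem_alloc;    /* stand-in for sk->sk_rmem_alloc    */
	unsigned int rcvbuf;        /* stand-in for sk->sk_rcvbuf        */
};

/* Mirrors sk_rcvqueues_full(): queued bytes are compared against a
 * caller-chosen limit rather than unconditionally against rcvbuf. */
static bool rcvqueues_full(const struct mock_sock *sk, unsigned int limit)
{
	unsigned int qsize = sk->backlog_len + sk->rmem_alloc;

	return qsize > limit;
}

int main(void)
{
	struct mock_sock sk = { .backlog_len = 60000, .rmem_alloc = 80000,
				.rcvbuf = 131072 };

	/* Passing rcvbuf as the limit keeps the historical behaviour:
	 * 140000 queued bytes exceed the 131072-byte buffer. */
	printf("limit = rcvbuf:       full = %d\n",
	       rcvqueues_full(&sk, sk.rcvbuf));

	/* A caller could pass a larger limit to absorb a burst while the
	 * socket is owned by user context (hypothetical headroom). */
	printf("limit = rcvbuf + 64k: full = %d\n",
	       rcvqueues_full(&sk, sk.rcvbuf + 65536));
	return 0;
}

In this patch every converted caller still passes sk->sk_rcvbuf, so behaviour is unchanged; the parameter only opens the door for per-protocol limits.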
        skb->dev = NULL;
 
-       if (sk_rcvqueues_full(sk, skb)) {
+       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
                atomic_inc(&sk->sk_drops);
                goto discard_and_relse;
        }
                rc = sk_backlog_rcv(sk, skb);
 
                mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-       } else if (sk_add_backlog(sk, skb)) {
+       } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
                bh_unlock_sock(sk);
                atomic_inc(&sk->sk_drops);
                goto discard_and_relse;
 
                        if (!tcp_prequeue(sk, skb))
                                ret = tcp_v4_do_rcv(sk, skb);
                }
-       } else if (unlikely(sk_add_backlog(sk, skb))) {
+       } else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
                bh_unlock_sock(sk);
                NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
                goto discard_and_relse;
 
                goto drop;
 
 
-       if (sk_rcvqueues_full(sk, skb))
+       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
                goto drop;
 
        rc = 0;
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                rc = __udp_queue_rcv_skb(sk, skb);
-       else if (sk_add_backlog(sk, skb)) {
+       else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
                bh_unlock_sock(sk);
                goto drop;
        }
 
                        if (!tcp_prequeue(sk, skb))
                                ret = tcp_v6_do_rcv(sk, skb);
                }
-       } else if (unlikely(sk_add_backlog(sk, skb))) {
+       } else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
                bh_unlock_sock(sk);
                NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
                goto discard_and_relse;
 
 
                sk = stack[i];
                if (skb1) {
-                       if (sk_rcvqueues_full(sk, skb1)) {
+                       if (sk_rcvqueues_full(sk, skb1, sk->sk_rcvbuf)) {
                                kfree_skb(skb1);
                                goto drop;
                        }
                        bh_lock_sock(sk);
                        if (!sock_owned_by_user(sk))
                                udpv6_queue_rcv_skb(sk, skb1);
-                       else if (sk_add_backlog(sk, skb1)) {
+                       else if (sk_add_backlog(sk, skb1, sk->sk_rcvbuf)) {
                                kfree_skb(skb1);
                                bh_unlock_sock(sk);
                                goto drop;
 
        /* deliver */
 
-       if (sk_rcvqueues_full(sk, skb)) {
+       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
                sock_put(sk);
                goto discard;
        }
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                udpv6_queue_rcv_skb(sk, skb);
-       else if (sk_add_backlog(sk, skb)) {
+       else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
                atomic_inc(&sk->sk_drops);
                bh_unlock_sock(sk);
                sock_put(sk);
 
        else {
                dprintk("%s: adding to backlog...\n", __func__);
                llc_set_backlog_type(skb, LLC_PACKET);
-               if (sk_add_backlog(sk, skb))
+               if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
                        goto drop_unlock;
        }
 out:
 
                sctp_bh_lock_sock(sk);
 
                if (sock_owned_by_user(sk)) {
-                       if (sk_add_backlog(sk, skb))
+                       if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
                                sctp_chunk_free(chunk);
                        else
                                backloged = 1;
        struct sctp_ep_common *rcvr = chunk->rcvr;
        int ret;
 
-       ret = sk_add_backlog(sk, skb);
+       ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
        if (!ret) {
                /* Hold the assoc/ep while hanging on the backlog queue.
                 * This way, we know structures we need will not disappear
 
        if (!sock_owned_by_user(sk)) {
                res = filter_rcv(sk, buf);
        } else {
-               if (sk_add_backlog(sk, buf))
+               if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
                        res = TIPC_ERR_OVERLOAD;
                else
                        res = TIPC_OK;
 
                if (!sock_owned_by_user(sk)) {
                        queued = x25_process_rx_frame(sk, skb);
                } else {
-                       queued = !sk_add_backlog(sk, skb);
+                       queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
                }
                bh_unlock_sock(sk);
                sock_put(sk);