                spin_unlock(busy);
 }
 
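+/* Make sure sk_forward_alloc covers @size bytes, charging any shortfall
+ * via __sk_mem_schedule(). Returns 0 on success, or -ENOBUFS if the
+ * memory accounting rejects the charge.
+ */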
+static int udp_rmem_schedule(struct sock *sk, int size)
+{
+       int delta;
+
+       delta = size - sk->sk_forward_alloc;
+       if (delta > 0 && !__sk_mem_schedule(sk, delta, SK_MEM_RECV))
+               return -ENOBUFS;
+
+       return 0;
+}
+
 int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 {
        struct sk_buff_head *list = &sk->sk_receive_queue;
-       int rmem, delta, amt, err = -ENOMEM;
+       int rmem, err = -ENOMEM;
        spinlock_t *busy = NULL;
        int size;
 
@@ ... @@
                goto uncharge_drop;
 
        spin_lock(&list->lock);
-       if (size >= sk->sk_forward_alloc) {
-               amt = sk_mem_pages(size);
-               delta = amt << PAGE_SHIFT;
-               if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
-                       err = -ENOBUFS;
-                       spin_unlock(&list->lock);
-                       goto uncharge_drop;
-               }
-
-               sk->sk_forward_alloc += delta;
+       err = udp_rmem_schedule(sk, size);
+       if (err) {
+               spin_unlock(&list->lock);
+               goto uncharge_drop;
        }
 
        sk->sk_forward_alloc -= size;