*
  * returns 0 on success and <0 if the counter->usage will exceed the
  * counter->limit. The _locked call expects the counter->lock to be taken.
+ *
+ * charge_nofail works the same, except that it charges the resource
+ * counter unconditionally, and returns < 0 if, after the current
+ * charge, we are over the limit.
  */
 
 int __must_check res_counter_charge_locked(struct res_counter *counter,
                unsigned long val);
 int __must_check res_counter_charge(struct res_counter *counter,
                unsigned long val, struct res_counter **limit_fail_at);
+int __must_check res_counter_charge_nofail(struct res_counter *counter,
+               unsigned long val, struct res_counter **limit_fail_at);
 
 /*
  * uncharge - tell that some portion of the resource is released
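
For reference, a minimal caller-side sketch (not part of the patch) contrasting the two charge calls described in the comment above; the helper names are invented for illustration and the snippet assumes <linux/res_counter.h>:

#include <linux/res_counter.h>

/* illustrative only: the strict charge fails without touching usage
 * when counter->usage + val would exceed counter->limit */
static int example_try_charge(struct res_counter *cnt, unsigned long val)
{
        struct res_counter *fail;

        return res_counter_charge(cnt, val, &fail);     /* 0 or < 0 */
}

/* illustrative only: the nofail variant bumps usage either way; a
 * negative return merely reports that 'fail' went over its limit */
static int example_charge_anyway(struct res_counter *cnt, unsigned long val)
{
        struct res_counter *fail;

        return res_counter_charge_nofail(cnt, val, &fail) < 0;
}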
 
        struct res_counter *fail;
        int ret;
 
-       ret = res_counter_charge(prot->memory_allocated,
-                                amt << PAGE_SHIFT, &fail);
-
+       ret = res_counter_charge_nofail(prot->memory_allocated,
+                                       amt << PAGE_SHIFT, &fail);
        if (ret < 0)
                *parent_status = OVER_LIMIT;
 }
 }
 
 static inline void
-sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
+sk_memory_allocated_sub(struct sock *sk, int amt)
 {
        struct proto *prot = sk->sk_prot;
 
-       if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
-           parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
                memcg_memory_allocated_sub(sk->sk_cgrp, amt);
 
        atomic_long_sub(amt, prot->memory_allocated);
 
        return ret;
 }
 
+int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
+                             struct res_counter **limit_fail_at)
+{
+       int ret, r;
+       unsigned long flags;
+       struct res_counter *c;
+
+       r = ret = 0;
+       *limit_fail_at = NULL;
+       local_irq_save(flags);
+       for (c = counter; c != NULL; c = c->parent) {
+               spin_lock(&c->lock);
+               r = res_counter_charge_locked(c, val);
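+               /* charge refused (over limit): nofail bumps usage anyway */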
+               if (r)
+                       c->usage += val;
+               spin_unlock(&c->lock);
+               if (r < 0 && ret == 0) {
+                       *limit_fail_at = c;
+                       ret = r;
+               }
+       }
+       local_irq_restore(flags);
+
+       return ret;
+}
+
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
 {
        if (WARN_ON(counter->usage < val))
 
        /* Alas. Undo changes. */
        sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
 
-       sk_memory_allocated_sub(sk, amt, parent_status);
+       sk_memory_allocated_sub(sk, amt);
 
        return 0;
 }
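
A note on the design choice, as an illustrative sketch rather than part of the patch: since res_counter_charge_nofail always accounts the charge, every add is now matched by an unconditional sub, which is why sk_memory_allocated_sub no longer needs parent_status. The helper below is invented to show the balanced pairing:

static void example_balanced_usage(struct res_counter *cnt, unsigned long bytes)
{
        struct res_counter *fail;

        /* always charged, even over the limit; < 0 only reports it */
        if (res_counter_charge_nofail(cnt, bytes, &fail) < 0)
                pr_warn("res_counter over limit\n");

        /* ... consume the resource ... */

        /* the unconditional uncharge pairs with the unconditional charge */
        res_counter_uncharge(cnt, bytes);
}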
 void __sk_mem_reclaim(struct sock *sk)
 {
        sk_memory_allocated_sub(sk,
-                               sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
+                               sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
        sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
        if (sk_under_memory_pressure(sk) &&