#endif /* CONFIG_CGROUP_WRITEBACK */
 
 struct sock;
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
+                            gfp_t gfp_mask);
 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
 #ifdef CONFIG_MEMCG
 extern struct static_key_false memcg_sockets_enabled_key;
 
        return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
 
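+/* Reclaim mode for memcg socket charges: don't block in the softirq packet
+ * receive path, allow reclaim otherwise.
+ */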
+static inline gfp_t gfp_memcg_charge(void)
+{
+       return in_softirq() ? GFP_NOWAIT : GFP_KERNEL;
+}
+
 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
 {
        return noblock ? 0 : sk->sk_rcvtimeo;
 
  * mem_cgroup_charge_skmem - charge socket memory
  * @memcg: memcg to charge
  * @nr_pages: number of pages to charge
+ * @gfp_mask: reclaim mode
  *
  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
- * @memcg's configured limit, %false if the charge had to be forced.
+ * @memcg's configured limit, %false if it doesn't.
  */
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
+                            gfp_t gfp_mask)
 {
-       gfp_t gfp_mask = GFP_KERNEL;
-
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
                struct page_counter *fail;
 
                        memcg->tcpmem_pressure = 0;
                        return true;
                }
-               page_counter_charge(&memcg->tcpmem, nr_pages);
                memcg->tcpmem_pressure = 1;
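+                       /* __GFP_NOFAIL: force the charge past the tcpmem limit */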
+               if (gfp_mask & __GFP_NOFAIL) {
+                       page_counter_charge(&memcg->tcpmem, nr_pages);
+                       return true;
+               }
                return false;
        }
 
-       /* Don't block in the packet receive path */
-       if (in_softirq())
-               gfp_mask = GFP_NOWAIT;
-
-       mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
-
-       if (try_charge(memcg, gfp_mask, nr_pages) == 0)
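+       /* Account the pages in MEMCG_SOCK only when the charge succeeds */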
+       if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
+               mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
                return true;
+       }
 
-       try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
        return false;
 }
 
 
 {
        struct proto *prot = sk->sk_prot;
        long allocated = sk_memory_allocated_add(sk, amt);
+       bool memcg_charge = mem_cgroup_sockets_enabled && sk->sk_memcg;
        bool charged = true;
 
-       if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
-           !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
+       if (memcg_charge &&
+           !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt,
+                                               gfp_memcg_charge())))
                goto suppress_allocation;
 
        /* Under limit. */
                /* Fail only if socket is _under_ its sndbuf.
                 * In this case we cannot block, so that we have to fail.
                 */
-               if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
+               if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) {
+                       /* Force charge with __GFP_NOFAIL */
+                       if (memcg_charge && !charged) {
+                               mem_cgroup_charge_skmem(sk->sk_memcg, amt,
+                                       gfp_memcg_charge() | __GFP_NOFAIL);
+                       }
                        return 1;
+               }
        }
 
        if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
 
        sk_memory_allocated_sub(sk, amt);
 
-       if (mem_cgroup_sockets_enabled && sk->sk_memcg)
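+       /* Undo the memcg charge only if it actually succeeded above */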
+       if (memcg_charge && charged)
                mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
 
        return 0;
 
                                   atomic_read(&newsk->sk_rmem_alloc));
                mem_cgroup_sk_alloc(newsk);
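+               /* Charge memory already accounted to the accepted socket;
+                * __GFP_NOFAIL means this cannot fail.
+                */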
                if (newsk->sk_memcg && amt)
-                       mem_cgroup_charge_skmem(newsk->sk_memcg, amt);
+                       mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
+                                               GFP_KERNEL | __GFP_NOFAIL);
 
                release_sock(newsk);
        }
 
        sk_memory_allocated_add(sk, amt);
 
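+       /* Forced accounting: the memcg charge must succeed, hence __GFP_NOFAIL */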
        if (mem_cgroup_sockets_enabled && sk->sk_memcg)
-               mem_cgroup_charge_skmem(sk->sk_memcg, amt);
+               mem_cgroup_charge_skmem(sk->sk_memcg, amt,
+                                       gfp_memcg_charge() | __GFP_NOFAIL);
 }
 
 /* Send a FIN. The caller locks the socket for us.