@@ ... @@
  */
 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 {
-       bool memcg_charge = mem_cgroup_sockets_enabled && sk->sk_memcg;
+       struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL;
        struct proto *prot = sk->sk_prot;
-       bool charged = true;
+       bool charged = false;
        long allocated;
 
        sk_memory_allocated_add(sk, amt);
        allocated = sk_memory_allocated(sk);
-       if (memcg_charge &&
-           !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt,
-                                               gfp_memcg_charge())))
-               goto suppress_allocation;
+
+       if (memcg) {
+               if (!mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge()))
+                       goto suppress_allocation;
+               charged = true;
+       }
 
        /* Under limit. */
        if (allocated <= sk_prot_mem_limits(sk, 0)) {
@@ ... @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 		 */
                if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) {
                        /* Force charge with __GFP_NOFAIL */
-                       if (memcg_charge && !charged) {
-                               mem_cgroup_charge_skmem(sk->sk_memcg, amt,
+                       if (memcg && !charged) {
+                               mem_cgroup_charge_skmem(memcg, amt,
                                        gfp_memcg_charge() | __GFP_NOFAIL);
                        }
 			return 1;
@@ ... @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 
        sk_memory_allocated_sub(sk, amt);
 
-       if (memcg_charge && charged)
-               mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
+       if (charged)
+               mem_cgroup_uncharge_skmem(memcg, amt);
 
        return 0;
 }
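
The hunks above replace the memcg_charge/charged boolean pair with a single cached memcg pointer and a charged flag that only becomes true after a successful charge, so the rollback path can test charged alone without also checking the pointer. Below is a minimal, standalone sketch of that control flow, not kernel code; every name in it (fake_memcg, charge_skmem, uncharge_skmem, raise_allocated) is a hypothetical stand-in used only for illustration.

/* Standalone sketch of the reworked flow: cache the conditional pointer once,
 * start `charged` as false, and set it only after a successful charge, so the
 * failure path needs nothing more than `if (charged)` before rolling back.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_memcg { int usage; int limit; };

/* Stand-in for mem_cgroup_charge_skmem(): returns true on success. */
static bool charge_skmem(struct fake_memcg *memcg, int amt)
{
	if (memcg->usage + amt > memcg->limit)
		return false;
	memcg->usage += amt;
	return true;
}

/* Stand-in for mem_cgroup_uncharge_skmem(). */
static void uncharge_skmem(struct fake_memcg *memcg, int amt)
{
	memcg->usage -= amt;
}

/* memcg may be NULL; charged becomes true only after a successful charge,
 * so the suppress path never uncharges something that was never charged.
 */
static int raise_allocated(struct fake_memcg *memcg, int amt, bool over_limit)
{
	bool charged = false;

	if (memcg) {
		if (!charge_skmem(memcg, amt))
			goto suppress_allocation;
		charged = true;
	}

	if (!over_limit)
		return 1;	/* under the limit: keep the charge */

suppress_allocation:
	if (charged)
		uncharge_skmem(memcg, amt);
	return 0;
}

int main(void)
{
	struct fake_memcg memcg = { .usage = 0, .limit = 8 };

	printf("%d\n", raise_allocated(&memcg, 4, false));	/* 1: charged */
	printf("%d\n", raise_allocated(&memcg, 16, false));	/* 0: charge fails */
	printf("%d\n", raise_allocated(NULL, 4, false));	/* 1: no memcg */
	printf("%d\n", raise_allocated(&memcg, 2, true));	/* 0: rolled back */
	printf("usage=%d\n", memcg.usage);			/* back to 4 */
	return 0;
}

Because charged is initialized to false and flipped only after mem_cgroup_charge_skmem() succeeds, the final uncharge check in the patch can drop the old "memcg_charge &&" half of the condition: charged being true already implies a valid memcg.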