struct cg_proto {
        void                    (*enter_memory_pressure)(struct sock *sk);
-       struct res_counter      *memory_allocated;      /* Current allocated memory. */
-       struct percpu_counter   *sockets_allocated;     /* Current number of sockets. */
-       int                     *memory_pressure;
-       long                    *sysctl_mem;
+       struct res_counter      memory_allocated;       /* Current allocated memory. */
+       struct percpu_counter   sockets_allocated;      /* Current number of sockets. */
+       int                     memory_pressure;
+       long                    sysctl_mem[3];
        unsigned long           flags;
        /*
         * memcg field is used to find which memcg we belong directly
                return false;
 
        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-               return !!*sk->sk_cgrp->memory_pressure;
+               return !!sk->sk_cgrp->memory_pressure;
 
        return !!*sk->sk_prot->memory_pressure;
 }
 
 static inline void sk_leave_memory_pressure(struct sock *sk)
                struct proto *prot = sk->sk_prot;
 
                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-                       if (*cg_proto->memory_pressure)
-                               *cg_proto->memory_pressure = 0;
+                       if (cg_proto->memory_pressure)
+                               cg_proto->memory_pressure = 0;
        }
 
 }
        struct res_counter *fail;
        int ret;
 
-       ret = res_counter_charge_nofail(prot->memory_allocated,
+       ret = res_counter_charge_nofail(&prot->memory_allocated,
                                        amt << PAGE_SHIFT, &fail);
        if (ret < 0)
                *parent_status = OVER_LIMIT;
 static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
                                              unsigned long amt)
 {
-       res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+       res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT);
 }
 
 static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
 {
        u64 ret;
-       ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+       ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE);
        return ret >> PAGE_SHIFT;
 }
 
                struct cg_proto *cg_proto = sk->sk_cgrp;
 
                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-                       percpu_counter_dec(cg_proto->sockets_allocated);
+                       percpu_counter_dec(&cg_proto->sockets_allocated);
        }
 
        percpu_counter_dec(prot->sockets_allocated);
                struct cg_proto *cg_proto = sk->sk_cgrp;
 
                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-                       percpu_counter_inc(cg_proto->sockets_allocated);
+                       percpu_counter_inc(&cg_proto->sockets_allocated);
        }
 
        percpu_counter_inc(prot->sockets_allocated);
        struct proto *prot = sk->sk_prot;
 
        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-               return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated);
+               return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated);
 
        return percpu_counter_read_positive(prot->sockets_allocated);
 }
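
The sock.h hunks above all follow one pattern: the accounting fields of cg_proto are now embedded members instead of pointers into a separate tcp_memcontrol, so readers test the int directly and writers pass &cg_proto->field. A minimal userspace sketch of that pattern (stand-in types only, not kernel code), for illustration:

#include <stdio.h>

struct counter { long usage; };              /* stand-in for res_counter */

struct cg_proto_sketch {
	struct counter memory_allocated;     /* embedded, not a pointer */
	int            memory_pressure;      /* plain flag, no extra deref */
};

static void charge(struct counter *c, long amt) { c->usage += amt; }

int main(void)
{
	struct cg_proto_sketch cg = { { 0 }, 0 };

	charge(&cg.memory_allocated, 1 << 12);  /* was: charge(cg->memory_allocated, ...) */
	cg.memory_pressure = 1;                 /* was: *cg->memory_pressure = 1 */
	if (cg.memory_pressure)                 /* was: !!*cg->memory_pressure */
		printf("usage=%ld, under pressure\n", cg.memory_allocated.usage);
	return 0;
}
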
 
 #include <linux/memcontrol.h>
 #include <linux/module.h>
 
-static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
-{
-       return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
-}
-
 static void memcg_tcp_enter_memory_pressure(struct sock *sk)
 {
-       if (sk->sk_cgrp->memory_pressure)
-               *sk->sk_cgrp->memory_pressure = 1;
+       sk->sk_cgrp->memory_pressure = 1;
 }
 EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
 
         */
        struct res_counter *res_parent = NULL;
        struct cg_proto *cg_proto, *parent_cg;
-       struct tcp_memcontrol *tcp;
        struct mem_cgroup *parent = parent_mem_cgroup(memcg);
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return 0;
 
-       tcp = tcp_from_cgproto(cg_proto);
-
-       tcp->tcp_prot_mem[0] = sysctl_tcp_mem[0];
-       tcp->tcp_prot_mem[1] = sysctl_tcp_mem[1];
-       tcp->tcp_prot_mem[2] = sysctl_tcp_mem[2];
-       tcp->tcp_memory_pressure = 0;
+       cg_proto->sysctl_mem[0] = sysctl_tcp_mem[0];
+       cg_proto->sysctl_mem[1] = sysctl_tcp_mem[1];
+       cg_proto->sysctl_mem[2] = sysctl_tcp_mem[2];
+       cg_proto->memory_pressure = 0;
+       cg_proto->memcg = memcg;
 
        parent_cg = tcp_prot.proto_cgroup(parent);
        if (parent_cg)
-               res_parent = parent_cg->memory_allocated;
-
-       res_counter_init(&tcp->tcp_memory_allocated, res_parent);
-       percpu_counter_init(&tcp->tcp_sockets_allocated, 0);
+               res_parent = &parent_cg->memory_allocated;
 
-       cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure;
-       cg_proto->memory_pressure = &tcp->tcp_memory_pressure;
-       cg_proto->sysctl_mem = tcp->tcp_prot_mem;
-       cg_proto->memory_allocated = &tcp->tcp_memory_allocated;
-       cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
-       cg_proto->memcg = memcg;
+       res_counter_init(&cg_proto->memory_allocated, res_parent);
+       percpu_counter_init(&cg_proto->sockets_allocated, 0);
 
        return 0;
 }
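
tcp_init_cgroup now initializes the embedded counter in place and, when the memcg has a parent, links it to the parent's embedded counter so charges propagate up the hierarchy. A rough userspace model of that parent linkage (plain C stand-ins, not the kernel res_counter API):

#include <stdio.h>
#include <stddef.h>

struct counter {
	long usage;
	struct counter *parent;
};

/* mirrors res_counter_init(&cg_proto->memory_allocated, res_parent) */
static void counter_init(struct counter *c, struct counter *parent)
{
	c->usage = 0;
	c->parent = parent;
}

/* a charge is accounted to the group and every ancestor */
static void counter_charge(struct counter *c, long amt)
{
	for (; c; c = c->parent)
		c->usage += amt;
}

int main(void)
{
	struct counter root, child;

	counter_init(&root, NULL);
	counter_init(&child, &root);

	counter_charge(&child, 4096);
	printf("child=%ld root=%ld\n", child.usage, root.usage);
	return 0;
}
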
 void tcp_destroy_cgroup(struct mem_cgroup *memcg)
 {
        struct cg_proto *cg_proto;
-       struct tcp_memcontrol *tcp;
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return;
 
-       tcp = tcp_from_cgproto(cg_proto);
-       percpu_counter_destroy(&tcp->tcp_sockets_allocated);
+       percpu_counter_destroy(&cg_proto->sockets_allocated);
 }
 EXPORT_SYMBOL(tcp_destroy_cgroup);
 
 static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 {
-       struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
        u64 old_lim;
        int i;
        if (val > RES_COUNTER_MAX)
                val = RES_COUNTER_MAX;
 
-       tcp = tcp_from_cgproto(cg_proto);
-
-       old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-       ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val);
+       old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT);
+       ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
        if (ret)
                return ret;
 
        for (i = 0; i < 3; i++)
-               tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
-                                            sysctl_tcp_mem[i]);
+               cg_proto->sysctl_mem[i] = min_t(long, val >> PAGE_SHIFT,
+                                               sysctl_tcp_mem[i]);
 
        if (val == RES_COUNTER_MAX)
                clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
 
 static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
 {
-       struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return default_val;
 
-       tcp = tcp_from_cgproto(cg_proto);
-       return res_counter_read_u64(&tcp->tcp_memory_allocated, type);
+       return res_counter_read_u64(&cg_proto->memory_allocated, type);
 }
 
 static u64 tcp_read_usage(struct mem_cgroup *memcg)
 {
-       struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;
 
-       tcp = tcp_from_cgproto(cg_proto);
-       return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+       return res_counter_read_u64(&cg_proto->memory_allocated, RES_USAGE);
 }
 
 static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
 static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
 {
        struct mem_cgroup *memcg;
-       struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
 
        memcg = mem_cgroup_from_css(css);
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return 0;
-       tcp = tcp_from_cgproto(cg_proto);
 
        switch (event) {
        case RES_MAX_USAGE:
-               res_counter_reset_max(&tcp->tcp_memory_allocated);
+               res_counter_reset_max(&cg_proto->memory_allocated);
                break;
        case RES_FAILCNT:
-               res_counter_reset_failcnt(&tcp->tcp_memory_allocated);
+               res_counter_reset_failcnt(&cg_proto->memory_allocated);
                break;
        }