@@ ... @@
unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
        struct mem_cgroup_per_zone *mz;
 
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+
        /*
         * Should page_cgroup's go to their own slab?
         * One could optimize the performance of the charging routine
@@ ... @@
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;
 
+       if (mem_cgroup_subsys.disabled)
+               return;
+
        /*
         * Check if our page_cgroup is valid
         */
@@ ... @@
 {
        struct page_cgroup *pc;
 
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+
        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        if (pc)
@@ ... @@
        int ret = -EBUSY;
        int node, zid;
 
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+
        css_get(&mem->css);
        /*
         * page reclaim code (kswapd etc..) will move pages between
@@ ... @@
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
                                struct cgroup *cont)
 {
+       if (mem_cgroup_subsys.disabled)
+               return 0;
        return cgroup_add_files(cont, ss, mem_cgroup_files,
                                        ARRAY_SIZE(mem_cgroup_files));
 }
@@ ... @@
        struct mm_struct *mm;
        struct mem_cgroup *mem, *old_mem;
 
+       if (mem_cgroup_subsys.disabled)
+               return;
+
        mm = get_task_mm(p);
        if (mm == NULL)
                return;
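
Every hunk above applies the same guard: when the memory controller is disabled, each entry point returns immediately (0 for the int-returning hooks, a bare return for the void ones) before touching any page_cgroup state. Below is a minimal, self-contained sketch of that pattern; the names subsys_charge, subsys_uncharge and mem_subsys are illustrative only, not the kernel's symbols. In the real tree the flag being tested is mem_cgroup_subsys.disabled, which the cgroup core can set at boot (via the cgroup_disable= command-line option).

/*
 * Standalone sketch of the early-return guard used above.
 * All names here are hypothetical; this is not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct subsys {
	bool disabled;	/* set once at boot, never changes afterwards */
};

static struct subsys mem_subsys = { .disabled = true };

/* int-returning hook: report success without doing any accounting */
static int subsys_charge(struct subsys *ss)
{
	if (ss->disabled)
		return 0;
	/* ... real charging work would go here ... */
	return 0;
}

/* void hook: simply bail out */
static void subsys_uncharge(struct subsys *ss)
{
	if (ss->disabled)
		return;
	/* ... real uncharge work would go here ... */
}

int main(void)
{
	printf("charge() -> %d\n", subsys_charge(&mem_subsys));
	subsys_uncharge(&mem_subsys);	/* no-op while disabled */
	return 0;
}

Note that the int-returning hooks report success (0), not an error: callers such as the page-fault path should behave exactly as if the controller were not present at all, rather than see charging failures.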