                 if (memcg_bpf_enabled())
                        objcg = get_obj_cgroup_from_current();
 #endif
+               ma->objcg = objcg;
                for_each_possible_cpu(cpu) {
                        c = per_cpu_ptr(pc, cpu);
                         c->unit_size = unit_size;
@@ ... @@
 #ifdef CONFIG_MEMCG_KMEM
        objcg = get_obj_cgroup_from_current();
 #endif
+       ma->objcg = objcg;
        for_each_possible_cpu(cpu) {
                cc = per_cpu_ptr(pcc, cpu);
                 for (i = 0; i < NUM_CACHES; i++) {
@@ ... @@
                         rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
                        rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
                }
-               /* objcg is the same across cpus */
-               if (c->objcg)
-                       obj_cgroup_put(c->objcg);
+               if (ma->objcg)
+                       obj_cgroup_put(ma->objcg);
                destroy_mem_alloc(ma, rcu_in_progress);
        }
         if (ma->caches) {
@@ ... @@
                                 rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
                        }
                }
-               if (c->objcg)
-                       obj_cgroup_put(c->objcg);
+               if (ma->objcg)
+                       obj_cgroup_put(ma->objcg);
                destroy_mem_alloc(ma, rcu_in_progress);
        }
 }
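
Net effect: the obj_cgroup reference moves out of the per-CPU struct bpf_mem_cache and into struct bpf_mem_alloc, so the allocator takes a single reference in bpf_mem_alloc_init() and drops it in bpf_mem_alloc_destroy(), instead of duplicating the same pointer into every CPU's cache and reading it back through whichever per-CPU pointer the drain loop happened to leave in c (which is what the removed "objcg is the same across cpus" comment was documenting). As a rough sketch, assuming the definition in include/linux/bpf_mem_alloc.h around the time of this patch (member set and ordering here are illustrative, not verbatim):

struct bpf_mem_alloc {
        struct bpf_mem_caches __percpu *caches;
        struct bpf_mem_cache __percpu *cache;
        /* Single reference held for the whole allocator; assigned once
         * in bpf_mem_alloc_init() and released in bpf_mem_alloc_destroy(),
         * as in the hunks above. NULL when memcg accounting is disabled.
         */
        struct obj_cgroup *objcg;
        bool percpu;
        struct work_struct work;
};

Keeping one copy in struct bpf_mem_alloc also means the destroy paths no longer depend on the loop variable still pointing at a valid per-CPU cache when the reference is put.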