        tsk->active_mm = mm;
        activate_mm(active_mm, mm);
        task_unlock(tsk);
-       mm_update_next_owner(old_mm);
        arch_pick_mmap_layout(mm);
        if (old_mm) {
                up_read(&old_mm->mmap_sem);
                BUG_ON(active_mm != old_mm);
+               mm_update_next_owner(old_mm);
                mmput(old_mm);
                return 0;
        }
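
The exec_mmap() hunk above (fs/exec.c) moves the mm_update_next_owner()
call below up_read(&old_mm->mmap_sem) and inside the "if (old_mm)"
branch.  Both moves follow from the rework further down: the new
mm_update_next_owner() write-locks mmap_sem, and it is now only ever
handed a non-NULL mm, which is also what lets the "if (!mm)" guard in
mm_need_new_owner() go away.  A sketch of the deadlock the old call
site would have created (illustration only, not part of the patch):

        /* rwsems are not recursive: a writer sleeps forever behind
         * its own thread's read hold. */
        static void old_call_site_would_deadlock(struct mm_struct *old_mm)
        {
                down_read(&old_mm->mmap_sem);   /* taken early in exec_mmap() */
                mm_update_next_owner(old_mm);   /* now write-locks mmap_sem,  */
                                                /* blocking behind our own    */
                                                /* read hold                  */
                up_read(&old_mm->mmap_sem);     /* never reached              */
        }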
 
  */
 void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
 {
-       struct cgroup *oldcgrp, *newcgrp;
+       struct cgroup *oldcgrp, *newcgrp = NULL;
 
        if (need_mm_owner_callback) {
                int i;
                for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                        struct cgroup_subsys *ss = subsys[i];
                        oldcgrp = task_cgroup(old, ss->subsys_id);
-                       newcgrp = task_cgroup(new, ss->subsys_id);
+                       if (new)
+                               newcgrp = task_cgroup(new, ss->subsys_id);
                        if (oldcgrp == newcgrp)
                                continue;
                        if (ss->mm_owner_changed)
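
With this kernel/cgroup.c change, cgroup_mm_owner_callbacks() accepts
new == NULL and forwards a NULL destination cgroup to the subsystems,
so every mm_owner_changed implementation must tolerate it.  A hedged
sketch of a subsystem-side handler; the foo_* names are hypothetical,
and the hook is assumed to take the (ss, old, new) form this function
invokes:

        static void foo_mm_owner_changed(struct cgroup_subsys *ss,
                                         struct cgroup *old,
                                         struct cgroup *new)
        {
                if (!new) {
                        /* The owner vanished rather than migrated:
                         * drop state keyed on the old owner's cgroup. */
                        foo_forget_owner(old);          /* hypothetical */
                        return;
                }
                /* Ordinary owner change: move accounting old -> new. */
                foo_move_owner(old, new);               /* hypothetical */
        }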
 
         * If there are other users of the mm and the owner (us) is exiting
         * we need to find a new owner to take on the responsibility.
         */
-       if (!mm)
-               return 0;
        if (atomic_read(&mm->mm_users) <= 1)
                return 0;
        if (mm->owner != p)
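
Dropping the "if (!mm)" guard from mm_need_new_owner() is safe because
both remaining paths into mm_update_next_owner() already screen out a
NULL mm: exec_mmap() now calls it only under "if (old_mm)" (first hunk
above), and the exit path checks before calling.  Paraphrased from
exit_mm() in this era's kernel/exit.c, shown only to make the
call-site guarantee visible:

        static void exit_mm_excerpt(struct task_struct *tsk)
        {
                struct mm_struct *mm = tsk->mm;

                mm_release(tsk, mm);
                if (!mm)
                        return;                 /* NULL mm never reaches   */
                                                /* mm_update_next_owner()  */
                /* ... */
                mm_update_next_owner(mm);       /* mm != NULL here */
                mmput(mm);
        }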
        } while_each_thread(g, c);
 
        read_unlock(&tasklist_lock);
+       /*
+        * We found no owner, yet mm_users > 1: this implies that we are
+        * most likely racing with swapoff (try_to_unuse()) or /proc or
+        * ptrace or page migration (get_task_mm()).  Mark the owner as
+        * NULL so that subsystems recognize the callback and can act on it.
+        */
+       down_write(&mm->mmap_sem);
+       cgroup_mm_owner_callbacks(mm->owner, NULL);
+       mm->owner = NULL;
+       up_write(&mm->mmap_sem);
        return;
 
 assign_new_owner:
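
This kernel/exit.c hunk is the heart of the fix.  Previously, when the
owner exited while mm_users was still elevated, the search above could
come up empty and simply return, leaving mm->owner pointing at a task
about to be freed; any holder of the mm could then dereference a stale
task_struct.  The race, as a timeline (illustration only):

        /*
         *   exiting owner                   racing mm user (swapoff,
         *                                   /proc, ptrace, migration)
         *   -------------                   -------------------------
         *   exit_mm() drops tsk->mm         holds a reference on the mm,
         *   mm_update_next_owner():         so mm_users stays > 1
         *     no live task uses this mm
         *     -> no new owner found
         *   before: mm->owner left          mem_cgroup_from_task(mm->owner)
         *   pointing at the dying task      can hit freed memory
         *   after:  callbacks run and       readers see mm->owner == NULL
         *   mm->owner = NULL, under         and back off (memcontrol.c
         *   mmap_sem                        hunks below)
         */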
 
 
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
+       /*
+        * mm_update_next_owner() may clear mm->owner to NULL
+        * if it races with swapoff, page migration, etc.
+        * So this can be called with p == NULL.
+        */
+       if (unlikely(!p))
+               return NULL;
+
        return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
                                struct mem_cgroup, css);
 }
        if (likely(!memcg)) {
                rcu_read_lock();
                mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+               if (unlikely(!mem)) {
+                       rcu_read_unlock();
+                       kmem_cache_free(page_cgroup_cache, pc);
+                       return 0;
+               }
                /*
                 * For every charge from the cgroup, increment reference count
                 */
 
        rcu_read_lock();
        mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+       if (unlikely(!mem)) {
+               rcu_read_unlock();
+               return 0;
+       }
        css_get(&mem->css);
        rcu_read_unlock();
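
Both mm/memcontrol.c call sites above now follow the same pattern: look
the owner up under RCU, bail out if mm_update_next_owner() cleared it,
and only then pin the css.  In the charge path, returning 0 rather than
an error presumably treats the charge as a no-op success, since the mm
is on its way out.  Should more callers appear, the pattern could be
factored into a helper along these lines (hypothetical, not part of
this patch):

        static struct mem_cgroup *mem_cgroup_try_get_from_mm(struct mm_struct *mm)
        {
                struct mem_cgroup *mem;

                rcu_read_lock();
                mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
                if (mem)
                        css_get(&mem->css);     /* pin before leaving RCU */
                rcu_read_unlock();

                return mem;     /* NULL if the owner raced away */
        }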