 /*
  * cgroup_task_migrate - move a task from one cgroup to another.
  *
  * 'guarantee' is set if the caller promises that a new css_set for the task
  * will already exist. If not set, this function might sleep, and can fail with
- * -ENOMEM. Otherwise, it can only fail with -ESRCH.
+ * -ENOMEM. Must be called with cgroup_mutex and threadgroup locked.
  */
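+/*
+ * Note: this depends on threadgroup_lock() also excluding exit (extended
+ * earlier in this patch series), so PF_EXITING can't become set on any
+ * group member while the lock is held.
+ */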
 static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
                               struct task_struct *tsk, bool guarantee)
        }
        put_css_set(oldcg);
 
-       /* if PF_EXITING is set, the tsk->cgroups pointer is no longer safe. */
+       /* @tsk can't exit as its threadgroup is locked */
        task_lock(tsk);
-       if (tsk->flags & PF_EXITING) {
-               task_unlock(tsk);
-               put_css_set(newcg);
-               return -ESRCH;
-       }
+       WARN_ON_ONCE(tsk->flags & PF_EXITING);
        rcu_assign_pointer(tsk->cgroups, newcg);
        task_unlock(tsk);
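+       /*
+        * rcu_assign_pointer() above publishes the new css_set for lockless
+        * readers: tsk->cgroups may be read under task_lock() or RCU.
+        */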
 
  * @cgrp: the cgroup the task is attaching to
  * @tsk: the task to be attached
  *
- * Call holding cgroup_mutex. May take task_lock of
- * the task 'tsk' during call.
+ * Call with cgroup_mutex and threadgroup locked. May take task_lock of
+ * @tsk during call.
  */
 int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
        struct cgroup *oldcgrp;
        struct cgroupfs_root *root = cgrp->root;
 
+       /* @tsk either already exited or can't exit until the attach completes */
+       if (tsk->flags & PF_EXITING)
+               return -ESRCH;
+
        /* Nothing to do if the task is already in that cgroup */
        oldcgrp = task_cgroup_from_root(tsk, root);
        if (cgrp == oldcgrp)
        tsk = leader;
        i = 0;
        do {
+               /*
+                * Skip threads that have already exited; with the
+                * threadgroup locked, none can start exiting under us.
+                */
+               if (tsk->flags & PF_EXITING)
+                       continue;
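+               /* "continue" evaluates while_each_thread(), advancing @tsk */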
+
                /* as per above, nr_threads may decrease, but not increase. */
                BUG_ON(i >= group_size);
                get_task_struct(tsk);
                        continue;
                /* get old css_set pointer */
                task_lock(tsk);
-               if (tsk->flags & PF_EXITING) {
-                       /* ignore this task if it's going away */
-                       task_unlock(tsk);
-                       continue;
-               }
                oldcg = tsk->cgroups;
                get_css_set(oldcg);
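+               /* pin the old css_set; it's still used after task_unlock */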
                task_unlock(tsk);
                oldcgrp = task_cgroup_from_root(tsk, root);
                if (cgrp == oldcgrp)
                        continue;
-               /* if the thread is PF_EXITING, it can just get skipped. */
                retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
-               if (retval == 0) {
-                       /* attach each task to each subsystem */
-                       for_each_subsys(root, ss) {
-                               if (ss->attach_task)
-                                       ss->attach_task(cgrp, tsk);
-                       }
-               } else {
-                       BUG_ON(retval != -ESRCH);
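+               /*
+                * Migration can no longer fail here: the css_set was
+                * preallocated ('guarantee' is true) and exiting threads
+                * were skipped while the threadgroup was locked.
+                */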
+               BUG_ON(retval);
+               /* attach each task to each subsystem */
+               for_each_subsys(root, ss) {
+                       if (ss->attach_task)
+                               ss->attach_task(cgrp, tsk);
                }
        }
        /* nothing is sensitive to fork() after this point. */
 
 /*
  * Find the task_struct of the task to attach by vpid and pass it along to the
- * function to attach either it or all tasks in its threadgroup. Will take
- * cgroup_mutex; may take task_lock of task.
+ * function to attach either it or all tasks in its threadgroup. Will lock
+ * cgroup_mutex and threadgroup; may take task_lock of task.
  */
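+/*
+ * Lock order: cgroup_mutex is taken first, threadgroup second; they are
+ * dropped in reverse (threadgroup_unlock() then cgroup_unlock() below).
+ */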
 static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 {
                         * detect it later.
                         */
                        tsk = tsk->group_leader;
-               } else if (tsk->flags & PF_EXITING) {
-                       /* optimization for the single-task-only case */
-                       rcu_read_unlock();
-                       cgroup_unlock();
-                       return -ESRCH;
                }
                /*
                 * even if we're attaching all tasks in the thread group, we
                get_task_struct(tsk);
        }
 
-       if (threadgroup) {
-               threadgroup_lock(tsk);
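+       /*
+        * Take the threadgroup lock in both the single-task and whole-group
+        * cases so the PF_EXITING checks in the attach paths stay stable.
+        */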
+       threadgroup_lock(tsk);
+
+       if (threadgroup)
                ret = cgroup_attach_proc(cgrp, tsk);
-               threadgroup_unlock(tsk);
-       } else {
+       else
                ret = cgroup_attach_task(cgrp, tsk);
-       }
+
+       threadgroup_unlock(tsk);
+
        put_task_struct(tsk);
        cgroup_unlock();
        return ret;