* will be able to manipulate the current directory, etc.
         * It would be nice to force an unshare instead...
         */
-       t = p;
        n_fs = 1;
        spin_lock(&p->fs->lock);
        rcu_read_lock();
-       while_each_thread(p, t) {
+       for_other_threads(p, t) {
                if (t->fs == p->fs)
                        n_fs++;
        }
 
 #define while_each_thread(g, t) \
        while ((t = next_thread(t)) != g)
 
+/*
+ * Iterate @t over every thread in @p's thread group except @p itself.
+ * Unlike while_each_thread(), this initializes @t internally, so callers
+ * no longer need the "t = p" priming assignment before the loop.
+ * NOTE(review): like while_each_thread(), walks the next_thread() chain —
+ * presumably requires the same locking (RCU/siglock) as the loops it
+ * replaces; confirm against the callers being converted.
+ */
+#define for_other_threads(p, t)        \
+       for (t = p; (t = next_thread(t)) != p; )
+
 #define __for_each_thread(signal, t)   \
         list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
                 lockdep_is_held(&tasklist_lock))
 
  */
 int zap_other_threads(struct task_struct *p)
 {
-       struct task_struct *t = p;
+       struct task_struct *t;
        int count = 0;
 
        p->signal->group_stop_count = 0;
 
-       while_each_thread(p, t) {
+       for_other_threads(p, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                /* Don't require de_thread to wait for the vhost_worker */
                if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
                        sig->group_exit_code = signr;
 
                sig->group_stop_count = 0;
-
                if (task_set_jobctl_pending(current, signr | gstop))
                        sig->group_stop_count++;
 
-               t = current;
-               while_each_thread(current, t) {
+               for_other_threads(current, t) {
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
        if (sigisemptyset(&retarget))
                return;
 
-       t = tsk;
-       while_each_thread(tsk, t) {
+       for_other_threads(tsk, t) {
                if (t->flags & PF_EXITING)
                        continue;