kfree(rdtgrp);
 }
 
-struct task_move_callback {
-       struct callback_head    work;
-       struct rdtgroup         *rdtgrp;
-};
-
-static void move_myself(struct callback_head *head)
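+/*
+ * Called via smp_call_function_single() on the CPU where @task may be
+ * running, or invoked directly when no IPI is needed.
+ */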
+static void _update_task_closid_rmid(void *task)
 {
-       struct task_move_callback *callback;
-       struct rdtgroup *rdtgrp;
-
-       callback = container_of(head, struct task_move_callback, work);
-       rdtgrp = callback->rdtgrp;
-
        /*
-        * If resource group was deleted before this task work callback
-        * was invoked, then assign the task to root group and free the
-        * resource group.
+        * If the task is still current on this CPU, update PQR_ASSOC MSR.
+        * Otherwise, the MSR is updated when the task is scheduled in.
         */
-       if (atomic_dec_and_test(&rdtgrp->waitcount) &&
-           (rdtgrp->flags & RDT_DELETED)) {
-               current->closid = 0;
-               current->rmid = 0;
-               rdtgroup_remove(rdtgrp);
-       }
-
-       if (unlikely(current->flags & PF_EXITING))
-               goto out;
-
-       preempt_disable();
-       /* update PQR_ASSOC MSR to make resource group go into effect */
-       resctrl_sched_in();
-       preempt_enable();
+       if (task == current)
+               resctrl_sched_in();
+}
 
-out:
-       kfree(callback);
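+/*
+ * If @t is running, interrupt its CPU to reload the PQR_ASSOC MSR; the
+ * IPI is synchronous (wait == 1), so the MSR is up to date on return.
+ * If @t is not running, the MSR is loaded when @t is scheduled in.
+ */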
+static void update_task_closid_rmid(struct task_struct *t)
+{
+       if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
+               smp_call_function_single(task_cpu(t),
+                                        _update_task_closid_rmid, t, 1);
+       else
+               _update_task_closid_rmid(t);
 }
 
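+/*
+ * Move @tsk into @rdtgrp by updating its closid/rmid, then make the
+ * change take effect immediately if @tsk is currently running.
+ */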
 static int __rdtgroup_move_task(struct task_struct *tsk,
                                struct rdtgroup *rdtgrp)
 {
-       struct task_move_callback *callback;
-       int ret;
-
-       callback = kzalloc(sizeof(*callback), GFP_KERNEL);
-       if (!callback)
-               return -ENOMEM;
-       callback->work.func = move_myself;
-       callback->rdtgrp = rdtgrp;
-
        /*
-        * Take a refcount, so rdtgrp cannot be freed before the
-        * callback has been invoked.
+        * Set the task's closid/rmid before the PQR_ASSOC MSR is
+        * updated with them.
+        *
+        * For ctrl_mon groups, move both closid and rmid.
+        * For monitor groups, tasks can be moved only from
+        * their parent CTRL group.
         */
-       atomic_inc(&rdtgrp->waitcount);
-       ret = task_work_add(tsk, &callback->work, TWA_RESUME);
-       if (ret) {
-               /*
-                * Task is exiting. Drop the refcount and free the callback.
-                * No need to check the refcount as the group cannot be
-                * deleted before the write function unlocks rdtgroup_mutex.
-                */
-               atomic_dec(&rdtgrp->waitcount);
-               kfree(callback);
-               rdt_last_cmd_puts("Task exited\n");
-       } else {
-               /*
-                * For ctrl_mon groups move both closid and rmid.
-                * For monitor groups, can move the tasks only from
-                * their parent CTRL group.
-                */
-               if (rdtgrp->type == RDTCTRL_GROUP) {
-                       tsk->closid = rdtgrp->closid;
+       if (rdtgrp->type == RDTCTRL_GROUP) {
+               tsk->closid = rdtgrp->closid;
+               tsk->rmid = rdtgrp->mon.rmid;
+       } else if (rdtgrp->type == RDTMON_GROUP) {
+               if (rdtgrp->mon.parent->closid == tsk->closid) {
                        tsk->rmid = rdtgrp->mon.rmid;
-               } else if (rdtgrp->type == RDTMON_GROUP) {
-                       if (rdtgrp->mon.parent->closid == tsk->closid) {
-                               tsk->rmid = rdtgrp->mon.rmid;
-                       } else {
-                               rdt_last_cmd_puts("Can't move task to different control group\n");
-                               ret = -EINVAL;
-                       }
+               } else {
+                       rdt_last_cmd_puts("Can't move task to different control group\n");
+                       return -EINVAL;
                }
        }
-       return ret;
+
+       /*
+        * Ensure the task's closid and rmid are written before determining if
+        * the task is current. That check decides whether the task will be
+        * interrupted to update the PQR_ASSOC MSR.
+        */
+       barrier();
+
+       /*
+        * By now, the task's closid and rmid are set. If the task is current
+        * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
+        * group go into effect. If the task is not current, the MSR will be
+        * updated when the task is scheduled in.
+        */
+       update_task_closid_rmid(tsk);
+
+       return 0;
 }
 
 static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)