/**
  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
  * @wq: the target workqueue
- * @cpu: the CPU coming up or going down
+ * @cpu: the CPU to update pool association for
+ * @hotplug_cpu: the CPU coming up or going down
  * @online: whether @cpu is coming up or going down
  *
  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
  * %CPU_DOWN_FAILED.
  */
 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
-                                  bool online)
+                                  int hotplug_cpu, bool online)
 {
        int node = cpu_to_node(cpu);
-       int cpu_off = online ? -1 : cpu;
+       int off_cpu = online ? -1 : hotplug_cpu;
        struct pool_workqueue *old_pwq = NULL, *pwq;
        struct workqueue_attrs *target_attrs;
        cpumask_t *cpumask;
         * and create a new one if they don't match.  If the target cpumask
         * equals the default pwq's, the default pwq should be used.
         */
-       if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
+       if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, off_cpu, cpumask)) {
                if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
                        return;
        } else {
        }
 
        /* update NUMA affinity of unbound workqueues */
-       list_for_each_entry(wq, &workqueues, list)
-               wq_update_unbound_numa(wq, cpu, true);
+       list_for_each_entry(wq, &workqueues, list) {
+               int tcpu;
+
+               for_each_possible_cpu(tcpu) {
+                       if (cpu_to_node(tcpu) == cpu_to_node(cpu)) {
+                               wq_update_unbound_numa(wq, tcpu, cpu, true);
+                       }
+               }
+       }
 
        mutex_unlock(&wq_pool_mutex);
        return 0;
 
        /* update NUMA affinity of unbound workqueues */
        mutex_lock(&wq_pool_mutex);
-       list_for_each_entry(wq, &workqueues, list)
-               wq_update_unbound_numa(wq, cpu, false);
+       list_for_each_entry(wq, &workqueues, list) {
+               int tcpu;
+
+               for_each_possible_cpu(tcpu) {
+                       if (cpu_to_node(tcpu) == cpu_to_node(cpu)) {
+                               wq_update_unbound_numa(wq, tcpu, cpu, false);
+                       }
+               }
+       }
        mutex_unlock(&wq_pool_mutex);
 
        return 0;
        }
 
        list_for_each_entry(wq, &workqueues, list) {
-               wq_update_unbound_numa(wq, smp_processor_id(), true);
+               wq_update_unbound_numa(wq, smp_processor_id(), smp_processor_id(),
+                                      true);
                WARN(init_rescuer(wq),
                     "workqueue: failed to create early rescuer for %s",
                     wq->name);