         * update as well, the only side effect might be a cycle delay for
         * the softlockup check.
         */
-       for_each_cpu(cpu, &watchdog_allowed_mask)
+       for_each_cpu(cpu, &watchdog_allowed_mask) {
                per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
-       wq_watchdog_touch(-1);
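+               /* also touch this CPU's workqueue watchdog */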
+               wq_watchdog_touch(cpu);
+       }
 }
 
 void touch_softlockup_watchdog_sync(void)
 
                        continue;
 
                /* get the latest of pool and touched timestamps */
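+               /*
+                * Per-cpu pools check their own CPU's touch stamp; unbound
+                * pools use the global one.
+                */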
+               if (pool->cpu >= 0)
+                       touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
+               else
+                       touched = READ_ONCE(wq_watchdog_touched);
                pool_ts = READ_ONCE(pool->watchdog_ts);
-               touched = READ_ONCE(wq_watchdog_touched);
 
                if (time_after(pool_ts, touched))
                        ts = pool_ts;
                else
                        ts = touched;
 
-               if (pool->cpu >= 0) {
-                       unsigned long cpu_touched =
-                               READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
-                                                 pool->cpu));
-                       if (time_after(cpu_touched, ts))
-                               ts = cpu_touched;
-               }
-
                /* did we stall? */
                if (time_after(jiffies, ts + thresh)) {
                        lockup_detected = true;
 notrace void wq_watchdog_touch(int cpu)
 {
        if (cpu >= 0)
                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
-       else
-               wq_watchdog_touched = jiffies;
+
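+       /* also update the global stamp so unbound pools see the touch */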
+       wq_watchdog_touched = jiffies;
 }
 
 static void wq_watchdog_set_thresh(unsigned long thresh)