}
 
 static struct cpumask save_cpumask;
-static bool disable_migrate;
 
 static void move_to_next_cpu(void)
 {
        struct trace_array *tr = hwlat_trace;
        int next_cpu;
 
-       if (disable_migrate)
-               return;
        /*
         * If for some reason the user modifies the CPU affinity
         * of this thread, then stop migrating for the duration
         * of the current test.
         */
        if (!cpumask_equal(current_mask, current->cpus_ptr))
-               goto disable;
+               goto change_mode;
 
        get_online_cpus();
        cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
        next_cpu = cpumask_first(current_mask);
 
        if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
-               goto disable;
+               goto change_mode;
 
        cpumask_clear(current_mask);
        cpumask_set_cpu(next_cpu, current_mask);
        sched_setaffinity(0, current_mask);
        return;
 
- disable:
-       disable_migrate = true;
+ change_mode:
+       hwlat_data.thread_mode = MODE_NONE;
+       pr_info(BANNER "cpumask changed while in round-robin mode, switching to mode none\n");
 }
 
 /*
 
        hwlat_trace = tr;
 
-       disable_migrate = false;
        hwlat_data.count = 0;
        tr->max_latency = 0;
        save_tracing_thresh = tracing_thresh;