* Called on the new cpu, just before
                                        * enabling interrupts. Must not sleep,
                                        * must not fail */
-#define CPU_DYING_IDLE         0x000B /* CPU (unsigned)v dying, reached
-                                       * idle loop. */
-#define CPU_BROKEN             0x000C /* CPU (unsigned)v did not die properly,
+#define CPU_BROKEN             0x000B /* CPU (unsigned)v did not die properly,
                                        * perhaps due to preemption. */
 
 /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 
        }
 }
 
-/*
- * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function.  We now remove it from the rcu_node tree's ->qsmaskinit
- * bit masks.
- */
-static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
-{
-       unsigned long flags;
-       unsigned long mask;
-       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-       struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
-
-       if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-               return;
-
-       /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
-       mask = rdp->grpmask;
-       raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
-       rnp->qsmaskinitnext &= ~mask;
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-}
-
 /*
  * The CPU has been completely removed, and some other CPU is reporting
  * this fact from process context.  Do the remainder of the cleanup,
                rcu_init_percpu_data(cpu, rsp);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
+ * function.  We now remove it from the rcu_node tree's ->qsmaskinit
+ * bit masks.
+ */
+static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+{
+       unsigned long flags;
+       unsigned long mask;
+       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+       struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
+
+       /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+       mask = rdp->grpmask;
+       raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
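+       /* Grace periods initialized after this point will not wait on this CPU. */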
+       rnp->qsmaskinitnext &= ~mask;
+       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
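+/*
+ * Invoked from the dying CPU's idle task on the way into
+ * arch_cpu_idle_dead(), replacing the CPU_DYING_IDLE notifier path.
+ * Reports a quiescent state for any half-done expedited RCU-sched
+ * grace period, then clears the CPU from each flavor's leaf rcu_node
+ * ->qsmaskinitnext mask.
+ */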
+void rcu_report_dead(unsigned int cpu)
+{
+       struct rcu_state *rsp;
+
+       /* QS for any half-done expedited RCU-sched GP. */
+       preempt_disable();
+       rcu_report_exp_rdp(&rcu_sched_state,
+                          this_cpu_ptr(rcu_sched_state.rda), true);
+       preempt_enable();
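+
+       /* Remove the outgoing CPU from each flavor's rcu_node bookkeeping. */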
+       for_each_rcu_flavor(rsp)
+               rcu_cleanup_dying_idle_cpu(cpu, rsp);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
 /*
  * Handle CPU online/offline notification events.
  */
                for_each_rcu_flavor(rsp)
                        rcu_cleanup_dying_cpu(rsp);
                break;
-       case CPU_DYING_IDLE:
-               /* QS for any half-done expedited RCU-sched GP. */
-               preempt_disable();
-               rcu_report_exp_rdp(&rcu_sched_state,
-                                  this_cpu_ptr(rcu_sched_state.rda), true);
-               preempt_enable();
-
-               for_each_rcu_flavor(rsp) {
-                       rcu_cleanup_dying_idle_cpu(cpu, rsp);
-               }
-               break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED: