return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
 }
 
+/*
+ * We are reporting a quiescent state on behalf of some other CPU, so
+ * it is our responsibility to check for and handle potential overflow
+ * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
+ * After all, the CPU might be in a deep idle state, and thus executing no
+ * code whatsoever.
+ */
+static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
+{
+       lockdep_assert_held(&rnp->lock);
+       if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
+               WRITE_ONCE(rdp->gpwrap, true);
+       if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
+               rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
+}
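
[Reviewer aside, not part of the patch: the overflow checks above lean on the kernel's wrap-safe modular counter comparison. Below is a minimal userspace sketch of the same idea, assuming ULONG_CMP_LT() is defined as in include/linux/rcupdate.h; counter_wrapped() is a hypothetical helper standing in for the quarter-range test in rcu_gpnum_ovf().]

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

/* Wrap-safe "a < b" for free-running unsigned long counters. */
#define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (a) - (b))

/* Has snap fallen more than a quarter of the counter space behind cur? */
static bool counter_wrapped(unsigned long snap, unsigned long cur)
{
        return ULONG_CMP_LT(snap + ULONG_MAX / 4, cur);
}

int main(void)
{
        /* A snapshot only slightly behind is not flagged. */
        assert(!counter_wrapped(100, 200));
        /* A snapshot more than a quarter of the space behind is flagged. */
        assert(counter_wrapped(0, ULONG_MAX / 4 + 1));
        /* Modular subtraction keeps the test correct across zero. */
        assert(!counter_wrapped(ULONG_MAX - 5, 5));
        return 0;
}
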
+
 /*
  * Snapshot the specified CPU's dynticks counter so that we can later
  * credit it with an implicit quiescent state.  Return 1 if this CPU
        rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
        if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
-               if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
-                                rdp->mynode->gpnum))
-                       WRITE_ONCE(rdp->gpwrap, true);
+               rcu_gpnum_ovf(rdp->mynode, rdp);
                return 1;
        }
        return 0;
 }
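
[Reviewer aside: the snapshot/recheck scheme above can be modeled in a few lines. This is a simplified sketch assuming the classic even-means-idle counter encoding; in_eqs() and in_eqs_since() are illustrative stand-ins for rcu_dynticks_in_eqs() and rcu_dynticks_in_eqs_since(), whose real encoding in this kernel differs in detail.]

#include <assert.h>
#include <stdbool.h>

static bool in_eqs(unsigned int snap)
{
        return !(snap & 0x1);   /* even: extended quiescent state */
}

static bool in_eqs_since(unsigned int snap, unsigned int now)
{
        return snap != now;     /* counter moved: idle was passed through */
}

int main(void)
{
        unsigned int snap = 8;          /* sampled while the CPU was idle */

        assert(in_eqs(snap));           /* immediate credit, as above */

        snap = 9;                       /* sampled while the CPU was busy */
        assert(!in_eqs(snap));
        assert(in_eqs_since(snap, 11)); /* counter moved since the snapshot */
        assert(!in_eqs_since(snap, 9)); /* no change: cannot credit a QS yet */
        return 0;
}
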
 
+/*
+ * Handler for the irq_work request posted when a grace period has
+ * gone on for too long, but not yet long enough for an RCU CPU
+ * stall warning.  Set state appropriately, but just complain if
+ * there is unexpected state on entry.
+ */
+static void rcu_iw_handler(struct irq_work *iwp)
+{
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
+
+       rdp = container_of(iwp, struct rcu_data, rcu_iw);
+       rnp = rdp->mynode;
+       raw_spin_lock_rcu_node(rnp);
+       if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
+               rdp->rcu_iw_gpnum = rnp->gpnum;
+               rdp->rcu_iw_pending = false;
+       }
+       raw_spin_unlock_rcu_node(rnp);
+}
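
[Reviewer aside: rcu_iw_handler() is one half of a "pending flag plus generation number" handshake with the FQS code further down. Below is a runnable userspace model of that handshake, using a pthread mutex in place of the rcu_node lock; fake_rdp, post_request() and handle_request() are illustrative names, not kernel APIs.]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_rdp {
        pthread_mutex_t lock;   /* stands in for the rcu_node lock */
        unsigned long gpnum;    /* current "grace-period" number */
        unsigned long iw_gpnum; /* last generation the handler stamped */
        bool iw_pending;        /* request posted but not yet handled */
};

/* Post a request, but only once per generation (cf. the FQS code below). */
static void post_request(struct fake_rdp *rdp)
{
        pthread_mutex_lock(&rdp->lock);
        if (!rdp->iw_pending && rdp->iw_gpnum != rdp->gpnum) {
                rdp->iw_pending = true;
                rdp->iw_gpnum = rdp->gpnum;
        }
        pthread_mutex_unlock(&rdp->lock);
}

/* What rcu_iw_handler() does: record that the target CPU actually ran. */
static void handle_request(struct fake_rdp *rdp)
{
        pthread_mutex_lock(&rdp->lock);
        if (rdp->iw_pending) {
                rdp->iw_gpnum = rdp->gpnum;
                rdp->iw_pending = false;
        }
        pthread_mutex_unlock(&rdp->lock);
}

int main(void)
{
        static struct fake_rdp rdp = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .gpnum = 42, .iw_gpnum = 41, .iw_pending = false,
        };

        post_request(&rdp);     /* first request for generation 42 */
        post_request(&rdp);     /* duplicate: suppressed by iw_pending */
        handle_request(&rdp);   /* target "CPU" responds */
        printf("pending=%d iw_gpnum=%lu\n", rdp.iw_pending, rdp.iw_gpnum);
        return 0;
}
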
+
 /*
  * Return true if the specified CPU has passed through a quiescent
  * state by virtue of being in or having passed through a dynticks
        unsigned long jtsq;
        bool *rnhqp;
        bool *ruqp;
-       struct rcu_node *rnp;
+       struct rcu_node *rnp = rdp->mynode;
 
        /*
         * If the CPU passed through or entered a dynticks idle phase with
        if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
                rdp->dynticks_fqs++;
+               rcu_gpnum_ovf(rnp, rdp);
                return 1;
        }
 
         * might not be the case for nohz_full CPUs looping in the kernel.
         */
        jtsq = jiffies_till_sched_qs;
-       rnp = rdp->mynode;
        ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
        if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
            READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
            READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
+               rcu_gpnum_ovf(rnp, rdp);
                return 1;
        } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
                /* Load rcu_qs_ctr before store to rcu_urgent_qs. */
        if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
                rdp->offline_fqs++;
+               rcu_gpnum_ovf(rnp, rdp);
                return 1;
        }
 
        }
 
        /*
-        * If more than halfway to RCU CPU stall-warning time, do
-        * a resched_cpu() to try to loosen things up a bit.
+        * If more than halfway to RCU CPU stall-warning time, do a
+        * resched_cpu() to try to loosen things up a bit.  Also check to
+        * see if the CPU is getting hammered with interrupts, but only
+        * once per grace period, just to keep the IPIs down to a dull roar.
         */
-       if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2)
+       if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
                resched_cpu(rdp->cpu);
+               if (IS_ENABLED(CONFIG_IRQ_WORK) &&
+                   !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
+                   (rnp->ffmask & rdp->grpmask)) {
+                       init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
+                       rdp->rcu_iw_pending = true;
+                       rdp->rcu_iw_gpnum = rnp->gpnum;
+                       irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
+               }
+       }
 
        return 0;
 }
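
[Reviewer aside: the "more than halfway to stall-warning time" test is plain unsigned jiffies arithmetic, so it stays correct across a jiffies wrap. A small sketch under the assumption of the default 21-second stall timeout at HZ=100 (2100 jiffies); past_halfway() is a hypothetical helper, not a kernel function.]

#include <assert.h>
#include <stdbool.h>

/* now, gp_start and stall_timeout are all in jiffies. */
static bool past_halfway(unsigned long now, unsigned long gp_start,
                         unsigned long stall_timeout)
{
        return now - gp_start > stall_timeout / 2;
}

int main(void)
{
        unsigned long gp_start = 1000, stall_timeout = 2100;

        assert(!past_halfway(gp_start + 1000, gp_start, stall_timeout));
        assert(past_halfway(gp_start + 1100, gp_start, stall_timeout));
        /* Unsigned subtraction still works if jiffies wrapped past zero. */
        assert(past_halfway(50, -2000UL, stall_timeout));
        return 0;
}
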
 {
        int cpu;
        unsigned long flags;
+       struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
        struct rcu_node *rnp = rcu_get_root(rsp);
        long totqlen = 0;
 
         */
        pr_err("INFO: %s self-detected stall on CPU", rsp->name);
        print_cpu_stall_info_begin();
+       raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
        print_cpu_stall_info(rsp, smp_processor_id());
+       raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
        print_cpu_stall_info_end();
        for_each_possible_cpu(cpu)
                totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
                rdp->core_needs_qs = need_gp;
                zero_cpu_stall_ticks(rdp);
                WRITE_ONCE(rdp->gpwrap, false);
+               rcu_gpnum_ovf(rnp, rdp);
        }
        return ret;
 }
        rdp->cpu_no_qs.b.norm = true;
        rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
        rdp->core_needs_qs = false;
+       rdp->rcu_iw_pending = false;
+       rdp->rcu_iw_gpnum = rnp->gpnum - 1;
        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
  */
 int rcutree_online_cpu(unsigned int cpu)
 {
-       sync_sched_exp_online_cleanup(cpu);
-       rcutree_affinity_setting(cpu, -1);
+       unsigned long flags;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp) {
+               rdp = per_cpu_ptr(rsp->rda, cpu);
+               rnp = rdp->mynode;
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+               rnp->ffmask |= rdp->grpmask;
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+       }
        if (IS_ENABLED(CONFIG_TREE_SRCU))
                srcu_online_cpu(cpu);
+       if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
+               return 0; /* Too early in boot for scheduler work. */
+       sync_sched_exp_online_cleanup(cpu);
+       rcutree_affinity_setting(cpu, -1);
        return 0;
 }
 
  */
 int rcutree_offline_cpu(unsigned int cpu)
 {
+       unsigned long flags;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp) {
+               rdp = per_cpu_ptr(rsp->rda, cpu);
+               rnp = rdp->mynode;
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+               rnp->ffmask &= ~rdp->grpmask;
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+       }
+
        rcutree_affinity_setting(cpu, cpu);
        if (IS_ENABLED(CONFIG_TREE_SRCU))
                srcu_offline_cpu(cpu);
        for_each_online_cpu(cpu) {
                rcutree_prepare_cpu(cpu);
                rcu_cpu_starting(cpu);
-               if (IS_ENABLED(CONFIG_TREE_SRCU))
-                       srcu_online_cpu(cpu);
+               rcutree_online_cpu(cpu);
        }
 }