 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
 {
        bool ret = false;
-       bool need_gp;
+       bool need_qs;
        const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
                               rcu_segcblist_is_offloaded(&rdp->cblist);
 
@@ ... @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
            unlikely(READ_ONCE(rdp->gpwrap))) {
                if (!offloaded)
                        ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
+               rdp->core_needs_qs = false;
                trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
        } else {
                if (!offloaded)
                        ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
+               if (rdp->core_needs_qs)
+                       rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
        }
 
        /* Now handle the beginnings of any new-to-this-CPU grace periods. */
@@ ... @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
                 * go looking for one.
                 */
                trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
-               need_gp = !!(rnp->qsmask & rdp->grpmask);
-               rdp->cpu_no_qs.b.norm = need_gp;
-               rdp->core_needs_qs = need_gp;
+               need_qs = !!(rnp->qsmask & rdp->grpmask);
+               rdp->cpu_no_qs.b.norm = need_qs;
+               rdp->core_needs_qs = need_qs;
                zero_cpu_stall_ticks(rdp);
        }
        rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
@@ ... @@
                return;
        }
        mask = rdp->grpmask;
+       if (rdp->cpu == smp_processor_id())
+               rdp->core_needs_qs = false;
        if ((rnp->qsmask & mask) == 0) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        } else {
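
Taken together, these hunks keep the per-CPU ->core_needs_qs hint from going stale: it is cleared outright once a preceding grace period has ended (the "cpuend" branch), downgraded when the current grace period is no longer waiting on this CPU, set from the same rnp->qsmask test as ->cpu_no_qs.b.norm at grace-period start, and cleared again when the CPU reports its own quiescent state in the final hunk (which, from the surrounding mask/qsmask handling, appears to be rcu_report_qs_rdp()). Below is a minimal sketch of how the end-of-grace-period path in __note_gp_changes() reads once the hunks are applied; the opening rcu_seq_completed_gp() test is assumed from the surrounding kernel/rcu/tree.c context, and the annotations are editorial, not kernel source:

	/* Handle the ends of any preceding grace periods first. */
	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		if (!offloaded)
			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
		rdp->core_needs_qs = false; /* Old GP ended; no QS owed to it. */
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
	} else {
		if (!offloaded)
			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
		/* Keep the hint only while the current GP still waits on this CPU. */
		if (rdp->core_needs_qs)
			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
	}

Both rnp->qsmask checks run under the rcu_node lock that __note_gp_changes() holds, so the hint can be downgraded here without racing against concurrent quiescent-state reports.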