{
        rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
        if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
+               trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
                rcu_gpnum_ovf(rdp->mynode, rdp);
                return 1;
        }
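
Both hunks in this function family follow the same snapshot-and-compare idiom: record the CPU's dynticks counter at force-quiescent-state time, then later report a quiescent state if the CPU either was already in an extended quiescent state (EQS) or has passed through one since. A minimal userspace sketch of that idiom, assuming an even/odd encoding where an even counter value means idle; the names and encoding here are illustrative, not the kernel's actual ones:

#include <stdbool.h>
#include <stdio.h>

struct dynticks_demo { unsigned long ctr; };

static unsigned long dynticks_snap(const struct dynticks_demo *d)
{
        return d->ctr;
}

static bool in_eqs(unsigned long snap)
{
        return !(snap & 1);     /* even counter value: CPU is idle/EQS */
}

static bool in_eqs_since(const struct dynticks_demo *d, unsigned long snap)
{
        return dynticks_snap(d) != snap; /* counter moved: an EQS happened */
}

int main(void)
{
        struct dynticks_demo cpu = { .ctr = 1 };        /* odd: CPU active */
        unsigned long snap = dynticks_snap(&cpu);

        printf("in EQS at snapshot time? %d\n", in_eqs(snap));         /* 0 */
        cpu.ctr++;      /* CPU enters idle (even) */
        cpu.ctr++;      /* ...and exits idle again (odd) */
        printf("passed through EQS since? %d\n", in_eqs_since(&cpu, snap)); /* 1 */
        return 0;
}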
        /*
         * If the CPU passed through or entered a dynticks idle phase with
         * no active irq/NMI handlers, then we can safely pretend that the CPU
         * already acknowledged the request to pass through a quiescent
         * state.  Either way, that CPU cannot possibly be in an RCU
         * read-side critical section that started before the beginning
         * of the current RCU grace period.
         */
        if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
+               trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
                rdp->dynticks_fqs++;
                rcu_gpnum_ovf(rnp, rdp);
                return 1;
        }

        /*
         * Has this CPU encountered a cond_resched() since the beginning
         * of the grace period?  For this to be the case, the CPU has to
         * have noticed the current grace period.  This might not be the
         * case for nohz_full CPUs looping in the kernel.
         */
        jtsq = jiffies_till_sched_qs;
        ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
-       if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
+       if (time_after(jiffies, rcu_state.gp_start + jtsq) &&
            READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
            rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
+               trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
                rcu_gpnum_ovf(rnp, rdp);
                return 1;
-       } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
+       } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
                /* Load rcu_qs_ctr before store to rcu_urgent_qs. */
                smp_store_release(ruqp, true);
        }
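
The time_after() checks above use the kernel's wraparound-safe jiffies comparison (include/linux/jiffies.h), which subtracts and tests the sign rather than comparing magnitudes. A standalone demonstration of why that matters near counter wraparound:

#include <stdio.h>

/* Same signed-subtraction trick as the kernel's time_after(). */
#define time_after(a, b)        ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long gp_start = (unsigned long)-5;     /* just before wraparound */
        unsigned long jiffies = 10;                     /* counter has wrapped */

        /* A naive magnitude compare gets the wrong answer after wraparound. */
        printf("naive: %d, time_after: %d\n",
               jiffies > gp_start, time_after(jiffies, gp_start));
        return 0;
}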
 
        /* If waiting too long on an offline CPU, complain. */
        if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
-           time_after(jiffies, rdp->rsp->gp_start + HZ)) {
+           time_after(jiffies, rcu_state.gp_start + HZ)) {
                bool onl;
                struct rcu_node *rnp1;
 
         */
        rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
        if (!READ_ONCE(*rnhqp) &&
-           (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
-            time_after(jiffies, rdp->rsp->jiffies_resched))) {
+           (time_after(jiffies, rcu_state.gp_start + jtsq) ||
+            time_after(jiffies, rcu_state.jiffies_resched))) {
                WRITE_ONCE(*rnhqp, true);
                /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
                smp_store_release(ruqp, true);
-               rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */
+               rcu_state.jiffies_resched += jtsq; /* Re-enable beating. */
        }
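
The smp_store_release() above publishes rcu_urgent_qs only after the write to rcu_need_heavy_qs, so any reader that load-acquires the urgent flag and sees it set is also guaranteed to see the heavy-QS request (hence the "Store rcu_need_heavy_qs before rcu_urgent_qs" comment). A single-threaded userspace analogue of that pairing using C11 atomics; the variable names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool need_heavy_qs;              /* plain data, published by the flag */
static atomic_bool urgent_qs;           /* the flag */

static void writer(void)
{
        need_heavy_qs = true;           /* ordered before the release store */
        atomic_store_explicit(&urgent_qs, true, memory_order_release);
}

static void reader(void)
{
        if (atomic_load_explicit(&urgent_qs, memory_order_acquire))
                printf("need_heavy_qs = %d\n", need_heavy_qs);  /* always 1 */
}

int main(void)
{
        writer();
        reader();
        return 0;
}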
 
        /*
         * If more than halfway to RCU CPU stall-warning time, do a
         * resched_cpu() to try to loosen things up a bit.  Also check to
         * see if the CPU is getting hammered with interrupts, but only
         * once per grace period, just to keep the IPIs down to a dull roar.
         */
-       if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
+       if (jiffies - rcu_state.gp_start > rcu_jiffies_till_stall_check() / 2) {
                resched_cpu(rdp->cpu);
                if (IS_ENABLED(CONFIG_IRQ_WORK) &&
                    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
                              unsigned long gp_seq_req, const char *s)
 {
-       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req,
+       trace_rcu_future_grace_period(rcu_state.name, rnp->gp_seq, gp_seq_req,
                                      rnp->level, rnp->grplo, rnp->grphi, s);
 }
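
Stepping back, every substitution in this patch is the same move: after RCU flavor consolidation there is exactly one rcu_state instance, so the ->rsp back-pointer stored in each rcu_data carries no information and the global can be named directly. A toy before/after sketch of that shape (all _demo identifiers are hypothetical):

#include <stdio.h>

struct rcu_state_demo { const char *name; };
static struct rcu_state_demo rcu_state_demo = { .name = "rcu_sched" };

struct rcu_data_demo {
        int cpu;
        /* struct rcu_state_demo *rsp;  -- back-pointer no longer needed */
};

static void trace_demo(struct rcu_data_demo *rdp)
{
        /* Before: rdp->rsp->name.  After: the lone global is named directly. */
        printf("cpu %d, state %s\n", rdp->cpu, rcu_state_demo.name);
}

int main(void)
{
        struct rcu_data_demo rdp = { .cpu = 0 };
        trace_demo(&rdp);
        return 0;
}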
 
 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
                              unsigned long gp_seq_req)
 {
        bool ret = false;
-       struct rcu_state *rsp = rdp->rsp;
+       struct rcu_state *rsp = &rcu_state;
        struct rcu_node *rnp;
 
        /*
  */
 static void rcu_barrier_callback(struct rcu_head *rhp)
 {
-       struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
-       struct rcu_state *rsp = rdp->rsp;
+       struct rcu_state *rsp = &rcu_state;
 
        if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
                _rcu_barrier_trace(TPS("LastCB"), -1, rsp->barrier_sequence);
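
The removed container_of() line recovered the rcu_data enclosing the barrier_head, but the callback only wanted ->rsp from it, so both locals collapse into &rcu_state. For reference, a standalone demo of the container_of() technique itself:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_demo { void *next; };
struct rcu_data_demo2 {
        int cpu;
        struct rcu_head_demo barrier_head;
};

int main(void)
{
        struct rcu_data_demo2 rd = { .cpu = 3 };
        struct rcu_head_demo *rhp = &rd.barrier_head;
        struct rcu_data_demo2 *rdp =
                container_of(rhp, struct rcu_data_demo2, barrier_head);

        printf("cpu = %d\n", rdp->cpu); /* prints 3 */
        return 0;
}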
        rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
        rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
        rdp->cpu = cpu;
-       rdp->rsp = &rcu_state;
        rcu_boot_init_nocb_percpu_data(rdp);
 }
 
 
                /*
                 * Verify the CPU's sanity, trace the preemption, and
                 * then queue the task as required based on the states
                 * of any ongoing and expedited grace periods.
                 */
                WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
-               trace_rcu_preempt_task(rdp->rsp->name,
+               trace_rcu_preempt_task(rcu_state.name,
                                       t->pid,
                                       (rnp->qsmask & rdp->grpmask)
                                       ? rnp->gp_seq
        if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
                mod_timer(&rdp->nocb_timer, jiffies + 1);
        WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
-       trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, reason);
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
        raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 }
 
        /* If we are not being polled and there is a kthread, awaken it ... */
        t = READ_ONCE(rdp->nocb_kthread);
        if (rcu_nocb_poll || !t) {
-               trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                    TPS("WakeNotPoll"));
                return;
        }
                if (!irqs_disabled_flags(flags)) {
                        /* ... if queue was empty ... */
                        wake_nocb_leader(rdp, false);
-                       trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("WakeEmpty"));
                } else {
                        wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE,
                /* ... or if many callbacks queued. */
                if (!irqs_disabled_flags(flags)) {
                        wake_nocb_leader(rdp, true);
-                       trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("WakeOvf"));
                } else {
                        wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE_FORCE,
                }
                rdp->qlen_last_fqs_check = LONG_MAX / 2;
        } else {
-               trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
        }
        return;
 }
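
For orientation, the branch structure above forms a small wakeup ladder: wake the leader immediately when the queue went from empty to non-empty, force a wakeup when callbacks pile up past the last check, and otherwise just trace "WakeNot". A compressed, hypothetical sketch of that decision; the threshold and names are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum wake { WAKE_NOT, WAKE, WAKE_FORCE };

static enum wake pick_wake(long qlen, long qlen_last_check, bool was_empty)
{
        if (was_empty)
                return WAKE;                    /* "WakeEmpty" path */
        if (qlen > qlen_last_check + 16)        /* hypothetical overflow limit */
                return WAKE_FORCE;              /* "WakeOvf" path */
        return WAKE_NOT;                        /* "WakeNot" path */
}

int main(void)
{
        printf("%d %d %d\n",
               pick_wake(1, 0, true),           /* queue was empty -> WAKE */
               pick_wake(100, 0, false),        /* many callbacks -> WAKE_FORCE */
               pick_wake(4, 0, false));         /* neither -> WAKE_NOT */
        return 0;
}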
                return false;
        __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
        if (__is_kfree_rcu_offset((unsigned long)rhp->func))
-               trace_rcu_kfree_callback(rdp->rsp->name, rhp,
+               trace_rcu_kfree_callback(rcu_state.name, rhp,
                                         (unsigned long)rhp->func,
                                         -atomic_long_read(&rdp->nocb_q_count_lazy),
                                         -atomic_long_read(&rdp->nocb_q_count));
        else
-               trace_rcu_callback(rdp->rsp->name, rhp,
+               trace_rcu_callback(rcu_state.name, rhp,
                                   -atomic_long_read(&rdp->nocb_q_count_lazy),
                                   -atomic_long_read(&rdp->nocb_q_count));
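
The __is_kfree_rcu_offset() test above works because kfree_rcu() smuggles the offset of the rcu_head within its enclosing object through the callback-function field; values below one page cannot be real function addresses, so they select the kfree path. A sketch of the encoding, with demo types and the func field widened to void * for portability:

#include <stddef.h>
#include <stdio.h>

struct rcu_head_demo3 { void *func; /* real callback pointer or kfree offset */ };
struct foo { int payload; struct rcu_head_demo3 rh; };

/* Mirrors the shape of the kernel's test: small values are offsets. */
#define is_kfree_rcu_offset_demo(offset)        ((offset) < 4096)

int main(void)
{
        struct foo f;

        /* kfree_rcu(&f, rh) would stash offsetof(struct foo, rh) here. */
        f.rh.func = (void *)offsetof(struct foo, rh);

        unsigned long off = (unsigned long)f.rh.func;
        if (is_kfree_rcu_offset_demo(off))
                printf("kfree path: object starts %lu bytes before its rcu_head\n",
                       off);
        return 0;
}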
 
        struct rcu_node *rnp = rdp->mynode;
 
        local_irq_save(flags);
-       c = rcu_seq_snap(&rdp->rsp->gp_seq);
+       c = rcu_seq_snap(&rcu_state.gp_seq);
        if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
                local_irq_restore(flags);
        } else {
 
        /* Wait for callbacks to appear. */
        if (!rcu_nocb_poll) {
-               trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
+               trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Sleep"));
                swait_event_interruptible_exclusive(my_rdp->nocb_wq,
                                !READ_ONCE(my_rdp->nocb_leader_sleep));
                raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
                raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
        } else if (firsttime) {
                firsttime = false; /* Don't drown trace log with "Poll"! */
-               trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Poll"));
+               trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Poll"));
        }
 
        /*
                if (rcu_nocb_poll) {
                        schedule_timeout_interruptible(1);
                } else {
-                       trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
+                       trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu,
                                            TPS("WokeEmpty"));
                }
                goto wait_again;
 static void nocb_follower_wait(struct rcu_data *rdp)
 {
        for (;;) {
-               trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FollowerSleep"));
                swait_event_interruptible_exclusive(rdp->nocb_wq,
                                         READ_ONCE(rdp->nocb_follower_head));
                if (smp_load_acquire(&rdp->nocb_follower_head)) {
                        return;
                }
                WARN_ON(signal_pending(current));
-               trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeEmpty"));
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
        }
 }
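
nocb_follower_wait() is a classic wait-until-predicate loop: sleep on the queue head, recheck the predicate on every wakeup, and tolerate spurious wakeups and signals. A userspace analogue using a pthread condition variable (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static void *follower_head;                     /* the predicate */

static void *producer(void *arg)
{
        pthread_mutex_lock(&lock);
        follower_head = arg;                    /* enqueue "callback" */
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;
        int cb = 42;

        pthread_create(&t, NULL, producer, &cb);
        pthread_mutex_lock(&lock);
        while (!follower_head)                  /* recheck, as the loop above does */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        printf("woke with work: %d\n", *(int *)follower_head);
        return 0;
}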
 
                rdp->nocb_follower_tail = &rdp->nocb_follower_head;
                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                BUG_ON(!list);
-               trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeNonEmpty"));
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty"));
 
                /* Each pass through the following loop invokes a callback. */
-               trace_rcu_batch_start(rdp->rsp->name,
+               trace_rcu_batch_start(rcu_state.name,
                                      atomic_long_read(&rdp->nocb_q_count_lazy),
                                      atomic_long_read(&rdp->nocb_q_count), -1);
                c = cl = 0;
                        next = list->next;
                        /* Wait for enqueuing to complete, if needed. */
                        while (next == NULL && &list->next != tail) {
-                               trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+                               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                                    TPS("WaitQueue"));
                                schedule_timeout_interruptible(1);
-                               trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+                               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                                    TPS("WokeQueue"));
                                next = list->next;
                        }
                        debug_rcu_head_unqueue(list);
                        local_bh_disable();
-                       if (__rcu_reclaim(rdp->rsp->name, list))
+                       if (__rcu_reclaim(rcu_state.name, list))
                                cl++;
                        c++;
                        local_bh_enable();
                        cond_resched_tasks_rcu_qs();
                        list = next;
                }
-               trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
+               trace_rcu_batch_end(rcu_state.name, c, !!list, 0, 0, 1);
                smp_mb__before_atomic();  /* _add after CB invocation. */
                atomic_long_add(-c, &rdp->nocb_q_count);
                atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
        ndw = READ_ONCE(rdp->nocb_defer_wakeup);
        WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
        __wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
-       trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
 }
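
The READ_ONCE()/WRITE_ONCE() pair above keeps the compiler from tearing, fusing, or re-reading the nocb_defer_wakeup accesses. Minimal userspace stand-ins built on volatile casts, assuming GCC/Clang for the __typeof__ extension:

#include <stdio.h>

#define READ_ONCE(x)            (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)      (*(volatile __typeof__(x) *)&(x) = (val))

static int nocb_defer_wakeup;

int main(void)
{
        WRITE_ONCE(nocb_defer_wakeup, 2);
        int ndw = READ_ONCE(nocb_defer_wakeup); /* one untorn load */
        WRITE_ONCE(nocb_defer_wakeup, 0);       /* RCU_NOCB_WAKE_NOT analogue */
        printf("ndw = %d\n", ndw);
        return 0;
}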
 
 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */