         * hold it, acquire the root rcu_node structure's lock in order to
         * start one (if needed).
         */
-       if (rnp != rnp_root) {
-               raw_spin_lock(&rnp_root->lock);
-               smp_mb__after_unlock_lock();
-       }
+       if (rnp != rnp_root)
+               raw_spin_lock_rcu_node(rnp_root);
 
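Every hunk in this patch folds the same two-step pattern, a raw_spin_lock*()
acquisition of an rcu_node structure's ->lock followed by
smp_mb__after_unlock_lock(), into a single *_rcu_node() helper. The unlock
side is deliberately left untouched: smp_mb__after_unlock_lock() is what
upgrades the prior UNLOCK plus this LOCK into a full memory barrier, so only
the acquisition needs wrapping. A minimal sketch of the plain-spinlock
helper, assuming a definition along these lines in kernel/rcu/tree.h:

static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
{
        raw_spin_lock(&rnp->lock);
        smp_mb__after_unlock_lock(); /* Full barrier with prior unlock. */
}
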
        /*
         * Get a new grace-period number.  If there really is no grace
        if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
             rdp->completed == READ_ONCE(rnp->completed) &&
             !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
-           !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
+           !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
                local_irq_restore(flags);
                return;
        }
-       smp_mb__after_unlock_lock();
        needwake = __note_gp_changes(rsp, rnp, rdp);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        if (needwake)
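
The trylock conversion above is the one subtle case: the barrier may be
issued only when the lock is actually acquired, which is why the formerly
unconditional smp_mb__after_unlock_lock() after the trylock is removed as
well. A sketch of the helper, again an assumed definition:

static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
{
        bool locked = raw_spin_trylock(&rnp->lock);

        if (locked)
                smp_mb__after_unlock_lock(); /* Only after successful acquisition. */
        return locked;
}
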
        struct rcu_node *rnp = rcu_get_root(rsp);
 
        WRITE_ONCE(rsp->gp_activity, jiffies);
-       raw_spin_lock_irq(&rnp->lock);
-       smp_mb__after_unlock_lock();
+       raw_spin_lock_irq_rcu_node(rnp);
        if (!READ_ONCE(rsp->gp_flags)) {
                /* Spurious wakeup, tell caller to go back to sleep.  */
                raw_spin_unlock_irq(&rnp->lock);
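
The irq-disabling variant used here follows the same shape, sketched under
the same assumption:

static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
{
        raw_spin_lock_irq(&rnp->lock);
        smp_mb__after_unlock_lock(); /* Full barrier with prior unlock. */
}
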
         */
        rcu_for_each_leaf_node(rsp, rnp) {
                rcu_gp_slow(rsp, gp_preinit_delay);
-               raw_spin_lock_irq(&rnp->lock);
-               smp_mb__after_unlock_lock();
+               raw_spin_lock_irq_rcu_node(rnp);
                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
                    !rnp->wait_blkd_tasks) {
                        /* Nothing to do on this leaf rcu_node structure. */
         */
        rcu_for_each_node_breadth_first(rsp, rnp) {
                rcu_gp_slow(rsp, gp_init_delay);
-               raw_spin_lock_irq(&rnp->lock);
-               smp_mb__after_unlock_lock();
+               raw_spin_lock_irq_rcu_node(rnp);
                rdp = this_cpu_ptr(rsp->rda);
                rcu_preempt_check_blocked_tasks(rnp);
                rnp->qsmask = rnp->qsmaskinit;
        }
        /* Clear flag to prevent immediate re-entry. */
        if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-               raw_spin_lock_irq(&rnp->lock);
-               smp_mb__after_unlock_lock();
+               raw_spin_lock_irq_rcu_node(rnp);
                WRITE_ONCE(rsp->gp_flags,
                           READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
                raw_spin_unlock_irq(&rnp->lock);
        struct rcu_node *rnp = rcu_get_root(rsp);
 
        WRITE_ONCE(rsp->gp_activity, jiffies);
-       raw_spin_lock_irq(&rnp->lock);
-       smp_mb__after_unlock_lock();
+       raw_spin_lock_irq_rcu_node(rnp);
        gp_duration = jiffies - rsp->gp_start;
        if (gp_duration > rsp->gp_max)
                rsp->gp_max = gp_duration;
         * grace period is recorded in any of the rcu_node structures.
         */
        rcu_for_each_node_breadth_first(rsp, rnp) {
-               raw_spin_lock_irq(&rnp->lock);
-               smp_mb__after_unlock_lock();
+               raw_spin_lock_irq_rcu_node(rnp);
                WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
                WARN_ON_ONCE(rnp->qsmask);
                WRITE_ONCE(rnp->completed, rsp->gpnum);
                rcu_gp_slow(rsp, gp_cleanup_delay);
        }
        rnp = rcu_get_root(rsp);
-       raw_spin_lock_irq(&rnp->lock);
-       smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
+       raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
        rcu_nocb_gp_set(rnp, nocb);
 
        /* Declare grace period done. */
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                rnp_c = rnp;
                rnp = rnp->parent;
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               smp_mb__after_unlock_lock();
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                oldmask = rnp_c->qsmask;
        }
 
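Because raw_spin_lock_irqsave() writes to its flags argument, the irqsave
variant is most naturally a macro rather than an inline function; a sketch,
again assuming a tree.h-style definition:

#define raw_spin_lock_irqsave_rcu_node(rnp, flags)              \
do {                                                            \
        raw_spin_lock_irqsave(&(rnp)->lock, flags);             \
        smp_mb__after_unlock_lock();                            \
} while (0)
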
        gps = rnp->gpnum;
        mask = rnp->grpmask;
        raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
-       raw_spin_lock(&rnp_p->lock);    /* irqs already disabled. */
-       smp_mb__after_unlock_lock();
+       raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
        rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
 }
 
        struct rcu_node *rnp;
 
        rnp = rdp->mynode;
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       smp_mb__after_unlock_lock();
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if ((rdp->cpu_no_qs.b.norm &&
             rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
            rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
                rnp = rnp->parent;
                if (!rnp)
                        break;
-               raw_spin_lock(&rnp->lock); /* irqs already disabled. */
-               smp_mb__after_unlock_lock(); /* GP memory ordering. */
+               raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                rnp->qsmaskinit &= ~mask;
                rnp->qsmask &= ~mask;
                if (rnp->qsmaskinit) {
 
        /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
        mask = rdp->grpmask;
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       smp_mb__after_unlock_lock();    /* Enforce GP memory-order guarantee. */
+       raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
        rnp->qsmaskinitnext &= ~mask;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
        rcu_for_each_leaf_node(rsp, rnp) {
                cond_resched_rcu_qs();
                mask = 0;
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               smp_mb__after_unlock_lock();
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->qsmask == 0) {
                        if (rcu_state_p == &rcu_sched_state ||
                            rsp != rcu_state_p ||
        /* rnp_old == rcu_get_root(rsp), rnp == NULL. */
 
        /* Reached the root of the rcu_node tree, acquire lock. */
-       raw_spin_lock_irqsave(&rnp_old->lock, flags);
-       smp_mb__after_unlock_lock();
+       raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
        raw_spin_unlock(&rnp_old->fqslock);
        if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                rsp->n_force_qs_lh++;
                if (!rcu_gp_in_progress(rsp)) {
                        struct rcu_node *rnp_root = rcu_get_root(rsp);
 
-                       raw_spin_lock(&rnp_root->lock);
-                       smp_mb__after_unlock_lock();
+                       raw_spin_lock_rcu_node(rnp_root);
                        needwake = rcu_start_gp(rsp);
                        raw_spin_unlock(&rnp_root->lock);
                        if (needwake)
         * CPUs for the current rcu_node structure up the rcu_node tree.
         */
        rcu_for_each_leaf_node(rsp, rnp) {
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               smp_mb__after_unlock_lock();
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        continue;  /* No new CPUs, nothing to do. */
                rnp_up = rnp->parent;
                done = false;
                while (rnp_up) {
-                       raw_spin_lock_irqsave(&rnp_up->lock, flags);
-                       smp_mb__after_unlock_lock();
+                       raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
 
        sync_exp_reset_tree_hotplug(rsp);
        rcu_for_each_node_breadth_first(rsp, rnp) {
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               smp_mb__after_unlock_lock();
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                rnp->expmask = rnp->expmaskinit;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                mask = rnp->grpmask;
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
                rnp = rnp->parent;
-               raw_spin_lock(&rnp->lock); /* irqs already disabled */
-               smp_mb__after_unlock_lock();
+               raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
                rnp->expmask &= ~mask;
        }
 {
        unsigned long flags;
 
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       smp_mb__after_unlock_lock();
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
        __rcu_report_exp_rnp(rsp, rnp, wake, flags);
 }
 
 {
        unsigned long flags;
 
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       smp_mb__after_unlock_lock();
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
 
        sync_exp_reset_tree(rsp);
        rcu_for_each_leaf_node(rsp, rnp) {
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               smp_mb__after_unlock_lock();
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
 
                /* Each pass checks a CPU for identity, offline, and idle. */
                mask_ofl_test = 0;
         */
        rnp = rdp->mynode;
        mask = rdp->grpmask;
-       raw_spin_lock(&rnp->lock);              /* irqs already disabled. */
-       smp_mb__after_unlock_lock();
+       raw_spin_lock_rcu_node(rnp);            /* irqs already disabled. */
        rnp->qsmaskinitnext |= mask;
        rnp->expmaskinitnext |= mask;
        if (!rdp->beenonline)
 
                /* Possibly blocking in an RCU read-side critical section. */
                rdp = this_cpu_ptr(rcu_state_p->rda);
                rnp = rdp->mynode;
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               smp_mb__after_unlock_lock();
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                t->rcu_read_unlock_special.b.blocked = true;
                t->rcu_blocked_node = rnp;
 
                 */
                for (;;) {
                        rnp = t->rcu_blocked_node;
-                       raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
-                       smp_mb__after_unlock_lock();
+                       raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                        if (rnp == t->rcu_blocked_node)
                                break;
                        WARN_ON_ONCE(1);
            READ_ONCE(rnp->boost_tasks) == NULL)
                return 0;  /* Nothing left to boost. */
 
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       smp_mb__after_unlock_lock();
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
 
        /*
         * Recheck under the lock: all tasks in need of boosting
                           "rcub/%d", rnp_index);
        if (IS_ERR(t))
                return PTR_ERR(t);
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       smp_mb__after_unlock_lock();
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
        rnp->boost_kthread_task = t;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        sp.sched_priority = kthread_prio;
                if (!*rdp->nxttail[RCU_DONE_TAIL])
                        continue;
                rnp = rdp->mynode;
-               raw_spin_lock(&rnp->lock); /* irqs already disabled. */
-               smp_mb__after_unlock_lock();
+               raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                if (needwake)
        bool needwake;
        struct rcu_node *rnp = rdp->mynode;
 
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       smp_mb__after_unlock_lock();
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
        needwake = rcu_start_future_gp(rnp, rdp, &c);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        if (needwake)