        .name = RCU_STATE_NAME(sname), \
        .abbr = sabbr, \
        .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
+       .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
 }
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
                        trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
                                                  rnp->grplo, rnp->grphi,
                                                  TPS("wait"));
-                       wait_event(rnp->exp_wq[(s >> 1) & 0x1],
+                       wait_event(rnp->exp_wq[(s >> 1) & 0x3],
                                   sync_exp_work_done(rsp,
                                                      &rdp->exp_workdone2, s));
                        return true;
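
The wait-queue index used above is derived directly from the expedited sequence number. A minimal sketch of that derivation, for illustration only (exp_wq_index() is a hypothetical helper, not part of the patch): the low-order bit of the sequence flags a grace period in progress, so s >> 1 counts grace periods, and masking with 0x3 spreads consecutive grace periods across the four wait queues. Four queues are needed because, with wakeups now allowed to overlap the next grace period, waiters for more than two distinct grace periods can coexist.

	/* Hypothetical helper, for illustration only; assumes the expedited
	 * sequence convention used above (low bit set while a GP is running). */
	static inline int exp_wq_index(unsigned long s)
	{
		/* s >> 1 is the grace-period count; keep its low two bits. */
		return (s >> 1) & 0x3;
	}
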
        synchronize_sched_expedited_wait(rsp);
        rcu_exp_gp_seq_end(rsp);
        trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
+
+       /*
+        * Switch over to wakeup mode, allowing the next GP, but -only- the
+        * next GP, to proceed.
+        */
+       mutex_lock(&rsp->exp_wake_mutex);
+       mutex_unlock(&rsp->exp_mutex);
+
        rcu_for_each_node_breadth_first(rsp, rnp) {
                if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
                        spin_lock(&rnp->exp_lock);
                        /* Recheck, avoid hang in case someone just arrived. */
                        if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
                                rnp->exp_seq_rq = s;
                        spin_unlock(&rnp->exp_lock);
                }
-               wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x1]);
+               wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
        }
        trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
-       mutex_unlock(&rsp->exp_mutex);
+       mutex_unlock(&rsp->exp_wake_mutex);
 }
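
The comment block above describes the hand-off that the mutex pair implements. A minimal sketch of that pattern, for illustration only (do_expedited_gp() and do_wakeups() are hypothetical stand-ins, and the kernel's struct rcu_state and mutex API are assumed): the grace-period owner acquires exp_wake_mutex before releasing exp_mutex, so the next expedited grace period can begin while this one's wakeups are still being issued, but the grace period after that must wait for those wakeups to finish.

	/* Sketch of the exp_mutex -> exp_wake_mutex hand-off (not kernel code). */
	static void expedited_gp_with_overlapped_wakeup(struct rcu_state *rsp)
	{
		mutex_lock(&rsp->exp_mutex);		/* Own the expedited GP. */
		do_expedited_gp(rsp);			/* Hypothetical: drive the GP itself. */
		mutex_lock(&rsp->exp_wake_mutex);	/* Own the wakeup phase... */
		mutex_unlock(&rsp->exp_mutex);		/* ...so the next GP can start now. */
		do_wakeups(rsp);			/* Hypothetical: wake up waiters. */
		mutex_unlock(&rsp->exp_wake_mutex);	/* Allow the next GP's wakeup phase. */
	}
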
 
 /**
                        rcu_init_one_nocb(rnp);
                        init_waitqueue_head(&rnp->exp_wq[0]);
                        init_waitqueue_head(&rnp->exp_wq[1]);
+                       init_waitqueue_head(&rnp->exp_wq[2]);
+                       init_waitqueue_head(&rnp->exp_wq[3]);
                        spin_lock_init(&rnp->exp_lock);
                }
        }
 
 
        spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
        unsigned long exp_seq_rq;
-       wait_queue_head_t exp_wq[2];
+       wait_queue_head_t exp_wq[4];
 } ____cacheline_internodealigned_in_smp;
 
 /*
        /* End of fields guarded by barrier_mutex. */
 
        struct mutex exp_mutex;                 /* Serialize expedited GP. */
+       struct mutex exp_wake_mutex;            /* Serialize wakeup. */
        unsigned long expedited_sequence;       /* Take a ticket. */
        atomic_long_t expedited_normal;         /* # fallbacks to normal. */
        atomic_t expedited_need_qs;             /* # CPUs left to check in. */