The rcu_data structure's ->gpwrap field is read locklessly, and so
this commit adds the required READ_ONCE() to a pair of loads in order
to avoid destructive compiler optimizations.
This data race was reported by KCSAN.
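
For illustration only (not part of this patch): a minimal user-space
sketch of the hazard, assuming READ_ONCE() boils down to a volatile
access, as in the kernel's definition for scalar types. The
rcu_data_sketch structure and reader() function below are hypothetical
stand-ins, not kernel code.

        #include <stdio.h>

        /*
         * Hypothetical stand-in for the kernel's READ_ONCE(): a
         * volatile access that prevents the compiler from refetching,
         * fusing, or tearing the load.
         */
        #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

        struct rcu_data_sketch {
                int gpwrap;     /* may be written concurrently by another CPU */
        };

        static void reader(struct rcu_data_sketch *rdp)
        {
                /*
                 * A plain rdp->gpwrap load here could legally be
                 * issued more than once by the compiler, and a
                 * concurrent writer could make those loads disagree.
                 */
                if (!READ_ONCE(rdp->gpwrap))
                        printf("no counter wrap observed\n");
        }

        int main(void)
        {
                struct rcu_data_sketch rdp = { .gpwrap = 0 };

                reader(&rdp);
                return 0;
        }

The volatile cast constrains only the compiler; ordering against other
CPUs' accesses still requires the usual memory barriers where needed.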
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
 
        rcu_lockdep_assert_cblist_protected(rdp);
        c = rcu_seq_snap(&rcu_state.gp_seq);
-       if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+       if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
                /* Old request still live, so mark recent callbacks. */
                (void)rcu_segcblist_accelerate(&rdp->cblist, c);
                return;
 
                        continue;
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        rdp = per_cpu_ptr(&rcu_data, cpu);
-                       if (rdp->gpwrap ||
+                       if (READ_ONCE(rdp->gpwrap) ||
                            ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
                                         READ_ONCE(rdp->gp_seq_needed)))
                                continue;