www.infradead.org Git - linux.git/commitdiff
rcu: Eliminate rcu_irq_enter_disabled()
author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tue, 3 Oct 2017 23:51:47 +0000 (16:51 -0700)
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Mon, 27 Nov 2017 16:42:03 +0000 (08:42 -0800)
Now that the irq path uses the rcu_nmi_{enter,exit}() algorithm,
rcu_irq_enter() and rcu_irq_exit() may be used from any context.  There is
thus no need for rcu_irq_enter_disabled() and for the checks using it.
This commit therefore eliminates rcu_irq_enter_disabled().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/tracepoint.h
kernel/rcu/tree.c
kernel/trace/trace.c

index b3dbf9502fd0fa54ff39d4dcba0a74cc46724538..ce9beec35e34a512fa76069124b2de83daaf6068 100644 (file)
@@ -111,7 +111,6 @@ static inline void rcu_cpu_stall_reset(void) { }
 static inline void rcu_idle_enter(void) { }
 static inline void rcu_idle_exit(void) { }
 static inline void rcu_irq_enter(void) { }
-static inline bool rcu_irq_enter_disabled(void) { return false; }
 static inline void rcu_irq_exit_irqson(void) { }
 static inline void rcu_irq_enter_irqson(void) { }
 static inline void rcu_irq_exit(void) { }
index 37d6fd3b7ff82ad8217bcb31965eb4a4b5df7cdc..fd996cdf18334c654882704bf2dbfcd0920491f2 100644 (file)
@@ -85,7 +85,6 @@ void rcu_irq_enter(void);
 void rcu_irq_exit(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
-bool rcu_irq_enter_disabled(void);
 
 void exit_rcu(void);
 
index a26ffbe09e715c522908e7be6637e195d423c5cc..c94f466d57ef1ee7b50d5afb57fc07407ea3833c 100644 (file)
@@ -137,11 +137,8 @@ extern void syscall_unregfunc(void);
                                                                        \
                if (!(cond))                                            \
                        return;                                         \
-               if (rcucheck) {                                         \
-                       if (WARN_ON_ONCE(rcu_irq_enter_disabled()))     \
-                               return;                                 \
+               if (rcucheck)                                           \
                        rcu_irq_enter_irqson();                         \
-               }                                                       \
                rcu_read_lock_sched_notrace();                          \
                it_func_ptr = rcu_dereference_sched((tp)->funcs);       \
                if (it_func_ptr) {                                      \
index d123474fe829487a36b3f4d9a29ecd9dc6f62fef..444aa2b3f24d766cc3683490b83b1e599b42ff01 100644 (file)
@@ -270,20 +270,6 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
        .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
 };
 
-/*
- * There's a few places, currently just in the tracing infrastructure,
- * that uses rcu_irq_enter() to make sure RCU is watching. But there's
- * a small location where that will not even work. In those cases
- * rcu_irq_enter_disabled() needs to be checked to make sure rcu_irq_enter()
- * can be called.
- */
-static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);
-
-bool rcu_irq_enter_disabled(void)
-{
-       return this_cpu_read(disable_rcu_irq_enter);
-}
-
 /*
  * Record entry into an extended quiescent state.  This is only to be
  * called when not already in an extended quiescent state.
@@ -792,10 +778,8 @@ static void rcu_eqs_enter_common(bool user)
                do_nocb_deferred_wakeup(rdp);
        }
        rcu_prepare_for_idle();
-       __this_cpu_inc(disable_rcu_irq_enter);
-       rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
-       rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
-       __this_cpu_dec(disable_rcu_irq_enter);
+       rdtp->dynticks_nesting = 0;
+       rcu_dynticks_eqs_enter();
        rcu_dynticks_task_enter();
 
        /*
@@ -1001,10 +985,8 @@ static void rcu_eqs_exit(bool user)
        if (oldval) {
                rdtp->dynticks_nesting++;
        } else {
-               __this_cpu_inc(disable_rcu_irq_enter);
                rcu_eqs_exit_common(1, user);
                rdtp->dynticks_nesting = 1;
-               __this_cpu_dec(disable_rcu_irq_enter);
                WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
        }
 }
index 73e67b68c53b47d5b422970cd0dee1d0bec27002..dbce1be3bab8ace00c7c658f21834ae3f0d649c9 100644 (file)
@@ -2682,17 +2682,6 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
        if (unlikely(in_nmi()))
                return;
 
-       /*
-        * It is possible that a function is being traced in a
-        * location that RCU is not watching. A call to
-        * rcu_irq_enter() will make sure that it is, but there's
-        * a few internal rcu functions that could be traced
-        * where that wont work either. In those cases, we just
-        * do nothing.
-        */
-       if (unlikely(rcu_irq_enter_disabled()))
-               return;
-
        rcu_irq_enter_irqson();
        __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
        rcu_irq_exit_irqson();