locking: Remove rcu_read_{,un}lock() for preempt_{dis,en}able()
author    Yanfei Xu <yanfei.xu@windriver.com>
          Wed, 13 Oct 2021 13:41:52 +0000 (21:41 +0800)
committer Peter Zijlstra <peterz@infradead.org>
          Tue, 19 Oct 2021 15:27:06 +0000 (17:27 +0200)
preempt_disable/enable() is equivalent to an RCU read-side critical
section, and the spinning code in mutex and rwsem already ensures that
preemption is disabled. So remove the unnecessary rcu_read_lock/unlock()
to save some cycles in these hot paths.

Signed-off-by: Yanfei Xu <yanfei.xu@windriver.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Link: https://lore.kernel.org/r/20211013134154.1085649-2-yanfei.xu@windriver.com
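
For background on why this is safe: since the RCU flavors were consolidated
(v4.20), a preempt-disabled region is itself an RCU read-side critical
section, so synchronize_rcu() cannot return while any CPU is inside one.
Below is a minimal sketch of the lifetime guarantee the spinners rely on;
curr_owner, reader() and retire_owner() are illustrative names, not kernel
APIs:

#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

static struct task_struct *curr_owner;	/* stands in for lock->owner */

static bool reader(void)
{
	struct task_struct *owner;
	bool on_cpu = false;

	preempt_disable();		/* acts as an RCU read-side critical section */
	owner = READ_ONCE(curr_owner);
	if (owner)
		on_cpu = owner->on_cpu;	/* safe: the task_struct is pinned */
	preempt_enable();		/* a grace period may now complete */

	return on_cpu;
}

static void retire_owner(struct task_struct *old)
{
	WRITE_ONCE(curr_owner, NULL);
	synchronize_rcu();		/* waits out all preempt-disabled readers */
	put_task_struct(old);		/* now safe to drop the last reference */
}

A lock owner's task_struct is freed only after an RCU grace period, which is
what lets the spinning code dereference owner->on_cpu without taking a
reference.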
kernel/locking/mutex.c
kernel/locking/rwsem.c

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 2fede72b6af530b0e093aa8ecf8993156436786a..db19136111921b8f0d222018e429d07ed1c9f5e3 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -351,13 +351,16 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 {
        bool ret = true;
 
-       rcu_read_lock();
+       lockdep_assert_preemption_disabled();
+
        while (__mutex_owner(lock) == owner) {
                /*
                 * Ensure we emit the owner->on_cpu, dereference _after_
-                * checking lock->owner still matches owner. If that fails,
-                * owner might point to freed memory. If it still matches,
-                * the rcu_read_lock() ensures the memory stays valid.
+                * checking lock->owner still matches owner. And we already
+                * disabled preemption, which is equivalent to an RCU
+                * read-side critical section in the optimistic spinning
+                * code. Thus the task_struct structure won't go away
+                * during the spinning period.
                 */
                barrier();
 
@@ -377,7 +380,6 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 
                cpu_relax();
        }
-       rcu_read_unlock();
 
        return ret;
 }
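
The barrier() above only constrains the compiler, and the pattern is subtle
enough to be worth isolating. A condensed sketch, with struct lock and
spin_while_owned() as illustrative stand-ins for the mutex types:

#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/processor.h>
#include <linux/sched.h>

struct lock {
	struct task_struct *owner;
};

/* Spin while 'owner' still holds 'l' and is running on a CPU. */
static bool spin_while_owned(struct lock *l, struct task_struct *owner)
{
	lockdep_assert_preemption_disabled();

	while (READ_ONCE(l->owner) == owner) {
		/*
		 * Compiler-only barrier: forbid hoisting the plain
		 * owner->on_cpu load above the re-read of l->owner,
		 * since on_cpu may only be dereferenced while the
		 * ownership check still holds.
		 */
		barrier();

		if (!owner->on_cpu || need_resched())
			return false;	/* owner blocked, or we should yield */

		cpu_relax();
	}

	return true;			/* owner released the lock */
}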
@@ -390,19 +392,25 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
        struct task_struct *owner;
        int retval = 1;
 
+       lockdep_assert_preemption_disabled();
+
        if (need_resched())
                return 0;
 
-       rcu_read_lock();
+       /*
+        * We already disabled preemption, which is equivalent to an RCU
+        * read-side critical section in the optimistic spinning code. Thus
+        * the task_struct structure won't go away during the spinning period.
+        */
        owner = __mutex_owner(lock);
 
        /*
         * As lock holder preemption issue, we both skip spinning if task is not
         * on cpu or its cpu is preempted
         */
+
        if (owner)
                retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
-       rcu_read_unlock();
 
        /*
         * If lock->owner is not set, the mutex has been released. Return true
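
The on_cpu / vcpu_is_preempted() test above is the usual "is spinning
worthwhile" heuristic: an owner that is off CPU, or whose vCPU the
hypervisor has preempted, will not release the lock soon. rwsem.c already
has an owner_on_cpu() helper of this shape (used in the hunk below), and
mainline later moved it to a shared header; a sketch of that helper:

#include <linux/sched.h>

/*
 * Spinning only pays off while the owner is actually running: if its
 * task is not on a CPU, or its vCPU has been preempted by the
 * hypervisor, the lock will not be released any time soon.
 */
static inline bool owner_on_cpu(struct task_struct *owner)
{
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}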
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 29eea50a3e6782b38a3674a49a7200cf5e0b6d03..884aa08e0624735aa8cc261176544104ab52a29d 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -635,7 +635,10 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
        }
 
        preempt_disable();
-       rcu_read_lock();
+       /*
+        * Disabling preemption is equivalent to an RCU read-side critical
+        * section, thus the task_struct structure won't go away.
+        */
        owner = rwsem_owner_flags(sem, &flags);
        /*
         * Don't check the read-owner as the entry may be stale.
@@ -643,7 +646,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
        if ((flags & RWSEM_NONSPINNABLE) ||
            (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
                ret = false;
-       rcu_read_unlock();
        preempt_enable();
 
        lockevent_cond_inc(rwsem_opt_fail, !ret);
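
In miniature, the transformation this hunk applies: rwsem_can_spin_on_owner()
already ran its owner check with preemption disabled, so the inner RCU
bracket was pure overhead. check_owner() below is a hypothetical stand-in
for the owner/flags test:

#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/rwsem.h>

static bool check_owner(struct rw_semaphore *sem);	/* hypothetical */

/* Old shape: an RCU bracket nested inside the preemption bracket. */
static bool can_spin_old(struct rw_semaphore *sem)
{
	bool ret;

	preempt_disable();
	rcu_read_lock();		/* redundant: preemption is already off */
	ret = check_owner(sem);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}

/* New shape: the preemption bracket alone pins the owner's task_struct. */
static bool can_spin_new(struct rw_semaphore *sem)
{
	bool ret;

	preempt_disable();
	ret = check_owner(sem);
	preempt_enable();

	return ret;
}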
@@ -671,12 +673,13 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
        unsigned long flags, new_flags;
        enum owner_state state;
 
+       lockdep_assert_preemption_disabled();
+
        owner = rwsem_owner_flags(sem, &flags);
        state = rwsem_owner_state(owner, flags);
        if (state != OWNER_WRITER)
                return state;
 
-       rcu_read_lock();
        for (;;) {
                /*
                 * When a waiting writer set the handoff flag, it may spin
@@ -694,7 +697,9 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
                 * Ensure we emit the owner->on_cpu, dereference _after_
                 * checking sem->owner still matches owner, if that fails,
                 * owner might point to free()d memory, if it still matches,
-                * the rcu_read_lock() ensures the memory stays valid.
+                * our spinning context has already disabled preemption,
+                * which is equivalent to an RCU read-side critical section
+                * and ensures the memory stays valid.
                 */
                barrier();
 
@@ -705,7 +710,6 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
 
                cpu_relax();
        }
-       rcu_read_unlock();
 
        return state;
 }
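
Finally, the lockdep_assert_preemption_disabled() calls added above turn the
new precondition into a debug-time check instead of a silent assumption. A
sketch of the idiom; spin_step() and caller() are illustrative names:

#include <linux/lockdep.h>
#include <linux/preempt.h>

static void spin_step(void)
{
	/*
	 * On CONFIG_PROVE_LOCKING kernels this WARNs (once) if the
	 * caller forgot to disable preemption; on other configs it
	 * compiles away to nothing.
	 */
	lockdep_assert_preemption_disabled();

	/* ... safe to dereference the owner's task_struct here ... */
}

static void caller(void)
{
	preempt_disable();
	spin_step();		/* precondition holds */
	preempt_enable();
}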