www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
locking/osq_lock: Use atomic_try_cmpxchg_release() in osq_unlock()
author: Uros Bizjak <ubizjak@gmail.com>
Tue, 1 Oct 2024 11:45:57 +0000 (13:45 +0200)
committer: Peter Zijlstra <peterz@infradead.org>
Fri, 25 Oct 2024 08:01:50 +0000 (10:01 +0200)
Replace this pattern in osq_unlock():

    atomic_cmpxchg(*ptr, old, new) == old

... with the simpler and faster:

    atomic_try_cmpxchg(*ptr, &old, new)

The x86 CMPXCHG instruction returns success in the ZF flag,
so this change saves a compare after the CMPXCHG.  The code
in the fast path of osq_unlock() improves from:

 11b: 31 c9                 xor    %ecx,%ecx
 11d: 8d 50 01              lea    0x1(%rax),%edx
 120: 89 d0                 mov    %edx,%eax
 122: f0 0f b1 0f           lock cmpxchg %ecx,(%rdi)
 126: 39 c2                 cmp    %eax,%edx
 128: 75 05                 jne    12f <...>

to:

 12b: 31 d2                 xor    %edx,%edx
 12d: 83 c0 01              add    $0x1,%eax
 130: f0 0f b1 17           lock cmpxchg %edx,(%rdi)
 134: 75 05                 jne    13b <...>

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Link: https://lore.kernel.org/r/20241001114606.820277-1-ubizjak@gmail.com
kernel/locking/osq_lock.c

index 75a6f6133866d81ab4bd94f128d3f20f85c7c15f..b4233dc2c2b04c13add7d5ac1395705307ea9383 100644 (file)
@@ -215,8 +215,7 @@ void osq_unlock(struct optimistic_spin_queue *lock)
        /*
         * Fast path for the uncontended case.
         */
-       if (likely(atomic_cmpxchg_release(&lock->tail, curr,
-                                         OSQ_UNLOCKED_VAL) == curr))
+       if (atomic_try_cmpxchg_release(&lock->tail, &curr, OSQ_UNLOCKED_VAL))
                return;
 
        /*