www.infradead.org Git - users/hch/misc.git/commitdiff
powerpc/qspinlock: Add spinlock contention tracepoint
Author: Nysal Jan K.A. <nysal@linux.ibm.com>
Thu, 31 Jul 2025 06:18:53 +0000 (11:48 +0530)
Committer: Madhavan Srinivasan <maddy@linux.ibm.com>
Mon, 1 Sep 2025 08:08:12 +0000 (13:38 +0530)
Add a lock contention tracepoint in the queued spinlock slowpath.
Also add the __lockfunc annotation so that in_lock_functions()
works as expected.

Signed-off-by: Nysal Jan K.A. <nysal@linux.ibm.com>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250731061856.1858898-1-nysal@linux.ibm.com
arch/powerpc/lib/qspinlock.c

index bcc7e4dff8c3056f09d91698a62d2b582a977860..95ab4cdf582ebddee3fc7de213df0cda11f0c6ab 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/sched/clock.h>
 #include <asm/qspinlock.h>
 #include <asm/paravirt.h>
+#include <trace/events/lock.h>
 
 #define MAX_NODES      4
 
@@ -708,26 +709,26 @@ release:
        qnodesp->count--;
 }
 
-void queued_spin_lock_slowpath(struct qspinlock *lock)
+void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock)
 {
+       trace_contention_begin(lock, LCB_F_SPIN);
        /*
         * This looks funny, but it induces the compiler to inline both
         * sides of the branch rather than share code as when the condition
         * is passed as the paravirt argument to the functions.
         */
        if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()) {
-               if (try_to_steal_lock(lock, true)) {
+               if (try_to_steal_lock(lock, true))
                        spec_barrier();
-                       return;
-               }
-               queued_spin_lock_mcs_queue(lock, true);
+               else
+                       queued_spin_lock_mcs_queue(lock, true);
        } else {
-               if (try_to_steal_lock(lock, false)) {
+               if (try_to_steal_lock(lock, false))
                        spec_barrier();
-                       return;
-               }
-               queued_spin_lock_mcs_queue(lock, false);
+               else
+                       queued_spin_lock_mcs_queue(lock, false);
        }
+       trace_contention_end(lock, 0);
 }
 EXPORT_SYMBOL(queued_spin_lock_slowpath);