From: Nysal Jan K.A. <nysal@linux.ibm.com>
Date: Thu, 31 Jul 2025 06:18:53 +0000 (+0530)
Subject: powerpc/qspinlock: Add spinlock contention tracepoint
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=4f61d54d2245c15b23ad78a89f854fb2496b6216;p=users%2Fhch%2Fmisc.git

powerpc/qspinlock: Add spinlock contention tracepoint

Add a lock contention tracepoint in the queued spinlock slowpath.
Also add the __lockfunc annotation so that in_lock_functions()
works as expected.

Signed-off-by: Nysal Jan K.A. <nysal@linux.ibm.com>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250731061856.1858898-1-nysal@linux.ibm.com
---

diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index bcc7e4dff8c3..95ab4cdf582e 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -9,6 +9,7 @@
 #include <asm/qspinlock.h>
 #include <asm/paravirt.h>
 #include <asm/systemcfg.h>
+#include <trace/events/lock.h>
 
 #define MAX_NODES	4
 
@@ -708,26 +709,26 @@ release:
 	qnodesp->count--;
 }
 
-void queued_spin_lock_slowpath(struct qspinlock *lock)
+void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock)
 {
+	trace_contention_begin(lock, LCB_F_SPIN);
 	/*
 	 * This looks funny, but it induces the compiler to inline both
 	 * sides of the branch rather than share code as when the condition
 	 * is passed as the paravirt argument to the functions.
 	 */
 	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()) {
-		if (try_to_steal_lock(lock, true)) {
+		if (try_to_steal_lock(lock, true))
 			spec_barrier();
-			return;
-		}
-		queued_spin_lock_mcs_queue(lock, true);
+		else
+			queued_spin_lock_mcs_queue(lock, true);
 	} else {
-		if (try_to_steal_lock(lock, false)) {
+		if (try_to_steal_lock(lock, false))
 			spec_barrier();
-			return;
-		}
-		queued_spin_lock_mcs_queue(lock, false);
+		else
+			queued_spin_lock_mcs_queue(lock, false);
 	}
+	trace_contention_end(lock, 0);
 }
 EXPORT_SYMBOL(queued_spin_lock_slowpath);