#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>
+#include <linux/sdt.h>
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
+ u64 spinstart = 0, spinend, spintime;
inc = xadd(&lock->tickets, inc);
if (likely(inc.head == inc.tail))
goto out;
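+ /* Contended: start timing the spin if the spin__spin probe is enabled. */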
+ if (DTRACE_LOCKSTAT_ENABLED(spin__spin))
+ spinstart = dtrace_gethrtime_ns();
+
for (;;) {
unsigned count = SPIN_THRESHOLD;
__ticket_check_and_clear_slowpath(lock, inc.head);
out:
barrier(); /* make sure nothing creeps before the lock is taken */
+ if (DTRACE_LOCKSTAT_ENABLED(spin__spin) && spinstart) {
+ spinend = dtrace_gethrtime_ns();
+ spintime = spinend > spinstart ? spinend - spinstart : 0;
+ DTRACE_LOCKSTAT(spin__spin, spinlock_t *, lock,
+ uint64_t, spintime);
+ }
}
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{ "proc", "__proc_", &stab_attr, DTRACE_PRIV_KERNEL, &sdt_pops, 0 },
{ "io", "__io_", &stab_attr, DTRACE_PRIV_KERNEL, &sdt_pops, 0 },
{ "ip", "__ip_", &stab_attr, DTRACE_PRIV_KERNEL, &sdt_pops, 0 },
+ { "lockstat", "__lockstat_", &stab_attr, DTRACE_PRIV_KERNEL, &sdt_pops, 0 },
{ "tcp", "__tcp_", &stab_attr, DTRACE_PRIV_KERNEL, &sdt_pops, 0 },
{ "udp", "__udp_", &stab_attr, DTRACE_PRIV_KERNEL, &sdt_pops, 0 },
{ "mib", "__mib_", &stab_attr, DTRACE_PRIV_KERNEL, &sdt_pops, 0 },
extern void dtrace_vtime_suspend(void);
extern void dtrace_vtime_resume(void);
extern void dtrace_chill(ktime_t, ktime_t, ktime_t);
+extern ktime_t dtrace_gethrtime(void);
extern void dtrace_skip_instruction(struct pt_regs *);
# error "please don't include this file directly"
#endif
+#include <linux/sdt.h>
+
/*
* include/linux/rwlock_api_smp.h
*
preempt_disable();
if (do_raw_read_trylock(lock)) {
rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
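+ /* rw__acquire reports the lock and whether it was taken for read or write. */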
+ DTRACE_LOCKSTAT(rw__acquire, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_READER);
return 1;
}
preempt_enable();
preempt_disable();
if (do_raw_write_trylock(lock)) {
rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ DTRACE_LOCKSTAT(rw__acquire, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_WRITER);
return 1;
}
preempt_enable();
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+ DTRACE_LOCKSTAT(rw__acquire, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_READER);
}
static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
do_raw_read_lock_flags, &flags);
+ DTRACE_LOCKSTAT(rw__acquire, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_READER);
return flags;
}
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+ DTRACE_LOCKSTAT(rw__acquire, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_READER);
}
static inline void __raw_read_lock_bh(rwlock_t *lock)
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+ DTRACE_LOCKSTAT(rw__acquire, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_READER);
}
static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
do_raw_write_lock_flags, &flags);
+ DTRACE_LOCKSTAT(rw__acquire, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_WRITER);
return flags;
}
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+ DTRACE_LOCKSTAT(rw__acquire, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_WRITER);
}
static inline void __raw_write_lock_bh(rwlock_t *lock)
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+ DTRACE_LOCKSTAT(rw__acquire, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_WRITER);
}
static inline void __raw_write_lock(rwlock_t *lock)
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+ DTRACE_LOCKSTAT(rw__acquire, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_WRITER);
}
#endif /* CONFIG_PREEMPT */
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_write_unlock(lock);
+ DTRACE_LOCKSTAT(rw__release, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_WRITER);
preempt_enable();
}
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_read_unlock(lock);
+ DTRACE_LOCKSTAT(rw__release, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_READER);
preempt_enable();
}
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_read_unlock(lock);
+ DTRACE_LOCKSTAT(rw__release, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_READER);
local_irq_restore(flags);
preempt_enable();
}
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_read_unlock(lock);
+ DTRACE_LOCKSTAT(rw__release, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_READER);
local_irq_enable();
preempt_enable();
}
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_read_unlock(lock);
+ DTRACE_LOCKSTAT(rw__release, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_READER);
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_write_unlock(lock);
+ DTRACE_LOCKSTAT(rw__release, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_WRITER);
local_irq_restore(flags);
preempt_enable();
}
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_write_unlock(lock);
+ DTRACE_LOCKSTAT(rw__release, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_WRITER);
local_irq_enable();
preempt_enable();
}
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_write_unlock(lock);
+ DTRACE_LOCKSTAT(rw__release, struct rwlock *, lock, int,
+ DTRACE_LOCKSTAT_RW_WRITER);
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
#define DTRACE_SRP(name, ...) \
DTRACE_PROBE(__srp_##name, ## __VA_ARGS__);
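+/*
+ * Lockstat probes are SDT probes in the "lockstat" provider (prefix
+ * __lockstat_).  DTRACE_LOCKSTAT_ENABLED() lets callers avoid reading
+ * timestamps when the corresponding probe is not enabled.
+ */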
+#define DTRACE_LOCKSTAT_ENABLED(name) \
+ DTRACE_PROBE_ENABLED(__lockstat_##name)
+
+#define DTRACE_LOCKSTAT(name, ...) \
+ DTRACE_PROBE(__lockstat_##name, ## __VA_ARGS__)
+
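+/* Values for the reader/writer argument of the rw__* probes. */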
+#define DTRACE_LOCKSTAT_RW_WRITER 0
+#define DTRACE_LOCKSTAT_RW_READER 1
+
#endif /* _LINUX_SDT_H_ */
#ifndef _LINUX_SDT_INTERNAL_H_
#define _LINUX_SDT_INTERNAL_H_
+#include <linux/types.h>
+
/*
* This counts the number of args.
*/
#define __DTRACE_APPLY_17(m, def, x1, x2, x3, x4, x5, x6, x7, x8, ...) m(x1), m(x2), m(x3), m(x4), m(x5), m(x6), m(x7), m(x8)
#define __DTRACE_APPLY_18(m, def, x1, x2, x3, x4, x5, x6, x7, x8, ...) m(x1), m(x2), m(x3), m(x4), m(x5), m(x6), m(x7), m(x8)
+/* Needed for lockstat probes where we cannot include ktime.h */
+extern u64 dtrace_gethrtime_ns(void);
+
#endif /* _LINUX_SDT_INTERNAL_H */
# error "please don't include this file directly"
#endif
+#include <linux/sdt.h>
+
/*
* include/linux/spinlock_api_smp.h
*
preempt_disable();
if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ DTRACE_LOCKSTAT(spin__acquire, spinlock_t *, lock);
return 1;
}
preempt_enable();
#else
do_raw_spin_lock_flags(lock, &flags);
#endif
+ DTRACE_LOCKSTAT(spin__acquire, spinlock_t *, lock);
return flags;
}
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+ DTRACE_LOCKSTAT(spin__acquire, spinlock_t *, lock);
}
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+ DTRACE_LOCKSTAT(spin__acquire, spinlock_t *, lock);
}
static inline void __raw_spin_lock(raw_spinlock_t *lock)
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+ DTRACE_LOCKSTAT(spin__acquire, spinlock_t *, lock);
}
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
+ DTRACE_LOCKSTAT(spin__release, spinlock_t *, lock);
preempt_enable();
}
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
+ DTRACE_LOCKSTAT(spin__release, spinlock_t *, lock);
local_irq_restore(flags);
preempt_enable();
}
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
+ DTRACE_LOCKSTAT(spin__release, spinlock_t *, lock);
local_irq_enable();
preempt_enable();
}
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
+ DTRACE_LOCKSTAT(spin__release, spinlock_t *, lock);
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ DTRACE_LOCKSTAT(spin__acquire, spinlock_t *, lock);
return 1;
}
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
EXPORT_SYMBOL(dtrace_chill);
+/* Needed for lockstat probes where we cannot include ktime.h */
+u64 dtrace_gethrtime_ns(void)
+{
+ return ktime_get_raw_fast_ns();
+}
+EXPORT_SYMBOL(dtrace_gethrtime_ns);
+
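+/* ktime_t wrapper around the same raw monotonic clock. */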
+ktime_t dtrace_gethrtime(void)
+{
+ return ns_to_ktime(dtrace_gethrtime_ns());
+}
+EXPORT_SYMBOL(dtrace_gethrtime);
+
void dtrace_stacktrace(stacktrace_state_t *st)
{
struct stack_trace trace;
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
+#include <linux/sdt.h>
/*
* In the DEBUG case we are using the "NULL fastpath" for mutexes,
* which forces all calls into the slowpath:
*/
__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
mutex_set_owner(lock);
+ DTRACE_LOCKSTAT(adaptive__acquire, struct mutex *, lock);
}
EXPORT_SYMBOL(mutex_lock);
mutex_clear_owner(lock);
#endif
__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
+ DTRACE_LOCKSTAT(adaptive__release, struct mutex *, lock);
}
EXPORT_SYMBOL(mutex_unlock);
struct lockdep_map *nest_lock, unsigned long ip,
struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
+ u64 spinstart = 0, spinend, spintotal = 0;
+ u64 waitstart, waitend, waittotal = 0;
struct task_struct *task = current;
struct mutex_waiter waiter;
unsigned long flags;
if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
/* got the lock, yay! */
+ DTRACE_LOCKSTAT(adaptive__acquire, struct mutex *, lock);
preempt_enable();
return 0;
}
lock_contended(&lock->dep_map, ip);
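+ /* Start timing the contended path for the adaptive__spin probe. */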
+ if (DTRACE_LOCKSTAT_ENABLED(adaptive__spin))
+ spinstart = dtrace_gethrtime_ns();
+
for (;;) {
/*
* Lets try to take the lock again - this is needed even if
/* didn't get the lock, go to sleep: */
spin_unlock_mutex(&lock->wait_lock, flags);
- schedule_preempt_disabled();
+
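+ /*
+ * Time the sleep separately so that adaptive__block can report how
+ * long we were blocked waiting for the mutex.
+ */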
+ if (DTRACE_LOCKSTAT_ENABLED(adaptive__block)) {
+ waitstart = dtrace_gethrtime_ns();
+ schedule_preempt_disabled();
+ waitend = dtrace_gethrtime_ns();
+ if (waitend > waitstart)
+ waittotal += waitend - waitstart;
+ } else {
+ schedule_preempt_disabled();
+ }
+
spin_lock_mutex(&lock->wait_lock, flags);
}
__set_task_state(task, TASK_RUNNING);
}
spin_unlock_mutex(&lock->wait_lock, flags);
+
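+ /* Report spin time as the elapsed contended time minus any blocked time. */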
+ if (DTRACE_LOCKSTAT_ENABLED(adaptive__spin) && spinstart) {
+ spinend = dtrace_gethrtime_ns();
+ spintotal = (spinend > spinstart) ? (spinend - spinstart) : 0;
+ spintotal = (spintotal > waittotal) ?
+ (spintotal - waittotal) : 0;
+ DTRACE_LOCKSTAT(adaptive__spin, struct mutex *, lock,
+ uint64_t, spintotal);
+ }
+ if (DTRACE_LOCKSTAT_ENABLED(adaptive__block) && waittotal)
+ DTRACE_LOCKSTAT(adaptive__block, struct mutex *, lock,
+ uint64_t, waittotal);
+ DTRACE_LOCKSTAT(adaptive__acquire, struct mutex *, lock);
preempt_enable();
return 0;
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
mutex_release(&lock->dep_map, 1, ip);
+ DTRACE_LOCKSTAT(adaptive__acquire__error, struct mutex *, lock,
+ int, ret);
preempt_enable();
return ret;
}
ret = __mutex_fastpath_lock_retval(&lock->count);
if (likely(!ret)) {
mutex_set_owner(lock);
+ DTRACE_LOCKSTAT(adaptive__acquire, struct mutex *, lock);
return 0;
} else
return __mutex_lock_interruptible_slowpath(lock);
ret = __mutex_fastpath_lock_retval(&lock->count);
if (likely(!ret)) {
mutex_set_owner(lock);
+ DTRACE_LOCKSTAT(adaptive__acquire, struct mutex *, lock);
return 0;
} else
return __mutex_lock_killable_slowpath(lock);
int ret;
ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
- if (ret)
+ if (ret) {
mutex_set_owner(lock);
+ DTRACE_LOCKSTAT(adaptive__acquire, struct mutex *, lock);
+ }
return ret;
}
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
+#include <linux/sdt.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>
*/
void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
{
+ u64 spinstart = 0, spinend, spintime;
+
/*
* Readers come here when they cannot get the lock without waiting
*/
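+ /* Time the reader slowpath when the rw__spin probe is enabled. */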
+ if (DTRACE_LOCKSTAT_ENABLED(rw__spin))
+ spinstart = dtrace_gethrtime_ns();
if (unlikely(in_interrupt())) {
/*
* Readers in interrupt context will get the lock immediately if the
* writer is just waiting (not holding the lock yet); otherwise they
* spin until the lock is available without waiting in the queue.
*/
rspin_until_writer_unlock(lock, cnts);
- return;
+ goto done;
}
atomic_sub(_QR_BIAS, &lock->cnts);
* Signal the next one in queue to become queue head
*/
arch_spin_unlock(&lock->lock);
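+ /* Both the in_interrupt() path and the queued path report spin time here. */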
+done:
+ if (DTRACE_LOCKSTAT_ENABLED(rw__spin) && spinstart) {
+ spinend = dtrace_gethrtime_ns();
+ spintime = spinend > spinstart ? spinend - spinstart : 0;
+ DTRACE_LOCKSTAT(rw__spin, rwlock_t *, lock, uint64_t, spintime,
+ int, DTRACE_LOCKSTAT_RW_READER);
+ }
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
void queued_write_lock_slowpath(struct qrwlock *lock)
{
u32 cnts;
+ u64 spinstart = 0, spinend, spintime;
/* Put the writer into the wait queue */
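+ /* Time the writer slowpath when the rw__spin probe is enabled. */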
+ if (DTRACE_LOCKSTAT_ENABLED(rw__spin))
+ spinstart = dtrace_gethrtime_ns();
arch_spin_lock(&lock->lock);
/* Try to acquire the lock directly if no reader is present */
}
unlock:
arch_spin_unlock(&lock->lock);
+ if (DTRACE_LOCKSTAT_ENABLED(rw__spin) && spinstart) {
+ spinend = dtrace_gethrtime_ns();
+ spintime = spinend > spinstart ? spinend - spinstart : 0;
+ DTRACE_LOCKSTAT(rw__spin, rwlock_t *, lock, uint64_t, spintime,
+ int, DTRACE_LOCKSTAT_RW_WRITER);
+ }
}
EXPORT_SYMBOL(queued_write_lock_slowpath);