With all the other work done, we can finally move the SDT probe
type information out of the static sdt_args array in the SDT kernel
module and put it in the probe definitions it relates to.

Most of it is already there -- all probes already carry type
information. All that is missing is the translations, so we add those.
The syntax for these is

    type: translation

or

    type : (translation, ...)

i.e. a source type, followed by a colon, followed by zero or more
translated types (if there is more than one, they must be enclosed in
parentheses), with optional spaces throughout. The resulting probe
will have as many arguments as there are translations. Obviously, if
DTrace userspace is to do anything with this knowledge, it will need a
translator from the specified source type to each of the specified
translated types: DTrace picks these up from kernel-versioned
subdirectories of /usr/lib64/dtrace/ (e.g. /usr/lib64/dtrace/4.1).
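
For illustration (this mirrors the hunks below), a probe that used to
declare only its source type, e.g.

    DTRACE_PROC1(create, struct task_struct *, p);

now carries its translation(s) inline:

    DTRACE_PROC1(create, struct task_struct * : psinfo_t *, p);
    DTRACE_PROC1(lwp__create,
                 struct task_struct * : (lwpsinfo_t *, psinfo_t *), p);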
If a probe appears more than once (say, in different functions), each
instance must presently have the same types. Changing translations
between instances of a single probe will be diagnosed if CONFIG_DT_DEBUG
is enabled.
Typos in the types are diagnosed only at runtime: a dtrace -vln of
your new probe will tell you if all is well.
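
For example, assuming the io probes modified below, a listing along
the lines of

    # dtrace -vln io:::wait-start

should show the probe together with its translated argument types if
the translations were found and applied.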
Signed-off-by: Nick Alcock <nick.alcock@oracle.com>
Acked-by: Kris Van Hees <kris.van.hees@oracle.com>
Orabug: 24661801
*/
void __wait_on_buffer(struct buffer_head * bh)
{
- DTRACE_IO1(wait__start, struct buffer_head *, bh);
+ DTRACE_IO1(wait__start, struct buffer_head * : (bufinfo_t *, devinfo_t *, fileinfo_t *), bh);
wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
- DTRACE_IO1(wait__done, struct buffer_head *, bh);
+ DTRACE_IO1(wait__done, struct buffer_head * : (bufinfo_t *, devinfo_t *, fileinfo_t *), bh);
}
EXPORT_SYMBOL(__wait_on_buffer);
if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
set_bit(BH_Quiet, &bh->b_state);
- DTRACE_IO2(done, struct buffer_head *, bh, int, bio->bi_rw);
+ DTRACE_IO2(done, struct buffer_head * : (bufinfo_t *, devinfo_t *, fileinfo_t *), bh, int, bio->bi_rw);
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
}
rw |= REQ_PRIO;
bio_get(bio);
- DTRACE_IO2(start, struct buffer_head *, bh, int, rw);
+ DTRACE_IO2(start, struct buffer_head * : (bufinfo_t *, devinfo_t *,
+ fileinfo_t *), bh, int, rw);
submit_bio(rw, bio);
if (bio_flagged(bio, BIO_EOPNOTSUPP))
}
put_pid(pid);
- DTRACE_PROC1(lwp__create, struct task_struct *, p);
- DTRACE_PROC1(create, struct task_struct *, p);
+ DTRACE_PROC1(lwp__create, struct task_struct * : (lwpsinfo_t *, psinfo_t *), p);
+ DTRACE_PROC1(create, struct task_struct * : psinfo_t *, p);
} else {
nr = PTR_ERR(p);
}
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
- DTRACE_SCHED2(enqueue, struct task_struct *, p,
+ DTRACE_SCHED2(enqueue, struct task_struct * : (lwpsinfo_t *,
+ psinfo_t *), p,
cpuinfo_t *, rq->dtrace_cpu_info);
update_rq_clock(rq);
sched_info_queued(rq, p);
static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
- DTRACE_SCHED3(dequeue, struct task_struct *, p,
+ DTRACE_SCHED3(dequeue, struct task_struct * : (lwpsinfo_t *,
+ psinfo_t *), p,
cpuinfo_t *, rq->dtrace_cpu_info,
int, 0);
update_rq_clock(rq);
success = 1; /* we're going to change ->state */
cpu = task_cpu(p);
- DTRACE_SCHED1(wakeup, struct task_struct *, p);
+ DTRACE_SCHED1(wakeup, struct task_struct * : (lwpsinfo_t *,
+ psinfo_t *), p);
if (p->on_rq && ttwu_remote(p, wake_flags))
goto stat;
trace_sched_switch(prev, next);
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
- DTRACE_SCHED1(off__cpu, struct task_struct *, next);
+ DTRACE_SCHED1(off__cpu, struct task_struct * : (lwpsinfo_t *,
+ psinfo_t *), next);
fire_sched_out_preempt_notifiers(prev, next);
prepare_lock_switch(rq, next);
prepare_arch_switch(next);
p->prio = effective_prio(p);
delta = p->prio - old_prio;
- DTRACE_SCHED2(change__pri, struct task_struct *, p, int, old_prio);
+ DTRACE_SCHED2(change__pri, struct task_struct * : (lwpsinfo_t *,
+ psinfo_t *), p,
+ int, old_prio);
if (queued) {
enqueue_task(rq, p, 0);
/*
do_raw_spin_unlock(&rq->lock);
sched_preempt_enable_no_resched();
- DTRACE_SCHED1(surrender, struct task_struct *, current);
+ DTRACE_SCHED1(surrender,
+ struct task_struct * : (lwpsinfo_t *, psinfo_t *),
+ current);
schedule();
return 0;
local_irq_restore(flags);
if (yielded > 0) {
- DTRACE_SCHED1(surrender, struct task_struct *, curr);
+ DTRACE_SCHED1(surrender,
+ struct task_struct * : (lwpsinfo_t *, psinfo_t *),
+ curr);
schedule();
}
result = TRACE_SIGNAL_IGNORED;
if (!prepare_signal(sig, t,
from_ancestor_ns || (info == SEND_SIG_FORCED))) {
- DTRACE_PROC2(signal__discard, struct task_struct *, t,
+ DTRACE_PROC2(signal__discard,
+ struct task_struct * : (lwpsinfo_t *, psinfo_t *), t,
int, sig);
goto ret;
}
signalfd_notify(t, sig);
sigaddset(&pending->signal, sig);
complete_signal(sig, t, group);
- DTRACE_PROC2(signal__send, struct task_struct *, t, int, sig);
+ DTRACE_PROC2(signal__send,
+ struct task_struct * : (lwpsinfo_t *, psinfo_t *), t,
+ int, sig);
ret:
trace_signal_generate(sig, info, t, group, result);
return ret;
list_add_tail(&q->list, &pending->list);
sigaddset(&pending->signal, sig);
complete_signal(sig, t, group);
- DTRACE_PROC2(signal__send, struct task_struct *, t, int, sig);
+ DTRACE_PROC2(signal__send,
+ struct task_struct * : (lwpsinfo_t *, psinfo_t *), t,
+ int, sig);
result = TRACE_SIGNAL_DELIVERED;
out:
trace_signal_generate(sig, &q->info, t, group, result);
{
struct task_struct *p = current;
- DTRACE_SCHED1(tick, struct task_struct *, p);
+ DTRACE_SCHED1(tick, struct task_struct * : (lwpsinfo_t *, psinfo_t *),
+ p);
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);