]> www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
dtrace: cleanup (and adding) of SDT probe points
authorKris Van Hees <kris.van.hees@oracle.com>
Sun, 9 Sep 2012 20:56:15 +0000 (16:56 -0400)
committerNick Alcock <nick.alcock@oracle.com>
Mon, 29 Jun 2015 21:40:31 +0000 (22:40 +0100)
Changed io SDT probe points to be located at the buffer_head level rather than
the bio level.  This may need to be revisited depending on further analysis,
but doing it this way provides consistent semantics that were not guaranteed by
the previous bio-based placement.

Changed the sched SDT probes to not pass irrelevant arguments, and to pass
specific runqueue CPU information.  The CPU information is not available from
the task structure, so it needs to be passed explicitly.

Added proc SDT probes start and lwp-start.

Added proc SDT probes for signal-discard and signal-clear.

Corrected the argument to the exit proc SDT probe, which should indicate the
reason for the process termination (exit, killed, core dumped) rather than the
return code of the process.

Provided argument information for all the new (and changed) SDT probe points.
This depends on working xlator support in userspace.

Enabling of SDT probes now uses a generic dtrace_invop_(enable|disable) rather
than SDT-specific functions.

SDT probes are now destroyed correctly, to ensure that subsequent uses will not
result in unpleasant events.

Signed-off-by: Kris Van Hees <kris.van.hees@oracle.com>
block/bio.c
fs/buffer.c
kernel/exit.c
kernel/sched/core.c
kernel/signal.c

index b83b542cdc3ac2dfecdc2424ed58214c3a1a157c..f66a4eae16ee4a96c9469c7a9311de3437a923c5 100644 (file)
@@ -1765,10 +1765,6 @@ void bio_endio(struct bio *bio, int error)
                else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                        error = -EIO;
 
-               DTRACE_IO3(done, struct bio *, bio, struct block_device *,
-                          bio->bi_bdev_orig ? bio->bi_bdev_orig : bio->bi_bdev,
-                          void *, NULL);
-
                if (!atomic_dec_and_test(&bio->bi_remaining))
                        return;
 
index aaeec7d8ec0303f64c275d1aef459ae29822cc63..3842fbb96d78d688ebac029e1c28e89529b9942f 100644 (file)
@@ -116,11 +116,9 @@ EXPORT_SYMBOL(buffer_check_dirty_writeback);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-       DTRACE_IO3(wait__start, struct buffer_head *, bh,
-                  struct block_device *, bh->b_bdev, void *, NULL);
+       DTRACE_IO1(wait__start, struct buffer_head *, bh);
        wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
-       DTRACE_IO3(wait__done, struct buffer_head *, bh,
-                  struct block_device *, bh->b_bdev, void *, NULL);
+       DTRACE_IO1(wait__done, struct buffer_head *, bh);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
index 1f646d0dfb1cc8b4107ae0a95f9bd52bfc2ac1f6..7125b5184bcc6242bbef6100a7e1df56e73baade 100644 (file)
@@ -732,7 +732,7 @@ void do_exit(long code)
        taskstats_exit(tsk, group_dead);
 
        DTRACE_PROC(lwp__exit);
-       DTRACE_PROC1(exit, int, code);
+       DTRACE_PROC1(exit, int, code & 0x80 ? 3 : code & 0x7f ? 2 : 1);
 
        exit_mm(tsk);
 
index 3dedfe585f7d1461a498d0d9d4c84968128f6d96..50b513f89f4cffbcffc4cec65f8052fb27096941 100644 (file)
@@ -74,6 +74,7 @@
 #include <linux/binfmts.h>
 #include <linux/context_tracking.h>
 #include <linux/compiler.h>
+#include <linux/dtrace_cpu.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -804,8 +805,8 @@ static void set_load_weight(struct task_struct *p)
 
 static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
-       DTRACE_SCHED3(enqueue, struct task_struct *, p,
-                              struct task_struct *, p, void *, NULL);
+       DTRACE_SCHED2(enqueue, struct task_struct *, p,
+                              cpuinfo_t *, rq->dtrace_cpu_info);
        update_rq_clock(rq);
        sched_info_queued(rq, p);
        p->sched_class->enqueue_task(rq, p, flags);
@@ -813,8 +814,9 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
-       DTRACE_SCHED4(dequeue, struct task_struct *, p,
-                              struct task_struct *, p, void *, NULL, int, 0);
+       DTRACE_SCHED3(dequeue, struct task_struct *, p,
+                              cpuinfo_t *, rq->dtrace_cpu_info,
+                              int, 0);
        update_rq_clock(rq);
        sched_info_dequeued(rq, p);
        p->sched_class->dequeue_task(rq, p, flags);
@@ -1676,6 +1678,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        success = 1; /* we're going to change ->state */
        cpu = task_cpu(p);
 
+       DTRACE_SCHED1(wakeup, struct task_struct *, p);
+
        if (p->on_rq && ttwu_remote(p, wake_flags))
                goto stat;
 
@@ -1740,8 +1744,6 @@ static void try_to_wake_up_local(struct task_struct *p)
        if (!(p->state & TASK_NORMAL))
                goto out;
 
-       DTRACE_SCHED2(wakeup, struct task_struct *, p,
-                             struct task_struct *, p);
        if (!task_on_rq_queued(p))
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
@@ -2185,8 +2187,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
        trace_sched_switch(prev, next);
        sched_info_switch(rq, prev, next);
        perf_event_task_sched_out(prev, next);
-       DTRACE_SCHED2(off__cpu, struct task_struct *, next,
-                               struct task_struct *, next);
+       DTRACE_SCHED1(off__cpu, struct task_struct *, next);
        fire_sched_out_preempt_notifiers(prev, next);
        prepare_lock_switch(rq, next);
        prepare_arch_switch(next);
@@ -2300,6 +2301,9 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 
        if (current->set_child_tid)
                put_user(task_pid_vnr(current), current->set_child_tid);
+
+       DTRACE_PROC(start);
+       DTRACE_PROC(lwp__start);
 }
 
 /*
@@ -2766,31 +2770,35 @@ static void __sched __schedule(void)
        raw_spin_lock_irq(&rq->lock);
 
        rq->clock_skip_update <<= 1; /* promote REQ to ACT */
-       DTRACE_SCHED1(preempt, long, prev->state);
 
        switch_count = &prev->nivcsw;
-       if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-               if (unlikely(signal_pending_state(prev->state, prev))) {
-                       prev->state = TASK_RUNNING;
-               } else {
-                       deactivate_task(rq, prev, DEQUEUE_SLEEP);
-                       prev->on_rq = 0;
+       if (prev->state) {
+               if (!(preempt_count() & PREEMPT_ACTIVE)) {
+                       DTRACE_SCHED(sleep);
 
-                       /*
-                        * If a worker went to sleep, notify and ask workqueue
-                        * whether it wants to wake up a task to maintain
-                        * concurrency.
-                        */
-                       if (prev->flags & PF_WQ_WORKER) {
-                               struct task_struct *to_wakeup;
-
-                               to_wakeup = wq_worker_sleeping(prev, cpu);
-                               if (to_wakeup)
-                                       try_to_wake_up_local(to_wakeup);
+                       if (unlikely(signal_pending_state(prev->state, prev))) {
+                               prev->state = TASK_RUNNING;
+                       } else {
+                               deactivate_task(rq, prev, DEQUEUE_SLEEP);
+                               prev->on_rq = 0;
+       
+                               /*
+                                * If a worker went to sleep, notify and ask
+                                * workqueue whether it wants to wake up a task
+                                * to maintain concurrency.
+                                */
+                               if (prev->flags & PF_WQ_WORKER) {
+                                       struct task_struct *to_wakeup;
+       
+                                       to_wakeup = wq_worker_sleeping(prev, cpu);
+                                       if (to_wakeup)
+                                               try_to_wake_up_local(to_wakeup);
+                               }
                        }
+                       switch_count = &prev->nvcsw;
                }
-               switch_count = &prev->nvcsw;
-       }
+       } else
+               DTRACE_SCHED(preempt);
 
        if (task_on_rq_queued(prev))
                update_rq_clock(rq);
@@ -3117,8 +3125,7 @@ void set_user_nice(struct task_struct *p, long nice)
        p->prio = effective_prio(p);
        delta = p->prio - old_prio;
 
-       DTRACE_SCHED3(change__pri, struct task_struct *, p,
-                                  struct task_struct *, p, int, old_prio);
+       DTRACE_SCHED2(change__pri, struct task_struct *, p, int, old_prio);
        if (queued) {
                enqueue_task(rq, p, 0);
                /*
@@ -4223,8 +4230,7 @@ SYSCALL_DEFINE0(sched_yield)
        schedstat_inc(rq, yld_count);
        current->sched_class->yield_task(rq);
 
-       DTRACE_SCHED2(surrender, struct task_struct *, current,
-                                struct task_struct *, current);
+       DTRACE_SCHED1(surrender, struct task_struct *, current);
 
        /*
         * Since we are going to call schedule() anyway, there's
@@ -4374,8 +4380,7 @@ again:
 
        yielded = curr->sched_class->yield_to_task(rq, p, preempt);
        if (yielded) {
-               DTRACE_SCHED2(surrender, struct task_struct *, curr,
-                                        struct task_struct *, curr);
+               DTRACE_SCHED1(surrender, struct task_struct *, curr);
 
                schedstat_inc(rq, yld_count);
                /*
@@ -7241,6 +7246,10 @@ void __init sched_init(void)
 #endif
                init_rq_hrtick(rq);
                atomic_set(&rq->nr_iowait, 0);
+
+#ifdef CONFIG_DTRACE
+               rq->dtrace_cpu_info = per_cpu_info(i);
+#endif
        }
 
        set_load_weight(&init_task);
index 7186267e64867f084852cf7a89c5d68a4a756bb9..fa109bb027cdc5b3cb88cb8c847ada5a63fbebef 100644 (file)
@@ -1033,8 +1033,11 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 
        result = TRACE_SIGNAL_IGNORED;
        if (!prepare_signal(sig, t,
-                       from_ancestor_ns || (info == SEND_SIG_FORCED)))
+                           from_ancestor_ns || (info == SEND_SIG_FORCED))) {
+               DTRACE_PROC2(signal__discard, struct task_struct *, t,
+                            int, sig);
                goto ret;
+       }
 
        pending = group ? &t->signal->shared_pending : &t->pending;
        /*
@@ -2854,8 +2857,10 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
        }
        spin_unlock_irq(&tsk->sighand->siglock);
 
-       if (sig)
+       if (sig) {
+               DTRACE_PROC1(signal__clear, int, sig);
                return sig;
+       }
        return timeout ? -EINTR : -EAGAIN;
 }