dtrace: new IO and sched provider probes
Author:     Kris Van Hees <kris.van.hees@oracle.com>
AuthorDate: Mon, 6 Aug 2012 09:01:08 +0000 (05:01 -0400)
Commit:     Nick Alcock <nick.alcock@oracle.com>
CommitDate: Mon, 29 Jun 2015 21:40:28 +0000 (22:40 +0100)
New IO provider probes: start, done, wait-start, wait-done
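
These fire from the generic block-layer and buffer-cache paths touched by the
hunks below. As a purely illustrative sketch (not part of this commit, and
assuming a dtrace(1) userland that knows about the Linux io provider), the new
probes could be consumed from D roughly like this:

    /* Count issued I/Os per process and sum buffer-wait time. */
    io:::start
    {
            @issued[execname] = count();
    }

    io:::wait-start
    {
            self->ts = timestamp;
    }

    io:::wait-done
    /self->ts/
    {
            @waitns[execname] = sum(timestamp - self->ts);
            self->ts = 0;
    }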

New sched provider probes: enqueue, dequeue, wakeup, preempt, remain-cpu,
change-pri, surrender

(Note that the preempt probe currently passes a debugging argument that
will be removed in the future to match the argument-less version in the
documentation.)
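
As with the io probes above, a short illustrative (and untested) D sketch,
assuming the probe names map exactly as listed:

    /* Tally enqueues, preemptions, and voluntary CPU surrenders. */
    sched:::enqueue
    {
            @enq[execname] = count();
    }

    sched:::preempt
    {
            @pre[cpu] = count();
    }

    sched:::surrender
    {
            @yield[execname] = count();
    }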

Signed-off-by: Kris Van Hees <kris.van.hees@oracle.com>
block/bio.c
block/blk-core.c
fs/buffer.c
include/linux/blk_types.h
kernel/sched/core.c

diff --git a/block/bio.c b/block/bio.c
index f66a4eae16ee4a96c9469c7a9311de3437a923c5..b83b542cdc3ac2dfecdc2424ed58214c3a1a157c 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1765,6 +1765,10 @@ void bio_endio(struct bio *bio, int error)
                else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                        error = -EIO;
 
+               DTRACE_IO3(done, struct bio *, bio, struct block_device *,
+                          bio->bi_bdev_orig ? bio->bi_bdev_orig : bio->bi_bdev,
+                          void *, NULL);
+
                if (!atomic_dec_and_test(&bio->bi_remaining))
                        return;
 
diff --git a/block/blk-core.c b/block/blk-core.c
index 03b5f8d77f37b4cbad3a12f3a98f9c3ea63a50e7..8825a0c28db894693a08926210495966a1f49c2f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1699,6 +1699,9 @@ static inline void blk_partition_remap(struct bio *bio)
        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
 
+#ifdef CONFIG_DTRACE
+               bio->bi_bdev_orig = bdev;
+#endif
                bio->bi_iter.bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;
 
diff --git a/fs/buffer.c b/fs/buffer.c
index c7a5602d01eed200912d3a90ca4ac6780209cb6f..aaeec7d8ec0303f64c275d1aef459ae29822cc63 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -116,7 +116,11 @@ EXPORT_SYMBOL(buffer_check_dirty_writeback);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
+       DTRACE_IO3(wait__start, struct buffer_head *, bh,
+                  struct block_device *, bh->b_bdev, void *, NULL);
        wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
+       DTRACE_IO3(wait__done, struct buffer_head *, bh,
+                  struct block_device *, bh->b_bdev, void *, NULL);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
@@ -2945,6 +2949,7 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
        if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
                set_bit(BH_Quiet, &bh->b_state);
 
+       DTRACE_IO2(done, struct buffer_head *, bh, int, bio->bi_rw);
        bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
        bio_put(bio);
 }
@@ -3042,6 +3047,7 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
                rw |= REQ_PRIO;
 
        bio_get(bio);
+       DTRACE_IO2(start, struct buffer_head *, bh, int, rw);
        submit_bio(rw, bio);
 
        if (bio_flagged(bio, BIO_EOPNOTSUPP))
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index b7299febc4b4adfee00cb8b05d6fbf6558f01547..46354fb3069010bebc83f17da8a9f2e77757f35d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -45,7 +45,10 @@ struct bvec_iter {
  */
 struct bio {
        struct bio              *bi_next;       /* request queue link */
-       struct block_device     *bi_bdev;
+       struct block_device     *bi_bdev;       /* bdev (possibly remapped) */
+#ifdef CONFIG_DTRACE
+       struct block_device     *bi_bdev_orig;  /* bdev before remapping */
+#endif
        unsigned long           bi_flags;       /* status, command, etc */
        unsigned long           bi_rw;          /* bottom bits READ/WRITE,
                                                 * top bits priority
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5c97598a45e85fa4ce6baaccb9106a686778800f..3dedfe585f7d1461a498d0d9d4c84968128f6d96 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -804,6 +804,8 @@ static void set_load_weight(struct task_struct *p)
 
 static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
+       DTRACE_SCHED3(enqueue, struct task_struct *, p,
+                              struct task_struct *, p, void *, NULL);
        update_rq_clock(rq);
        sched_info_queued(rq, p);
        p->sched_class->enqueue_task(rq, p, flags);
@@ -811,6 +813,8 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
+       DTRACE_SCHED4(dequeue, struct task_struct *, p,
+                              struct task_struct *, p, void *, NULL, int, 0);
        update_rq_clock(rq);
        sched_info_dequeued(rq, p);
        p->sched_class->dequeue_task(rq, p, flags);
@@ -1736,6 +1740,8 @@ static void try_to_wake_up_local(struct task_struct *p)
        if (!(p->state & TASK_NORMAL))
                goto out;
 
+       DTRACE_SCHED2(wakeup, struct task_struct *, p,
+                             struct task_struct *, p);
        if (!task_on_rq_queued(p))
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
@@ -2760,6 +2766,7 @@ static void __sched __schedule(void)
        raw_spin_lock_irq(&rq->lock);
 
        rq->clock_skip_update <<= 1; /* promote REQ to ACT */
+       DTRACE_SCHED1(preempt, long, prev->state);
 
        switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
@@ -2800,8 +2807,10 @@ static void __sched __schedule(void)
 
                rq = context_switch(rq, prev, next); /* unlocks the rq */
                cpu = cpu_of(rq);
-       } else
+       } else {
+               DTRACE_SCHED(remain__cpu);
                raw_spin_unlock_irq(&rq->lock);
+       }
 
        post_schedule(rq);
 
@@ -3108,6 +3117,8 @@ void set_user_nice(struct task_struct *p, long nice)
        p->prio = effective_prio(p);
        delta = p->prio - old_prio;
 
+       DTRACE_SCHED3(change__pri, struct task_struct *, p,
+                                  struct task_struct *, p, int, old_prio);
        if (queued) {
                enqueue_task(rq, p, 0);
                /*
@@ -4212,6 +4223,9 @@ SYSCALL_DEFINE0(sched_yield)
        schedstat_inc(rq, yld_count);
        current->sched_class->yield_task(rq);
 
+       DTRACE_SCHED2(surrender, struct task_struct *, current,
+                                struct task_struct *, current);
+
        /*
         * Since we are going to call schedule() anyway, there's
         * no need to preempt or enable interrupts:
@@ -4360,6 +4374,9 @@ again:
 
        yielded = curr->sched_class->yield_to_task(rq, p, preempt);
        if (yielded) {
+               DTRACE_SCHED2(surrender, struct task_struct *, curr,
+                                        struct task_struct *, curr);
+
                schedstat_inc(rq, yld_count);
                /*
                 * Make p's CPU reschedule; pick_next_entity takes care of