*/
/*
 * __wait_on_buffer - sleep until a locked buffer's I/O completes.
 * @bh: the buffer_head to wait on.
 *
 * Blocks in TASK_UNINTERRUPTIBLE state until the BH_Lock bit clears
 * in bh->b_state (wait_on_bit_io).
 *
 * NOTE(review): the '+'-prefixed lines are patch additions, not plain
 * C — this span is a diff hunk.  They bracket the wait with DTrace
 * io-provider probes (wait__start / wait__done), passing the buffer
 * head and its backing block device (bh->b_bdev); the third probe
 * argument is unused here and passed as NULL.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
+ DTRACE_IO3(wait__start, struct buffer_head *, bh,
+ struct block_device *, bh->b_bdev, void *, NULL);
wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
+ DTRACE_IO3(wait__done, struct buffer_head *, bh,
+ struct block_device *, bh->b_bdev, void *, NULL);
}
EXPORT_SYMBOL(__wait_on_buffer);
if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
set_bit(BH_Quiet, &bh->b_state);
+ DTRACE_IO2(done, struct buffer_head *, bh, int, bio->bi_rw);
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
}
rw |= REQ_PRIO;
bio_get(bio);
+ DTRACE_IO2(start, struct buffer_head *, bh, int, rw);
submit_bio(rw, bio);
if (bio_flagged(bio, BIO_EOPNOTSUPP))
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
+ DTRACE_SCHED3(enqueue, struct task_struct *, p,
+ struct task_struct *, p, void *, NULL);
update_rq_clock(rq);
sched_info_queued(rq, p);
p->sched_class->enqueue_task(rq, p, flags);
static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
+ DTRACE_SCHED4(dequeue, struct task_struct *, p,
+ struct task_struct *, p, void *, NULL, int, 0);
update_rq_clock(rq);
sched_info_dequeued(rq, p);
p->sched_class->dequeue_task(rq, p, flags);
if (!(p->state & TASK_NORMAL))
goto out;
+ DTRACE_SCHED2(wakeup, struct task_struct *, p,
+ struct task_struct *, p);
if (!task_on_rq_queued(p))
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
raw_spin_lock_irq(&rq->lock);
rq->clock_skip_update <<= 1; /* promote REQ to ACT */
+ DTRACE_SCHED1(preempt, long, prev->state);
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
rq = context_switch(rq, prev, next); /* unlocks the rq */
cpu = cpu_of(rq);
- } else
+ } else {
+ DTRACE_SCHED(remain__cpu);
raw_spin_unlock_irq(&rq->lock);
+ }
post_schedule(rq);
p->prio = effective_prio(p);
delta = p->prio - old_prio;
+ DTRACE_SCHED3(change__pri, struct task_struct *, p,
+ struct task_struct *, p, int, old_prio);
if (queued) {
enqueue_task(rq, p, 0);
/*
schedstat_inc(rq, yld_count);
current->sched_class->yield_task(rq);
+ DTRACE_SCHED2(surrender, struct task_struct *, current,
+ struct task_struct *, current);
+
/*
* Since we are going to call schedule() anyway, there's
* no need to preempt or enable interrupts:
yielded = curr->sched_class->yield_to_task(rq, p, preempt);
if (yielded) {
+ DTRACE_SCHED2(surrender, struct task_struct *, curr,
+ struct task_struct *, curr);
+
schedstat_inc(rq, yld_count);
/*
* Make p's CPU reschedule; pick_next_entity takes care of