return ret;
 }
 
-/**
- * ring_buffer_wait - wait for input to the ring buffer
- * @buffer: buffer to wait on
- * @cpu: the cpu buffer to wait on
- * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
- *
- * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
- * as data is added to any of the @buffer's cpu buffers. Otherwise
- * it will wait for data to be added to a specific cpu buffer.
- */
-int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
+static inline bool
+rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
+            int cpu, int full, ring_buffer_cond_fn cond, void *data)
 {
-       struct ring_buffer_per_cpu *cpu_buffer;
-       DEFINE_WAIT(wait);
-       struct rb_irq_work *work;
-       int ret = 0;
-
-       /*
-        * Depending on what the caller is waiting for, either any
-        * data in any cpu buffer, or a specific buffer, put the
-        * caller on the appropriate wait queue.
-        */
-       if (cpu == RING_BUFFER_ALL_CPUS) {
-               work = &buffer->irq_work;
-               /* Full only makes sense on per cpu reads */
-               full = 0;
-       } else {
-               if (!cpumask_test_cpu(cpu, buffer->cpumask))
-                       return -ENODEV;
-               cpu_buffer = buffer->buffers[cpu];
-               work = &cpu_buffer->irq_work;
-       }
+       if (rb_watermark_hit(buffer, cpu, full))
+               return true;
 
-       if (full)
-               prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
-       else
-               prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+       if (cond(data))
+               return true;
 
        /*
         * The events can happen in critical sections where
         * a task has been queued. It's OK for spurious wake ups.
         */
        if (full)
-               work->full_waiters_pending = true;
+               rbwork->full_waiters_pending = true;
        else
-               work->waiters_pending = true;
+               rbwork->waiters_pending = true;
 
-       if (rb_watermark_hit(buffer, cpu, full))
-               goto out;
+       return false;
+}
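
In this patch the cond/data pair handed to rb_wait_cond() is always rb_wait_once() (see below), but the helper already spells out the contract any condition must meet: it may be called several times per wait, it runs with the task already in an interruptible sleep state, and it must not sleep itself. A minimal sketch of what a caller-supplied condition could look like (my_wait_data and my_buffer_cond are hypothetical names, not part of the patch):

        /* Hypothetical: wake the waiter once a producer sets a flag. */
        struct my_wait_data {
                atomic_t done;
        };

        static bool my_buffer_cond(void *data)
        {
                struct my_wait_data *wdata = data;

                /* May run with the task already queued; must not sleep. */
                return atomic_read(&wdata->done) != 0;
        }
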
 
-       if (signal_pending(current)) {
-               ret = -EINTR;
-               goto out;
+/*
+ * The default wait condition for ring_buffer_wait() is just to exit the
+ * wait loop the first time it is woken up.
+ */
+static bool rb_wait_once(void *data)
+{
+       long *once = data;
+
+       /* wait_event() actually calls this twice before scheduling */
+       if (*once > 1)
+               return true;
+
+       (*once)++;
+       return false;
+}
+
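
The `*once > 1` threshold matches how wait_event_interruptible() is put together: the macro evaluates its condition once as a lockless fast path and once more after the task has been queued on the wait queue, so the condition really is called twice before the first schedule(). Roughly, with signal handling and cleanup elided, the expansion behaves like this sketch:

        if (condition)                  /* fast-path check: call #1 */
                return 0;
        for (;;) {
                prepare_to_wait_event(&wq_head, &entry, TASK_INTERRUPTIBLE);
                if (condition)          /* re-check after queuing: call #2 */
                        break;
                schedule();             /* the task only sleeps here */
        }

Allowing two false returns therefore lets rb_wait_once() pass through schedule() exactly once, returning true on the re-check that follows the first wake-up.
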
+/**
+ * ring_buffer_wait - wait for input to the ring buffer
+ * @buffer: buffer to wait on
+ * @cpu: the cpu buffer to wait on
+ * @full: wait until the percentage of pages is available, if @cpu != RING_BUFFER_ALL_CPUS
+ *
+ * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
+ * as data is added to any of the @buffer's cpu buffers. Otherwise
+ * it will wait for data to be added to a specific cpu buffer.
+ */
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
+{
+       struct ring_buffer_per_cpu *cpu_buffer;
+       struct wait_queue_head *waitq;
+       ring_buffer_cond_fn cond;
+       struct rb_irq_work *rbwork;
+       void *data;
+       long once = 0;
+       int ret = 0;
+
+       cond = rb_wait_once;
+       data = &once;
+
+       /*
+        * Depending on what the caller is waiting for, either any
+        * data in any cpu buffer, or a specific buffer, put the
+        * caller on the appropriate wait queue.
+        */
+       if (cpu == RING_BUFFER_ALL_CPUS) {
+               rbwork = &buffer->irq_work;
+               /* Full only makes sense on per cpu reads */
+               full = 0;
+       } else {
+               if (!cpumask_test_cpu(cpu, buffer->cpumask))
+                       return -ENODEV;
+               cpu_buffer = buffer->buffers[cpu];
+               rbwork = &cpu_buffer->irq_work;
        }
 
-       schedule();
- out:
        if (full)
-               finish_wait(&work->full_waiters, &wait);
+               waitq = &rbwork->full_waiters;
        else
-               finish_wait(&work->waiters, &wait);
+               waitq = &rbwork->waiters;
 
-       if (!ret && !rb_watermark_hit(buffer, cpu, full) && signal_pending(current))
-               ret = -EINTR;
+       ret = wait_event_interruptible((*waitq),
+                               rb_wait_cond(rbwork, buffer, cpu, full, cond, data));
 
        return ret;
 }
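
From a caller's perspective the contract keeps its shape: 0 on a wake-up that satisfies the condition, -ENODEV for a cpu that is not in the buffer's cpumask, and a negative error if a signal arrives while waiting (wait_event_interruptible() reports that as -ERESTARTSYS). A hypothetical caller sketch, with my_read_wait purely illustrative:

        static int my_read_wait(struct trace_buffer *buffer)
        {
                int ret;

                /* full == 0: return as soon as cpu 0 has any data */
                ret = ring_buffer_wait(buffer, 0, 0);
                if (ret)
                        return ret;     /* signal or invalid cpu */

                /* ... consume events from the buffer ... */
                return 0;
        }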