})
 
 typedef bool (*ring_buffer_cond_fn)(void *data);
-int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
+                    ring_buffer_cond_fn cond, void *data);
 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
                          struct file *filp, poll_table *poll_table, int full);
 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
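
The new signature lets a caller supply its own wakeup predicate: @cond is
invoked with @data on every wakeup, and ring_buffer_wait() returns once it
yields true. Passing NULL keeps the old one-shot behavior. A minimal
caller-side sketch (my_reader_state and my_reader_done are hypothetical
names for illustration, not part of this patch):

	/* Hypothetical per-reader state handed to the predicate */
	struct my_reader_state {
		bool shutting_down;
	};

	/* Predicate: returning true breaks ring_buffer_wait() out of its sleep */
	static bool my_reader_done(void *data)
	{
		struct my_reader_state *state = data;

		return READ_ONCE(state->shutting_down);
	}

	...
	/* Sleep until any per-CPU buffer has data or the reader is told to stop */
	ret = ring_buffer_wait(buffer, RING_BUFFER_ALL_CPUS, 0,
			       my_reader_done, &state);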
 
  * @buffer: buffer to wait on
  * @cpu: the cpu buffer to wait on
  * @full: wait until this percentage of the buffer's pages hold data, if @cpu != RING_BUFFER_ALL_CPUS
+ * @cond: condition function to break out of wait (NULL to run once)
+ * @data: the data to pass to @cond.
  *
  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
+                    ring_buffer_cond_fn cond, void *data)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct wait_queue_head *waitq;
-       ring_buffer_cond_fn cond;
        struct rb_irq_work *rbwork;
-       void *data;
        long once = 0;
        int ret = 0;
 
-       cond = rb_wait_once;
-       data = &once;
+       if (!cond) {
+               cond = rb_wait_once;
+               data = &once;
+       }
 
        /*
         * Depending on what the caller is waiting for, either any
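
With @cond == NULL the wait falls back to rb_wait_once and the stack-local
`once` counter, preserving the old semantics of sleeping through exactly one
wakeup. The helper's body is not shown in this hunk; it looks roughly like
the following (a sketch of rb_wait_once as defined earlier in ring_buffer.c):

	static bool rb_wait_once(void *data)
	{
		long *once = data;

		/* wait_event() evaluates its condition twice before scheduling */
		if (*once > 1)
			return true;

		(*once)++;
		return false;
	}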
 
 
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
+struct pipe_wait {
+       struct trace_iterator           *iter;
+       int                             wait_index;
+};
+
+static bool wait_pipe_cond(void *data)
+{
+       struct pipe_wait *pwait = data;
+       struct trace_iterator *iter = pwait->iter;
+
+       if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
+               return true;
+
+       return iter->closed;
+}
+
 static int wait_on_pipe(struct trace_iterator *iter, int full)
 {
+       struct pipe_wait pwait;
        int ret;
 
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return 0;
 
-       ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full);
+       pwait.wait_index = atomic_read_acquire(&iter->wait_index);
+       pwait.iter = iter;
+
+       ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
+                              wait_pipe_cond, &pwait);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
        /*
        struct ftrace_buffer_info *info = file->private_data;
        struct trace_iterator *iter = &info->iter;
 
-       iter->wait_index++;
+       iter->closed = true;
        /* Make sure the waiters see the new wait_index */
-       smp_wmb();
+       (void)atomic_fetch_inc_release(&iter->wait_index);
 
        ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
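
The wait_index++/smp_wmb() pair collapses into one atomic_fetch_inc_release().
Release ordering publishes the iter->closed store before the counter moves,
and it pairs with the atomic_read_acquire() calls in wait_on_pipe() and
wait_pipe_cond(): a reader snapshots wait_index before sleeping, re-checks it
on every wakeup, and breaks out for good once the value changed or closed is
set. The two sides of the handshake, condensed from the hunks above:

	/* Reader (wait_on_pipe / wait_pipe_cond), condensed: */
	pwait.wait_index = atomic_read_acquire(&iter->wait_index);
	/* ... sleeps; each wakeup re-evaluates the condition: */
	if (atomic_read_acquire(&iter->wait_index) != pwait.wait_index)
		return true;		/* writer bumped the index: stop waiting */
	return iter->closed;

	/* Writer (flush/close), condensed: */
	iter->closed = true;
	(void)atomic_fetch_inc_release(&iter->wait_index);	/* orders the store above */
	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);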
 
                .spd_release    = buffer_spd_release,
        };
        struct buffer_ref *ref;
+       bool woken = false;
        int page_size;
        int entries, i;
        ssize_t ret = 0;
 
        /* did we read anything? */
        if (!spd.nr_pages) {
-               long wait_index;
 
                if (ret)
                        goto out;
 
+               if (woken)
+                       goto out;
+
                ret = -EAGAIN;
                if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
                        goto out;
 
-               wait_index = READ_ONCE(iter->wait_index);
-
                ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
                if (ret)
                        goto out;
                if (!tracer_tracing_is_on(iter->tr))
                        goto out;
 
-               /* Make sure we see the new wait_index */
-               smp_rmb();
-               if (wait_index != iter->wait_index)
-                       goto out;
+               /* Iterate one more time to collect any new data then exit */
+               woken = true;
 
                goto again;
        }
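
Rather than snapshotting wait_index and aborting when it changed, the splice
path now records that it was woken and makes one last pass, so data that
raced in just before the wakeup still gets spliced out before returning.
Condensed control flow after this change (paraphrasing the hunk above):

	again:
		/* ... collect ready pages into spd ... */
		if (!spd.nr_pages) {
			if (ret || woken)
				goto out;	/* error, or the final pass is done */
			/* non-blocking callers bail out with -EAGAIN here */
			ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
			if (ret || !tracer_tracing_is_on(iter->tr))
				goto out;
			woken = true;		/* drain once more, then exit */
			goto again;
		}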
 
        mutex_lock(&trace_types_lock);
 
-       iter->wait_index++;
        /* Make sure the waiters see the new wait_index */
-       smp_wmb();
+       (void)atomic_fetch_inc_release(&iter->wait_index);
 
        ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
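
The ioctl path gets the same release-ordered increment but deliberately does
not set iter->closed: bumping wait_index flips wait_pipe_cond() to true for
every task currently sleeping, kicking blocked readers out of their wait
while leaving the file usable for subsequent reads. Side by side (names as
in the hunks above):

	/* close/flush: wake readers and tell them the file is going away */
	iter->closed = true;
	(void)atomic_fetch_inc_release(&iter->wait_index);

	/* ioctl: wake current sleepers only; later waits may block again */
	(void)atomic_fetch_inc_release(&iter->wait_index);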