void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
+void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
 int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
                          size_t len, int cpu, int full);
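
With this change, a read page is tied to the CPU buffer it was taken from:
callers must pass the same cpu to ring_buffer_free_read_page() that they gave
to ring_buffer_alloc_read_page(). A minimal caller sketch against the updated
API (buffer and cpu stand in for the caller's own values):

        void *page = ring_buffer_alloc_read_page(buffer, cpu);

        if (page) {
                /* copy events out with ring_buffer_read_page() ... */
                ring_buffer_free_read_page(buffer, cpu, page);
        }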
 
 
        raw_spinlock_t                  reader_lock;    /* serialize readers */
        arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
+       struct buffer_data_page         *free_page;
        unsigned long                   nr_pages;
        unsigned int                    current_context;
        struct list_head                *pages;
  */
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 {
-       struct buffer_data_page *bpage;
+       struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+       struct buffer_data_page *bpage = NULL;
+       unsigned long flags;
        struct page *page;
 
+       local_irq_save(flags);
+       arch_spin_lock(&cpu_buffer->lock);
+
+       if (cpu_buffer->free_page) {
+               bpage = cpu_buffer->free_page;
+               cpu_buffer->free_page = NULL;
+       }
+
+       arch_spin_unlock(&cpu_buffer->lock);
+       local_irq_restore(flags);
+
+       if (bpage)
+               goto out;
+
        page = alloc_pages_node(cpu_to_node(cpu),
                                GFP_KERNEL | __GFP_NORETRY, 0);
        if (!page)
                return NULL;
 
        bpage = page_address(page);
 
+ out:
        rb_init_page(bpage);
 
        return bpage;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

 /**
  * ring_buffer_free_read_page - free an allocated read page
  * @buffer: the buffer the page was allocated for
+ * @cpu: the cpu buffer the page came from
  * @data: the page to free
  *
  * Free a page allocated from ring_buffer_alloc_read_page.
  */
-void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
+void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
 {
-       free_page((unsigned long)data);
+       struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+       struct buffer_data_page *bpage = data;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       arch_spin_lock(&cpu_buffer->lock);
+
+       if (!cpu_buffer->free_page) {
+               cpu_buffer->free_page = bpage;
+               bpage = NULL;
+       }
+
+       arch_spin_unlock(&cpu_buffer->lock);
+       local_irq_restore(flags);
+
+       free_page((unsigned long)bpage);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
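
Taken together, the two functions implement a one-entry per-cpu page cache:
free parks the page in free_page when the slot is empty, and alloc reuses it
before falling back to alloc_pages_node(). The same single-slot pattern as a
standalone userspace sketch (hypothetical names; a pthread mutex stands in
for the arch spinlock with interrupts disabled):

        #include <pthread.h>
        #include <stdlib.h>

        struct page_cache {
                pthread_mutex_t lock;
                void *free_page;                /* at most one cached page */
        };

        static void *cache_alloc(struct page_cache *c, size_t size)
        {
                void *p = NULL;

                pthread_mutex_lock(&c->lock);
                if (c->free_page) {             /* reuse the parked page */
                        p = c->free_page;
                        c->free_page = NULL;
                }
                pthread_mutex_unlock(&c->lock);

                return p ? p : malloc(size);    /* slot empty: hit the allocator */
        }

        static void cache_free(struct page_cache *c, void *p)
        {
                pthread_mutex_lock(&c->lock);
                if (!c->free_page) {            /* park the page if the slot is empty */
                        c->free_page = p;
                        p = NULL;
                }
                pthread_mutex_unlock(&c->lock);

                free(p);                        /* free(NULL) is a no-op */
        }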
 
 
 struct ftrace_buffer_info {
        struct trace_iterator   iter;
        void                    *spare;
+       unsigned int            spare_cpu;
        unsigned int            read;
 };
 
                return -EBUSY;
 #endif
 
-       if (!info->spare)
+       if (!info->spare) {
                info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
                                                          iter->cpu_file);
+               info->spare_cpu = iter->cpu_file;
+       }
        if (!info->spare)
                return -ENOMEM;
 
        __trace_array_put(iter->tr);
 
        if (info->spare)
-               ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
+               ring_buffer_free_read_page(iter->trace_buffer->buffer,
+                                          info->spare_cpu, info->spare);
        kfree(info);
 
        mutex_unlock(&trace_types_lock);
 struct buffer_ref {
        struct ring_buffer      *buffer;
        void                    *page;
+       int                     cpu;
        int                     ref;
 };
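
The new cpu field makes each splice reference self-describing: the release
callbacks below need nothing beyond the ref itself to hand the page back to
the right per-cpu cache. Condensed from the surrounding hunks, the lifecycle
is:

        ref->buffer = iter->trace_buffer->buffer;
        ref->page   = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
        ref->cpu    = iter->cpu_file;   /* remember the owning CPU */
        ...
        if (!--ref->ref) {              /* last reference dropped */
                ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
                kfree(ref);
        }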
 
        if (--ref->ref)
                return;
 
-       ring_buffer_free_read_page(ref->buffer, ref->page);
+       ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
        kfree(ref);
        buf->private = 0;
 }
        if (--ref->ref)
                return;
 
-       ring_buffer_free_read_page(ref->buffer, ref->page);
+       ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
        kfree(ref);
        spd->partial[i].private = 0;
 }
                        kfree(ref);
                        break;
                }
+               ref->cpu = iter->cpu_file;
 
                r = ring_buffer_read_page(ref->buffer, &ref->page,
                                          len, iter->cpu_file, 1);
                if (r < 0) {
-                       ring_buffer_free_read_page(ref->buffer, ref->page);
+                       ring_buffer_free_read_page(ref->buffer, ref->cpu,
+                                                  ref->page);
                        kfree(ref);
                        break;
                }