        rcu_read_unlock();
 }
 
+/* perf_data_size() is defined later on; declare it for perf_buffer_init(). */
+static unsigned long perf_data_size(struct perf_buffer *buffer);
+
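+/*
+ * Common set-up shared by both allocation paths below: clamp the wakeup
+ * watermark to the buffer size (defaulting to half the buffer when none
+ * is given), record whether the mapping is writable and take the
+ * initial reference.
+ */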
+static void
+perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
+{
+       long max_size = perf_data_size(buffer);
+
+       if (watermark)
+               buffer->watermark = min(max_size, watermark);
+
+       if (!buffer->watermark)
+               buffer->watermark = max_size / 2;
+
+       if (flags & PERF_BUFFER_WRITABLE)
+               buffer->writable = 1;
+
+       atomic_set(&buffer->refcount, 1);
+}
+
 #ifndef CONFIG_PERF_USE_VMALLOC
 
 /*
 }
 
 static struct perf_buffer *
-perf_buffer_alloc(struct perf_event *event, int nr_pages)
+perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
        struct perf_buffer *buffer;
        unsigned long size;
        if (!buffer)
                goto fail;
 
-       buffer->user_page = perf_mmap_alloc_page(event->cpu);
+       buffer->user_page = perf_mmap_alloc_page(cpu);
        if (!buffer->user_page)
                goto fail_user_page;
 
        for (i = 0; i < nr_pages; i++) {
-               buffer->data_pages[i] = perf_mmap_alloc_page(event->cpu);
+               buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
                if (!buffer->data_pages[i])
                        goto fail_data_pages;
        }
 
        buffer->nr_pages = nr_pages;
 
+       perf_buffer_init(buffer, watermark, flags);
+
        return buffer;
 
 fail_data_pages:
 }
 
 static struct perf_buffer *
-perf_buffer_alloc(struct perf_event *event, int nr_pages)
+perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
        struct perf_buffer *buffer;
        unsigned long size;
        buffer->page_order = ilog2(nr_pages);
        buffer->nr_pages = 1;
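+       /*
+        * The vmalloc variant backs the whole buffer with one virtually
+        * contiguous allocation: nr_pages stays 1 and page_order records
+        * the real data size.
+        */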
 
+       perf_buffer_init(buffer, watermark, flags);
+
        return buffer;
 
 fail_all_buf:
        return ret;
 }
 
-static void
-perf_buffer_init(struct perf_event *event, struct perf_buffer *buffer)
-{
-       long max_size = perf_data_size(buffer);
-
-       if (event->attr.watermark) {
-               buffer->watermark = min_t(long, max_size,
-                                       event->attr.wakeup_watermark);
-       }
-
-       if (!buffer->watermark)
-               buffer->watermark = max_size / 2;
-
-       atomic_set(&buffer->refcount, 1);
-       rcu_assign_pointer(event->buffer, buffer);
-}
-
 static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
 {
        struct perf_buffer *buffer;
        unsigned long vma_size;
        unsigned long nr_pages;
        long user_extra, extra;
-       int ret = 0;
+       int ret = 0, flags = 0;
 
        /*
         * Don't allow mmap() of inherited per-task counters. This would
 
        WARN_ON(event->buffer);
 
-       buffer = perf_buffer_alloc(event, nr_pages);
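+       /*
+        * A read-write mapping lets userspace update data_tail, so mark
+        * the buffer writable and let the output path honour it.
+        */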
+       if (vma->vm_flags & VM_WRITE)
+               flags |= PERF_BUFFER_WRITABLE;
+
+       buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
+                                  event->cpu, flags);
        if (!buffer) {
                ret = -ENOMEM;
                goto unlock;
        }
-
-       perf_buffer_init(event, buffer);
-       if (vma->vm_flags & VM_WRITE)
-               event->buffer->writable = 1;
+       rcu_assign_pointer(event->buffer, buffer);
 
        atomic_long_add(user_extra, &user->locked_vm);
        event->mmap_locked = extra;