        spinlock_t                      reader_lock; /* serialize readers */
        raw_spinlock_t                  lock;
        struct lock_class_key           lock_key;
-       struct list_head                pages;
+       struct list_head                *pages;
        struct buffer_page              *head_page;     /* read from head */
        struct buffer_page              *tail_page;     /* write to tail */
        struct buffer_page              *commit_page;   /* committed pages */
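
This hunk is the heart of the change: `pages` stops being an embedded `struct list_head` sentinel and becomes a bare pointer into the ring, so every node on the page list is a real `struct buffer_page`. Roughly, the resulting shape looks like this user-space sketch (mock stand-ins for the kernel's <linux/list.h> types, not the kernel code itself):

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-ins for the kernel's list types; illustrative only. */
    struct list_head { struct list_head *next, *prev; };
    struct buffer_page { struct list_head list; int id; };

    #define page_of(ptr) \
            ((struct buffer_page *)((char *)(ptr) - offsetof(struct buffer_page, list)))

    int main(void)
    {
            struct buffer_page a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };
            struct list_head *pages, *p;

            /* Headless ring: every node is a real page, no sentinel. */
            a.list.next = &b.list; b.list.next = &c.list; c.list.next = &a.list;
            a.list.prev = &c.list; b.list.prev = &a.list; c.list.prev = &b.list;

            pages = &a.list;        /* plays the role of cpu_buffer->pages */

            p = pages;
            do {                    /* walking never skips over a head */
                    printf("page %d\n", page_of(p)->id);
                    p = p->next;
            } while (p != pages);
            return 0;
    }
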
  */
 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       struct list_head *head = &cpu_buffer->pages;
+       struct list_head *head = cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;
 
        if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
                             unsigned nr_pages)
 {
-       struct list_head *head = &cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;
        unsigned long addr;
        LIST_HEAD(pages);
        unsigned i;
 
+       WARN_ON(!nr_pages);
+
        for (i = 0; i < nr_pages; i++) {
                bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                                    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
                rb_init_page(bpage->page);
        }
 
-       list_splice(&pages, head);
+       /*
+        * The ring buffer page list is a circular list that does not
+        * start and end with a list head. All page list items point to
+        * other pages.
+        */
+       cpu_buffer->pages = pages.next;
+       list_del(&pages);
 
        rb_check_pages(cpu_buffer);
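
The replacement for `list_splice()` is worth spelling out: the pages are first queued on the stack-local `LIST_HEAD(pages)`, then `pages.next` (the first real page) is saved as the cursor and `list_del(&pages)` snips the temporary head out of the circle, leaving the pages linked only to each other. A user-space sketch of the same maneuver, with simplified versions of `list_add_tail()`/`list_del()` (not the kernel implementations):

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };
    struct page { struct list_head list; int id; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }
    #define page_of(ptr) \
            ((struct page *)((char *)(ptr) - offsetof(struct page, list)))

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
            new->prev = head->prev;
            new->next = head;
            head->prev->next = new;
            head->prev = new;
    }

    static void list_del(struct list_head *entry)
    {
            entry->prev->next = entry->next;
            entry->next->prev = entry->prev;
    }

    int main(void)
    {
            struct list_head pages = LIST_HEAD_INIT(pages); /* temporary head */
            struct page pg[3];
            struct list_head *cursor, *p;
            int i;

            for (i = 0; i < 3; i++) {
                    pg[i].id = i;
                    list_add_tail(&pg[i].list, &pages);
            }

            /* As in rb_allocate_pages(): remember the first real page,
             * then unlink the temporary head, leaving a pure circle. */
            cursor = pages.next;
            list_del(&pages);

            p = cursor;
            do {
                    printf("page %d\n", page_of(p)->id);
                    p = p->next;
            } while (p != cursor);
            return 0;
    }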
 
        spin_lock_init(&cpu_buffer->reader_lock);
        lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
        cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-       INIT_LIST_HEAD(&cpu_buffer->pages);
 
        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                            GFP_KERNEL, cpu_to_node(cpu));
                goto fail_free_reader;
 
        cpu_buffer->head_page
-               = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
+               = list_entry(cpu_buffer->pages, struct buffer_page, list);
        cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
 
        return cpu_buffer;
 
 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       struct list_head *head = &cpu_buffer->pages;
+       struct list_head *head = cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;
 
        free_buffer_page(cpu_buffer->reader_page);
 
-       list_for_each_entry_safe(bpage, tmp, head, list) {
-               list_del_init(&bpage->list);
+       if (head) {
+               list_for_each_entry_safe(bpage, tmp, head, list) {
+                       list_del_init(&bpage->list);
+                       free_buffer_page(bpage);
+               }
+               bpage = list_entry(head, struct buffer_page, list);
                free_buffer_page(bpage);
        }
+
        kfree(cpu_buffer);
 }
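
Teardown has to respect the new shape: `list_for_each_entry_safe()` visits every entry except the one its `head` argument is embedded in, and since `head` now points into a real page, that last page must be freed separately via `list_entry(head, ...)`. The new `if (head)` guard covers the allocation-failure path, where `cpu_buffer->pages` was never assigned. A sketch of the same walk (mock types and a hypothetical `free_ring()` helper, for illustration only):

    #include <stdlib.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };
    struct buffer_page { struct list_head list; };

    #define page_of(ptr) \
            ((struct buffer_page *)((char *)(ptr) - offsetof(struct buffer_page, list)))

    /* Free a headless ring: everything after "head" around to "head",
     * then the page "head" itself lives in -- mirroring the hunk above. */
    static void free_ring(struct list_head *head)
    {
            struct list_head *p, *next;

            if (!head)      /* ring was never built (allocation failed early) */
                    return;

            for (p = head->next; p != head; p = next) {
                    next = p->next;
                    free(page_of(p));
            }
            free(page_of(head));
    }

    int main(void)
    {
            struct buffer_page *a = calloc(1, sizeof(*a));
            struct buffer_page *b = calloc(1, sizeof(*b));

            a->list.next = a->list.prev = &b->list;
            b->list.next = b->list.prev = &a->list;
            free_ring(&a->list);
            return 0;
    }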
 
        synchronize_sched();
 
        for (i = 0; i < nr_pages; i++) {
-               if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+               if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
                        return;
-               p = cpu_buffer->pages.next;
+               p = cpu_buffer->pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
-       if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+       if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
                return;
 
        rb_reset_cpu(cpu_buffer);
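
One subtlety in this hunk: without a sentinel, `list_empty()` changes meaning. It tests `head->next == head`, which on a headless ring is true exactly when a single page remains, so the `RB_WARN_ON` now stops the resize path from shrinking the ring past its last page. A tiny sketch of that semantic (mock `list_empty()`, matching the kernel's definition):

    #include <assert.h>

    struct list_head { struct list_head *next, *prev; };

    /* Mock of the kernel's list_empty(): true iff the node links to itself. */
    static int list_empty(const struct list_head *head)
    {
            return head->next == head;
    }

    int main(void)
    {
            struct list_head only = { &only, &only };   /* one-page ring */

            /* On a headless ring, "empty" really means "last page left". */
            assert(list_empty(&only));
            return 0;
    }
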
                p = pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
-               list_add_tail(&bpage->list, &cpu_buffer->pages);
+               list_add_tail(&bpage->list, cpu_buffer->pages);
        }
        rb_reset_cpu(cpu_buffer);
 
 {
        struct list_head *p = (*bpage)->list.next;
 
-       if (p == &cpu_buffer->pages)
-               p = p->next;
-
        *bpage = list_entry(p, struct buffer_page, list);
 }
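
This deletion is the payoff of the whole patch: `rb_inc_page()` becomes a single pointer chase, with no sentinel to detect and hop over on every wraparound. Sketched in user space (mock types; `inc_page()` is a made-up name for illustration):

    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };
    struct buffer_page { struct list_head list; };

    /* One pointer chase per step: every node in the headless ring is
     * a real page, so no "is this the list head?" check is needed. */
    static void inc_page(struct buffer_page **bpage)
    {
            struct list_head *p = (*bpage)->list.next;

            *bpage = (struct buffer_page *)((char *)p -
                            offsetof(struct buffer_page, list));
    }

    int main(void)
    {
            struct buffer_page a, b, *cur = &a;

            a.list.next = &b.list;  b.list.next = &a.list;
            a.list.prev = &b.list;  b.list.prev = &a.list;

            inc_page(&cur);         /* cur now points at b */
            return cur == &b ? 0 : 1;
    }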
 
        cpu_buffer->reader_page->list.next = reader->list.next;
        cpu_buffer->reader_page->list.prev = reader->list.prev;
 
+       /*
+        * cpu_buffer->pages just needs to point into the buffer; it
+        * has no specific buffer page to point to. Let's move it out
+        * of our way so we don't accidentally swap it.
+        */
+       cpu_buffer->pages = reader->list.prev;
+
        local_set(&cpu_buffer->reader_page->write, 0);
        local_set(&cpu_buffer->reader_page->entries, 0);
        local_set(&cpu_buffer->reader_page->page->commit, 0);
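
The comment above names the one hazard the headless ring introduces: `cpu_buffer->pages` is only a cursor, and if it happened to point at the page about to be handed to the reader, the swap would leave it pointing outside the ring, so it is parked on `reader->list.prev` first. A sketch of the hazard and the fix (mock types; `replace_in_ring()` is an illustrative stand-in for the splice the real code performs):

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };
    struct buffer_page { struct list_head list; int id; };

    /* Splice "new" into the ring in place of "old" (simplified version
     * of the swap rb_get_reader_page() performs with the reader page). */
    static void replace_in_ring(struct list_head *new, struct list_head *old)
    {
            new->next = old->next;
            new->prev = old->prev;
            old->prev->next = new;
            old->next->prev = new;
    }

    int main(void)
    {
            struct buffer_page a = { .id = 0 }, b = { .id = 1 }, spare = { .id = 9 };
            struct list_head *cursor;

            /* Two-page ring: a <-> b. */
            a.list.next = a.list.prev = &b.list;
            b.list.next = b.list.prev = &a.list;

            cursor = &a.list;       /* plays cpu_buffer->pages: on the doomed page */

            /* Park the cursor on a page guaranteed to stay in the ring,
             * exactly as the new comment in the patch explains. */
            cursor = a.list.prev;                   /* now &b.list, safely in-ring */
            replace_in_ring(&spare.list, &a.list);  /* "a" leaves the ring */

            printf("cursor on page %d\n",
                   ((struct buffer_page *)((char *)cursor -
                            offsetof(struct buffer_page, list)))->id);
            return 0;
    }
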
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 {
        cpu_buffer->head_page
-               = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
+               = list_entry(cpu_buffer->pages, struct buffer_page, list);
        local_set(&cpu_buffer->head_page->write, 0);
        local_set(&cpu_buffer->head_page->entries, 0);
        local_set(&cpu_buffer->head_page->page->commit, 0);