struct ring_buffer_per_cpu {
        int                             cpu;
        atomic_t                        record_disabled;
+       atomic_t                        resize_disabled;
        struct trace_buffer     *buffer;
        raw_spinlock_t                  reader_lock;    /* serialize readers */
        arch_spinlock_t                 lock;
        ...
 };

 struct trace_buffer {
        unsigned                        flags;
        int                             cpus;
        atomic_t                        record_disabled;
-       atomic_t                        resize_disabled;
        cpumask_var_t                   cpumask;
 
        struct lock_class_key           *reader_lock_key;
        ...
 };

 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
                        int cpu_id)
 {
        ...
        size = nr_pages * BUF_PAGE_SIZE;
 
-       /*
-        * Don't succeed if resizing is disabled, as a reader might be
-        * manipulating the ring buffer and is expecting a sane state while
-        * this is true.
-        */
-       if (atomic_read(&buffer->resize_disabled))
-               return -EBUSY;
-
        /* prevent another thread from changing buffer sizes */
        mutex_lock(&buffer->mutex);
 
+
        if (cpu_id == RING_BUFFER_ALL_CPUS) {
+               /*
+                * Don't succeed if resizing is disabled, as a reader might be
+                * manipulating the ring buffer and is expecting a sane state while
+                * this is true.
+                */
+               for_each_buffer_cpu(buffer, cpu) {
+                       cpu_buffer = buffer->buffers[cpu];
+                       if (atomic_read(&cpu_buffer->resize_disabled)) {
+                               err = -EBUSY;
+                               goto out_err_unlock;
+                       }
+               }
+
                /* calculate the pages to update */
                for_each_buffer_cpu(buffer, cpu) {
                        cpu_buffer = buffer->buffers[cpu];
                        ...
                }
                ...
        } else {
                cpu_buffer = buffer->buffers[cpu_id];

                if (nr_pages == cpu_buffer->nr_pages)
                        goto out;
 
+               /*
+                * Don't succeed if resizing is disabled, as a reader might be
+                * manipulating the ring buffer and is expecting a sane state while
+                * this is true.
+                */
+               if (atomic_read(&cpu_buffer->resize_disabled)) {
+                       err = -EBUSY;
+                       goto out_err_unlock;
+               }
+
                cpu_buffer->nr_pages_to_update = nr_pages -
                                                cpu_buffer->nr_pages;
 
        ...

 out_err:
        for_each_buffer_cpu(buffer, cpu) {
                ...
                        free_buffer_page(bpage);
                }
        }
+ out_err_unlock:
        mutex_unlock(&buffer->mutex);
        return err;
 }
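
For illustration only, here is a minimal user-space model (not kernel code) of the check the hunks above introduce: resizing takes the global mutex and then returns -EBUSY only if a per-CPU buffer that is actually being resized has its own resize_disabled counter raised, instead of consulting a single buffer-wide flag. The names buf_resize, per_cpu_buf, NR_CPUS and RESIZE_ALL_CPUS are invented for this sketch and are not kernel APIs.

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

#define NR_CPUS         4
#define RESIZE_ALL_CPUS (-1)

struct per_cpu_buf {
        atomic_int      resize_disabled;   /* readers pin the buffer by bumping this */
        long            nr_pages;
};

static struct per_cpu_buf bufs[NR_CPUS];
static pthread_mutex_t buf_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Resize one CPU buffer (cpu >= 0) or all of them (cpu == RESIZE_ALL_CPUS). */
static int buf_resize(int cpu, long nr_pages)
{
        int err = 0;

        /* prevent another thread from changing buffer sizes */
        pthread_mutex_lock(&buf_mutex);

        if (cpu == RESIZE_ALL_CPUS) {
                /* Fail only if some per-CPU buffer is pinned by a reader. */
                for (int c = 0; c < NR_CPUS; c++) {
                        if (atomic_load(&bufs[c].resize_disabled)) {
                                err = -EBUSY;
                                goto out_unlock;
                        }
                }
                for (int c = 0; c < NR_CPUS; c++)
                        bufs[c].nr_pages = nr_pages;
        } else {
                /* A reader pinning a different CPU no longer blocks this resize. */
                if (atomic_load(&bufs[cpu].resize_disabled)) {
                        err = -EBUSY;
                        goto out_unlock;
                }
                bufs[cpu].nr_pages = nr_pages;
        }

out_unlock:
        pthread_mutex_unlock(&buf_mutex);
        return err;
}

The design point is that a counter held on one CPU's buffer no longer forbids resizing every other CPU's buffer.
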
 
struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
{
        ...
        iter->cpu_buffer = cpu_buffer;
 
-       atomic_inc(&buffer->resize_disabled);
+       atomic_inc(&cpu_buffer->resize_disabled);
        atomic_inc(&cpu_buffer->record_disabled);
 
        return iter;
}

void ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
        ...
        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
        atomic_dec(&cpu_buffer->record_disabled);
-       atomic_dec(&cpu_buffer->buffer->resize_disabled);
+       atomic_dec(&cpu_buffer->resize_disabled);
        kfree(iter->event);
        kfree(iter);
}

void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        ...
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return;
                return;
 
-       atomic_inc(&buffer->resize_disabled);
+       atomic_inc(&cpu_buffer->resize_disabled);
        atomic_inc(&cpu_buffer->record_disabled);
 
        /* Make sure all commits have finished */
        synchronize_rcu();
        ...
        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
        atomic_dec(&cpu_buffer->record_disabled);
-       atomic_dec(&buffer->resize_disabled);
+       atomic_dec(&cpu_buffer->resize_disabled);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
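
A matching sketch of the reader side, mirroring ring_buffer_read_prepare()/ring_buffer_read_finish() and ring_buffer_reset_cpu() above. It reuses the per_cpu_buf model from the previous sketch; reader_begin, reader_end and example are likewise invented names. Pinning one CPU's buffer blocks resizing of that buffer only.

/* Pin a single CPU buffer while it is being read or reset. */
static void reader_begin(int cpu)
{
        atomic_fetch_add(&bufs[cpu].resize_disabled, 1);
}

static void reader_end(int cpu)
{
        atomic_fetch_sub(&bufs[cpu].resize_disabled, 1);
}

/*
 * While CPU 1 is being read, resizing CPU 1 (or all CPUs at once) reports
 * -EBUSY, but resizing CPU 0 on its own still succeeds.
 */
static void example(void)
{
        reader_begin(1);
        int all   = buf_resize(RESIZE_ALL_CPUS, 256);   /* -EBUSY */
        int self  = buf_resize(1, 256);                 /* -EBUSY */
        int other = buf_resize(0, 256);                 /* 0      */
        (void)all; (void)self; (void)other;
        reader_end(1);
}
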