* User-space reading this value should issue an rmb(), on SMP capable
         * platforms, after reading this value -- see perf_counter_wakeup().
         */
-       __u32   data_head;              /* head in the data section */
+       __u64   data_head;              /* head in the data section */
 };
 
 #define PERF_EVENT_MISC_CPUMODE_MASK   (3 << 0)
        int                             nr_locked;      /* nr pages mlocked  */
 
        atomic_t                        poll;           /* POLL_ for wakeups */
-       atomic_t                        head;           /* write position    */
        atomic_t                        events;         /* event limit       */
 
-       atomic_t                        done_head;      /* completed head    */
+       atomic_long_t                   head;           /* write position    */
+       atomic_long_t                   done_head;      /* completed head    */
+
        atomic_t                        lock;           /* concurrent writes */
 
        atomic_t                        wakeup;         /* needs a wakeup    */
 
 struct perf_output_handle {
        struct perf_counter     *counter;
        struct perf_mmap_data   *data;
-       unsigned int            offset;
-       unsigned int            head;
+       unsigned long           head;
+       unsigned long           offset;
        int                     nmi;
        int                     overflow;
        int                     locked;
 static void perf_output_unlock(struct perf_output_handle *handle)
 {
        struct perf_mmap_data *data = handle->data;
-       int head, cpu;
+       unsigned long head;
+       int cpu;
 
        data->done_head = data->head;
 
         * before we publish the new head, matched by a rmb() in userspace when
         * reading this position.
         */
-       while ((head = atomic_xchg(&data->done_head, 0)))
+       while ((head = atomic_long_xchg(&data->done_head, 0)))
                data->user_page->data_head = head;
 
        /*
        /*
         * Therefore we have to validate we did not indeed do so.
         */
-       if (unlikely(atomic_read(&data->done_head))) {
+       if (unlikely(atomic_long_read(&data->done_head))) {
                /*
                 * Since we had it locked, we can lock it again.
                 */
        do {
                offset = head = atomic_long_read(&data->head);
                head += size;
-       } while (atomic_cmpxchg(&data->head, offset, head) != offset);
+       } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
 
        handle->offset  = offset;
        handle->head    = head;
         * Check we didn't copy past our reservation window, taking the
         * possible unsigned long wrap into account.
         */
-       WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0);
+       WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
 }
 
 #define perf_output_put(handle, x) \