 u64
 armpmu_event_update(struct perf_event *event,
                    struct hw_perf_event *hwc,
-                   int idx, int overflow)
+                   int idx)
 {
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        u64 delta, prev_raw_count, new_raw_count;
                             new_raw_count) != prev_raw_count)
                goto again;
 
-       new_raw_count &= armpmu->max_period;
-       prev_raw_count &= armpmu->max_period;
-
-       if (overflow)
-               delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-       else
-               delta = new_raw_count - prev_raw_count;
+       delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
        if (hwc->idx < 0)
                return;
 
-       armpmu_event_update(event, hwc, hwc->idx, 0);
+       armpmu_event_update(event, hwc, hwc->idx);
 }
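
The key change above is the delta calculation: since max_period is an all-ones mask for the counter width (0xffffffff for the 32-bit ARM counters), (new_raw_count - prev_raw_count) & max_period gives the right count whether or not the counter wrapped between reads, so the separate overflow branch and the overflow argument can go. A minimal standalone sketch of the arithmetic, assuming a 32-bit counter; this is illustrative user-space code, not part of the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_period = 0xffffffffULL;	/* illustrative 32-bit counter mask */
	uint64_t prev, new, delta;

	/* No wrap: counter simply moved forward from 1000 to 5000. */
	prev = 1000; new = 5000;
	delta = (new - prev) & max_period;
	printf("no wrap: delta = %llu\n", (unsigned long long)delta);	/* 4000 */

	/* Wrap: counter overflowed from near the top back past zero. */
	prev = 0xfffffff0ULL; new = 0x00000010ULL;
	delta = (new - prev) & max_period;
	printf("wrapped: delta = %llu\n", (unsigned long long)delta);	/* 32 */

	return 0;
}

In the wrapped case the masked subtraction evaluates to new + 2^32 - prev, which is exactly what the old overflow path computed as max_period - prev + new + 1.
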
 
 static void
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(hwc, hwc->idx);
                barrier(); /* why? */
-               armpmu_event_update(event, hwc, hwc->idx, 0);
+               armpmu_event_update(event, hwc, hwc->idx);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
 }
        hwc->config_base            |= (unsigned long)mapping;
 
        if (!hwc->sample_period) {
-               hwc->sample_period  = armpmu->max_period;
+               /*
+                * For non-sampling runs, limit the sample_period to half
+                * of the counter width. That way, the new counter value
+                * is far less likely to overtake the previous one unless
+                * you have some serious IRQ latency issues.
+                */
+               hwc->sample_period  = armpmu->max_period >> 1;
                hwc->last_period    = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }
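
The new comment covers the other half of the picture: the masked subtraction can only express deltas up to max_period, so the default period for non-sampling runs is halved to leave headroom for interrupt latency after an overflow. A rough standalone sketch of that headroom, again assuming a 32-bit counter and a counter started 'period' counts below the overflow point; the numbers are illustrative, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_period = 0xffffffffULL;		/* illustrative 32-bit counter mask */
	uint64_t period     = max_period >> 1;		/* new default period */

	uint64_t prev  = (0 - period) & max_period;	/* counter programmed to overflow after 'period' counts */
	uint64_t slack = max_period - period;		/* extra counts we can absorb after the overflow */

	/* Worst tolerable case: the IRQ is handled 'slack' counts late. */
	uint64_t new   = (prev + period + slack) & max_period;
	uint64_t delta = (new - prev) & max_period;

	printf("period=0x%llx slack=0x%llx delta=0x%llx\n",
	       (unsigned long long)period,
	       (unsigned long long)slack,
	       (unsigned long long)delta);		/* delta == period + slack == max_period */
	return 0;
}

With the old full-width default there is no slack at all: any counts accumulated between the overflow and the handler push the true delta past max_period, and the masked result silently drops a whole wrap.
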
 
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx, 1);
+               armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx, 1);
+               armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;