perf_disable();
        power_pmu_read(counter);
        left = counter->hw.sample_period;
+       counter->hw.last_period = left;
        val = 0;
        if (left < 0x80000000L)
                val = 0x80000000L - left;
 
        counter->hw.config = events[n];
        counter->hw.counter_base = cflags[n];
-       atomic64_set(&counter->hw.period_left, counter->hw.sample_period);
+       counter->hw.last_period = counter->hw.sample_period;
+       atomic64_set(&counter->hw.period_left, counter->hw.last_period);
 
        /*
         * See if we need to reserve the PMU.
         */
        if (record) {
                struct perf_sample_data data = {
-                       .regs = regs,
-                       .addr = 0,
+                       .regs   = regs,
+                       .addr   = 0,
+                       .period = counter->hw.last_period,
                };
 
                if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
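
Aside on the preload arithmetic visible in the power_pmu hunks above: a 32-bit PMC that interrupts once bit 31 becomes set has to be preloaded with 0x80000000 - left to overflow after 'left' more events, and last_period simply records which period that preload corresponds to. The stand-alone sketch below is illustrative only (plain C, hypothetical helper name, no PMC access), not kernel code:

/*
 * Illustrative sketch, not kernel code: mirrors the preload computation
 * in the power_pmu hunk above.  A 32-bit counter that interrupts when
 * bit 31 becomes set is preloaded with 0x80000000 - left so that it
 * overflows after 'left' more events.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pmc_preload(int64_t left)
{
        uint64_t val = 0;

        if (left < 0x80000000LL)
                val = 0x80000000LL - left;
        return (uint32_t)val;
}

int main(void)
{
        int64_t left = 100000;  /* events remaining in the current period */

        printf("preload PMC with 0x%08x to overflow after %lld events\n",
               (unsigned int)pmc_preload(left), (long long)left);
        return 0;
}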
 
 
        if (!hwc->sample_period) {
                hwc->sample_period = x86_pmu.max_period;
+               hwc->last_period = hwc->sample_period;
                atomic64_set(&hwc->period_left, hwc->sample_period);
        }
 
        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
+               hwc->last_period = period;
                ret = 1;
        }
 
        if (unlikely(left <= 0)) {
                left += period;
                atomic64_set(&hwc->period_left, left);
+               hwc->last_period = period;
                ret = 1;
        }
        /*
                if (val & (1ULL << (x86_pmu.counter_bits - 1)))
                        continue;
 
-               /* counter overflow */
-               handled = 1;
-               inc_irq_stat(apic_perf_irqs);
+               /*
+                * counter overflow
+                */
+               handled         = 1;
+               data.period     = counter->hw.last_period;
+
                if (!x86_perf_counter_set_period(counter, hwc, idx))
                        continue;
 
                        amd_pmu_disable_counter(hwc, idx);
        }
 
+       if (handled)
+               inc_irq_stat(apic_perf_irqs);
+
        return handled;
 }
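
The x86 and software-counter hunks follow one pattern: wherever period_left is reprogrammed (x86_perf_counter_set_period() above, perf_swcounter_set_period() further down), the period value that this (re)arming corresponds to is also stashed in last_period, so the overflow handler can later report it via data.period. Below is a condensed, stand-alone sketch of that bookkeeping, assuming a hypothetical struct hw_state with plain integers in place of atomic64_t and no hardware programming; it is illustrative only, not kernel code:

/*
 * Illustrative sketch, not kernel code: condenses the period_left /
 * last_period bookkeeping added by this patch.
 */
#include <stdint.h>
#include <stdio.h>

struct hw_state {
        uint64_t sample_period; /* period currently requested               */
        uint64_t last_period;   /* period the running count was armed with  */
        int64_t  period_left;   /* events still to count before overflow    */
};

static int set_period(struct hw_state *hw)
{
        int64_t left = hw->period_left;
        int64_t period = (int64_t)hw->sample_period;
        int ret = 0;

        /* Counter lagged by more than a full period: start a fresh one. */
        if (left <= -period) {
                left = period;
                hw->period_left = left;
                hw->last_period = period;       /* what the patch adds */
                ret = 1;
        }

        /* Period expired: carry the remainder into the next period. */
        if (left <= 0) {
                left += period;
                hw->period_left = left;
                hw->last_period = period;       /* what the patch adds */
                ret = 1;
        }

        return ret;
}

int main(void)
{
        struct hw_state hw = { .sample_period = 100000, .period_left = -5 };

        set_period(&hw);
        /* The next overflow sample will be attributed hw.last_period events. */
        printf("period_left=%lld last_period=%llu\n",
               (long long)hw.period_left,
               (unsigned long long)hw.last_period);
        return 0;
}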
 
 
        };
        atomic64_t                      prev_count;
        u64                             sample_period;
+       u64                             last_period;
        atomic64_t                      period_left;
        u64                             interrupts;
 
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
 struct perf_sample_data {
-       struct pt_regs  *regs;
-       u64             addr;
+       struct pt_regs          *regs;
+       u64                     addr;
+       u64                     period;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
 
                perf_output_put(&handle, cpu_entry);
 
        if (sample_type & PERF_SAMPLE_PERIOD)
-               perf_output_put(&handle, counter->hw.sample_period);
+               perf_output_put(&handle, data->period);
 
        /*
         * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
+               hwc->last_period = period;
        }
 
        if (unlikely(left <= 0)) {
                left += period;
                atomic64_add(period, &hwc->period_left);
+               hwc->last_period = period;
        }
 
        atomic64_set(&hwc->prev_count, -left);
                                    int nmi, struct pt_regs *regs, u64 addr)
 {
        struct perf_sample_data data = {
-               .regs = regs,
-               .addr = addr,
+               .regs   = regs,
+               .addr   = addr,
+               .period = counter->hw.last_period,
        };
 
        perf_swcounter_update(counter);
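
Taken together, the overflow paths fill data.period from counter->hw.last_period, and perf_counter_output() writes data->period for PERF_SAMPLE_PERIOD instead of the live counter->hw.sample_period. That matters whenever sample_period gets retuned between arming the counter and writing the sample out (for instance when the period is being adjusted adaptively). The toy program below is illustrative only (hypothetical names, no perf syscalls) and just shows the mis-attribution the old report produced in that case:

/*
 * Illustrative sketch, not kernel code: why a sample must carry the
 * period its counter was armed with (last_period) rather than the
 * current sample_period.
 */
#include <stdint.h>
#include <stdio.h>

struct counter {
        uint64_t sample_period; /* may be retuned at any time              */
        uint64_t last_period;   /* period the running count was armed with */
};

/* Arm the counter: the next overflow stands for last_period events. */
static void arm(struct counter *c)
{
        c->last_period = c->sample_period;
}

/* Report how many events one overflow sample is credited with. */
static uint64_t sample_weight(const struct counter *c, int use_last_period)
{
        return use_last_period ? c->last_period : c->sample_period;
}

int main(void)
{
        struct counter c = { .sample_period = 100000 };

        arm(&c);                  /* overflow will stand for 100000 events */
        c.sample_period = 10000;  /* period retuned before the overflow    */

        printf("old report (sample_period): %llu events\n",
               (unsigned long long)sample_weight(&c, 0));
        printf("new report (last_period):   %llu events\n",
               (unsigned long long)sample_weight(&c, 1));
        return 0;
}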