static_call(x86_pmu_enable_all)(added);
 }
 
-static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
+DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
 
 /*
  * Set the next IRQ period, based on the hwc->period_left value.
        if (is_counter_pair(hwc))
                wrmsrl(x86_pmu_event_addr(idx + 1), 0xffff);
 
-       /*
-        * Due to erratum on certan cpu we need
-        * a second write to be sure the register
-        * is updated properly
-        */
-       if (x86_pmu.perfctr_second_write) {
-               wrmsrl(hwc->event_base,
-                       (u64)(-left) & x86_pmu.cntval_mask);
-       }
-
        perf_event_update_userpage(event);
 
        return ret;
 
        }
 }
 
+static int p4_pmu_set_period(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       s64 left;
+       int ret;
+
+       ret = x86_perf_event_set_period(event);
+
+       /*
+        * Read the period *after* x86_perf_event_set_period() has
+        * recorded it in pmc_prev_left, so the erratum rewrite below
+        * repeats the value just programmed into the counter.  Reading
+        * it before the call would replay the stale period from the
+        * previous activation.
+        */
+       left = this_cpu_read(pmc_prev_left[hwc->idx]);
+
+       if (hwc->event_base) {
+               /*
+                * This handles erratum N15 in intel doc 249199-029,
+                * the counter may not be updated correctly on write
+                * so we need a second write operation to do the trick
+                * (the official workaround didn't work)
+                *
+                * the former idea is taken from OProfile code
+                */
+               wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
+       }
+
+       return ret;
+}
+
 static int p4_pmu_handle_irq(struct pt_regs *regs)
 {
        struct perf_sample_data data;
                /* event overflow for sure */
                perf_sample_data_init(&data, 0, hwc->last_period);
 
-               if (!x86_perf_event_set_period(event))
+               if (!static_call(x86_pmu_set_period)(event))
                        continue;
 
 
        .enable_all             = p4_pmu_enable_all,
        .enable                 = p4_pmu_enable_event,
        .disable                = p4_pmu_disable_event,
+
+       .set_period             = p4_pmu_set_period,
+
        .eventsel               = MSR_P4_BPU_CCCR0,
        .perfctr                = MSR_P4_BPU_PERFCTR0,
        .event_map              = p4_pmu_event_map,
        .max_period             = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
        .hw_config              = p4_hw_config,
        .schedule_events        = p4_pmu_schedule_events,
-       /*
-        * This handles erratum N15 in intel doc 249199-029,
-        * the counter may not be updated correctly on write
-        * so we need a second write operation to do the trick
-        * (the official workaround didn't work)
-        *
-        * the former idea is taken from OProfile code
-        */
-       .perfctr_second_write   = 1,
 
        .format_attrs           = intel_p4_formats_attr,
 };
 
 
        struct event_constraint *event_constraints;
        struct x86_pmu_quirk *quirks;
-       int             perfctr_second_write;
        void            (*limit_period)(struct perf_event *event, s64 *l);
 
        /* PMI handler bits */
 }
 
 DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+DECLARE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
 
 int x86_perf_event_set_period(struct perf_event *event);