        if (unlikely(!hwc->event_base))
                return 0;
 
-       if (unlikely(is_topdown_count(event)) && x86_pmu.update_topdown_event)
-               return x86_pmu.update_topdown_event(event);
-
        /*
         * Careful: an NMI might modify the previous event value.
         *
        if (unlikely(!hwc->event_base))
                return 0;
 
-       if (unlikely(is_topdown_count(event)) &&
-           x86_pmu.set_topdown_event_period)
-               return x86_pmu.set_topdown_event_period(event);
-
        /*
         * If we are way outside a reasonable range then just skip forward:
         */
 
        for (i = 0; i < 4; i++) {
                event = cpuc->events[i];
                if (event)
-                       x86_perf_event_update(event);
+                       static_call(x86_pmu_update)(event);
        }
 
        for (i = 0; i < 4; i++) {
                event = cpuc->events[i];
 
                if (event) {
-                       x86_perf_event_set_period(event);
+                       static_call(x86_pmu_set_period)(event);
                        __x86_pmu_enable_event(&event->hw,
                                        ARCH_PERFMON_EVENTSEL_ENABLE);
                } else
  */
 int intel_pmu_save_and_restart(struct perf_event *event)
 {
-       x86_perf_event_update(event);
+       static_call(x86_pmu_update)(event);
        /*
         * For a checkpointed counter always reset back to 0.  This
         * avoids a situation where the counter overflows, aborts the
                wrmsrl(event->hw.event_base, 0);
                local64_set(&event->hw.prev_count, 0);
        }
+       return static_call(x86_pmu_set_period)(event);
+}
+
+static int intel_pmu_set_period(struct perf_event *event)
+{
+       if (unlikely(is_topdown_count(event)) &&
+           x86_pmu.set_topdown_event_period)
+               return x86_pmu.set_topdown_event_period(event);
+
        return x86_perf_event_set_period(event);
 }
 
+static u64 intel_pmu_update(struct perf_event *event)
+{
+       if (unlikely(is_topdown_count(event)) &&
+           x86_pmu.update_topdown_event)
+               return x86_pmu.update_topdown_event(event);
+
+       return x86_perf_event_update(event);
+}
+
 static void intel_pmu_reset(void)
 {
        struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
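
Note: the ".set_period" and ".update" initializers in the hunk below assume two new
callbacks in struct x86_pmu, together with matching static-call declarations, added to
arch/x86/events/perf_event.h elsewhere in this change. A minimal sketch of those
declarations (their exact placement is an assumption; the signatures follow
intel_pmu_set_period()/intel_pmu_update() above):

        /* sketch: new x86_pmu methods, assumed to sit next to the existing ->read */
        int     (*set_period)(struct perf_event *event);
        u64     (*update)(struct perf_event *event);

/* sketch: static-call declarations matching the call sites used above */
DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
DECLARE_STATIC_CALL(x86_pmu_update,     *x86_pmu.update);
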
        .add                    = intel_pmu_add_event,
        .del                    = intel_pmu_del_event,
        .read                   = intel_pmu_read_event,
+       .set_period             = intel_pmu_set_period,
+       .update                 = intel_pmu_update,
        .hw_config              = intel_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
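
The static_call(x86_pmu_update)() and static_call(x86_pmu_set_period)() sites above also
rely on companion wiring outside this excerpt, presumably in arch/x86/events/core.c. A
minimal sketch, assuming it follows the existing x86_pmu static-call pattern
(DEFINE_STATIC_CALL_NULL plus an entry in x86_pmu_static_call_update(), with the generic
helpers installed as defaults so PMUs that do not set ->set_period/->update keep their
current behaviour):

/* sketch: definitions assumed to live in arch/x86/events/core.c */
DEFINE_STATIC_CALL_NULL(x86_pmu_set_period, *x86_pmu.set_period);
DEFINE_STATIC_CALL_NULL(x86_pmu_update,     *x86_pmu.update);

static void x86_pmu_static_call_update(void)
{
        /* ... existing static_call_update() entries ... */
        static_call_update(x86_pmu_set_period, x86_pmu.set_period);
        static_call_update(x86_pmu_update, x86_pmu.update);
}

/*
 * sketch: during init, before x86_pmu_static_call_update() runs, the generic
 * helpers are assumed to be installed as fallbacks:
 *
 *      if (!x86_pmu.set_period)
 *              x86_pmu.set_period = x86_perf_event_set_period;
 *      if (!x86_pmu.update)
 *              x86_pmu.update = x86_perf_event_update;
 */

With that in place, the topdown special cases are reached only through the Intel
->update/->set_period methods, while PMUs that do not override them resolve the same
static calls straight to x86_perf_event_update()/x86_perf_event_set_period().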