}
 }
 
+/*
+ * Compute the sample_type flags compatible with the PEBS free-running
+ * (auto-reload) fast path for this event: start from the PMU-wide
+ * x86_pmu.free_running_flags mask and clear PERF_SAMPLE_TIME when the
+ * event requests a specific clock via attr.use_clockid.
+ *
+ * NOTE(review): rationale presumed to be that the free-running PEBS path
+ * cannot produce timestamps in a user-selected clockid — confirm against
+ * the PEBS timestamp handling in the drain path.
+ */
+static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
+{
+       unsigned long flags = x86_pmu.free_running_flags;
+
+       if (event->attr.use_clockid)
+               flags &= ~PERF_SAMPLE_TIME;
+       return flags;
+}
+
 static int intel_pmu_hw_config(struct perf_event *event)
 {
        int ret = x86_pmu_hw_config(event);
        if (event->attr.precise_ip) {
                if (!event->attr.freq) {
                        event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
-                       if (!(event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS))
+                       if (!(event->attr.sample_type &
+                             ~intel_pmu_free_running_flags(event)))
                                event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
                }
                if (x86_pmu.pebs_aliases)
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
+       .free_running_flags     = PEBS_FREERUNNING_FLAGS,
+
        /*
         * Intel PMCs cannot be accessed sanely above 32-bit width,
         * so we install an artificial 1<<31 period regardless of
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
+       .free_running_flags     = PEBS_FREERUNNING_FLAGS,
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of