intel_pmu_enable_all(added);
 }
 
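+/*
+ * With FREEZE_PERFMON_ON_PMI set in IA32_DEBUGCTL, arch perfmon v4 CPUs
+ * freeze all counters when a PMI is raised and unfreeze them again when
+ * the handler acks the global status, which saves the explicit
+ * disable/enable MSR writes in the PMI handler.
+ */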
+static void enable_counter_freeze(void)
+{
+       update_debugctlmsr(get_debugctlmsr() |
+                       DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
+}
+
+static void disable_counter_freeze(void)
+{
+       update_debugctlmsr(get_debugctlmsr() &
+                       ~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
+}
+
 static inline u64 intel_pmu_get_status(void)
 {
        u64 status;
        return handled;
 }
 
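+/*
+ * "disable_counter_freezing" on the kernel command line disables the
+ * feature and keeps the default PMI handler.
+ */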
+static bool disable_counter_freezing;
+static int __init intel_perf_counter_freezing_setup(char *s)
+{
+       disable_counter_freezing = true;
+       pr_info("Intel PMU Counter freezing feature disabled\n");
+       return 1;
+}
+__setup("disable_counter_freezing", intel_perf_counter_freezing_setup);
+
+/*
+ * Simplified handler for Arch Perfmon v4:
+ * - We rely on counter freezing/unfreezing to enable/disable the PMU.
+ *   The hardware freezes the counters on PMI and unfreezes them on the
+ *   PMU ack, so no explicit enable/disable is needed here.
+ * - Ack the PMU (global status) only after acking the APIC.
+ */
+
+static int intel_pmu_handle_irq_v4(struct pt_regs *regs)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       int handled = 0;
+       bool bts = false;
+       u64 status;
+       int pmu_enabled = cpuc->enabled;
+       int loops = 0;
+
+       /* PMU has been disabled because of counter freezing */
+       cpuc->enabled = 0;
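+       /* Handle any pending BTS data with BTS disabled on this CPU */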
+       if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
+               bts = true;
+               intel_bts_disable_local();
+               handled = intel_pmu_drain_bts_buffer();
+               handled += intel_bts_interrupt();
+       }
+       status = intel_pmu_get_status();
+       if (!status)
+               goto done;
+again:
+       intel_pmu_lbr_read();
+       if (++loops > 100) {
+               static bool warned;
+
+               if (!warned) {
+                       WARN(1, "perfevents: irq loop stuck!\n");
+                       perf_event_print_debug();
+                       warned = true;
+               }
+               intel_pmu_reset();
+               goto done;
+       }
+
+       handled += handle_pmi_common(regs, status);
+done:
+       /* Ack the PMI in the APIC */
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+
+       /*
+        * The counters start counting again as soon as the status is
+        * acked. Keep the ack as close as possible to the IRET. This
+        * avoids bogus counter freezing on Skylake CPUs.
+        */
+       if (status) {
+               intel_pmu_ack_status(status);
+       } else {
+               /*
+                * The CPU may issue two PMIs very close to each other.
+                * When the handler services the first one, GLOBAL_STATUS
+                * already reflects both. When it IRETs, the second PMI
+                * is taken immediately and sees an empty status. In the
+                * meantime a third PMI may arrive, because the counters
+                * were unfrozen by the ack in the first handler.
+                * Double check whether there is more work to be done.
+                */
+               status = intel_pmu_get_status();
+               if (status)
+                       goto again;
+       }
+
+       if (bts)
+               intel_bts_enable_local();
+       cpuc->enabled = pmu_enabled;
+       return handled;
+}
+
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
  * rules apply:
        if (x86_pmu.version > 1)
                flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
 
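+       /*
+        * IA32_DEBUGCTL is a per-CPU MSR, so the freeze bit has to be
+        * set on each CPU as it comes online.
+        */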
+       if (x86_pmu.counter_freezing)
+               enable_counter_freeze();
+
        if (!cpuc->shared_regs)
                return;
 
        free_excl_cntrs(cpu);
 
        fini_debug_store_on_cpu(cpu);
+
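+       /* Clear the per-CPU freeze bit again when the CPU goes offline */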
+       if (x86_pmu.counter_freezing)
+               disable_counter_freeze();
 }
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
                        max((int)edx.split.num_counters_fixed, assume);
        }
 
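+       /* Counter freezing as used here requires arch perfmon v4 */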
+       if (version >= 4)
+               x86_pmu.counter_freezing = !disable_counter_freezing;
+
        if (boot_cpu_has(X86_FEATURE_PDCM)) {
                u64 capabilities;
 
                pr_cont("full-width counters, ");
        }
 
+       /*
+        * For arch perfmon v4, use counter freezing to avoid several
+        * MSR accesses in the PMI handler.
+        */
+       if (x86_pmu.counter_freezing)
+               x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
+
        kfree(to_free);
        return 0;
 }