static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                                  u64 config, bool exclude_user,
-                                 bool exclude_kernel, bool intr,
-                                 bool in_tx, bool in_tx_cp)
+                                 bool exclude_kernel, bool intr)
 {
        struct perf_event *event;
        struct perf_event_attr attr = {
@@ ... @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 
        attr.sample_period = get_sample_period(pmc, pmc->counter);
 
-       if (in_tx)
-               attr.config |= HSW_IN_TX;
-       if (in_tx_cp) {
+       if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
+           guest_cpuid_is_intel(pmc->vcpu)) {
                /*
                 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
                 * period. Just clear the sample period so at least
                 * allocating the counter doesn't fail.
                 */
                attr.sample_period = 0;
-               attr.config |= HSW_IN_TX_CHECKPOINTED;
        }
 
        event = perf_event_create_kernel_counter(&attr, -1, current,
@@ ... @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
        pmc_reprogram_counter(pmc, type, config,
                              !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                              !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-                             eventsel & ARCH_PERFMON_EVENTSEL_INT,
-                             (eventsel & HSW_IN_TX),
-                             (eventsel & HSW_IN_TX_CHECKPOINTED));
+                             eventsel & ARCH_PERFMON_EVENTSEL_INT);
 }
 EXPORT_SYMBOL_GPL(reprogram_gp_counter);
 
@@ ... @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
                              kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
                              !(en_field & 0x2), /* exclude user */
                              !(en_field & 0x1), /* exclude kernel */
-                             pmi, false, false);
+                             pmi);
 }
 EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
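For illustration, a minimal userspace sketch of the reworked pmc_reprogram_counter() flow follows. It assumes the usual Haswell TSX hint positions (IN_TX at bit 32, IN_TXCP at bit 33 of the event selector); the struct, the guest_is_intel() stand-in for guest_cpuid_is_intel(), and the 0x1c2 event code are illustrative, not kernel code. The point is that the hint bits now ride along inside the raw config and are inspected there, instead of being passed in as the separate in_tx/in_tx_cp booleans.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HSW_IN_TX               (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED  (1ULL << 33)

/* Illustrative stand-in for the relevant perf_event_attr fields. */
struct fake_attr {
        uint64_t config;        /* raw event; TSX hint bits already included */
        uint64_t sample_period;
};

/* Stand-in for guest_cpuid_is_intel(): assume an Intel guest. */
static bool guest_is_intel(void)
{
        return true;
}

/* Models the reworked pmc_reprogram_counter(): no in_tx/in_tx_cp
 * parameters; the TSX bits are read back out of the config itself. */
static void reprogram(struct fake_attr *attr, uint64_t config, uint64_t period)
{
        attr->config = config;
        attr->sample_period = period;

        /* IN_TXCP is not supported with a nonzero period, so drop the
         * period rather than failing to create the counter. */
        if ((attr->config & HSW_IN_TX_CHECKPOINTED) && guest_is_intel())
                attr->sample_period = 0;
}

int main(void)
{
        struct fake_attr attr;
        uint64_t eventsel = 0x1c2 | HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;

        /* The caller no longer strips and re-ORs the TSX bits; they simply
         * travel inside the raw config. */
        reprogram(&attr, eventsel, 0x10000);
        printf("config=%#llx period=%llu\n",
               (unsigned long long)attr.config,
               (unsigned long long)attr.sample_period);
        return 0;
}

Built with a plain cc, this prints config=0x30000001c2 period=0: the checkpointed bit survives inside the config while the sample period is cleared.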
 
 
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ ... @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;
+       u64 reserved_bits;
 
        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ ... @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
                                return 0;
-                       if (!(data & pmu->reserved_bits)) {
+                       reserved_bits = pmu->reserved_bits;
+                       if ((pmc->idx == 2) &&
+                           (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
+                               reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
+                       if (!(data & reserved_bits)) {
                                reprogram_gp_counter(pmc, data);
                                return 0;
                        }
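To make the per-counter carve-out above concrete, here is a small model of the MSR_P6_EVNTSELx write check. The starting masks are assumptions standing in for a guest whose CPUID advertises HLE/RTM (so the refresh path in the hunk that follows has already un-reserved IN_TX and recorded both TSX bits in raw_event_mask); the helper name eventsel_write_ok() and the 0x1c2 event code are made up for the sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HSW_IN_TX               (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED  (1ULL << 33)

/* Assumed post-refresh state for a TSX-capable guest: IN_TXCP is still
 * globally reserved, but both hint bits are present in raw_event_mask. */
static const uint64_t reserved_bits_base = HSW_IN_TX_CHECKPOINTED;
static const uint64_t raw_event_mask = HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;

/* Models the eventsel write path: IN_TXCP is tolerated only on
 * general-purpose counter 2, and only if refresh exposed it. */
static bool eventsel_write_ok(int pmc_idx, uint64_t data)
{
        uint64_t reserved_bits = reserved_bits_base;

        if (pmc_idx == 2 && (raw_event_mask & HSW_IN_TX_CHECKPOINTED))
                reserved_bits ^= HSW_IN_TX_CHECKPOINTED;

        return !(data & reserved_bits);
}

int main(void)
{
        uint64_t data = 0x1c2 | HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;

        printf("PMC0: %s\n", eventsel_write_ok(0, data) ? "accepted" : "rejected");
        printf("PMC2: %s\n", eventsel_write_ok(2, data) ? "accepted" : "rejected");
        return 0;
}

With these assumptions the write is rejected on PMC0 but accepted on PMC2, matching the idea that hardware only honours IN_TXCP on the third general-purpose counter.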
@@ ... @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        entry = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
-           (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
-               pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+           (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
+               pmu->reserved_bits ^= HSW_IN_TX;
+               pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
+       }
 
        bitmap_set(pmu->all_valid_pmc_idx,
                0, pmu->nr_arch_gp_counters);
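Finally, a short model of the refresh-time split shown above: reserved_bits now only stops treating IN_TX as reserved, while raw_event_mask records both TSX bits so the set_msr path and pmc_reprogram_counter() can see them later. The starting values below are placeholders (the real ones come from the PMU setup code, which is not part of this diff), and struct fake_pmu / refresh() are illustrative names.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HSW_IN_TX               (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED  (1ULL << 33)

struct fake_pmu {
        uint64_t reserved_bits;  /* bits a guest may never set in an event selector */
        uint64_t raw_event_mask; /* bits forwarded into the raw perf config */
};

/* Models the refresh-time decision: only when the guest's CPUID advertises
 * HLE/RTM do the TSX hint bits stop being fully reserved, and only then are
 * they allowed through into the raw event mask. */
static void refresh(struct fake_pmu *pmu, bool guest_has_tsx)
{
        /* Placeholder defaults: both hint bits reserved, neither forwarded. */
        pmu->reserved_bits = HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;
        pmu->raw_event_mask = 0xffffffffULL;    /* low selector bits only */

        if (guest_has_tsx) {
                pmu->reserved_bits ^= HSW_IN_TX;        /* IN_TX becomes writable */
                pmu->raw_event_mask |= HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;
        }
}

int main(void)
{
        struct fake_pmu pmu;

        refresh(&pmu, true);
        printf("TSX guest:    reserved=%#llx raw_mask=%#llx\n",
               (unsigned long long)pmu.reserved_bits,
               (unsigned long long)pmu.raw_event_mask);

        refresh(&pmu, false);
        printf("no-TSX guest: reserved=%#llx raw_mask=%#llx\n",
               (unsigned long long)pmu.reserved_bits,
               (unsigned long long)pmu.raw_event_mask);
        return 0;
}

Note that IN_TXCP stays in reserved_bits even for a TSX guest; it is only carved out per write, and per counter, in intel_pmu_set_msr() above.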