        return new_raw_count;
 }
 
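+/*
+ * Return the address of the event-select (config) MSR for counter
+ * @index.  Counter MSRs are assumed to be laid out contiguously
+ * from the eventsel base; this helper keeps that assumption in
+ * one place.
+ */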
+static inline unsigned int x86_pmu_config_addr(int index)
+{
+       return x86_pmu.eventsel + index;
+}
+
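+/*
+ * Return the address of the counter-value MSR for counter @index,
+ * laid out contiguously from the perfctr base.
+ */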
+static inline unsigned int x86_pmu_event_addr(int index)
+{
+       return x86_pmu.perfctr + index;
+}
+
 static atomic_t active_events;
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
        int i;
 
        for (i = 0; i < x86_pmu.num_counters; i++) {
-               if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
+               if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
                        goto perfctr_fail;
        }
 
        for (i = 0; i < x86_pmu.num_counters; i++) {
-               if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
+               if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
                        goto eventsel_fail;
        }
 
 
 eventsel_fail:
        for (i--; i >= 0; i--)
-               release_evntsel_nmi(x86_pmu.eventsel + i);
+               release_evntsel_nmi(x86_pmu_config_addr(i));
 
        i = x86_pmu.num_counters;
 
 perfctr_fail:
        for (i--; i >= 0; i--)
-               release_perfctr_nmi(x86_pmu.perfctr + i);
+               release_perfctr_nmi(x86_pmu_event_addr(i));
 
        return false;
 }
        int i;
 
        for (i = 0; i < x86_pmu.num_counters; i++) {
-               release_perfctr_nmi(x86_pmu.perfctr + i);
-               release_evntsel_nmi(x86_pmu.eventsel + i);
+               release_perfctr_nmi(x86_pmu_event_addr(i));
+               release_evntsel_nmi(x86_pmu_config_addr(i));
        }
 }
 
         * complain and bail.
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
-               reg = x86_pmu.eventsel + i;
+               reg = x86_pmu_config_addr(i);
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
         * that don't trap on the MSR access and always return 0s.
         */
        val = 0xabcdUL;
-       ret = checking_wrmsrl(x86_pmu.perfctr, val);
-       ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+       ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+       ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
        if (ret || val != val_new)
                goto msr_fail;
 
 
                if (!test_bit(idx, cpuc->active_mask))
                        continue;
-               rdmsrl(x86_pmu.eventsel + idx, val);
+               rdmsrl(x86_pmu_config_addr(idx), val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
-               wrmsrl(x86_pmu.eventsel + idx, val);
+               wrmsrl(x86_pmu_config_addr(idx), val);
        }
 }
 
        pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
-               rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
+               rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
+               rdmsrl(x86_pmu_event_addr(idx), pmc_count);
 
                prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
 
        printk("clearing PMU state on CPU#%d\n", smp_processor_id());
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
-               checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
+               checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
+               checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
                checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);