        return x86_event_sysfs_show(page, config, event);
 }
 
-static u64 amd_pmu_limit_period(struct perf_event *event, u64 left)
+static void amd_pmu_limit_period(struct perf_event *event, s64 *left)
 {
        /*
         * Decrease period by the depth of the BRS feature to get the last N
         * taken branches and approximate the desired period
         */
-       if (has_branch_stack(event) && left > x86_pmu.lbr_nr)
-               left -= x86_pmu.lbr_nr;
-
-       return left;
+       if (has_branch_stack(event) && *left > x86_pmu.lbr_nr)
+               *left -= x86_pmu.lbr_nr;
 }
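
For what it's worth, the new contract is easy to exercise in isolation: the hook edits the period through the pointer and returns nothing, so a "no adjustment" case is simply an empty body. A minimal user-space sketch, not kernel code (lbr_nr fixed at an arbitrary 16, brs_limit_period is a made-up stand-in):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for x86_pmu.lbr_nr; 16 is an arbitrary depth for illustration. */
static const int64_t lbr_nr = 16;

/* Same shape as the new amd_pmu_limit_period(): edit the period through
 * the pointer, return nothing. */
static void brs_limit_period(bool has_branch_stack, int64_t *left)
{
        /* Shorten the period by the branch-sampling depth so the last
         * N taken branches still land inside the sampled window. */
        if (has_branch_stack && *left > lbr_nr)
                *left -= lbr_nr;
}

int main(void)
{
        int64_t left = 100000;  /* requested sample_period */

        brs_limit_period(true, &left);
        printf("programmed period: %lld\n", (long long)left);  /* 99984 */
        return 0;
}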
 
 static __initconst const struct x86_pmu amd_pmu = {
 
                event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 
        if (event->attr.sample_period && x86_pmu.limit_period) {
-               if (x86_pmu.limit_period(event, event->attr.sample_period) >
-                               event->attr.sample_period)
+               s64 left = event->attr.sample_period;
+               x86_pmu.limit_period(event, &left);
+               if (left > event->attr.sample_period)
                        return -EINVAL;
        }
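
The check above encodes the rule that a limit_period() hook may shrink or keep the requested period but must never grow it; if the adjusted value comes back larger, the request cannot be honoured and setup fails with -EINVAL. A sketch of that rule in isolation (floor_period and period_is_acceptable are illustrative names, not kernel functions):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef void (*limit_period_fn)(int64_t *left);

/* Example adjustment in the nhm_limit_period() style: enforce a floor. */
static void floor_period(int64_t *left)
{
        if (*left < 32)
                *left = 32;
}

/* 0 when the adjusted period does not exceed the requested one,
 * -EINVAL otherwise -- the rule the hunk above enforces. */
static int period_is_acceptable(limit_period_fn limit, uint64_t sample_period)
{
        int64_t left = sample_period;

        limit(&left);
        return left > (int64_t)sample_period ? -EINVAL : 0;
}

int main(void)
{
        printf("%d\n", period_is_acceptable(floor_period, 8));     /* -EINVAL */
        printf("%d\n", period_is_acceptable(floor_period, 50000)); /* 0 */
        return 0;
}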
 
                left = x86_pmu.max_period;
 
        if (x86_pmu.limit_period)
-               left = x86_pmu.limit_period(event, left);
+               x86_pmu.limit_period(event, &left);
 
-       per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
+       this_cpu_write(pmc_prev_left[idx], left);
 
        /*
         * The hw event starts counting from this event offset,
                return -EINVAL;
 
        if (value && x86_pmu.limit_period) {
-               if (x86_pmu.limit_period(event, value) > value)
+               s64 left = value;
+               x86_pmu.limit_period(event, &left);
+               if (left > value)
                        return -EINVAL;
        }
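
The same "never enlarge the requested period" rule shows up again here; as far as I can tell this is the path that vets period updates coming from user space, e.g. via PERF_EVENT_IOC_PERIOD. A minimal user-space sketch of such an update, for context (error handling trimmed, the instruction-counting event is chosen arbitrarily):

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t period = 100000;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.sample_period = period;
        attr.disabled = 1;

        /* Count instructions on the calling thread, any CPU. */
        fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* Request a new period; the kernel vets it before applying it. */
        period = 4096;
        if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))
                perror("PERF_EVENT_IOC_PERIOD");

        close(fd);
        return 0;
}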
 
 
  * Therefore the effective (average) period matches the requested period,
  * despite coarser hardware granularity.
  */
-static u64 bdw_limit_period(struct perf_event *event, u64 left)
+static void bdw_limit_period(struct perf_event *event, s64 *left)
 {
        if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
                        X86_CONFIG(.event=0xc0, .umask=0x01)) {
-               if (left < 128)
-                       left = 128;
-               left &= ~0x3fULL;
+               if (*left < 128)
+                       *left = 128;
+               *left &= ~0x3fULL;
        }
-       return left;
 }
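
The Broadwell quirk keeps its arithmetic and only changes calling convention: floor at 128, then clear the low six bits. A stand-alone check of the resulting values (plain C restatement, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as bdw_limit_period(): floor of 128, then round
 * down to a multiple of 64 by clearing bits 0-5. */
static void bdw_like_limit(int64_t *left)
{
        if (*left < 128)
                *left = 128;
        *left &= ~0x3fLL;
}

int main(void)
{
        const int64_t requested[] = { 100, 128, 200, 1000 };

        for (unsigned int i = 0; i < 4; i++) {
                int64_t left = requested[i];

                bdw_like_limit(&left);
                /* prints: 100 -> 128, 128 -> 128, 200 -> 192, 1000 -> 960 */
                printf("%lld -> %lld\n", (long long)requested[i], (long long)left);
        }
        return 0;
}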
 
-static u64 nhm_limit_period(struct perf_event *event, u64 left)
+static void nhm_limit_period(struct perf_event *event, s64 *left)
 {
-       return max(left, 32ULL);
+       *left = max(*left, 32LL);
 }
 
-static u64 spr_limit_period(struct perf_event *event, u64 left)
+static void spr_limit_period(struct perf_event *event, s64 *left)
 {
        if (event->attr.precise_ip == 3)
-               return max(left, 128ULL);
-
-       return left;
+               *left = max(*left, 128LL);
 }
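
One detail worth noting: with *left now s64, the constants switch from 32ULL/128ULL to 32LL/128LL so both arguments of max() stay the same signed type. Outside the kernel the same shape looks like this (plain-C restatement; max_s64 is a stand-in for the kernel macro):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's max() on two s64 values. */
static inline int64_t max_s64(int64_t a, int64_t b)
{
        return a > b ? a : b;
}

/* Same shape as spr_limit_period(): only precise (precise_ip == 3)
 * events get the 128-count floor; everything else is left untouched. */
static void spr_like_limit(int precise_ip, int64_t *left)
{
        if (precise_ip == 3)
                *left = max_s64(*left, 128);
}

int main(void)
{
        int64_t a = 64, b = 64;

        spr_like_limit(3, &a);  /* floored to 128 */
        spr_like_limit(0, &b);  /* untouched: 64 */
        printf("%lld %lld\n", (long long)a, (long long)b);
        return 0;
}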
 
 PMU_FORMAT_ATTR(event, "config:0-7"    );
 
        struct event_constraint *event_constraints;
        struct x86_pmu_quirk *quirks;
        int             perfctr_second_write;
-       u64             (*limit_period)(struct perf_event *event, u64 l);
+       void            (*limit_period)(struct perf_event *event, s64 *l);
 
        /* PMI handler bits */
        unsigned int    late_ack                :1,
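
Finally, the callback slot itself becomes a void function taking an s64 pointer. A reduced sketch of how a hook is declared and invoked against this shape (toy_pmu, toy_event and my_limit_period are made-up names; the struct is cut down to the one field changed above):

#include <stddef.h>
#include <stdint.h>

struct toy_event {
        uint64_t config;
};

/* Reduced stand-in for struct x86_pmu: only the field this hunk changes. */
struct toy_pmu {
        void (*limit_period)(struct toy_event *event, int64_t *l);
};

/* A hook written against the new signature: adjust in place, return nothing. */
static void my_limit_period(struct toy_event *event, int64_t *l)
{
        (void)event;            /* unused in this toy */
        if (*l < 32)            /* arbitrary illustrative floor */
                *l = 32;
}

static const struct toy_pmu my_pmu = {
        .limit_period = my_limit_period,
};

/* Caller-side pattern matching the hunks above: NULL-check the hook,
 * then hand it a pointer to the working copy. */
static int64_t program_period(const struct toy_pmu *pmu, int64_t left)
{
        if (pmu->limit_period)
                pmu->limit_period(NULL, &left);
        return left;
}

int main(void)
{
        return program_period(&my_pmu, 8) == 32 ? 0 : 1;
}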