sparc64: Fix perf performance counter overflow calculation
author    Dave Aldridge <david.j.aldridge@oracle.com>
          Fri, 29 Jan 2016 11:03:10 +0000 (03:03 -0800)
committer Allen Pais <allen.pais@oracle.com>
          Thu, 4 Feb 2016 13:11:10 +0000 (18:41 +0530)
If sparc_perf_event_update() is called between performance counter
overflow interrupts, then everything is fine and the total event
count calculation is correct. If, however, sparc_perf_event_update()
is only called when the performance counter overflows, the counter
wrap is not taken into consideration. This leaves us with an
incorrect value for the total event count.

This patch fixes the issue by taking the counter overflow situation
into consideration; the sketch after the trailers below illustrates
the arithmetic.

Orabug: 22607658

Signed-off-by: Dave Aldridge <david.j.aldridge@oracle.com>
(cherry picked from commit 6c89361408f964ad2c2c29200987aece3a7c222d)
Signed-off-by: Allen Pais <allen.pais@oracle.com>
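
To make the failure mode concrete, here is a stand-alone sketch
contrasting the two delta calculations. This is illustrative
user-space C, not part of the patch: the counter values and the
helper names delta_old()/delta_new() are invented for the example,
and a 64-bit long is assumed, as on sparc64.

/* Stand-alone illustration, not kernel code: contrasts the old
 * shift-based delta with the new overflow-aware one.  The hardware
 * counter is 32 bits wide but is read into a 64-bit value, as in
 * perf_event.c.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#define MAX_COUNT ((1UL << 32) - 1)

/* Old calculation: reduces the difference modulo 2^32. */
static u64 delta_old(u64 prev_raw, u64 new_raw)
{
	int shift = 64 - 32;
	u64 delta = (new_raw << shift) - (prev_raw << shift);

	return delta >> shift;
}

/* New calculation: the caller reports whether the counter wrapped. */
static u64 delta_new(u64 prev_raw, u64 new_raw, int overflow)
{
	if (overflow)
		new_raw |= (1UL << 32);

	return new_raw - (prev_raw & MAX_COUNT);
}

int main(void)
{
	/* Failing case from the commit message: the update runs only
	 * from the overflow interrupt and the counter has wrapped back
	 * past its previous value.  prev = 0x10, then 2^32 + 0x30
	 * events occur, so the hardware reads back 0x40.
	 */
	u64 prev_raw = 0x10, new_raw = 0x40;

	printf("old: %#llx (2^32 events lost)\n",
	       (unsigned long long)delta_old(prev_raw, new_raw));
	printf("new: %#llx (correct)\n",
	       (unsigned long long)delta_new(prev_raw, new_raw, 1));

	/* Common sampling case for comparison: period 0x100, so the
	 * counter was programmed to 0xffffff00 and reads back 0x3 at
	 * the overflow interrupt.  Both forms agree on 0x103 here.
	 */
	prev_raw = 0xffffff00;
	new_raw = 0x3;

	printf("old: %#llx, new: %#llx\n",
	       (unsigned long long)delta_old(prev_raw, new_raw),
	       (unsigned long long)delta_new(prev_raw, new_raw, 1));

	return 0;
}

The old form computes (new - prev) modulo 2^32, which is correct only
while fewer than 2^32 events elapse between two updates; once the
update runs only at overflow time and the counter wraps back past its
previous value, a full 2^32 events disappear from the total, which is
exactly what the overflow flag restores.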
arch/sparc/kernel/perf_event.c

index d5e46143f99c805b0fe5d547a0424f86aad4b8f7..d3e4f777eabf11f95f25ab061e5e802c9b13967e 100644
@@ -68,6 +68,7 @@
 #define MAX_HWEVENTS                   4
 #define MAX_PCRS                       4
 #define MAX_PERIOD                     ((1UL << 32) - 1)
+#define MAX_COUNT                      ((1UL << 32) - 1)
 
 #define PIC_UPPER_INDEX                        0
 #define PIC_LOWER_INDEX                        1
@@ -859,9 +860,9 @@ static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw
 }
 
 static u64 sparc_perf_event_update(struct perf_event *event,
-                                  struct hw_perf_event *hwc, int idx)
+                                  struct hw_perf_event *hwc, int idx,
+                                  bool overflow)
 {
-       int shift = 64 - 32;
        u64 prev_raw_count, new_raw_count, delta;
 
 again:
@@ -872,8 +873,9 @@ again:
                             new_raw_count) != prev_raw_count)
                goto again;
 
-       delta = (new_raw_count << shift) - (prev_raw_count << shift);
-       delta >>= shift;
+       if (overflow)
+               new_raw_count |= (1UL << 32);
+       delta = new_raw_count - (prev_raw_count & MAX_COUNT);
 
        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
@@ -923,7 +925,7 @@ static void read_in_all_counters(struct cpu_hw_events *cpuc)
                if (cpuc->current_idx[i] != PIC_NO_INDEX &&
                    cpuc->current_idx[i] != cp->hw.idx) {
                        sparc_perf_event_update(cp, &cp->hw,
-                                               cpuc->current_idx[i]);
+                                               cpuc->current_idx[i], false);
                        cpuc->current_idx[i] = PIC_NO_INDEX;
                }
        }
@@ -1090,7 +1092,7 @@ static void sparc_pmu_stop(struct perf_event *event, int flags)
        }
 
        if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
-               sparc_perf_event_update(event, &event->hw, idx);
+               sparc_perf_event_update(event, &event->hw, idx, false);
                event->hw.state |= PERF_HES_UPTODATE;
        }
 }
@@ -1136,7 +1138,7 @@ static void sparc_pmu_read(struct perf_event *event)
        int idx = active_event_index(cpuc, event);
        struct hw_perf_event *hwc = &event->hw;
 
-       sparc_perf_event_update(event, hwc, idx);
+       sparc_perf_event_update(event, hwc, idx, false);
 }
 
 static atomic_t active_events = ATOMIC_INIT(0);
@@ -1622,7 +1624,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                        pcr_ops->write_pcr(idx, cpuc->pcr[idx]);
 
                hwc = &event->hw;
-               val = sparc_perf_event_update(event, hwc, idx);
+               val = sparc_perf_event_update(event, hwc, idx, true);
                if (val & (1ULL << 31))
                        continue;