]> www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
sparc64: Fix a race condition when stopping performance counters
authorDave Aldridge <david.j.aldridge@oracle.com>
Fri, 4 Nov 2016 16:56:07 +0000 (09:56 -0700)
committerAllen Pais <allen.pais@oracle.com>
Sun, 22 Jan 2017 15:38:00 +0000 (21:08 +0530)
When stopping a performance counter that is close to overflowing,
there is a race condition that can occur between writing to the
PCRx register to stop the counter (and also clearing the PCRx.ov
bit at the same time) vs the performance counter overflowing and
setting the PCRx.ov bit in the PCRx register.
The result of this race condition is that we occasionally miss
a performance counter overflow interrupt, which in turn leads
to incorrect event counting.
This race condition has been observed when counting cpu cycles.
To fix this issue when stopping a performance counter,
we simply allow it to continue counting and overflow before
stopping it. This allows the performance counter overflow
interrupt to be generated and acted upon.
This fix is applied for M7, T5 and T4 devices.

Note: This commit is based on the following commits:
8b9b5b404e754e5c271341f5d7ea4797374c9844
a2d17bc33bdcc1cefd84bca44f2fd27075b16058
960f1607bec735e8da7dbd5df818da0a2e2b0305

Orabug: 22876587

Signed-off-by: Dave Aldridge <david.j.aldridge@oracle.com>
Signed-off-by: Eric Saint-Etienne <eric.saint.etienne@oracle.com>
(cherry picked from commit e5b7619e1de2f3e0dd858f632bc08ce64c344245)
Signed-off-by: Allen Pais <allen.pais@oracle.com>
arch/sparc/kernel/perf_event.c

index d4cbecc65acdb1aba09d64d3e809b0df8a151f8d..8e18fe4b01a2bf5b78e0f428873cd937f8eb77f8 100644 (file)
@@ -1036,6 +1036,62 @@ static void sparc_pmu_enable(struct pmu *pmu)
                pcr_ops->write_pcr(i, cpuc->pcr[i]);
 }
 
+/* Used when checking to see if we are counting 'cycles' */
+#define PCR_N4_CYCLES_SELECTED (26 << PCR_N4_SL_SHIFT)
+
+/* Threshold value used to decide whether to let
+ * the 32 bit performance counter overflow
+ */
+#define PIC_OVERFLOW_THRESHOLD (0xfffffc00)
+
+/* Return true if @val, a value about to be written to a PCR
+ * register, selects the 'cycles' event (SL field == 26) on a chip
+ * whose PCR uses the N4 layout (T4/niagara4, T5/niagara5, M7).
+ * Callers use this to decide whether the counter must be allowed
+ * to overflow before being stopped.  On any other pmu type this
+ * always returns false.
+ */
+static bool is_stop_counting_cycles_requested(u64 val)
+{
+       bool ret = false;
+
+       /* Check the value we want to write to the PCR
+        * register to see if we are requesting that we
+        * stop counting 'cycles'
+        */
+       if (!strcmp(sparc_pmu_type, "niagara4") ||
+           !strcmp(sparc_pmu_type, "niagara5") ||
+           !strcmp(sparc_pmu_type, "sparc-m7")) {
+               if ((val & PCR_N4_SL) == PCR_N4_CYCLES_SELECTED)
+                       ret = true;
+       }
+       return ret;
+}
+
+/* If performance counter @pcr_index is currently counting 'cycles'
+ * with user and/or system tracing enabled (T4/T5/M7 only), and its
+ * 32 bit PIC value is within PIC_OVERFLOW_THRESHOLD of wrapping,
+ * spin here until the wrap occurs so the overflow interrupt is
+ * raised before the caller stops the counter.  No-op otherwise.
+ */
+static void wait_for_counter_overflow(int pcr_index)
+{
+       u64 pcr;
+       u32 count;
+
+       if (!strcmp(sparc_pmu_type, "niagara4") ||
+           !strcmp(sparc_pmu_type, "niagara5") ||
+           !strcmp(sparc_pmu_type, "sparc-m7")) {
+               pcr = pcr_ops->read_pcr(pcr_index);
+               if (((pcr & PCR_N4_SL) == PCR_N4_CYCLES_SELECTED) &&
+                   ((pcr & PCR_N4_UTRACE) || (pcr & PCR_N4_STRACE))) {
+
+                       /* We are currently counting cycles. If we are close
+                        * to overflowing the 32 bit performance counter
+                        * (0xffffffff -> 0x00000000), then wait here until
+                        * the overflow happens.
+                        */
+                       count = sparc_pmu->read_pmc(pcr_index);
+                       while (count > PIC_OVERFLOW_THRESHOLD) {
+                               /* Re-read the PIC each pass: a second read
+                                * returning the same value means the counter
+                                * is not advancing, so bail out rather than
+                                * spin forever.
+                                */
+                               if (count == sparc_pmu->read_pmc(pcr_index)) {
+                                       /* If the count hasn't changed then
+                                        * something has gone wrong !
+                                        */
+                                       break;
+                               }
+                               count = sparc_pmu->read_pmc(pcr_index);
+                       }
+               }
+       }
+}
+
 static void sparc_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1053,6 +1109,10 @@ static void sparc_pmu_disable(struct pmu *pmu)
                val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
                         sparc_pmu->hv_bit | sparc_pmu->irq_bit);
                cpuc->pcr[i] = val;
+
+               if (is_stop_counting_cycles_requested(val) == true)
+                       wait_for_counter_overflow(i);
+
                pcr_ops->write_pcr(i, cpuc->pcr[i]);
        }
 }