From 91685e972c295420be487f2dc92edb5a1c0d2ed1 Mon Sep 17 00:00:00 2001
From: Dave Aldridge
Date: Fri, 4 Nov 2016 09:56:07 -0700
Subject: [PATCH] sparc64: Fix a race condition when stopping performance
 counters

When stopping a performance counter that is close to overflowing,
a race can occur between writing to the PCRx register to stop the
counter (and also clearing the PCRx.ov bit at the same time) and the
performance counter overflowing and setting the PCRx.ov bit in the
PCRx register.

The result of this race condition is that we occasionally miss a
performance counter overflow interrupt, which in turn leads to
incorrect event counting.

This race condition has been observed when counting cpu cycles.

To fix this issue, when stopping a performance counter we simply
allow it to continue counting and overflow before stopping it. This
allows the performance counter overflow interrupt to be generated
and acted upon.

This fix is applied for M7, T5 and T4 devices.

Note: This commit is based on the following commits:
8b9b5b404e754e5c271341f5d7ea4797374c9844
a2d17bc33bdcc1cefd84bca44f2fd27075b16058
960f1607bec735e8da7dbd5df818da0a2e2b0305

Orabug: 22876587

Signed-off-by: Dave Aldridge
Signed-off-by: Eric Saint-Etienne
(cherry picked from commit e5b7619e1de2f3e0dd858f632bc08ce64c344245)
Signed-off-by: Allen Pais
---
 arch/sparc/kernel/perf_event.c | 60 ++++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index d4cbecc65acd..8e18fe4b01a2 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1036,6 +1036,62 @@ static void sparc_pmu_enable(struct pmu *pmu)
 		pcr_ops->write_pcr(i, cpuc->pcr[i]);
 }
 
+/* Used when checking to see if we are counting 'cycles' */
+#define PCR_N4_CYCLES_SELECTED (26 << PCR_N4_SL_SHIFT)
+
+/* Threshold value used to decide whether to let
+ * the 32 bit performance counter overflow
+ */
+#define PIC_OVERFLOW_THRESHOLD (0xfffffc00)
+
+static bool is_stop_counting_cycles_requested(u64 val)
+{
+	bool ret = false;
+
+	/* Check the value we want to write to the PCR
+	 * register to see if we are requesting that we
+	 * stop counting 'cycles'
+	 */
+	if (!strcmp(sparc_pmu_type, "niagara4") ||
+	    !strcmp(sparc_pmu_type, "niagara5") ||
+	    !strcmp(sparc_pmu_type, "sparc-m7")) {
+		if ((val & PCR_N4_SL) == PCR_N4_CYCLES_SELECTED)
+			ret = true;
+	}
+	return ret;
+}
+
+static void wait_for_counter_overflow(int pcr_index)
+{
+	u64 pcr;
+	u32 count;
+
+	if (!strcmp(sparc_pmu_type, "niagara4") ||
+	    !strcmp(sparc_pmu_type, "niagara5") ||
+	    !strcmp(sparc_pmu_type, "sparc-m7")) {
+		pcr = pcr_ops->read_pcr(pcr_index);
+		if (((pcr & PCR_N4_SL) == PCR_N4_CYCLES_SELECTED) &&
+		    ((pcr & PCR_N4_UTRACE) || (pcr & PCR_N4_STRACE))) {
+
+			/* We are currently counting cycles. If we are close
+			 * to overflowing the 32 bit performance counter
+			 * (0xffffffff -> 0x00000000), then wait here until
+			 * the overflow happens.
+			 */
+			count = sparc_pmu->read_pmc(pcr_index);
+			while (count > PIC_OVERFLOW_THRESHOLD) {
+				if (count == sparc_pmu->read_pmc(pcr_index)) {
+					/* If the count hasn't changed then
+					 * something has gone wrong!
+					 */
+					break;
+				}
+				count = sparc_pmu->read_pmc(pcr_index);
+			}
+		}
+	}
+}
+
 static void sparc_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1053,6 +1109,10 @@ static void sparc_pmu_disable(struct pmu *pmu)
 		val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
 			 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
 		cpuc->pcr[i] = val;
+
+		if (is_stop_counting_cycles_requested(val) == true)
+			wait_for_counter_overflow(i);
+
 		pcr_ops->write_pcr(i, cpuc->pcr[i]);
 	}
 }
-- 
2.50.1