 #include <asm/mmu.h>
 #include <asm/sysreg.h>
 
+/*
+ * Cache whether the event is allowed to trace context information.
+ * This allows us to perform the check, i.e. perfmon_capable(),
+ * once, in the context of the event owner, during event_init().
+ */
+#define SPE_PMU_HW_FLAGS_CX                    BIT(0)
+
+static void set_spe_event_has_cx(struct perf_event *event)
+{
+       if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+               event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
+}
+
+static bool get_spe_event_has_cx(struct perf_event *event)
+{
+       return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX);
+}
+
 #define ARM_SPE_BUF_PAD_BYTE                   0
 
 struct arm_spe_pmu_buf {
        if (!attr->exclude_kernel)
                reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
 
-       if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
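+       /* Use the CX decision cached by set_spe_event_has_cx() at event_init() */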
+       if (get_spe_event_has_cx(event))
                reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
 
        return reg;
            !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
                return -EOPNOTSUPP;
 
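+       /*
+        * Decide and cache whether this event may set the CX bit while
+        * we are still running in the context of the event owner.
+        */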
+       set_spe_event_has_cx(event);
        reg = arm_spe_event_to_pmscr(event);
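+       /*
+        * CX is gated per event via the cached flag, so only the PA and
+        * PCT bits still require a perfmon_capable() check here.
+        */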
        if (!perfmon_capable() &&
            (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
-                   BIT(SYS_PMSCR_EL1_CX_SHIFT) |
                    BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
                return -EACCES;