#define        ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
        (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 
+
+/*
+ * We unconditionally enable ARMv8.5-PMU long event counter support
+ * (64-bit events) wherever the hardware supports it. Indicate whether
+ * this arm_pmu has long event counter support.
+ */
+static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
+{
+       return cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5;
+}
+
 /*
  * We must chain two programmable counters for 64 bit events,
  * except when we have allocated the 64bit cycle counter (for CPU
 static inline bool armv8pmu_event_is_chained(struct perf_event *event)
 {
        int idx = event->hw.idx;
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 
        return !WARN_ON(idx < 0) &&
               armv8pmu_event_is_64bit(event) &&
+              !armv8pmu_has_long_event(cpu_pmu) &&
               (idx != ARMV8_IDX_CYCLE_COUNTER);
 }
 
        isb();
 }
 
-static inline u32 armv8pmu_read_evcntr(int idx)
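+/*
+ * With long event counter support, the event counters hold a full
+ * 64-bit value, so widen the counter accessors to u64.
+ */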
+static inline u64 armv8pmu_read_evcntr(int idx)
 {
        armv8pmu_select_counter(idx);
        return read_sysreg(pmxevcntr_el0);
        return val;
 }
 
+/*
+ * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
+ * is set, the event counters also become 64-bit counters. Unless the
+ * user has requested a long counter (attr.config1), we want an
+ * interrupt upon 32-bit overflow; we achieve this by applying a bias.
+ */
+static bool armv8pmu_event_needs_bias(struct perf_event *event)
+{
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
+
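+       /*
+        * 64-bit events count with the full counter width, so no bias
+        * is applied.
+        */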
+       if (armv8pmu_event_is_64bit(event))
+               return false;
+
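+       /*
+        * The remaining cases are 32-bit events running on 64-bit
+        * hardware counters: the cycle counter, or any event counter
+        * when this PMU has long event counter support.
+        */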
+       if (armv8pmu_has_long_event(cpu_pmu) ||
+           idx == ARMV8_IDX_CYCLE_COUNTER)
+               return true;
+
+       return false;
+}
+
+static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
+{
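+       /*
+        * Set the upper 32 bits so that an overflow of the lower 32
+        * bits propagates through the all-ones upper half and raises
+        * the 64-bit overflow interrupt.
+        */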
+       if (armv8pmu_event_needs_bias(event))
+               value |= GENMASK(63, 32);
+
+       return value;
+}
+
+static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
+{
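+       /* Strip the bias so that perf sees the true 32-bit count. */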
+       if (armv8pmu_event_needs_bias(event))
+               value &= ~GENMASK(63, 32);
+
+       return value;
+}
+
 static u64 armv8pmu_read_counter(struct perf_event *event)
 {
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        else
                value = armv8pmu_read_hw_counter(event);
 
-       return value;
+       return armv8pmu_unbias_long_counter(event, value);
 }
 
-static inline void armv8pmu_write_evcntr(int idx, u32 value)
+static inline void armv8pmu_write_evcntr(int idx, u64 value)
 {
        armv8pmu_select_counter(idx);
        write_sysreg(value, pmxevcntr_el0);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
 
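+       /* Apply the 32-bit overflow bias before writing the counter. */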
+       value = armv8pmu_bias_long_counter(event, value);
+
        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u writing wrong counter %d\n",
                        smp_processor_id(), idx);
-       else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
-               /*
-                * The cycles counter is really a 64-bit counter.
-                * When treating it as a 32-bit counter, we only count
-                * the lower 32 bits, and set the upper 32-bits so that
-                * we get an interrupt upon 32-bit overflow.
-                */
-               if (!armv8pmu_event_is_64bit(event))
-                       value |= 0xffffffff00000000ULL;
+       else if (idx == ARMV8_IDX_CYCLE_COUNTER)
                write_sysreg(value, pmccntr_el0);
-       } else
+       else
                armv8pmu_write_hw_counter(event, value);
 }
 
        /*
         * Otherwise use events counters
         */
-       if (armv8pmu_event_is_64bit(event))
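+       /*
+        * Without long event counter support, a 64-bit event must be
+        * chained across two adjacent 32-bit counters.
+        */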
+       if (armv8pmu_event_is_64bit(event) &&
+           !armv8pmu_has_long_event(cpu_pmu))
                return  armv8pmu_get_chain_idx(cpuc, cpu_pmu);
        else
                return armv8pmu_get_single_idx(cpuc, cpu_pmu);
 
 static void armv8pmu_reset(void *info)
 {
+       struct arm_pmu *cpu_pmu = info;
+       u32 pmcr;
+
        /* The counter and interrupt enable registers are unknown at reset. */
        armv8pmu_disable_counter(U32_MAX);
        armv8pmu_disable_intens(U32_MAX);
         * Initialize & Reset PMNC. Request overflow interrupt for
         * 64 bit cycle counter but cheat in armv8pmu_write_counter().
         */
-       armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
-                           ARMV8_PMU_PMCR_LC);
+       pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;
+
+       /* Enable long event counter support where available */
+       if (armv8pmu_has_long_event(cpu_pmu))
+               pmcr |= ARMV8_PMU_PMCR_LP;
+
+       armv8pmu_pmcr_write(pmcr);
 }
 
 static int __armv8_pmuv3_map_event(struct perf_event *event,
        if (pmuver == 0xf || pmuver == 0)
                return;
 
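+       /* Record the PMU version for armv8pmu_has_long_event(). */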
+       cpu_pmu->pmuver = pmuver;
        probe->present = true;
 
        /* Read the nb of CNTx counters supported from PMNC */