 +----------------+-----------------+-----------------+-----------------------------+
 | Hisilicon      | Hip08 SMMU PMCG | #162001800      | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| Hisilicon      | Hip08 SMMU PMCG | #162001900      | N/A                         |
+|                | Hip09 SMMU PMCG |                 |                             |
++----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 +----------------+-----------------+-----------------+-----------------------------+
 
 static struct acpi_platform_list pmcg_plat_info[] __initdata = {
        /* HiSilicon Hip08 Platform */
        {"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
-        "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
+        "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
+       /* HiSilicon Hip09 Platform */
+       {"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+        "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
        { }
 };
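
For context (not part of this patch), a sketch of how these platform-list
entries are consumed: drivers/acpi/arm64/iort.c matches the firmware's OEM
ID/table ID/revision against pmcg_plat_info and hands the matched model value
to the PMCG driver as platform data. The names follow the existing upstream
flow, but the snippet is illustrative rather than part of this change:

static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
{
        u32 model;
        int idx;

        /* Match the IORT table's OEM ID/table ID/revision against the list */
        idx = acpi_match_platform_list(pmcg_plat_info);
        if (idx >= 0)
                model = pmcg_plat_info[idx].data;
        else
                model = IORT_SMMU_V3_PMCG_GENERIC;

        /* smmu_pmu_get_acpi_options() reads this back in the driver */
        return platform_device_add_data(pdev, &model, sizeof(model));
}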
 
 
 #define SMMU_PMCG_PA_SHIFT              12
 
 #define SMMU_PMCG_EVCNTR_RDONLY         BIT(0)
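+/* Global disable can fail to stop the counters (HiSilicon erratum #162001900) */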
+#define SMMU_PMCG_HARDEN_DISABLE        BIT(1)
 
 static int cpuhp_state_num;
 
        writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
 }
 
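+/* Forward declaration; defined later in this file, needed by the quirk below */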
+static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
+                                      struct perf_event *event, int idx);
+
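+/*
+ * Hardened pmu_enable: the disable quirk overwrites each used counter's
+ * event type to stop it, so re-apply the event filter (which reprograms
+ * the event type) for every in-use counter before the global enable.
+ */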
+static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
+{
+       struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+       unsigned int idx;
+
+       for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
+               smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
+
+       smmu_pmu_enable(pmu);
+}
+
 static inline void smmu_pmu_disable(struct pmu *pmu)
 {
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
        writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
 }
 
+static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
+{
+       struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+       unsigned int idx;
+
+       /*
+        * The global disable of the PMU sometimes fails to stop counting.
+        * Harden this by writing an invalid event type to each used counter
+        * to forcibly stop counting.
+        */
+       for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
+               writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
+
+       smmu_pmu_disable(pmu);
+}
+
 static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
                                              u32 idx, u64 value)
 {
        switch (model) {
        case IORT_SMMU_V3_PMCG_HISI_HIP08:
                /* HiSilicon Erratum 162001800 */
-               smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
+               smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
+               break;
+       case IORT_SMMU_V3_PMCG_HISI_HIP09:
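+               /* HiSilicon Erratum 162001900 */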
+               smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
                break;
        }
 
        if (!dev->of_node)
                smmu_pmu_get_acpi_options(smmu_pmu);
 
+       /*
+        * On platforms affected by this erratum, the global PMU disable can
+        * fail to stop the counters, which leads to inaccurate or erroneous
+        * counts. Override the enable/disable callbacks with quirk handlers
+        * that forcibly stop the counters.
+        */
+       if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
+               smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
+               smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
+       }
+
        /* Pick one CPU to be the preferred one to use */
        smmu_pmu->on_cpu = raw_smp_processor_id();
        WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));