return 0;
 }
 
+static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
+                              const struct sys_reg_desc *rd,
+                              u64 val)
+{
+       u8 pmuver, host_pmuver;
+       bool valid_pmu;
+
+       host_pmuver = kvm_arm_pmu_get_pmuver_limit();
+
+       /*
+        * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
+        * as it doesn't promise more than what the HW gives us. We
+        * allow an IMPDEF PMU though, only if no PMU is supported
+        * (KVM backward compatibility handling).
+        */
+       pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
+       if (pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver)
+               return -EINVAL;
+
+       valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
+
+       /* Make sure the register view matches the actual PMU support */
+       if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
+               return -EINVAL;
+
+       /*
+        * XOR against the current view: only PMUver may differ, so any
+        * bit still set after masking it out is an illegal change.
+        */
+       val ^= read_id_reg(vcpu, rd);
+       val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
+       if (val)
+               return -EINVAL;
+
+       if (valid_pmu)
+               vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
+       else
+               vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
+
+       return 0;
+}
+
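The XOR check is the heart of the function: XOR-ing the proposed value with
the current view leaves a set bit wherever the two differ, so once the one
writable field is masked out, any remaining bit means userspace touched a
field it must not change. A minimal standalone sketch of that idiom (the
names and the standalone form are illustrative, not part of the patch;
PMUVer sits at bits [11:8] of ID_AA64DFR0_EL1):

#include <stdint.h>
#include <stdbool.h>

/* ID_AA64DFR0_EL1.PMUVer occupies bits [11:8] */
#define PMUVER_MASK	(0xfULL << 8)

/* True when cur and proposed differ in no field other than PMUVer. */
static bool only_pmuver_differs(uint64_t cur, uint64_t proposed)
{
	uint64_t diff = cur ^ proposed;	/* set bits mark changed fields */

	return !(diff & ~PMUVER_MASK);
}
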
 /*
  * cpufeature ID register user accessors
  *
        ID_UNALLOCATED(4,7),
 
        /* CRm=5 */
-       ID_SANITISED(ID_AA64DFR0_EL1),
+       { SYS_DESC(SYS_ID_AA64DFR0_EL1), .access = access_id_reg,
+         .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, },
        ID_SANITISED(ID_AA64DFR1_EL1),
        ID_UNALLOCATED(5,2),
        ID_UNALLOCATED(5,3),
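
From the userspace side, this change lets a VMM read ID_AA64DFR0_EL1 with
KVM_GET_ONE_REG, rewrite the PMUver field, and push the result back with
KVM_SET_ONE_REG, provided the new value does not exceed the host limit and
no other field changed. A sketch of such a caller (set_pmuver, vcpu_fd and
the mask macros are names made up here for illustration; the ioctls,
struct kvm_one_reg and ARM64_SYS_REG are the existing arm64 KVM UAPI):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* pulls in the arm64 <asm/kvm.h> UAPI */

#define ID_AA64DFR0_EL1_ID	ARM64_SYS_REG(3, 0, 0, 5, 0)	/* op0,op1,CRn,CRm,op2 */
#define PMUVER_SHIFT		8
#define PMUVER_MASK		(0xfULL << PMUVER_SHIFT)

static int set_pmuver(int vcpu_fd, uint8_t pmuver)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = ID_AA64DFR0_EL1_ID,
		.addr = (uint64_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	val &= ~PMUVER_MASK;			/* only PMUver may change */
	val |= (uint64_t)pmuver << PMUVER_SHIFT;

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}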