*/
#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_nested.h>
#include <asm/sysreg.h>
/*
break;
}
}
+
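+/*
+ * Map a fine-grained trap register onto the fgt_masks describing which of
+ * its bits trap when set (mask) and which trap when clear (nmask).
+ */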
+static __always_inline struct fgt_masks *__fgt_reg_to_masks(enum vcpu_sysreg reg)
+{
+	switch (reg) {
+	case HFGRTR_EL2:
+		return &hfgrtr_masks;
+	case HFGWTR_EL2:
+		return &hfgwtr_masks;
+	case HFGITR_EL2:
+		return &hfgitr_masks;
+	case HDFGRTR_EL2:
+		return &hdfgrtr_masks;
+	case HDFGWTR_EL2:
+		return &hdfgwtr_masks;
+	case HAFGRTR_EL2:
+		return &hafgrtr_masks;
+	case HFGRTR2_EL2:
+		return &hfgrtr2_masks;
+	case HFGWTR2_EL2:
+		return &hfgwtr2_masks;
+	case HFGITR2_EL2:
+		return &hfgitr2_masks;
+	case HDFGRTR2_EL2:
+		return &hdfgrtr2_masks;
+	case HDFGWTR2_EL2:
+		return &hdfgwtr2_masks;
+	default:
+		BUILD_BUG_ON(1);
+	}
+}
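+
+/*
+ * Helper used by __compute_fgt() to map an FGT register onto its FGU group.
+ * Sketched here from the reg_to_fgt_group_id() macro removed below; the
+ * series may already provide an equivalent definition elsewhere.
+ */
+static __always_inline enum fgt_group_id __fgt_reg_to_group_id(enum vcpu_sysreg reg)
+{
+	switch (reg) {
+	case HFGRTR_EL2:
+	case HFGWTR_EL2:
+		return HFGRTR_GROUP;
+	case HFGITR_EL2:
+		return HFGITR_GROUP;
+	case HDFGRTR_EL2:
+	case HDFGWTR_EL2:
+		return HDFGRTR_GROUP;
+	case HAFGRTR_EL2:
+		return HAFGRTR_GROUP;
+	case HFGRTR2_EL2:
+	case HFGWTR2_EL2:
+		return HFGRTR2_GROUP;
+	case HFGITR2_EL2:
+		return HFGITR2_GROUP;
+	case HDFGRTR2_EL2:
+	case HDFGWTR2_EL2:
+		return HDFGRTR2_GROUP;
+	default:
+		BUILD_BUG_ON(1);
+	}
+}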
+
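+/*
+ * Compute the effective value for @reg: start from the "no traps" value
+ * (all negative-polarity bits set), then enable the traps requested by the
+ * FGU configuration and, for a nested guest, those programmed by the guest
+ * hypervisor.
+ */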
+static __always_inline void __compute_fgt(struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
+{
+	u64 fgu = vcpu->kvm->arch.fgu[__fgt_reg_to_group_id(reg)];
+	struct fgt_masks *m = __fgt_reg_to_masks(reg);
+	u64 clear = 0, set = 0, val = m->nmask;
+
+	set |= fgu & m->mask;
+	clear |= fgu & m->nmask;
+
+	if (is_nested_ctxt(vcpu)) {
+		u64 nested = __vcpu_sys_reg(vcpu, reg);
+		set |= nested & m->mask;
+		clear |= ~nested & m->nmask;
+	}
+
+	val |= set;
+	val &= ~clear;
+	*vcpu_fgt(vcpu, reg) = val;
+}
+
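+/*
+ * HFGWTR_EL2 additionally traps TCR_EL1 writes on CPUs affected by
+ * Ampere erratum AC03_CPU_38.
+ */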
+static void __compute_hfgwtr(struct kvm_vcpu *vcpu)
+{
+	__compute_fgt(vcpu, HFGWTR_EL2);
+
+	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+		*vcpu_fgt(vcpu, HFGWTR_EL2) |= HFGWTR_EL2_TCR_EL1;
+}
+
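+/*
+ * Precompute the vcpu's FGT values at load time so the world switch only
+ * has to install them.
+ */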
+void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu)
+{
+	if (!cpus_have_final_cap(ARM64_HAS_FGT))
+		return;
+
+	__compute_fgt(vcpu, HFGRTR_EL2);
+	__compute_hfgwtr(vcpu);
+	__compute_fgt(vcpu, HFGITR_EL2);
+	__compute_fgt(vcpu, HDFGRTR_EL2);
+	__compute_fgt(vcpu, HDFGWTR_EL2);
+	__compute_fgt(vcpu, HAFGRTR_EL2);
+
+	if (!cpus_have_final_cap(ARM64_HAS_FGT2))
+		return;
+
+	__compute_fgt(vcpu, HFGRTR2_EL2);
+	__compute_fgt(vcpu, HFGWTR2_EL2);
+	__compute_fgt(vcpu, HFGITR2_EL2);
+	__compute_fgt(vcpu, HDFGRTR2_EL2);
+	__compute_fgt(vcpu, HDFGWTR2_EL2);
+}
__deactivate_cptr_traps_nvhe(vcpu);
}
-#define reg_to_fgt_masks(reg) \
- ({ \
- struct fgt_masks *m; \
- switch(reg) { \
- case HFGRTR_EL2: \
- m = &hfgrtr_masks; \
- break; \
- case HFGWTR_EL2: \
- m = &hfgwtr_masks; \
- break; \
- case HFGITR_EL2: \
- m = &hfgitr_masks; \
- break; \
- case HDFGRTR_EL2: \
- m = &hdfgrtr_masks; \
- break; \
- case HDFGWTR_EL2: \
- m = &hdfgwtr_masks; \
- break; \
- case HAFGRTR_EL2: \
- m = &hafgrtr_masks; \
- break; \
- case HFGRTR2_EL2: \
- m = &hfgrtr2_masks; \
- break; \
- case HFGWTR2_EL2: \
- m = &hfgwtr2_masks; \
- break; \
- case HFGITR2_EL2: \
- m = &hfgitr2_masks; \
- break; \
- case HDFGRTR2_EL2: \
- m = &hdfgrtr2_masks; \
- break; \
- case HDFGWTR2_EL2: \
- m = &hdfgwtr2_masks; \
- break; \
- default: \
- BUILD_BUG_ON(1); \
- } \
- \
- m; \
- })
-
-#define compute_clr_set(vcpu, reg, clr, set) \
- do { \
- u64 hfg = __vcpu_sys_reg(vcpu, reg); \
- struct fgt_masks *m = reg_to_fgt_masks(reg); \
- set |= hfg & m->mask; \
- clr |= ~hfg & m->nmask; \
- } while(0)
-
-#define reg_to_fgt_group_id(reg) \
- ({ \
- enum fgt_group_id id; \
- switch(reg) { \
- case HFGRTR_EL2: \
- case HFGWTR_EL2: \
- id = HFGRTR_GROUP; \
- break; \
- case HFGITR_EL2: \
- id = HFGITR_GROUP; \
- break; \
- case HDFGRTR_EL2: \
- case HDFGWTR_EL2: \
- id = HDFGRTR_GROUP; \
- break; \
- case HAFGRTR_EL2: \
- id = HAFGRTR_GROUP; \
- break; \
- case HFGRTR2_EL2: \
- case HFGWTR2_EL2: \
- id = HFGRTR2_GROUP; \
- break; \
- case HFGITR2_EL2: \
- id = HFGITR2_GROUP; \
- break; \
- case HDFGRTR2_EL2: \
- case HDFGWTR2_EL2: \
- id = HDFGRTR2_GROUP; \
- break; \
- default: \
- BUILD_BUG_ON(1); \
- } \
- \
- id; \
- })
-
-#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \
- do { \
- u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \
- struct fgt_masks *m = reg_to_fgt_masks(reg); \
- set |= hfg & m->mask; \
- clr |= hfg & m->nmask; \
- } while(0)
-
-#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \
- do { \
- struct fgt_masks *m = reg_to_fgt_masks(reg); \
- u64 c = clr, s = set; \
- u64 val; \
- \
- ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
- if (is_nested_ctxt(vcpu)) \
- compute_clr_set(vcpu, reg, c, s); \
- \
- compute_undef_clr_set(vcpu, kvm, reg, c, s); \
- \
- val = m->nmask; \
- val |= s; \
- val &= ~c; \
- write_sysreg_s(val, SYS_ ## reg); \
- } while(0)
-
-#define update_fgt_traps(hctxt, vcpu, kvm, reg) \
- update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
-
static inline bool cpu_has_amu(void)
{
u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
ID_AA64PFR0_EL1_AMU_SHIFT);
}
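+/* Save the host's FGT register and install the precomputed guest value. */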
+#define __activate_fgt(hctxt, vcpu, reg)				\
+	do {								\
+		ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg);	\
+		write_sysreg_s(*vcpu_fgt(vcpu, reg), SYS_ ## reg);	\
+	} while (0)
+
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
-	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;
-	update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
-	update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
-			    cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
-			    HFGWTR_EL2_TCR_EL1_MASK : 0);
-	update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
-	update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
-	update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
+	__activate_fgt(hctxt, vcpu, HFGRTR_EL2);
+	__activate_fgt(hctxt, vcpu, HFGWTR_EL2);
+	__activate_fgt(hctxt, vcpu, HFGITR_EL2);
+	__activate_fgt(hctxt, vcpu, HDFGRTR_EL2);
+	__activate_fgt(hctxt, vcpu, HDFGWTR_EL2);
	if (cpu_has_amu())
-		update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
+		__activate_fgt(hctxt, vcpu, HAFGRTR_EL2);
	if (!cpus_have_final_cap(ARM64_HAS_FGT2))
		return;
-	update_fgt_traps(hctxt, vcpu, kvm, HFGRTR2_EL2);
-	update_fgt_traps(hctxt, vcpu, kvm, HFGWTR2_EL2);
-	update_fgt_traps(hctxt, vcpu, kvm, HFGITR2_EL2);
-	update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR2_EL2);
-	update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR2_EL2);
+	__activate_fgt(hctxt, vcpu, HFGRTR2_EL2);
+	__activate_fgt(hctxt, vcpu, HFGWTR2_EL2);
+	__activate_fgt(hctxt, vcpu, HFGITR2_EL2);
+	__activate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
+	__activate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
}
#define __deactivate_fgt(htcxt, vcpu, reg) \