www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: arm64: Initialize trap register values in hyp in pKVM
author: Fuad Tabba <tabba@google.com>
Fri, 18 Oct 2024 07:48:32 +0000 (08:48 +0100)
committer: Oliver Upton <oliver.upton@linux.dev>
Thu, 31 Oct 2024 18:45:24 +0000 (18:45 +0000)
Handle the initialization of trap registers at the hypervisor in
pKVM, even for non-protected guests. The host is not trusted with
the values of the trap registers, regardless of the VM type.
Therefore, when switching between the host and the guests, only
flush the HCR_EL2 TWI and TWE bits. The host is allowed to
configure these for opportunistic scheduling, as neither affects
the protection of VMs or the hypervisor.

Reported-by: Will Deacon <will@kernel.org>
Fixes: 814ad8f96e92 ("KVM: arm64: Drop trapping of PAuth instructions/keys")
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20241018074833.2563674-5-tabba@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/pkvm.c

index 1a224d5df2075f25678c4cfdd6a12e3eb7990d0e..6aa0b13d86e581a36ed529bcd932498045d2d6df 100644 (file)
@@ -105,8 +105,10 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 
        hyp_vcpu->vcpu.arch.hw_mmu      = host_vcpu->arch.hw_mmu;
 
-       hyp_vcpu->vcpu.arch.hcr_el2     = host_vcpu->arch.hcr_el2;
        hyp_vcpu->vcpu.arch.mdcr_el2    = host_vcpu->arch.mdcr_el2;
+       hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE);
+       hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) &
+                                                (HCR_TWI | HCR_TWE);
 
        hyp_vcpu->vcpu.arch.iflags      = host_vcpu->arch.iflags;
 
index 954df57a935f5e143f8340e9e77bb511b5dd0626..01616c39a810777a123b6a3a5da33a16b6768ad7 100644 (file)
@@ -204,11 +204,46 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
        }
 }
 
+static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+
+       if (has_hvhe())
+               vcpu->arch.hcr_el2 |= HCR_E2H;
+
+       if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
+               /* route synchronous external abort exceptions to EL2 */
+               vcpu->arch.hcr_el2 |= HCR_TEA;
+               /* trap error record accesses */
+               vcpu->arch.hcr_el2 |= HCR_TERR;
+       }
+
+       if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+               vcpu->arch.hcr_el2 |= HCR_FWB;
+
+       if (cpus_have_final_cap(ARM64_HAS_EVT) &&
+           !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
+               vcpu->arch.hcr_el2 |= HCR_TID4;
+       else
+               vcpu->arch.hcr_el2 |= HCR_TID2;
+
+       if (vcpu_has_ptrauth(vcpu))
+               vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+}
+
 /*
  * Initialize trap register values in protected mode.
  */
 static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
 {
+       vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
+       vcpu->arch.mdcr_el2 = 0;
+
+       pkvm_vcpu_reset_hcr(vcpu);
+
+       if ((!vcpu_is_protected(vcpu)))
+               return;
+
        pvm_init_trap_regs(vcpu);
        pvm_init_traps_aa64pfr0(vcpu);
        pvm_init_traps_aa64pfr1(vcpu);