www.infradead.org Git - nvme.git/commitdiff
KVM: SEV: Add support to handle AP reset MSR protocol
author: Tom Lendacky <thomas.lendacky@amd.com>
Wed, 1 May 2024 07:10:45 +0000 (02:10 -0500)
committer: Paolo Bonzini <pbonzini@redhat.com>
Tue, 7 May 2024 17:28:03 +0000 (13:28 -0400)
Add support for AP Reset Hold being invoked using the GHCB MSR protocol,
available in version 2 of the GHCB specification.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Message-ID: <20240501071048.2208265-2-michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/sev-common.h
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.h

index b463fcbd4b9070b005a7b187f0b62c2046163a18..01261f7054ad7b1d35afa4e919c67c6ac5471037 100644 (file)
        (((unsigned long)fn) << 32))
 
 /* AP Reset Hold */
-#define GHCB_MSR_AP_RESET_HOLD_REQ     0x006
-#define GHCB_MSR_AP_RESET_HOLD_RESP    0x007
+#define GHCB_MSR_AP_RESET_HOLD_REQ             0x006
+#define GHCB_MSR_AP_RESET_HOLD_RESP            0x007
+#define GHCB_MSR_AP_RESET_HOLD_RESULT_POS      12
+#define GHCB_MSR_AP_RESET_HOLD_RESULT_MASK     GENMASK_ULL(51, 0)
 
 /* GHCB GPA Register */
 #define GHCB_MSR_REG_GPA_REQ           0x012
index 598d78b4107f3042935731d05f35f071c3f358da..6e31cb408dd8790de813f4b69acb6f7c4a8d2b8f 100644 (file)
@@ -49,6 +49,10 @@ static bool sev_es_debug_swap_enabled = true;
 module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
 static u64 sev_supported_vmsa_features;
 
+#define AP_RESET_HOLD_NONE             0
+#define AP_RESET_HOLD_NAE_EVENT                1
+#define AP_RESET_HOLD_MSR_PROTO                2
+
 static u8 sev_enc_bit;
 static DECLARE_RWSEM(sev_deactivate_lock);
 static DEFINE_MUTEX(sev_bitmap_lock);
@@ -2727,6 +2731,9 @@ vmgexit_err:
 
 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
 {
+       /* Clear any indication that the vCPU is in a type of AP Reset Hold */
+       svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NONE;
+
        if (!svm->sev_es.ghcb)
                return;
 
@@ -2938,6 +2945,22 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
                                  GHCB_MSR_INFO_POS);
                break;
        }
+       case GHCB_MSR_AP_RESET_HOLD_REQ:
+               svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_MSR_PROTO;
+               ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
+
+               /*
+                * Preset the result to a non-SIPI return and then only set
+                * the result to non-zero when delivering a SIPI.
+                */
+               set_ghcb_msr_bits(svm, 0,
+                                 GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
+                                 GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
+
+               set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
+                                 GHCB_MSR_INFO_MASK,
+                                 GHCB_MSR_INFO_POS);
+               break;
        case GHCB_MSR_TERM_REQ: {
                u64 reason_set, reason_code;
 
@@ -3037,6 +3060,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
                ret = 1;
                break;
        case SVM_VMGEXIT_AP_HLT_LOOP:
+               svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NAE_EVENT;
                ret = kvm_emulate_ap_reset_hold(vcpu);
                break;
        case SVM_VMGEXIT_AP_JUMP_TABLE: {
@@ -3280,15 +3304,31 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
                return;
        }
 
-       /*
-        * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
-        * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
-        * non-zero value.
-        */
-       if (!svm->sev_es.ghcb)
-               return;
+       /* Subsequent SIPI */
+       switch (svm->sev_es.ap_reset_hold_type) {
+       case AP_RESET_HOLD_NAE_EVENT:
+               /*
+                * Return from an AP Reset Hold VMGEXIT, where the guest will
+                * set the CS and RIP. Set SW_EXIT_INFO_2 to a non-zero value.
+                */
+               ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
+               break;
+       case AP_RESET_HOLD_MSR_PROTO:
+               /*
+                * Return from an AP Reset Hold VMGEXIT, where the guest will
+                * set the CS and RIP. Set GHCB data field to a non-zero value.
+                */
+               set_ghcb_msr_bits(svm, 1,
+                                 GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
+                                 GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
 
-       ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
+               set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
+                                 GHCB_MSR_INFO_MASK,
+                                 GHCB_MSR_INFO_POS);
+               break;
+       default:
+               break;
+       }
 }
 
 struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
index 32390178254738cb3198c01459b31fa2839da45d..6fd0f586268114c196b61d81152ce178dcb1b753 100644 (file)
@@ -199,6 +199,7 @@ struct vcpu_sev_es_state {
        u8 valid_bitmap[16];
        struct kvm_host_map ghcb_map;
        bool received_first_sipi;
+       unsigned int ap_reset_hold_type;
 
        /* SEV-ES scratch area support */
        u64 sw_scratch;