x86/smpboot: Allow parallel bringup for SEV-ES  [parallel-6.2-v14]
author     David Woodhouse <dwmw@amazon.co.uk>
           Tue, 7 Mar 2023 19:06:50 +0000 (19:06 +0000)
committer  David Woodhouse <dwmw@amazon.co.uk>
           Wed, 8 Mar 2023 16:26:03 +0000 (16:26 +0000)
Enable parallel bringup for SEV-ES guests. The APs can't execute the
CPUID instruction directly during early startup, but they can make the
GHCB call themselves instead, just as the #VC trap handler would do on
their behalf.

Thanks to Sabin for talking me through the way this works.

Suggested-by: Sabin Rapan <sabrapan@amazon.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
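
For context, the GHCB MSR protocol exchange the APs perform looks roughly
like the C sketch below. This is an illustration only, not code from the
patch (which open-codes it in assembly in head_64.S so it can run in the
early startup path, before the AP has located its own stack); the helper
name is hypothetical, and the constants are the ones defined in
arch/x86/include/asm/sev-common.h and asm/msr-index.h.

#include <linux/types.h>
#include <asm/msr-index.h>	/* MSR_AMD64_SEV_ES_GHCB */
#include <asm/sev-common.h>	/* GHCB_MSR_CPUID_REQ/RESP, GHCB_MSR_INFO_MASK */

/* Hypothetical helper, for illustration: ask the hypervisor for one CPUID register. */
static u64 ghcb_msr_cpuid(u32 fn, u32 reg)
{
	u64 req = GHCB_MSR_CPUID_REQ |		/* GHCBData[11:0]: request code 0x004 */
		  ((u64)(reg & 3) << 30) |	/* GHCBData[31:30]: register selector (EDX = 3) */
		  ((u64)fn << 32);		/* GHCBData[63:32]: CPUID function */
	u32 lo, hi;

	/* Write the request to the GHCB MSR, split across EDX:EAX. */
	asm volatile("wrmsr" : : "c" (MSR_AMD64_SEV_ES_GHCB),
		     "a" ((u32)req), "d" ((u32)(req >> 32)));

	/* VMGEXIT: exit to the hypervisor so it can service the request. */
	asm volatile("rep; vmmcall");

	/* Read the response back; the register value is returned in bits 63:32. */
	asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (MSR_AMD64_SEV_ES_GHCB));

	if ((lo & GHCB_MSR_INFO_MASK) != GHCB_MSR_CPUID_RESP)
		return 0;	/* protocol error; real code must handle this */

	return hi;
}
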
arch/x86/include/asm/sev-common.h
arch/x86/include/asm/sev.h
arch/x86/include/asm/smp.h
arch/x86/kernel/head_64.S
arch/x86/kernel/smpboot.c

diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index b8357d6ecd47ef6766a0fe9fe5161f6447228c16..f25df4bd318ec9fe378cd855a85a32c3396f326c 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -70,6 +70,7 @@
        /* GHCBData[63:12] */                           \
        (((u64)(v) & GENMASK_ULL(63, 12)) >> 12)
 
+#ifndef __ASSEMBLY__
 /*
  * SNP Page State Change Operation
  *
@@ -160,6 +161,8 @@ struct snp_psc_desc {
 
 #define GHCB_RESP_CODE(v)              ((v) & GHCB_MSR_INFO_MASK)
 
+#endif /* __ASSEMBLY__ */
+
 /*
  * Error codes related to GHCB input that can be communicated back to the guest
  * by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2.
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index ebc271bb6d8ed1d74e07194604b25244646ffc0a..d687a586cafa7c83a8100184bf5d541269366aab 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -135,6 +135,10 @@ struct snp_secrets_page_layout {
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 extern struct static_key_false sev_es_enable_key;
+static inline bool sev_es_active(void)
+{
+       return static_branch_unlikely(&sev_es_enable_key);
+}
 extern void __sev_es_ist_enter(struct pt_regs *regs);
 extern void __sev_es_ist_exit(void);
 static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
@@ -198,6 +202,7 @@ bool snp_init(struct boot_params *bp);
 void __init __noreturn snp_abort(void);
 int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err);
 #else
+static inline bool sev_es_active(void) { return false; }
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
 static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index defe76ee9e6456a92c5605b85e58d2c1f1140de0..1584f04a7007f6dccf8b230b45478e9b4b9310e2 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -204,7 +204,10 @@ extern unsigned int smpboot_control;
 /* Control bits for startup_64 */
 #define STARTUP_APICID_CPUID_0B        0x80000000
 #define STARTUP_APICID_CPUID_01        0x40000000
+#define STARTUP_APICID_SEV_ES  0x20000000
 
-#define STARTUP_PARALLEL_MASK (STARTUP_APICID_CPUID_01 | STARTUP_APICID_CPUID_0B)
+#define STARTUP_PARALLEL_MASK (STARTUP_APICID_CPUID_01 | \
+                              STARTUP_APICID_CPUID_0B | \
+                              STARTUP_APICID_SEV_ES)
 
 #endif /* _ASM_X86_SMP_H */
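
Informally, the new flag extends the smpboot_control decode that each AP
performs, which can be sketched in C as below. This is an illustration
only: the real decode is the assembly added to head_64.S in the next diff,
the function name is made up, and ghcb_msr_cpuid() is the hypothetical
helper sketched under the commit message above.

#include <asm/processor.h>	/* cpuid_ebx(), cpuid_edx() */
#include <asm/smp.h>		/* STARTUP_APICID_* flags */

/* Hypothetical: how an AP would turn smpboot_control into an APIC ID (or CPU#). */
static u32 ap_decode_smpboot_control(u32 ctrl)
{
	if (ctrl & STARTUP_APICID_CPUID_0B)
		return cpuid_edx(0x0b);			/* full 32-bit x2APIC ID */
	if (ctrl & STARTUP_APICID_CPUID_01)
		return cpuid_ebx(0x01) >> 24;		/* 8-bit initial APIC ID */
	if (ctrl & STARTUP_APICID_SEV_ES)
		return (u32)ghcb_msr_cpuid(0x0b, GHCB_CPUID_REQ_EDX);	/* same leaf, via GHCB MSR */
	return ctrl & 0x0fffffff;			/* no flag: this is the CPU number itself */
}
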
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index c35f7c17383214fffa2e021f3a20ac201d87d3fb..714c2afdbd9a83f434b5f3381948b96cca1946b1 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -26,6 +26,7 @@
 #include <asm/nospec-branch.h>
 #include <asm/fixmap.h>
 #include <asm/smp.h>
+#include <asm/sev-common.h>
 
 /*
  * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
@@ -242,6 +243,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
         *
         * Bit 31       STARTUP_APICID_CPUID_0B flag (use CPUID 0x0b)
         * Bit 30       STARTUP_APICID_CPUID_01 flag (use CPUID 0x01)
+        * Bit 29       STARTUP_APICID_SEV_ES flag (CPUID 0x0b via GHCB MSR)
         * Bit 0-24     CPU# if STARTUP_APICID_CPUID_xx flags are not set
         */
        movl    smpboot_control(%rip), %ecx
@@ -249,6 +251,10 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
        jnz     .Luse_cpuid_0b
        testl   $STARTUP_APICID_CPUID_01, %ecx
        jnz     .Luse_cpuid_01
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+       testl   $STARTUP_APICID_SEV_ES, %ecx
+       jnz     .Luse_sev_cpuid_0b
+#endif
        andl    $0x0FFFFFFF, %ecx
        jmp     .Lsetup_cpu
 
@@ -259,6 +265,30 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
        shr     $24, %edx
        jmp     .Lsetup_AP
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+.Luse_sev_cpuid_0b:
+       /* Set the GHCB MSR to request CPUID 0xB_EDX */
+       movl    $MSR_AMD64_SEV_ES_GHCB, %ecx
+       movl    $(GHCB_CPUID_REQ_EDX << 30) | GHCB_MSR_CPUID_REQ, %eax
+       movl    $0x0B, %edx
+       wrmsr
+
+       /* Perform GHCB MSR protocol */
+       rep; vmmcall            /* vmgexit */
+
+       /*
+        * Get the result. After the RDMSR:
+        *   EAX should be 0xc0000005
+        *   EDX should have the CPUID register value and since EDX
+        *   is the target register, no need to move the result.
+        */
+       rdmsr
+       andl    $GHCB_MSR_INFO_MASK, %eax
+       cmpl    $GHCB_MSR_CPUID_RESP, %eax
+       jne     1f
+       jmp     .Lsetup_AP
+#endif
+
 .Luse_cpuid_0b:
        mov     $0x0B, %eax
        xorl    %ecx, %ecx
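
As a cross-check on the immediates in the hunk above, the same request can
be written with the GHCB_CPUID_REQ() macro from sev-common.h. This is only
a restatement for readability, not code from the patch:

u64 req = GHCB_CPUID_REQ(0x0b, GHCB_CPUID_REQ_EDX);
/*
 * req == 0x0000000bc0000004, i.e. exactly what the wrmsr above loads:
 * EAX = 0xc0000004 (request code 0x004 with register selector 3 in
 * bits 31:30) and EDX = 0x0000000b (the CPUID function).
 *
 * Only GHCBData[11:0] of the response is the response code, so the
 * andl with GHCB_MSR_INFO_MASK isolates it before the compare against
 * GHCB_MSR_CPUID_RESP (0x005). The requested CPUID register value is
 * returned in GHCBData[63:32] and therefore lands directly in EDX,
 * which, per the comment in the hunk, is the register .Lsetup_AP reads.
 */
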
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0dc123a536ab8de727a251752452a2077da6c0d1..b4265c5b46da7fa29490914d020126c030c575f9 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1515,15 +1515,29 @@ void __init smp_prepare_cpus_common(void)
  * We can do 64-bit AP bringup in parallel if the CPU reports its APIC
  * ID in CPUID (either leaf 0x0B if we need the full APIC ID in X2APIC
  * mode, or leaf 0x01 if 8 bits are sufficient). Otherwise it's too
- * hard. And not for SEV-ES guests because they can't use CPUID that
- * early.
+ * hard.
  */
 static bool prepare_parallel_bringup(void)
 {
-       if (IS_ENABLED(CONFIG_X86_32) || cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
+       bool has_sev_es = sev_es_active();
+
+       if (IS_ENABLED(CONFIG_X86_32))
                return false;
 
-       if (x2apic_mode) {
+       /*
+        * Encrypted guests other than SEV-ES (in the future) will need to
+        * implement an early way of finding the APIC ID, since they will
+        * presumably block direct CPUID too. Be kind to our future selves
+        * by warning here instead of just letting them break. Parallel
+        * startup doesn't have to be in the first round of enabling patches
+        * for any such technology.
+        */
+       if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT) && !has_sev_es) {
+               pr_info("Disabling parallel bringup due to guest memory encryption\n");
+               return false;
+       }
+
+       if (x2apic_mode || has_sev_es) {
                if (boot_cpu_data.cpuid_level < 0x0b)
                        return false;
 
@@ -1532,8 +1546,13 @@ static bool prepare_parallel_bringup(void)
                        return false;
                }
 
-               pr_debug("Using CPUID 0xb for parallel CPU startup\n");
-               smpboot_control = STARTUP_APICID_CPUID_0B;
+               if (has_sev_es) {
+                       pr_debug("Using SEV-ES CPUID 0xb for parallel CPU startup\n");
+                       smpboot_control = STARTUP_APICID_SEV_ES;
+               } else {
+                       pr_debug("Using CPUID 0xb for parallel CPU startup\n");
+                       smpboot_control = STARTUP_APICID_CPUID_0B;
+               }
        } else {
                /* Without X2APIC, what's in CPUID 0x01 should suffice. */
                if (boot_cpu_data.cpuid_level < 0x01)
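
For reference on the comment at the top of prepare_parallel_bringup(), the
two CPUID leaves report the APIC ID as sketched below, using the kernel's
cpuid_*() helpers. This is an illustration, not code from the patch.

#include <linux/types.h>
#include <asm/processor.h>	/* cpuid_ebx(), cpuid_edx() */

static void apicid_sources(void)
{
	/* Leaf 0x0b: the full 32-bit x2APIC ID is returned in EDX. */
	u32 x2apic_id  = cpuid_edx(0x0b);

	/* Leaf 0x01: the 8-bit initial APIC ID lives in EBX[31:24]. */
	u32 initial_id = cpuid_ebx(0x01) >> 24;

	/*
	 * SEV-ES guests are given the same leaf 0x0b EDX value, but over
	 * the GHCB MSR protocol (see the head_64.S hunk above), since a
	 * direct CPUID would raise a #VC exception the AP can't yet take
	 * at that point in startup.
	 */
}
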