www.infradead.org Git - users/hch/misc.git/commitdiff
x86/sev: Use boot SVSM CA for all startup and init code
author: Ard Biesheuvel <ardb@kernel.org>
Thu, 28 Aug 2025 10:22:12 +0000 (12:22 +0200)
committer: Borislav Petkov (AMD) <bp@alien8.de>
Wed, 3 Sep 2025 15:58:26 +0000 (17:58 +0200)
To avoid having to reason about whether or not to use the per-CPU SVSM calling
area when running startup and init code on the boot CPU, reuse the boot SVSM
calling area as the per-CPU area for the BSP.

Thus, remove the need to make the per-CPU variables and associated state in
sev_cfg accessible to the startup code once confined.

  [ bp: Massage commit message. ]

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/20250828102202.1849035-33-ardb+git@google.com
arch/x86/boot/compressed/sev.c
arch/x86/boot/startup/sev-startup.c
arch/x86/coco/sev/core.c
arch/x86/include/asm/sev-internal.h

index 0e567410d24d7f5d91417673d02b34caadccdc19..4873469b2a39ae62283905c553ba51cbe01535d1 100644 (file)
@@ -37,19 +37,6 @@ struct ghcb *boot_ghcb;
 
 #define __BOOT_COMPRESSED
 
-extern u64 boot_svsm_caa_pa;
-
-struct svsm_ca *svsm_get_caa(void)
-{
-       /* The decompressor is mapped 1:1 so VA == PA */
-       return (struct svsm_ca *)boot_svsm_caa_pa;
-}
-
-u64 svsm_get_caa_pa(void)
-{
-       return boot_svsm_caa_pa;
-}
-
 u8 snp_vmpl;
 
 /* Include code for early handlers */
index 8009a37d53c1fb3af92fa5ada0231d922115af93..b0fc63f8dee1716de66458d9a48b4c844b71bc0d 100644 (file)
@@ -50,9 +50,6 @@ u64 sev_secrets_pa __ro_after_init;
 /* For early boot SVSM communication */
 struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
 
-DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
-DEFINE_PER_CPU(u64, svsm_caa_pa);
-
 /*
  * Nothing shall interrupt this code path while holding the per-CPU
  * GHCB. The backup GHCB is only for NMIs interrupting this path.
@@ -153,7 +150,9 @@ void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
                                         unsigned long npages)
 {
        struct psc_desc d = {
-               SNP_PAGE_STATE_PRIVATE, svsm_get_caa(), svsm_get_caa_pa()
+               SNP_PAGE_STATE_PRIVATE,
+               rip_rel_ptr(&boot_svsm_ca_page),
+               boot_svsm_caa_pa
        };
 
        /*
@@ -176,7 +175,9 @@ void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
                                        unsigned long npages)
 {
        struct psc_desc d = {
-               SNP_PAGE_STATE_SHARED, svsm_get_caa(), svsm_get_caa_pa()
+               SNP_PAGE_STATE_SHARED,
+               rip_rel_ptr(&boot_svsm_ca_page),
+               boot_svsm_caa_pa
        };
 
        /*
index a833b2b31d3db3a14ac554ee72ea24088a11b575..9782ebe306750f781f9c99ef655caee83d373fc5 100644 (file)
 #include <asm/cmdline.h>
 #include <asm/msr.h>
 
+DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
+DEFINE_PER_CPU(u64, svsm_caa_pa);
+
+static inline struct svsm_ca *svsm_get_caa(void)
+{
+       if (sev_cfg.use_cas)
+               return this_cpu_read(svsm_caa);
+       else
+               return rip_rel_ptr(&boot_svsm_ca_page);
+}
+
+static inline u64 svsm_get_caa_pa(void)
+{
+       if (sev_cfg.use_cas)
+               return this_cpu_read(svsm_caa_pa);
+       else
+               return boot_svsm_caa_pa;
+}
+
 /* AP INIT values as documented in the APM2  section "Processor Initialization State" */
 #define AP_INIT_CS_LIMIT               0xffff
 #define AP_INIT_DS_LIMIT               0xffff
@@ -1312,7 +1331,8 @@ static void __init alloc_runtime_data(int cpu)
                struct svsm_ca *caa;
 
                /* Allocate the SVSM CA page if an SVSM is present */
-               caa = memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE);
+               caa = cpu ? memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE)
+                         : &boot_svsm_ca_page;
 
                per_cpu(svsm_caa, cpu) = caa;
                per_cpu(svsm_caa_pa, cpu) = __pa(caa);
@@ -1366,32 +1386,9 @@ void __init sev_es_init_vc_handling(void)
                init_ghcb(cpu);
        }
 
-       /* If running under an SVSM, switch to the per-cpu CA */
-       if (snp_vmpl) {
-               struct svsm_call call = {};
-               unsigned long flags;
-               int ret;
-
-               local_irq_save(flags);
-
-               /*
-                * SVSM_CORE_REMAP_CA call:
-                *   RAX = 0 (Protocol=0, CallID=0)
-                *   RCX = New CA GPA
-                */
-               call.caa = svsm_get_caa();
-               call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
-               call.rcx = this_cpu_read(svsm_caa_pa);
-               ret = svsm_perform_call_protocol(&call);
-               if (ret)
-                       panic("Can't remap the SVSM CA, ret=%d, rax_out=0x%llx\n",
-                             ret, call.rax_out);
-
+       if (snp_vmpl)
                sev_cfg.use_cas = true;
 
-               local_irq_restore(flags);
-       }
-
        sev_es_setup_play_dead();
 
        /* Secondary CPUs use the runtime #VC handler */
index 9ff824540b48cf992a174ca22ef18614966f2feb..f98f080410ad8f54961331ccac035773ee286f81 100644 (file)
@@ -62,22 +62,6 @@ DECLARE_PER_CPU(u64, svsm_caa_pa);
 
 extern u64 boot_svsm_caa_pa;
 
-static __always_inline struct svsm_ca *svsm_get_caa(void)
-{
-       if (sev_cfg.use_cas)
-               return this_cpu_read(svsm_caa);
-       else
-               return rip_rel_ptr(&boot_svsm_ca_page);
-}
-
-static __always_inline u64 svsm_get_caa_pa(void)
-{
-       if (sev_cfg.use_cas)
-               return this_cpu_read(svsm_caa_pa);
-       else
-               return boot_svsm_caa_pa;
-}
-
 enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt);
 void vc_forward_exception(struct es_em_ctxt *ctxt);