From: Ard Biesheuvel
Date: Thu, 28 Aug 2025 10:22:12 +0000 (+0200)
Subject: x86/sev: Use boot SVSM CA for all startup and init code
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=c54604fb7f2522fec5b97e86103ec49e539e80fe;p=users%2Fhch%2Fmisc.git

x86/sev: Use boot SVSM CA for all startup and init code

To avoid having to reason about whether or not to use the per-CPU SVSM
calling area when running startup and init code on the boot CPU, reuse
the boot SVSM calling area as the per-CPU area for the BSP.

This removes the need to make the per-CPU variables, and the associated
state in sev_cfg, accessible to the startup code once it is confined.

[ bp: Massage commit message. ]

Signed-off-by: Ard Biesheuvel
Signed-off-by: Borislav Petkov (AMD)
Link: https://lore.kernel.org/20250828102202.1849035-33-ardb+git@google.com
---
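
For illustration only: the selection logic consolidated here can be modeled
as a standalone userspace sketch. The per-CPU accessors and rip_rel_ptr()
are stubbed with plain variables, and boot_svsm_caa_pa is given an arbitrary
value; the point is merely that every caller resolves to the boot CA until
sev_cfg.use_cas is set, and that the BSP then publishes the boot CA as its
own per-CPU CA. This is a sketch, not kernel code:

/* Userspace model of the CAA selection after this patch (not kernel code). */
#include <stdio.h>
#include <stdint.h>

struct svsm_ca { uint8_t page[4096]; };

static struct svsm_ca boot_svsm_ca_page;	/* the boot/startup CA        */
static uint64_t boot_svsm_caa_pa = 0x1000;	/* arbitrary stand-in PA      */
static struct svsm_ca *svsm_caa;		/* models the per-CPU pointer */
static uint64_t svsm_caa_pa;			/* models the per-CPU PA      */
static struct { int use_cas; } sev_cfg;

/* Mirrors svsm_get_caa()/svsm_get_caa_pa() as moved into core.c. */
static struct svsm_ca *svsm_get_caa(void)
{
	return sev_cfg.use_cas ? svsm_caa : &boot_svsm_ca_page;
}

static uint64_t svsm_get_caa_pa(void)
{
	return sev_cfg.use_cas ? svsm_caa_pa : boot_svsm_caa_pa;
}

int main(void)
{
	/* Startup/init path: use_cas is still false, so the boot CA is used. */
	printf("startup CA %p, PA 0x%llx\n", (void *)svsm_get_caa(),
	       (unsigned long long)svsm_get_caa_pa());

	/* sev_es_init_vc_handling(): the BSP reuses the boot CA per CPU. */
	svsm_caa = &boot_svsm_ca_page;
	svsm_caa_pa = boot_svsm_caa_pa;
	sev_cfg.use_cas = 1;

	printf("runtime CA %p, PA 0x%llx\n", (void *)svsm_get_caa(),
	       (unsigned long long)svsm_get_caa_pa());
	return 0;
}
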
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 0e567410d24d..4873469b2a39 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -37,19 +37,6 @@ struct ghcb *boot_ghcb;
 
 #define __BOOT_COMPRESSED
 
-extern u64 boot_svsm_caa_pa;
-
-struct svsm_ca *svsm_get_caa(void)
-{
-	/* The decompressor is mapped 1:1 so VA == PA */
-	return (struct svsm_ca *)boot_svsm_caa_pa;
-}
-
-u64 svsm_get_caa_pa(void)
-{
-	return boot_svsm_caa_pa;
-}
-
 u8 snp_vmpl;
 
 /* Include code for early handlers */
diff --git a/arch/x86/boot/startup/sev-startup.c b/arch/x86/boot/startup/sev-startup.c
index 8009a37d53c1..b0fc63f8dee1 100644
--- a/arch/x86/boot/startup/sev-startup.c
+++ b/arch/x86/boot/startup/sev-startup.c
@@ -50,9 +50,6 @@ u64 sev_secrets_pa __ro_after_init;
 /* For early boot SVSM communication */
 struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
 
-DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
-DEFINE_PER_CPU(u64, svsm_caa_pa);
-
 /*
  * Nothing shall interrupt this code path while holding the per-CPU
  * GHCB. The backup GHCB is only for NMIs interrupting this path.
@@ -153,7 +150,9 @@ void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
 				  unsigned long npages)
 {
 	struct psc_desc d = {
-		SNP_PAGE_STATE_PRIVATE, svsm_get_caa(), svsm_get_caa_pa()
+		SNP_PAGE_STATE_PRIVATE,
+		rip_rel_ptr(&boot_svsm_ca_page),
+		boot_svsm_caa_pa
 	};
 
 	/*
@@ -176,7 +175,9 @@ void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
 				 unsigned long npages)
 {
 	struct psc_desc d = {
-		SNP_PAGE_STATE_SHARED, svsm_get_caa(), svsm_get_caa_pa()
+		SNP_PAGE_STATE_SHARED,
+		rip_rel_ptr(&boot_svsm_ca_page),
+		boot_svsm_caa_pa
 	};
 
 	/*
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index a833b2b31d3d..9782ebe30675 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -46,6 +46,25 @@
 #include
 #include
 
+DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
+DEFINE_PER_CPU(u64, svsm_caa_pa);
+
+static inline struct svsm_ca *svsm_get_caa(void)
+{
+	if (sev_cfg.use_cas)
+		return this_cpu_read(svsm_caa);
+	else
+		return rip_rel_ptr(&boot_svsm_ca_page);
+}
+
+static inline u64 svsm_get_caa_pa(void)
+{
+	if (sev_cfg.use_cas)
+		return this_cpu_read(svsm_caa_pa);
+	else
+		return boot_svsm_caa_pa;
+}
+
 /* AP INIT values as documented in the APM2 section "Processor Initialization State" */
 #define AP_INIT_CS_LIMIT		0xffff
 #define AP_INIT_DS_LIMIT		0xffff
@@ -1312,7 +1331,8 @@ static void __init alloc_runtime_data(int cpu)
 	struct svsm_ca *caa;
 
 	/* Allocate the SVSM CA page if an SVSM is present */
-	caa = memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE);
+	caa = cpu ? memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE)
+		  : &boot_svsm_ca_page;
 
 	per_cpu(svsm_caa, cpu) = caa;
 	per_cpu(svsm_caa_pa, cpu) = __pa(caa);
@@ -1366,32 +1386,9 @@ void __init sev_es_init_vc_handling(void)
 		init_ghcb(cpu);
 	}
 
-	/* If running under an SVSM, switch to the per-cpu CA */
-	if (snp_vmpl) {
-		struct svsm_call call = {};
-		unsigned long flags;
-		int ret;
-
-		local_irq_save(flags);
-
-		/*
-		 * SVSM_CORE_REMAP_CA call:
-		 *   RAX = 0 (Protocol=0, CallID=0)
-		 *   RCX = New CA GPA
-		 */
-		call.caa = svsm_get_caa();
-		call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
-		call.rcx = this_cpu_read(svsm_caa_pa);
-		ret = svsm_perform_call_protocol(&call);
-		if (ret)
-			panic("Can't remap the SVSM CA, ret=%d, rax_out=0x%llx\n",
-			      ret, call.rax_out);
-
+	if (snp_vmpl)
 		sev_cfg.use_cas = true;
 
-		local_irq_restore(flags);
-	}
-
 	sev_es_setup_play_dead();
 
 	/* Secondary CPUs use the runtime #VC handler */
diff --git a/arch/x86/include/asm/sev-internal.h b/arch/x86/include/asm/sev-internal.h
index 9ff824540b48..f98f080410ad 100644
--- a/arch/x86/include/asm/sev-internal.h
+++ b/arch/x86/include/asm/sev-internal.h
@@ -62,22 +62,6 @@ DECLARE_PER_CPU(u64, svsm_caa_pa);
 
 extern u64 boot_svsm_caa_pa;
 
-static __always_inline struct svsm_ca *svsm_get_caa(void)
-{
-	if (sev_cfg.use_cas)
-		return this_cpu_read(svsm_caa);
-	else
-		return rip_rel_ptr(&boot_svsm_ca_page);
-}
-
-static __always_inline u64 svsm_get_caa_pa(void)
-{
-	if (sev_cfg.use_cas)
-		return this_cpu_read(svsm_caa_pa);
-	else
-		return boot_svsm_caa_pa;
-}
-
 enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt);
 void vc_forward_exception(struct es_em_ctxt *ctxt);
 
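A note on the alloc_runtime_data() hunk above: cpu 0 (the BSP) now skips the
memblock allocation and publishes boot_svsm_ca_page as its per-CPU CA, so the
CA the SVSM already knows about stays in place and no SVSM_CORE_REMAP_CA call
is needed for the BSP in sev_es_init_vc_handling(). A hedged userspace sketch
of that selection, with a hypothetical alloc_page_or_die() standing in for
memblock_alloc_or_panic():

/* Userspace model of the per-CPU CA choice in alloc_runtime_data(). */
#include <stdlib.h>
#include <stdint.h>

struct svsm_ca { uint8_t page[4096]; };

static struct svsm_ca boot_svsm_ca_page;

/* Hypothetical stand-in for memblock_alloc_or_panic(). */
static void *alloc_page_or_die(size_t size)
{
	void *p = aligned_alloc(4096, size);

	if (!p)
		abort();
	return p;
}

/* The BSP (cpu 0) reuses the boot CA; APs get a freshly allocated page. */
static struct svsm_ca *pick_caa(int cpu)
{
	return cpu ? alloc_page_or_die(sizeof(struct svsm_ca))
		   : &boot_svsm_ca_page;
}

int main(void)
{
	struct svsm_ca *bsp = pick_caa(0);	/* == &boot_svsm_ca_page */
	struct svsm_ca *ap  = pick_caa(1);	/* fresh allocation */

	return (bsp == &boot_svsm_ca_page && ap != bsp) ? 0 : 1;
}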