arm64: kvm: Introduce nvhe stack size constants
author Kalesh Singh <kaleshsingh@google.com>
Tue, 12 Nov 2024 00:32:50 +0000 (16:32 -0800)
committer Marc Zyngier <maz@kernel.org>
Wed, 8 Jan 2025 11:25:28 +0000 (11:25 +0000)
Refactor nvhe stack code to use NVHE_STACK_SIZE/SHIFT constants,
instead of directly using PAGE_SIZE/SHIFT. This makes the code a bit
easier to read, without introducing any functional changes.

Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Link: https://lore.kernel.org/r/20241112003336.1375584-1-kaleshsingh@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/stacktrace/nvhe.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/host.S
arch/arm64/kvm/hyp/nvhe/mm.c
arch/arm64/kvm/hyp/nvhe/stacktrace.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/stacktrace.c

index 8b9f33cf561bb39520023d9b344150993a46b657..717829df294eafd286df3b1e2e37e1dc9a814dab 100644 (file)
 
 #define OVERFLOW_STACK_SIZE    SZ_4K
 
+#define NVHE_STACK_SHIFT       PAGE_SHIFT
+#define NVHE_STACK_SIZE        (UL(1) << NVHE_STACK_SHIFT)
+
 /*
  * With the minimum frame size of [x29, x30], exactly half the combined
  * sizes of the hyp and overflow stacks is the maximum size needed to
  * save the unwinded stacktrace; plus an additional entry to delimit the
  * end.
  */
-#define NVHE_STACKTRACE_SIZE   ((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))
+#define NVHE_STACKTRACE_SIZE   ((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))
 
 /*
  * Alignment of kernel segments (e.g. .text, .data).
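
A quick worked example of the sizing above (a standalone sketch assuming 4K pages, i.e. PAGE_SHIFT == 12; values are illustrative, not part of the patch): the smallest frame record is [x29, x30] (16 bytes) and the unwinder stores one long per frame, so the trace buffer never needs more than half of the combined stack space, plus one delimiting entry.

#include <stdio.h>

#define SZ_4K                  4096UL
#define PAGE_SHIFT             12                  /* assumption: 4K pages */
#define OVERFLOW_STACK_SIZE    SZ_4K
#define NVHE_STACK_SHIFT       PAGE_SHIFT
#define NVHE_STACK_SIZE        (1UL << NVHE_STACK_SHIFT)
#define NVHE_STACKTRACE_SIZE   ((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))

int main(void)
{
	/* (4096 + 4096) / 2 + 8 = 4104 bytes with the assumptions above */
	printf("NVHE_STACKTRACE_SIZE = %lu bytes\n",
	       (unsigned long)NVHE_STACKTRACE_SIZE);
	return 0;
}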
index 44759281d0d4378ede74f8f989e788f83a3115ef..171f9edef49fc48bfdc5d2f6d1c8ca301b1e99b6 100644 (file)
@@ -47,7 +47,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
 
 DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
-DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
 
 void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
 
index a102c3aebdbc419fe94a0f973eb2b3f06f0ba698..9ac45e23770f961d153bfe84137e22cf2fda526f 100644 (file)
@@ -61,7 +61,7 @@ static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTR
 
 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 
-DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
@@ -2339,7 +2339,7 @@ static void __init teardown_hyp_mode(void)
 
        free_hyp_pgds();
        for_each_possible_cpu(cpu) {
-               free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+               free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
                free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
 
                if (free_sve) {
@@ -2527,15 +2527,15 @@ static int __init init_hyp_mode(void)
         * Allocate stack pages for Hypervisor-mode
         */
        for_each_possible_cpu(cpu) {
-               unsigned long stack_page;
+               unsigned long stack_base;
 
-               stack_page = __get_free_page(GFP_KERNEL);
-               if (!stack_page) {
+               stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
+               if (!stack_base) {
                        err = -ENOMEM;
                        goto out_err;
                }
 
-               per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
+               per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
        }
 
        /*
@@ -2604,9 +2604,9 @@ static int __init init_hyp_mode(void)
         */
        for_each_possible_cpu(cpu) {
                struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
-               char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
+               char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
 
-               err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
+               err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va);
                if (err) {
                        kvm_err("Cannot map hyp stack\n");
                        goto out_err;
@@ -2618,7 +2618,7 @@ static int __init init_hyp_mode(void)
                 * __hyp_pa() won't do the right thing there, since the stack
                 * has been mapped in the flexible private VA space.
                 */
-               params->stack_pa = __pa(stack_page);
+               params->stack_pa = __pa(stack_base);
        }
 
        for_each_possible_cpu(cpu) {
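
The order passed to __get_free_pages()/free_pages() above is simply the stack size expressed in pages, as a power of two. A minimal sketch of that arithmetic (hypothetical shift values, not from the patch): with NVHE_STACK_SHIFT == PAGE_SHIFT the order is 0, so a single page is still allocated per CPU exactly as before this refactor; a future 16K stack on 4K pages would be order 2.

#include <stdio.h>

static unsigned int nvhe_stack_order(unsigned int nvhe_stack_shift,
				     unsigned int page_shift)
{
	/* Number of pages per stack, as a power of two. */
	return nvhe_stack_shift - page_shift;
}

int main(void)
{
	/* Today: NVHE_STACK_SHIFT == PAGE_SHIFT, so order 0 (one page). */
	printf("order = %u\n", nvhe_stack_order(12, 12));
	/* Hypothetical 16K stacks on 4K pages would be order 2 (four pages). */
	printf("order = %u\n", nvhe_stack_order(14, 12));
	return 0;
}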
index 3d610fc51f4d33545b1eaee0b2ec09bb0b9b521b..58f0cb2298cc28c043b740072efcc63f24d6951a 100644 (file)
@@ -188,12 +188,12 @@ SYM_FUNC_END(__host_hvc)
 
        /*
         * Test whether the SP has overflowed, without corrupting a GPR.
-        * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
+        * nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
         * of SP should always be 1.
         */
        add     sp, sp, x0                      // sp' = sp + x0
        sub     x0, sp, x0                      // x0' = sp' - x0 = (sp + x0) - x0 = sp
-       tbz     x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
+       tbz     x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
        sub     x0, sp, x0                      // x0'' = sp' - x0' = (sp + x0) - sp = x0
        sub     sp, sp, x0                      // sp'' = sp' - x0 = (sp + x0) - x0 = sp
 
index 8850b591d775181a9f8545a21f05bb8fdc2b3a21..f41c7440b34b42c5a3d0c62dae68a5df0bf58414 100644 (file)
@@ -360,10 +360,10 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
 
        prev_base = __io_map_base;
        /*
-        * Efficient stack verification using the PAGE_SHIFT bit implies
+        * Efficient stack verification using the NVHE_STACK_SHIFT bit implies
         * an alignment of our allocation on the order of the size.
         */
-       size = PAGE_SIZE * 2;
+       size = NVHE_STACK_SIZE * 2;
        addr = ALIGN(__io_map_base, size);
 
        ret = __pkvm_alloc_private_va_range(addr, size);
@@ -373,12 +373,12 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
                 * at the higher address and leave the lower guard page
                 * unbacked.
                 *
-                * Any valid stack address now has the PAGE_SHIFT bit as 1
+                * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
                 * and addresses corresponding to the guard page have the
-                * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+                * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
                 */
-               ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
-                                         PAGE_SIZE, phys, PAGE_HYP);
+               ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + NVHE_STACK_SIZE,
+                                         NVHE_STACK_SIZE, phys, PAGE_HYP);
                if (ret)
                        __io_map_base = prev_base;
        }
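
The VA carving done here can be sketched as follows (hypothetical allocator cursor; the helpers below are a stand-in, not kernel APIs): a 2 * NVHE_STACK_SIZE window is reserved and aligned to its own size, the upper half is backed with the stack pages and the lower half stays unbacked as the guard region.

#include <stdio.h>

#define NVHE_STACK_SIZE 0x1000UL               /* assumption: 4K stacks */

int main(void)
{
	unsigned long io_map_base = 0x123456UL;  /* hypothetical allocator cursor */
	unsigned long size = NVHE_STACK_SIZE * 2;
	unsigned long addr = (io_map_base + size - 1) & ~(size - 1); /* ALIGN() */

	printf("guard (unbacked): [%#lx, %#lx)\n", addr, addr + NVHE_STACK_SIZE);
	printf("stack (mapped)  : [%#lx, %#lx)\n",
	       addr + NVHE_STACK_SIZE, addr + size);
	return 0;
}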
index ed6b58b19cfa53c4a7c4b799c593f2b91f517062..5b6eeab1a7743a386108064d1bc9684124b495c7 100644 (file)
@@ -28,7 +28,7 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
        struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
        struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 
-       stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
+       stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - NVHE_STACK_SIZE);
        stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
        stacktrace_info->fp = fp;
        stacktrace_info->pc = pc;
@@ -54,7 +54,7 @@ static struct stack_info stackinfo_get_hyp(void)
 {
        struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
        unsigned long high = params->stack_hyp_va;
-       unsigned long low = high - PAGE_SIZE;
+       unsigned long low = high - NVHE_STACK_SIZE;
 
        return (struct stack_info) {
                .low = low,
index d36be6d2ac91b8cadf9f9f186786f8b3893d04c0..8850741243bb5a742010119ac7e1a9687bbb8901 100644 (file)
@@ -706,10 +706,10 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
 
        mutex_lock(&kvm_hyp_pgd_mutex);
        /*
-        * Efficient stack verification using the PAGE_SHIFT bit implies
+        * Efficient stack verification using the NVHE_STACK_SHIFT bit implies
         * an alignment of our allocation on the order of the size.
         */
-       size = PAGE_SIZE * 2;
+       size = NVHE_STACK_SIZE * 2;
        base = ALIGN_DOWN(io_map_base - size, size);
 
        ret = __hyp_alloc_private_va_range(base);
@@ -726,12 +726,12 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
         * at the higher address and leave the lower guard page
         * unbacked.
         *
-        * Any valid stack address now has the PAGE_SHIFT bit as 1
+        * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
         * and addresses corresponding to the guard page have the
-        * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+        * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
         */
-       ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
-                                   PAGE_HYP);
+       ret = __create_hyp_mappings(base + NVHE_STACK_SIZE, NVHE_STACK_SIZE,
+                                   phys_addr, PAGE_HYP);
        if (ret)
                kvm_err("Cannot map hyp stack\n");
 
index fdedd8a3ed6f963df6e9c98bc0382c2c11243b2f..af5eec68112791b4cc0550067161bbc41baf5cc6 100644 (file)
@@ -51,7 +51,7 @@ static struct stack_info stackinfo_get_hyp(void)
        struct kvm_nvhe_stacktrace_info *stacktrace_info
                                = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
        unsigned long low = (unsigned long)stacktrace_info->stack_base;
-       unsigned long high = low + PAGE_SIZE;
+       unsigned long high = low + NVHE_STACK_SIZE;
 
        return (struct stack_info) {
                .low = low,
@@ -61,8 +61,8 @@ static struct stack_info stackinfo_get_hyp(void)
 
 static struct stack_info stackinfo_get_hyp_kern_va(void)
 {
-       unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
-       unsigned long high = low + PAGE_SIZE;
+       unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_base);
+       unsigned long high = low + NVHE_STACK_SIZE;
 
        return (struct stack_info) {
                .low = low,
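
The two stackinfo helpers describe the same NVHE_STACK_SIZE-sized stack from opposite ends: the hyp side records its top (params->stack_hyp_va) and derives the base, while the kernel side holds the base (kvm_arm_hyp_stack_base) and derives the top. A small sketch with hypothetical addresses (the struct below is a local stand-in for the kernel's struct stack_info):

#include <stdio.h>

#define NVHE_STACK_SIZE 0x1000UL              /* assumption: 4K stacks */

struct stack_info { unsigned long low, high; };

int main(void)
{
	unsigned long stack_hyp_va = 0xffff800000804000UL;  /* hypothetical hyp VA (stack top) */
	unsigned long stack_base   = 0xffff000012340000UL;  /* hypothetical kernel VA (stack base) */

	struct stack_info hyp  = { stack_hyp_va - NVHE_STACK_SIZE, stack_hyp_va };
	struct stack_info kern = { stack_base, stack_base + NVHE_STACK_SIZE };

	printf("hyp VA range : [%#lx, %#lx)\n", hyp.low, hyp.high);
	printf("kern VA range: [%#lx, %#lx)\n", kern.low, kern.high);
	return 0;
}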