www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: arm64: Allow indirect vectors to be used without SPECTRE_V3A
author James Morse <james.morse@arm.com>
Tue, 16 Nov 2021 15:06:19 +0000 (15:06 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 11 Mar 2022 11:11:53 +0000 (12:11 +0100)
commit 5bdf3437603d4af87f9c7f424b0c8aeed2420745 upstream.

CPUs vulnerable to Spectre-BHB either need to make an SMC-CC firmware
call from the vectors, or run a sequence of branches. This gets added
to the hyp vectors. If there is no support for arch-workaround-1 in
firmware, the indirect vector will be used.

kvm_init_vector_slots() only initialises the two indirect slots if
the platform is vulnerable to Spectre-v3a. pKVM's hyp_map_vectors()
only initialises __hyp_bp_vect_base if the platform is vulnerable to
Spectre-v3a.

As there are about to be more users of the indirect vectors, ensure
their entries in hyp_spectre_vector_selector[] are always initialised,
and __hyp_bp_vect_base defaults to the regular VA mapping.

The Spectre-v3a check is moved to a helper
kvm_system_needs_idmapped_vectors(), and merged with the code
that creates the hyp mappings.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/mmu.h
arch/arm64/kernel/proton-pack.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/smccc_wa.S

index e7d98997c09c3058dd0656af26528ebd220cd2bc..f42fd0a2e81c8ca260e278a3a7b74ccff605757c 100644 (file)
@@ -66,7 +66,8 @@
 #define ARM64_HAS_TLB_RANGE                    56
 #define ARM64_MTE                              57
 #define ARM64_WORKAROUND_1508412               58
+#define ARM64_SPECTRE_BHB                      59
 
-#define ARM64_NCAPS                            59
+#define ARM64_NCAPS                            60
 
 #endif /* __ASM_CPUCAPS_H */
index 044bb9e2cd74f72347743f04e8d91b486b401dfb..c34cd44637d2328d4701d1d5fd2c3500ab295815 100644 (file)
@@ -35,6 +35,8 @@
 #define KVM_VECTOR_PREAMBLE    (2 * AARCH64_INSN_SIZE)
 
 #define __SMCCC_WORKAROUND_1_SMC_SZ 36
+#define __SMCCC_WORKAROUND_3_SMC_SZ 36
+#define __SPECTRE_BHB_LOOP_SZ       44
 
 #define KVM_HOST_SMCCC_ID(id)                                          \
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
@@ -199,6 +201,10 @@ extern void __vgic_v3_init_lrs(void);
 extern u32 __kvm_get_mdcr_el2(void);
 
 extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
+extern char __smccc_workaround_3_smc[__SMCCC_WORKAROUND_3_SMC_SZ];
+extern char __spectre_bhb_loop_k8[__SPECTRE_BHB_LOOP_SZ];
+extern char __spectre_bhb_loop_k24[__SPECTRE_BHB_LOOP_SZ];
+extern char __spectre_bhb_loop_k32[__SPECTRE_BHB_LOOP_SZ];
 
 /*
  * Obtain the PC-relative address of a kernel symbol
index 331394306ccee34633d178dbec9c1800612ac910..47dafd6ab3a30abd5aef575a34950abca498295f 100644 (file)
@@ -237,7 +237,8 @@ static inline void *kvm_get_hyp_vector(void)
        void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
        int slot = -1;
 
-       if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
+       if ((cpus_have_const_cap(ARM64_SPECTRE_V2) ||
+            cpus_have_const_cap(ARM64_SPECTRE_BHB)) && data->template_start) {
                vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
                slot = data->hyp_vectors_slot;
        }
index c7315862e2435945e4ed71949b1c7ec4379fae00..bc151b7dc042cfd2f78c5d52d0de956df3844d5d 100644 (file)
@@ -67,6 +67,12 @@ typedef void (*bp_hardening_cb_t)(void);
 struct bp_hardening_data {
        int                     hyp_vectors_slot;
        bp_hardening_cb_t       fn;
+
+       /*
+        * template_start is only used by the BHB mitigation to identify the
+        * hyp_vectors_slot sequence.
+        */
+       const char *template_start;
 };
 
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
index 0ab98d20a274213c8a2a24410c74fa12adf9890b..1eb1a9ee293eb81d5b20165c5993f34cd3eca71b 100644 (file)
@@ -220,9 +220,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
        __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
 }
 
+static DEFINE_RAW_SPINLOCK(bp_lock);
 static void install_bp_hardening_cb(bp_hardening_cb_t fn)
 {
-       static DEFINE_RAW_SPINLOCK(bp_lock);
        int cpu, slot = -1;
        const char *hyp_vecs_start = __smccc_workaround_1_smc;
        const char *hyp_vecs_end = __smccc_workaround_1_smc +
@@ -253,6 +253,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn)
 
        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.fn, fn);
+       __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
        raw_spin_unlock(&bp_lock);
 }
 #else
@@ -819,3 +820,47 @@ enum mitigation_state arm64_get_spectre_bhb_state(void)
 {
        return spectre_bhb_state;
 }
+
+static int kvm_bhb_get_vecs_size(const char *start)
+{
+       if (start == __smccc_workaround_3_smc)
+               return __SMCCC_WORKAROUND_3_SMC_SZ;
+       else if (start == __spectre_bhb_loop_k8 ||
+                start == __spectre_bhb_loop_k24 ||
+                start == __spectre_bhb_loop_k32)
+               return __SPECTRE_BHB_LOOP_SZ;
+
+       return 0;
+}
+
+void kvm_setup_bhb_slot(const char *hyp_vecs_start)
+{
+       int cpu, slot = -1, size;
+       const char *hyp_vecs_end;
+
+       if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
+               return;
+
+       size = kvm_bhb_get_vecs_size(hyp_vecs_start);
+       if (WARN_ON_ONCE(!hyp_vecs_start || !size))
+               return;
+       hyp_vecs_end = hyp_vecs_start + size;
+
+       raw_spin_lock(&bp_lock);
+       for_each_possible_cpu(cpu) {
+               if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
+                       slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+                       break;
+               }
+       }
+
+       if (slot == -1) {
+               slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+               BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
+               __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+       }
+
+       __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+       __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
+       raw_spin_unlock(&bp_lock);
+}
index 5bc978be804340a43d1835ab12058e4f15ca2ec5..4d63fcd7574b2c4a69ef81dd2a8c5be74e6cfcbe 100644 (file)
@@ -1337,7 +1337,8 @@ static int kvm_map_vectors(void)
         * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
         *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
         */
-       if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+       if (cpus_have_const_cap(ARM64_SPECTRE_V2) ||
+           cpus_have_const_cap(ARM64_SPECTRE_BHB)) {
                __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
                __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
        }
index b0441dbdf68bd012f84d9c4f25eac4824dddb417..6985dfbc1be19c041191eaa908358de3b5f3f76c 100644 (file)
@@ -30,3 +30,69 @@ SYM_DATA_START(__smccc_workaround_1_smc)
 1:     .org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
        .org 1b
 SYM_DATA_END(__smccc_workaround_1_smc)
+
+       .global         __smccc_workaround_3_smc
+SYM_DATA_START(__smccc_workaround_3_smc)
+       esb
+       sub     sp, sp, #(8 * 4)
+       stp     x2, x3, [sp, #(8 * 0)]
+       stp     x0, x1, [sp, #(8 * 2)]
+       mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_3
+       smc     #0
+       ldp     x2, x3, [sp, #(8 * 0)]
+       ldp     x0, x1, [sp, #(8 * 2)]
+       add     sp, sp, #(8 * 4)
+1:     .org __smccc_workaround_3_smc + __SMCCC_WORKAROUND_3_SMC_SZ
+       .org 1b
+SYM_DATA_END(__smccc_workaround_3_smc)
+
+       .global __spectre_bhb_loop_k8
+SYM_DATA_START(__spectre_bhb_loop_k8)
+       esb
+       sub     sp, sp, #(8 * 2)
+       stp     x0, x1, [sp, #(8 * 0)]
+       mov     x0, #8
+2:     b       . + 4
+       subs    x0, x0, #1
+       b.ne    2b
+       dsb     nsh
+       isb
+       ldp     x0, x1, [sp, #(8 * 0)]
+       add     sp, sp, #(8 * 2)
+1:     .org __spectre_bhb_loop_k8 + __SPECTRE_BHB_LOOP_SZ
+       .org 1b
+SYM_DATA_END(__spectre_bhb_loop_k8)
+
+       .global __spectre_bhb_loop_k24
+SYM_DATA_START(__spectre_bhb_loop_k24)
+       esb
+       sub     sp, sp, #(8 * 2)
+       stp     x0, x1, [sp, #(8 * 0)]
+       mov     x0, #8
+2:     b       . + 4
+       subs    x0, x0, #1
+       b.ne    2b
+       dsb     nsh
+       isb
+       ldp     x0, x1, [sp, #(8 * 0)]
+       add     sp, sp, #(8 * 2)
+1:     .org __spectre_bhb_loop_k24 + __SPECTRE_BHB_LOOP_SZ
+       .org 1b
+SYM_DATA_END(__spectre_bhb_loop_k24)
+
+       .global __spectre_bhb_loop_k32
+SYM_DATA_START(__spectre_bhb_loop_k32)
+       esb
+       sub     sp, sp, #(8 * 2)
+       stp     x0, x1, [sp, #(8 * 0)]
+       mov     x0, #8
+2:     b       . + 4
+       subs    x0, x0, #1
+       b.ne    2b
+       dsb     nsh
+       isb
+       ldp     x0, x1, [sp, #(8 * 0)]
+       add     sp, sp, #(8 * 2)
+1:     .org __spectre_bhb_loop_k32 + __SPECTRE_BHB_LOOP_SZ
+       .org 1b
+SYM_DATA_END(__spectre_bhb_loop_k32)