KVM: arm64: Survive synchronous exceptions caused by AT instructions
author James Morse <james.morse@arm.com>
Fri, 4 Sep 2020 11:24:36 +0000 (12:24 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 9 Sep 2020 17:03:13 +0000 (19:03 +0200)
commit 88a84ccccb3966bcc3f309cdb76092a9892c0260 upstream.

KVM doesn't expect any synchronous exceptions when executing; any such
exception leads to a panic(). AT instructions access the guest page
tables and can cause a synchronous external abort to be taken.

The Arm ARM is unclear on what should happen if the guest has configured
hardware update of the access flag and a memory type in TCR_EL1 that
does not support atomic operations. B2.2.6 "Possible implementation
restrictions on using atomic instructions" from DDI0487F.a lists
synchronous external abort as a possible behaviour of atomic instructions
that target memory that isn't writeback cacheable, but the page table
walker may behave differently.

Make KVM robust to synchronous exceptions caused by AT instructions.
Add a get_user() style helper for AT instructions that returns -EFAULT
if an exception was generated.
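
As a rough usage illustration (the wrapper name below is hypothetical and not
part of the patch; __kvm_at(), its -EFAULT return and the PAR_EL1 read are
taken from the hunks that follow), a caller checks the return value like
get_user() before trusting PAR_EL1:

  /*
   * Sketch only, assuming hyp context with asm/kvm_asm.h and asm/sysreg.h
   * available. try_translate_s1e1r() is an illustrative name.
   */
  static int try_translate_s1e1r(u64 va, u64 *par)
  {
      if (__kvm_at("s1e1r", va))
          return -EFAULT;               /* the AT instruction itself faulted */

      *par = read_sysreg(par_el1);      /* result is only valid on success */
      return 0;
  }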

While KVM's version of the exception table mixes synchronous and
asynchronous exceptions, only one of these can occur at each location.
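
The C side of the fixup (the kvm_unexpected_el2_exception handler called from
the new el2_sync vector below) is not shown in these hunks. As a sketch under
assumptions, such a handler could walk the __kvm_ex_table entries, whose
fields are self-relative offsets per __KVM_EXTABLE's "(from - .), (to - .)"
encoding, and redirect ELR_EL2 to the fixup label; the struct layout, section
symbol names and function name below are illustrative only:

  /* Sketch only: layout and names inferred from __KVM_EXTABLE, not the patch. */
  struct kvm_extable_entry {
      int insn;     /* "(from - .)": offset of the faulting instruction */
      int fixup;    /* "(to - .)": offset of the fixup code */
  };

  extern struct kvm_extable_entry __start___kvm_ex_table[];
  extern struct kvm_extable_entry __stop___kvm_ex_table[];

  static void fixup_unexpected_el2_exception(void)
  {
      unsigned long elr = read_sysreg(elr_el2);
      struct kvm_extable_entry *entry;

      for (entry = __start___kvm_ex_table;
           entry < __stop___kvm_ex_table; entry++) {
          unsigned long insn  = (unsigned long)&entry->insn + entry->insn;
          unsigned long fixup = (unsigned long)&entry->fixup + entry->fixup;

          if (insn == elr) {
              /* Resume at the "2:" fixup label, e.g. in __kvm_at(). */
              write_sysreg(fixup, elr_el2);
              return;
          }
      }

      /* No matching entry: this really is an unexpected EL2 exception. */
  }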

Re-enter the guest when the AT instructions take an exception on the
assumption the guest will take the same exception. This isn't guaranteed
to make forward progress, as the AT instructions may always walk the page
tables, but guest execution may use the translation cached in the TLB.

This isn't a problem: since commit 5dcd0fdbb492 ("KVM: arm64: Defer guest
entry when an asynchronous exception is pending"), KVM will return to the
host to process IRQs, allowing the rest of the system to keep running.

Cc: stable@vger.kernel.org # v4.14
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm64/include/asm/kvm_asm.h
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/hyp/switch.c

index aab6367efcec57010496cee8b8b57364ce61ab68..b0f0fb81f5f5e6d17604c82e0cfbadc1c9233bbf 100644 (file)
@@ -83,6 +83,34 @@ extern u32 __init_stage2_translation(void);
                *__hyp_this_cpu_ptr(sym);                               \
         })
 
+#define __KVM_EXTABLE(from, to)                                                \
+       "       .pushsection    __kvm_ex_table, \"a\"\n"                \
+       "       .align          3\n"                                    \
+       "       .long           (" #from " - .), (" #to " - .)\n"       \
+       "       .popsection\n"
+
+
+#define __kvm_at(at_op, addr)                                          \
+( {                                                                    \
+       int __kvm_at_err = 0;                                           \
+       u64 spsr, elr;                                                  \
+       asm volatile(                                                   \
+       "       mrs     %1, spsr_el2\n"                                 \
+       "       mrs     %2, elr_el2\n"                                  \
+       "1:     at      "at_op", %3\n"                                  \
+       "       isb\n"                                                  \
+       "       b       9f\n"                                           \
+       "2:     msr     spsr_el2, %1\n"                                 \
+       "       msr     elr_el2, %2\n"                                  \
+       "       mov     %w0, %4\n"                                      \
+       "9:\n"                                                          \
+       __KVM_EXTABLE(1b, 2b)                                           \
+       : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)                \
+       : "r" (addr), "i" (-EFAULT));                                   \
+       __kvm_at_err;                                                   \
+} )
+
+
 #else /* __ASSEMBLY__ */
 
 .macro hyp_adr_this_cpu reg, sym, tmp
index 886ba290fd0809c5d693b11ef6394f27f9cdc8e2..5e041eabdd03e23f68ed4b572b18565146336b30 100644 (file)
@@ -207,6 +207,15 @@ el1_error:
        mov     x0, #ARM_EXCEPTION_EL1_SERROR
        b       __guest_exit
 
+el2_sync:
+       save_caller_saved_regs_vect
+       stp     x29, x30, [sp, #-16]!
+       bl      kvm_unexpected_el2_exception
+       ldp     x29, x30, [sp], #16
+       restore_caller_saved_regs_vect
+
+       eret
+
 el2_error:
        save_caller_saved_regs_vect
        stp     x29, x30, [sp, #-16]!
@@ -244,7 +253,6 @@ ENDPROC(\label)
        invalid_vector  el2t_irq_invalid
        invalid_vector  el2t_fiq_invalid
        invalid_vector  el2t_error_invalid
-       invalid_vector  el2h_sync_invalid
        invalid_vector  el2h_irq_invalid
        invalid_vector  el2h_fiq_invalid
        invalid_vector  el1_sync_invalid
@@ -261,7 +269,7 @@ ENTRY(__kvm_hyp_vector)
        ventry  el2t_fiq_invalid                // FIQ EL2t
        ventry  el2t_error_invalid              // Error EL2t
 
-       ventry  el2h_sync_invalid               // Synchronous EL2h
+       ventry  el2_sync                        // Synchronous EL2h
        ventry  el2h_irq_invalid                // IRQ EL2h
        ventry  el2h_fiq_invalid                // FIQ EL2h
        ventry  el2_error                       // Error EL2h
index 79c2bc5cbc699d384ad1f78fb83979857b8c7f04..99ae75a43985ccdb0cf8a86f6612115013892505 100644 (file)
@@ -220,10 +220,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
         * saved the guest context yet, and we may return early...
         */
        par = read_sysreg(par_el1);
-       asm volatile("at s1e1r, %0" : : "r" (far));
-       isb();
-
-       tmp = read_sysreg(par_el1);
+       if (!__kvm_at("s1e1r", far))
+               tmp = read_sysreg(par_el1);
+       else
+               tmp = 1; /* back to the guest */
        write_sysreg(par, par_el1);
 
        if (unlikely(tmp & 1))