arm64: Enforce bounce buffers for realm DMA
author Steven Price <steven.price@arm.com>
Thu, 17 Oct 2024 13:14:30 +0000 (14:14 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Wed, 23 Oct 2024 09:19:32 +0000 (10:19 +0100)
Within a realm guest it's not possible for a device emulated by the VMM
to access arbitrary guest memory. So force the use of bounce buffers to
ensure that the memory the emulated devices access is explicitly shared
with the host.

This adds a call to swiotlb_update_mem_attributes() which calls
set_memory_decrypted() to ensure the bounce buffer memory is shared with
the host. For non-realm guests or hosts this is a no-op.
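
As a rough illustration of the mechanism (a simplified sketch, not the exact upstream code; structure and field names vary between kernel versions), swiotlb_update_mem_attributes() in kernel/dma/swiotlb.c does approximately the following:

/* Simplified sketch of swiotlb_update_mem_attributes(); details vary by kernel version. */
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long bytes;

	/* Nothing to do if no bounce buffer pool was set up at early boot. */
	if (!mem->nslabs || mem->late_alloc)
		return;

	/*
	 * Mark the whole bounce buffer pool as decrypted, i.e. shared with
	 * the host, so that devices emulated by the VMM can reach it. For
	 * non-realm guests or hosts set_memory_decrypted() is a no-op.
	 */
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
}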

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Co-developed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Link: https://lore.kernel.org/r/20241017131434.40935-8-steven.price@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/kernel/rsi.c
arch/arm64/mm/init.c

diff --git a/arch/arm64/kernel/rsi.c b/arch/arm64/kernel/rsi.c
index 3e0c83e2296f50356d84f76cceac4d89794eaf45..a23c0a7154d2a82b7a4b07438c46c2cc33a80a8d 100644
--- a/arch/arm64/kernel/rsi.c
+++ b/arch/arm64/kernel/rsi.c
@@ -6,6 +6,7 @@
 #include <linux/jump_label.h>
 #include <linux/memblock.h>
 #include <linux/psci.h>
+#include <linux/swiotlb.h>
 
 #include <asm/io.h>
 #include <asm/rsi.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 27a32ff15412aaf22260d8d2a3aa59ee2d7e941f..d21f67d67cf5fd8d93521e637e471611aeb21186 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -41,6 +41,7 @@
 #include <asm/kvm_host.h>
 #include <asm/memory.h>
 #include <asm/numa.h>
+#include <asm/rsi.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <linux/sizes.h>
@@ -366,8 +367,14 @@ void __init bootmem_init(void)
  */
 void __init mem_init(void)
 {
+       unsigned int flags = SWIOTLB_VERBOSE;
        bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);
 
+       if (is_realm_world()) {
+               swiotlb = true;
+               flags |= SWIOTLB_FORCE;
+       }
+
        if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb) {
                /*
                 * If no bouncing needed for ZONE_DMA, reduce the swiotlb
@@ -379,7 +386,8 @@ void __init mem_init(void)
                swiotlb = true;
        }
 
-       swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
+       swiotlb_init(swiotlb, flags);
+       swiotlb_update_mem_attributes();
 
        /* this will put all unused low memory onto the freelists */
        memblock_free_all();
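
For reference, the is_realm_world() check used above comes from arch/arm64/include/asm/rsi.h, introduced earlier in this series. It is a cheap static-key test, roughly along these lines (a sketch; see the earlier RSI patches for the exact definition):

DECLARE_STATIC_KEY_FALSE(rsi_present);

static inline bool is_realm_world(void)
{
	/* True only when RSI was detected at boot, i.e. we are running inside a realm. */
	return static_branch_unlikely(&rsi_present);
}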