dma-direct: don't over-decrypt memory
author Robin Murphy <robin.murphy@arm.com>
Fri, 20 May 2022 17:10:13 +0000 (18:10 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 22 Jun 2022 12:13:20 +0000 (14:13 +0200)
commit 4a37f3dd9a83186cb88d44808ab35b78375082c9 upstream.

The original x86 sev_alloc() only called set_memory_decrypted() on
memory returned by alloc_pages_node(), so the page order calculation
fell out of that logic. However, the common dma-direct code has several
potential allocators, not all of which are guaranteed to round up the
underlying allocation to a power-of-two size, so carrying over that
calculation for the encryption/decryption size was a mistake. Fix it by
rounding to a *number* of pages, rather than an order.
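
For illustration only (not part of the patch): a minimal userspace sketch
of the arithmetic, assuming 4 KiB pages, with get_order() and PFN_UP()
reimplemented here to mirror the kernel macros. For a 5-page (20 KiB)
buffer, the order-based count covers 8 pages while PFN_UP() covers
exactly the 5 pages actually allocated.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* mirrors the kernel's PFN_UP(): pages needed to cover x bytes */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

/* mirrors the kernel's get_order(): smallest order such that
 * (PAGE_SIZE << order) >= size */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 5 * PAGE_SIZE;	/* 20 KiB, not a power of two */

	printf("old: 1 << get_order() = %lu pages\n",
	       1UL << get_order(size));		/* 8 pages over-decrypted */
	printf("new: PFN_UP()         = %lu pages\n",
	       PFN_UP(size));			/* exactly 5 pages */
	return 0;
}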

Until recently there was an even worse interaction with DMA_DIRECT_REMAP
where we could have ended up decrypting part of the next adjacent
vmalloc area, only averted by no architecture actually supporting both
configs at once. Don't ask how I found that one out...
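
For scale, a hypothetical worst case (again a userspace sketch, assuming
4 KiB pages and the same mirrored macros as above): for a size one page
over a power of two, the old count decrypted nearly twice the pages the
vmalloc remap actually covered.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = (1UL << 20) + PAGE_SIZE;	/* 1 MiB + one page */
	unsigned long mapped = PFN_UP(size);		/* remap covers 257 pages */
	unsigned long touched = 1UL << get_order(size);	/* old code decrypted 512 */

	printf("overshoot: %lu pages past the vmalloc area\n",
	       touched - mapped);			/* 255 */
	return 0;
}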

Fixes: c10f07aa27da ("dma/direct: Handle force decryption for DMA coherent buffers in common code")
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: David Rientjes <rientjes@google.com>
[ backport the functional change without all the prior refactoring ]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
kernel/dma/direct.c

index 06c111544f61d630c79d722111e8b0e573de8782..2922250f93b44cb32a388d5097f306271b5b0161 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -188,7 +188,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                        goto out_free_pages;
                if (force_dma_unencrypted(dev)) {
                        err = set_memory_decrypted((unsigned long)ret,
-                                                  1 << get_order(size));
+                                                  PFN_UP(size));
                        if (err)
                                goto out_free_pages;
                }
@@ -210,7 +210,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
        ret = page_address(page);
        if (force_dma_unencrypted(dev)) {
                err = set_memory_decrypted((unsigned long)ret,
-                                          1 << get_order(size));
+                                          PFN_UP(size));
                if (err)
                        goto out_free_pages;
        }
@@ -231,7 +231,7 @@ done:
 out_encrypt_pages:
        if (force_dma_unencrypted(dev)) {
                err = set_memory_encrypted((unsigned long)page_address(page),
-                                          1 << get_order(size));
+                                          PFN_UP(size));
                /* If memory cannot be re-encrypted, it must be leaked */
                if (err)
                        return NULL;
@@ -244,8 +244,6 @@ out_free_pages:
 void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-       unsigned int page_order = get_order(size);
-
        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev)) {
                /* cpu_addr is a struct page cookie, not a kernel address */
@@ -266,7 +264,7 @@ void dma_direct_free(struct device *dev, size_t size,
                return;
 
        if (force_dma_unencrypted(dev))
-               set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+               set_memory_encrypted((unsigned long)cpu_addr, PFN_UP(size));
 
        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
                vunmap(cpu_addr);
@@ -302,8 +300,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 
        ret = page_address(page);
        if (force_dma_unencrypted(dev)) {
-               if (set_memory_decrypted((unsigned long)ret,
-                               1 << get_order(size)))
+               if (set_memory_decrypted((unsigned long)ret, PFN_UP(size)))
                        goto out_free_pages;
        }
        memset(ret, 0, size);
@@ -318,7 +315,6 @@ void dma_direct_free_pages(struct device *dev, size_t size,
                struct page *page, dma_addr_t dma_addr,
                enum dma_data_direction dir)
 {
-       unsigned int page_order = get_order(size);
        void *vaddr = page_address(page);
 
        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
@@ -327,7 +323,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
                return;
 
        if (force_dma_unencrypted(dev))
-               set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
+               set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
 
        dma_free_contiguous(dev, page, size);
 }
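
For context (not from the commit): a hypothetical driver fragment that
exercises this path. On a device where force_dma_unencrypted() is true
(e.g. in an SEV guest), a 20 KiB allocation previously had 8 pages
decrypted; with this fix, exactly PFN_UP(5 * PAGE_SIZE) = 5 pages are
decrypted, and the free path re-encrypts the same 5 pages.

#include <linux/dma-mapping.h>

/* hypothetical example, not part of the patch */
static int example_alloc(struct device *dev)
{
	dma_addr_t dma;
	void *buf;

	/* 5 pages: reaches dma_direct_alloc() on a direct-mapped device */
	buf = dma_alloc_coherent(dev, 5 * PAGE_SIZE, &dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... use buf ... */

	/* size must match the allocation; re-encrypts PFN_UP(size) pages */
	dma_free_coherent(dev, 5 * PAGE_SIZE, buf, dma);
	return 0;
}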