swiotlb_force = SWIOTLB_FORCE;
 }
 
-static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                      gfp_t gfp, unsigned long attrs)
-{
-       unsigned int order;
-       struct page *page;
-       void *vaddr = NULL;
-
-       order = get_order(size);
-       page = alloc_pages_node(dev_to_node(dev), gfp, order);
-       if (page) {
-               dma_addr_t addr;
-
-               /*
-                * Since we will be clearing the encryption bit, check the
-                * mask with it already cleared.
-                */
-               addr = __phys_to_dma(dev, page_to_phys(page));
-               if ((addr + size) > dev->coherent_dma_mask) {
-                       __free_pages(page, get_order(size));
-               } else {
-                       vaddr = page_address(page);
-                       *dma_handle = addr;
-               }
-       }
-
-       if (!vaddr)
-               vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
-
-       if (!vaddr)
-               return NULL;
-
-       /* Clear the SME encryption bit for DMA use if not swiotlb area */
-       if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) {
-               set_memory_decrypted((unsigned long)vaddr, 1 << order);
-               memset(vaddr, 0, PAGE_SIZE << order);
-               *dma_handle = __sme_clr(*dma_handle);
-       }
-
-       return vaddr;
-}
-
-static void sev_free(struct device *dev, size_t size, void *vaddr,
-                    dma_addr_t dma_handle, unsigned long attrs)
-{
-       /* Set the SME encryption bit for re-use if not swiotlb area */
-       if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle)))
-               set_memory_encrypted((unsigned long)vaddr,
-                                    1 << get_order(size));
-
-       swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 {
        pgprot_t old_prot, new_prot;
 }
 EXPORT_SYMBOL(sev_active);
 
-static const struct dma_map_ops sev_dma_ops = {
-       .alloc                  = sev_alloc,
-       .free                   = sev_free,
-       .map_page               = swiotlb_map_page,
-       .unmap_page             = swiotlb_unmap_page,
-       .map_sg                 = swiotlb_map_sg_attrs,
-       .unmap_sg               = swiotlb_unmap_sg_attrs,
-       .sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
-       .sync_single_for_device = swiotlb_sync_single_for_device,
-       .sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
-       .sync_sg_for_device     = swiotlb_sync_sg_for_device,
-       .mapping_error          = swiotlb_dma_mapping_error,
-};
-
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void)
 {
        swiotlb_update_mem_attributes();
 
        /*
-        * With SEV, DMA operations cannot use encryption. New DMA ops
-        * are required in order to mark the DMA areas as decrypted or
-        * to use bounce buffers.
+        * With SEV, DMA operations cannot use encryption, so we need
+        * SWIOTLB to bounce-buffer DMA operations.
         */
        if (sev_active())
-               dma_ops = &sev_dma_ops;
+               dma_ops = &swiotlb_dma_ops;
 
        /*
         * With SEV, we need to unroll the rep string I/O instructions.
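
A minimal, driver-side sketch of what the hunks above mean in practice: once
sev_active() forces swiotlb_force = SWIOTLB_FORCE and installs swiotlb_dma_ops,
streaming mappings are bounced through the (decrypted) SWIOTLB pool. The
device, buffer and length below are invented; only the standard
dma_map_single()/dma_unmap_single() API is assumed.

#include <linux/dma-mapping.h>

static int example_send(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma;

        /*
         * With dma_ops == &swiotlb_dma_ops and swiotlb_force set, this
         * dispatches to swiotlb_map_page(), which copies 'buf' into the
         * decrypted SWIOTLB pool and returns a bounce-buffer address the
         * device can reach without touching encrypted guest memory.
         */
        dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* ... program the device with 'dma' and kick off the transfer ... */

        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
        return 0;
}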
 
 #include <linux/scatterlist.h>
 #include <linux/dma-contiguous.h>
 #include <linux/pfn.h>
+#include <linux/set_memory.h>
 
 #define DIRECT_MAPPING_ERROR           0
 
 #define ARCH_ZONE_DMA_BITS 24
 #endif
 
+/*
+ * For AMD SEV, all DMA must be to unencrypted addresses.
+ */
+static inline bool force_dma_unencrypted(void)
+{
+       return sev_active();
+}
+
 static bool
 check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
                const char *caller)
 
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
-       return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
+       dma_addr_t addr = force_dma_unencrypted() ?
+               __phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
+       return addr + size - 1 <= dev->coherent_dma_mask;
 }
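
The switch to __phys_to_dma() here is what keeps the mask check meaningful for
buffers that are about to be decrypted: on x86 with SME/SEV, phys_to_dma()
includes the encryption bit, a high physical-address bit, so the comparison
against a small coherent_dma_mask would always fail even though the page is
reachable once that bit is cleared. A standalone sketch of the arithmetic,
with an assumed C-bit position and a made-up 32-bit device mask:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SME_C_BIT          (1ULL << 47)   /* assumed C-bit position */
#define COHERENT_DMA_MASK  0xffffffffULL  /* a 32-bit capable device */

static bool coherent_ok(uint64_t phys, uint64_t size, bool unencrypted)
{
        /* phys_to_dma() would carry the C-bit, __phys_to_dma() would not */
        uint64_t addr = unencrypted ? phys : (phys | SME_C_BIT);

        return addr + size - 1 <= COHERENT_DMA_MASK;
}

int main(void)
{
        uint64_t phys = 0x100000, size = 0x1000;

        printf("with C-bit:    %d\n", coherent_ok(phys, size, false)); /* 0 */
        printf("without C-bit: %d\n", coherent_ok(phys, size, true));  /* 1 */
        return 0;
}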
 
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        int page_order = get_order(size);
        struct page *page = NULL;
+       void *ret;
 
        /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
        if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
 
        if (!page)
                return NULL;
-
-       *dma_handle = phys_to_dma(dev, page_to_phys(page));
-       memset(page_address(page), 0, size);
-       return page_address(page);
+       ret = page_address(page);
+       if (force_dma_unencrypted()) {
+               set_memory_decrypted((unsigned long)ret, 1 << page_order);
+               *dma_handle = __phys_to_dma(dev, page_to_phys(page));
+       } else {
+               *dma_handle = phys_to_dma(dev, page_to_phys(page));
+       }
+       memset(ret, 0, size);
+       return ret;
 }
 
 /*
                dma_addr_t dma_addr, unsigned long attrs)
 {
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned int page_order = get_order(size);
 
+       if (force_dma_unencrypted())
+               set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
        if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
-               free_pages((unsigned long)cpu_addr, get_order(size));
+               free_pages((unsigned long)cpu_addr, page_order);
 }
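
A hedged sketch of how these reworked paths look from a driver's point of
view; the ring structure and function names are invented, and the exact route
from dma_alloc_coherent() into dma_direct_alloc() depends on which dma_map_ops
the platform installed:

#include <linux/dma-mapping.h>

struct example_ring {
        void *desc;            /* CPU address of the descriptor ring */
        dma_addr_t desc_dma;   /* bus address handed to the device */
};

static int example_ring_alloc(struct device *dev, struct example_ring *ring,
                              size_t size)
{
        /*
         * When this reaches dma_direct_alloc() on an SEV guest, the pages
         * are marked decrypted via set_memory_decrypted(), zeroed through
         * that unencrypted mapping, and desc_dma comes from __phys_to_dma(),
         * i.e. without the encryption bit.
         */
        ring->desc = dma_alloc_coherent(dev, size, &ring->desc_dma, GFP_KERNEL);
        return ring->desc ? 0 : -ENOMEM;
}

static void example_ring_free(struct device *dev, struct example_ring *ring,
                              size_t size)
{
        /*
         * dma_direct_free() re-encrypts the pages with set_memory_encrypted()
         * before returning them to the page allocator.
         */
        dma_free_coherent(dev, size, ring->desc, ring->desc_dma);
}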
 
 static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,