select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS
        select COMMON_CLK
-       select DMA_NONCOHERENT_OPS
+       select DMA_DIRECT_OPS
        select DMA_NONCOHERENT_MMAP
        select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
        select GENERIC_CLOCKEVENTS
 
 }
 
 /*
- * Plug in coherent or noncoherent dma ops
+ * Plug in direct dma map ops.
  */
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
        /*
         * IOC hardware snoops all DMA traffic keeping the caches consistent
         * with memory - eliding need for any explicit cache maintenance of
-        * DMA buffers - so we can use dma_direct cache ops.
+        * DMA buffers.
         */
-       if (is_isa_arcv2() && ioc_enable && coherent) {
-               set_dma_ops(dev, &dma_direct_ops);
-               dev_info(dev, "use dma_direct_ops cache ops\n");
-       } else {
-               set_dma_ops(dev, &dma_noncoherent_ops);
-               dev_info(dev, "use dma_noncoherent_ops cache ops\n");
-       }
+       if (is_isa_arcv2() && ioc_enable && coherent)
+               dev->dma_coherent = true;
+
+       dev_info(dev, "use %scoherent DMA ops\n",
+                dev->dma_coherent ? "" : "non");
 }
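
The dma_coherent flag set above is what the merged direct ops later in this patch consult through dev_is_dma_coherent(). For reference, that helper (provided by <linux/dma-noncoherent.h>, paraphrased here as a sketch rather than quoted from this patch) boils down to reading the flag:

static inline bool dev_is_dma_coherent(struct device *dev)
{
	/* set by arch_setup_dma_ops() above for IOC-equipped ARCv2 */
	return dev->dma_coherent;
}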
 
         */
 
        if (attrs & DMA_ATTR_NON_CONSISTENT)
-               return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+               return dma_direct_alloc_pages(dev, size, dma_handle, gfp,
+                               attrs);
 
        ret = dma_alloc_from_global_coherent(size, dma_handle);
 
                               unsigned long attrs)
 {
        if (attrs & DMA_ATTR_NON_CONSISTENT) {
-               dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+               dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
        } else {
                int ret = dma_release_from_global_coherent(get_order(size),
                                                           cpu_addr);
 
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select CLKDEV_LOOKUP
-       select DMA_NONCOHERENT_OPS
+       select DMA_DIRECT_OPS
        select GENERIC_ATOMIC64
        select GENERIC_IRQ_SHOW
        select HAVE_ARCH_TRACEHOOK
 
        select GENERIC_CLOCKEVENTS_BROADCAST
        select MODULES_USE_ELF_RELA
        select GENERIC_CPU_DEVICES
-       select DMA_NONCOHERENT_OPS
+       select DMA_DIRECT_OPS
        ---help---
          Qualcomm Hexagon is a processor architecture designed for high
          performance and low power across a wide variety of applications.
 
        select MODULES_USE_ELF_RELA
        select OLD_SIGSUSPEND3
        select OLD_SIGACTION
-       select DMA_NONCOHERENT_OPS if HAS_DMA
+       select DMA_DIRECT_OPS if HAS_DMA
        select HAVE_MEMBLOCK
        select ARCH_DISCARD_MEMBLOCK
        select NO_BOOTMEM
 
        select TIMER_OF
        select CLONE_BACKWARDS3
        select COMMON_CLK
-       select DMA_NONCOHERENT_OPS
+       select DMA_DIRECT_OPS
        select DMA_NONCOHERENT_MMAP
        select GENERIC_ATOMIC64
        select GENERIC_CLOCKEVENTS
 
        select NEED_DMA_MAP_STATE
        select DMA_NONCOHERENT_MMAP
        select DMA_NONCOHERENT_CACHE_SYNC
-       select DMA_NONCOHERENT_OPS
 
 config SYS_HAS_EARLY_PRINTK
        bool
 
        return &jazz_dma_ops;
 #elif defined(CONFIG_SWIOTLB)
        return &swiotlb_dma_ops;
-#elif defined(CONFIG_DMA_NONCOHERENT_OPS)
-       return &dma_noncoherent_ops;
 #else
        return &dma_direct_ops;
 #endif
 
 {
        void *ret;
 
-       ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+       ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
        if (!ret)
                return NULL;
 
        *dma_handle = vdma_alloc(virt_to_phys(ret), size);
        if (*dma_handle == VDMA_ERROR) {
-               dma_direct_free(dev, size, ret, *dma_handle, attrs);
+               dma_direct_free_pages(dev, size, ret, *dma_handle, attrs);
                return NULL;
        }
 
        vdma_free(dma_handle);
        if (!(attrs & DMA_ATTR_NON_CONSISTENT))
                vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
-       return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+       dma_direct_free_pages(dev, size, vaddr, dma_handle, attrs);
 }
 
 static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
 
  */
 static inline bool cpu_needs_post_dma_flush(struct device *dev)
 {
-       if (dev_is_dma_coherent(dev))
-               return false;
-
        switch (boot_cpu_type()) {
        case CPU_R10000:
        case CPU_R12000:
 {
        void *ret;
 
-       ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-       if (!ret)
-               return NULL;
-
-       if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+       ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+       if (ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
                dma_cache_wback_inv((unsigned long) ret, size);
                ret = (void *)UNCAC_ADDR(ret);
        }
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
 {
-       if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_dma_coherent(dev))
+       if (!(attrs & DMA_ATTR_NON_CONSISTENT))
                cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
-       dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+       dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
 }
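
The CAC_ADDR()/UNCAC_ADDR() conversions used here depend on MIPS exposing the same physical memory through both a cached and an uncached kernel window. On a classic 32-bit configuration the conversion is just an offset between those windows; a rough sketch of the arch macros (illustrative only, not part of this patch):

/* Illustrative only: swap an address between the cached and uncached windows. */
#define SKETCH_UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + UNCAC_BASE)
#define SKETCH_CAC_ADDR(addr)	((addr) - UNCAC_BASE + PAGE_OFFSET)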
 
 int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       unsigned long addr = (unsigned long)cpu_addr;
+       unsigned long addr = CAC_ADDR((unsigned long)cpu_addr);
        unsigned long off = vma->vm_pgoff;
-       unsigned long pfn;
+       unsigned long pfn = page_to_pfn(virt_to_page((void *)addr));
        int ret = -ENXIO;
 
-       if (!dev_is_dma_coherent(dev))
-               addr = CAC_ADDR(addr);
-
-       pfn = page_to_pfn(virt_to_page((void *)addr));
-
        if (attrs & DMA_ATTR_WRITE_COMBINE)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
 {
-       if (!dev_is_dma_coherent(dev))
-               dma_sync_phys(paddr, size, dir);
+       dma_sync_phys(paddr, size, dir);
 }
 
 void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 {
        BUG_ON(direction == DMA_NONE);
 
-       if (!dev_is_dma_coherent(dev))
-               dma_sync_virt(vaddr, size, direction);
+       dma_sync_virt(vaddr, size, direction);
 }
 
        select CLKSRC_MMIO
        select CLONE_BACKWARDS
        select COMMON_CLK
-       select DMA_NONCOHERENT_OPS
+       select DMA_DIRECT_OPS
        select GENERIC_ATOMIC64
        select GENERIC_CPU_DEVICES
        select GENERIC_CLOCKEVENTS
 
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_NO_SWAP
-       select DMA_NONCOHERENT_OPS
+       select DMA_DIRECT_OPS
        select TIMER_OF
        select GENERIC_ATOMIC64
        select GENERIC_CLOCKEVENTS
 
 config OPENRISC
        def_bool y
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       select DMA_NONCOHERENT_OPS
+       select DMA_DIRECT_OPS
        select OF
        select OF_EARLY_FLATTREE
        select IRQ_DOMAIN
 
        depends on PA7000 || PA7100LC || PA7200 || PA7300LC
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       select DMA_NONCOHERENT_OPS
+       select DMA_DIRECT_OPS
        select DMA_NONCOHERENT_CACHE_SYNC
 
 config PREFETCH
 
        case pcxl: /* falls through */
        case pcxs:
        case pcxt:
-               hppa_dma_ops = &dma_noncoherent_ops;
+               hppa_dma_ops = &dma_direct_ops;
                break;
        default:
                break;
 
        select ARCH_NO_COHERENT_DMA_MMAP if !MMU
        select HAVE_PATA_PLATFORM
        select CLKDEV_LOOKUP
+       select DMA_DIRECT_OPS
        select HAVE_IDE if HAS_IOPORT_MAP
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
        bool
 
 config DMA_COHERENT
-       select DMA_DIRECT_OPS
        bool
 
 config DMA_NONCOHERENT
        def_bool !DMA_COHERENT
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       select DMA_NONCOHERENT_OPS
 
 config PGTABLE_LEVELS
        default 3 if X2TLB
 
 config SPARC32
        def_bool !64BIT
        select ARCH_HAS_SYNC_DMA_FOR_CPU
-       select DMA_NONCOHERENT_OPS
+       select DMA_DIRECT_OPS
        select GENERIC_ATOMIC64
        select CLZ_TAB
        select HAVE_UID16
 
 {
 #ifdef CONFIG_SPARC_LEON
        if (sparc_cpu_model == sparc_leon)
-               return &dma_noncoherent_ops;
+               return &dma_direct_ops;
 #endif
 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
        if (bus == &pci_bus_type)
-               return &dma_noncoherent_ops;
+               return &dma_direct_ops;
 #endif
        return dma_ops;
 }
 
 {
        void *vaddr;
 
-       vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
+       vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs);
        if (!vaddr ||
            !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
                return vaddr;
                goto out_free;
        return vaddr;
 out_free:
-       dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
+       dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs);
        return NULL;
 }
 
                   dma_addr_t dma_addr, unsigned long attrs)
 {
        gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
-       dma_direct_free(dev, size, vaddr, dma_addr, attrs);
+       dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
 }
 
 static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS
        select COMMON_CLK
-       select DMA_NONCOHERENT_OPS
+       select DMA_DIRECT_OPS
        select GENERIC_ATOMIC64
        select GENERIC_CLOCKEVENTS
        select GENERIC_IRQ_SHOW
 
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-       /*
-        * Use the non-coherent ops if available.  If an architecture wants a
-        * more fine-grained selection of operations it will have to implement
-        * get_arch_dma_ops itself or use the per-device dma_ops.
-        */
-#ifdef CONFIG_DMA_NONCOHERENT_OPS
-       return &dma_noncoherent_ops;
-#else
        return &dma_direct_ops;
-#endif
 }
 
 #endif /* _ASM_GENERIC_DMA_MAPPING_H */
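
For context, the generic dispatcher that consumes get_arch_dma_ops() prefers per-device ops and only falls back to this bus-level default; roughly (paraphrased from include/linux/dma-mapping.h of this era, not part of the patch):

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;		/* e.g. installed by an IOMMU driver */
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}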
 
                gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
+void *dma_direct_alloc_pages(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
+void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
+               dma_addr_t dma_addr, unsigned long attrs);
 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs);
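
The *_pages variants exist so that IOMMU implementations can reuse the direct allocator and then substitute their own bus address, which is also why dma_direct_free_pages() must not interpret dma_addr (see the NOTE further down and the jazz/GART hunks). A hypothetical caller showing the intended pairing; example_iommu_map() and EXAMPLE_MAPPING_ERROR are made-up names:

static void *example_iommu_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *vaddr;

	/* CPU-addressable backing memory from the direct allocator. */
	vaddr = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
	if (!vaddr)
		return NULL;

	/* Replace the direct bus address with an IOMMU mapping (hypothetical helper). */
	*dma_handle = example_iommu_map(dev, virt_to_phys(vaddr), size);
	if (*dma_handle == EXAMPLE_MAPPING_ERROR) {
		/* Fine even though *dma_handle is bogus: the free path ignores it. */
		dma_direct_free_pages(dev, size, vaddr, *dma_handle, attrs);
		return NULL;
	}
	return vaddr;
}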
 
 };
 
 extern const struct dma_map_ops dma_direct_ops;
-extern const struct dma_map_ops dma_noncoherent_ops;
 extern const struct dma_map_ops dma_virt_ops;
 
 #define DMA_BIT_MASK(n)        (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
 
                gfp_t gfp, unsigned long attrs);
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
-
-#ifdef CONFIG_DMA_NONCOHERENT_MMAP
 int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
-#else
-#define arch_dma_mmap NULL
-#endif /* CONFIG_DMA_NONCOHERENT_MMAP */
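
The NULL fallback removed here is no longer needed: the only generic caller, dma_direct_mmap() added below, guards the call with IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP), and because that condition is a compile-time constant the compiler discards the call and its symbol reference when the option is off, so the bare declaration suffices. The same idiom in miniature, with hypothetical CONFIG_FOO/foo_setup() names:

void foo_setup(void);	/* only defined when CONFIG_FOO=y */

static void init_foo(void)
{
	/* Constant-false branch: call and reference are eliminated if CONFIG_FOO is off. */
	if (IS_ENABLED(CONFIG_FOO))
		foo_setup();
}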
 
 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 
        bool
        depends on HAS_DMA
 
-config DMA_NONCOHERENT_OPS
-       bool
-       depends on HAS_DMA
-       select DMA_DIRECT_OPS
-
 config DMA_NONCOHERENT_MMAP
        bool
-       depends on DMA_NONCOHERENT_OPS
+       depends on DMA_DIRECT_OPS
 
 config DMA_NONCOHERENT_CACHE_SYNC
        bool
-       depends on DMA_NONCOHERENT_OPS
+       depends on DMA_DIRECT_OPS
 
 config DMA_VIRT_OPS
        bool
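
With DMA_NONCOHERENT_OPS gone, a noncoherent port simply selects DMA_DIRECT_OPS plus the relevant ARCH_HAS_SYNC_DMA_FOR_* options and supplies the arch hooks. An illustrative skeleton of those hooks, not taken from any in-tree port (the cache-maintenance bodies are placeholders):

#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	/* write back (and possibly invalidate) CPU caches for [paddr, paddr + size) */
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	/* invalidate any speculatively filled lines before the CPU reads the buffer */
}

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	/* typically: allocate, flush, and hand back an uncached alias */
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}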
 
 obj-$(CONFIG_DMA_CMA)                  += contiguous.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o
 obj-$(CONFIG_DMA_DIRECT_OPS)           += direct.o
-obj-$(CONFIG_DMA_NONCOHERENT_OPS)      += noncoherent.o
 obj-$(CONFIG_DMA_VIRT_OPS)             += virt.o
 obj-$(CONFIG_DMA_API_DEBUG)            += debug.o
 obj-$(CONFIG_SWIOTLB)                  += swiotlb.o
 
 // SPDX-License-Identifier: GPL-2.0
 /*
- * DMA operations that map physical memory directly without using an IOMMU or
- * flushing caches.
+ * Copyright (C) 2018 Christoph Hellwig.
+ *
+ * DMA operations that map physical memory directly without using an IOMMU.
  */
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/dma-direct.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-contiguous.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/pfn.h>
 #include <linux/set_memory.h>
 
        return addr + size - 1 <= dev->coherent_dma_mask;
 }
 
-void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, unsigned long attrs)
+void *dma_direct_alloc_pages(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        int page_order = get_order(size);
  * NOTE: this function must never look at the dma_addr argument, because we want
  * to be able to use it as a helper for iommu implementations as well.
  */
-void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
 {
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
                free_pages((unsigned long)cpu_addr, page_order);
 }
 
+void *dma_direct_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+       if (!dev_is_dma_coherent(dev))
+               return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
+       return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+}
+
+void dma_direct_free(struct device *dev, size_t size,
+               void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
+{
+       if (!dev_is_dma_coherent(dev))
+               arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
+       else
+               dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
+}
+
+static int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               unsigned long attrs)
+{
+       if (!dev_is_dma_coherent(dev) &&
+           IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP))
+               return arch_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+static void dma_direct_sync_single_for_device(struct device *dev,
+               dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+       if (dev_is_dma_coherent(dev))
+               return;
+       arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_direct_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       if (dev_is_dma_coherent(dev))
+               return;
+
+       for_each_sg(sgl, sg, nents, i)
+               arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+static void dma_direct_sync_single_for_cpu(struct device *dev,
+               dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+       if (dev_is_dma_coherent(dev))
+               return;
+       arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+       arch_sync_dma_for_cpu_all(dev);
+}
+
+static void dma_direct_sync_sg_for_cpu(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       if (dev_is_dma_coherent(dev))
+               return;
+
+       for_each_sg(sgl, sg, nents, i)
+               arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+       arch_sync_dma_for_cpu_all(dev);
+}
+
+static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+               int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
+}
+#endif
+
 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
 {
-       dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
+       phys_addr_t phys = page_to_phys(page) + offset;
+       dma_addr_t dma_addr = phys_to_dma(dev, phys);
 
        if (!check_addr(dev, dma_addr, size, __func__))
                return DIRECT_MAPPING_ERROR;
+
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
        return dma_addr;
 }
 
                sg_dma_len(sg) = sg->length;
        }
 
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
        return nents;
 }
 
 const struct dma_map_ops dma_direct_ops = {
        .alloc                  = dma_direct_alloc,
        .free                   = dma_direct_free,
+       .mmap                   = dma_direct_mmap,
        .map_page               = dma_direct_map_page,
        .map_sg                 = dma_direct_map_sg,
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
+       .sync_single_for_device = dma_direct_sync_single_for_device,
+       .sync_sg_for_device     = dma_direct_sync_sg_for_device,
+#endif
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+       .sync_single_for_cpu    = dma_direct_sync_single_for_cpu,
+       .sync_sg_for_cpu        = dma_direct_sync_sg_for_cpu,
+       .unmap_page             = dma_direct_unmap_page,
+       .unmap_sg               = dma_direct_unmap_sg,
+#endif
        .dma_supported          = dma_direct_supported,
        .mapping_error          = dma_direct_mapping_error,
+       .cache_sync             = arch_dma_cache_sync,
 };
 EXPORT_SYMBOL(dma_direct_ops);
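
With the sync callbacks folded into dma_direct_ops, a driver's streaming mapping exercises the merged code without any driver-side change: dma_map_single() lands in dma_direct_map_page(), which syncs for the device unless dev_is_dma_coherent(), and dma_unmap_single() reaches dma_direct_unmap_page(), which syncs for the CPU on architectures selecting ARCH_HAS_SYNC_DMA_FOR_CPU. A hedged sketch of that round trip (example_tx() and its parameters are made up):

#include <linux/dma-mapping.h>

static int example_tx(struct device *dev, void *buf, size_t len)
{
	/* map: dma_direct_map_page() + sync_for_device on noncoherent devices */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the hardware to DMA from "handle" and wait for completion ... */

	/* unmap: dma_direct_unmap_page() + sync_for_cpu where the arch needs it */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}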
 
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018 Christoph Hellwig.
- *
- * DMA operations that map physical memory directly without providing cache
- * coherence.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/scatterlist.h>
-
-static void dma_noncoherent_sync_single_for_device(struct device *dev,
-               dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-       arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
-}
-
-static void dma_noncoherent_sync_sg_for_device(struct device *dev,
-               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int i;
-
-       for_each_sg(sgl, sg, nents, i)
-               arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
-}
-
-static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, enum dma_data_direction dir,
-               unsigned long attrs)
-{
-       dma_addr_t addr;
-
-       addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
-       if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
-                               size, dir);
-       return addr;
-}
-
-static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
-               int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-       nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
-       if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
-       return nents;
-}
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
-static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
-               dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-       arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
-       arch_sync_dma_for_cpu_all(dev);
-}
-
-static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
-               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int i;
-
-       for_each_sg(sgl, sg, nents, i)
-               arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
-       arch_sync_dma_for_cpu_all(dev);
-}
-
-static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
-               size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
-               int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-#endif
-
-const struct dma_map_ops dma_noncoherent_ops = {
-       .alloc                  = arch_dma_alloc,
-       .free                   = arch_dma_free,
-       .mmap                   = arch_dma_mmap,
-       .sync_single_for_device = dma_noncoherent_sync_single_for_device,
-       .sync_sg_for_device     = dma_noncoherent_sync_sg_for_device,
-       .map_page               = dma_noncoherent_map_page,
-       .map_sg                 = dma_noncoherent_map_sg,
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
-       .sync_single_for_cpu    = dma_noncoherent_sync_single_for_cpu,
-       .sync_sg_for_cpu        = dma_noncoherent_sync_sg_for_cpu,
-       .unmap_page             = dma_noncoherent_unmap_page,
-       .unmap_sg               = dma_noncoherent_unmap_sg,
-#endif
-       .dma_supported          = dma_direct_supported,
-       .mapping_error          = dma_direct_mapping_error,
-       .cache_sync             = arch_dma_cache_sync,
-};
-EXPORT_SYMBOL(dma_noncoherent_ops);