        free_pages_exact(vaddr, size);
 }
 
-static dma_addr_t
-or1k_map_page(struct device *dev, struct page *page,
-             unsigned long offset, size_t size,
-             enum dma_data_direction dir,
-             unsigned long attrs)
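+/*
+ * Make the CPU's writes to a buffer visible to the device: write any
+ * dirty dcache lines in [dma_handle, dma_handle + size) back to memory.
+ */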
+static void
+or1k_sync_single_for_device(struct device *dev,
+                           dma_addr_t dma_handle, size_t size,
+                           enum dma_data_direction dir)
 {
        unsigned long cl;
-       dma_addr_t addr = page_to_phys(page) + offset;
+       dma_addr_t addr = dma_handle;
        struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
 
-       if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-               return addr;
-
        switch (dir) {
        case DMA_TO_DEVICE:
                /* Flush the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo->dcache_block_size)
                        mtspr(SPR_DCBFR, cl);
                break;
        }
 
+}
+
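+/*
+ * On or1k the DMA address is just the physical address of the page,
+ * so mapping a page reduces to cache maintenance for the CPU side.
+ */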
+static dma_addr_t
+or1k_map_page(struct device *dev, struct page *page,
+             unsigned long offset, size_t size,
+             enum dma_data_direction dir,
+             unsigned long attrs)
+{
+       dma_addr_t addr = page_to_phys(page) + offset;
+
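+       /*
+        * Callers passing DMA_ATTR_SKIP_CPU_SYNC do their own cache
+        * maintenance, so only sync when the attribute is absent.
+        */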
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               or1k_sync_single_for_device(dev, addr, size, dir);
        return addr;
 }
 
        return nents;
 }
 
-static void
-or1k_sync_single_for_device(struct device *dev,
-                           dma_addr_t dma_handle, size_t size,
-                           enum dma_data_direction dir)
-{
-       unsigned long cl;
-       dma_addr_t addr = dma_handle;
-       struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
-
-       /* Flush the dcache for the requested range */
-       for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
-               mtspr(SPR_DCBFR, cl);
-}
-
 const struct dma_map_ops or1k_dma_map_ops = {
        .alloc = or1k_dma_alloc,
        .free = or1k_dma_free,