#include <asm/io-unit.h>
 #include <asm/leon.h>
 
+/* This function must make sure that caches and memory are coherent after DMA.
+ * On LEON systems without cache snooping, it flushes the entire D-CACHE.
+ */
 #ifndef CONFIG_SPARC_LEON
-#define mmu_inval_dma_area(p, l)       /* Anton pulled it out for 2.4.0-xx */
+static inline void dma_make_coherent(unsigned long pa, unsigned long len)
+{
+}
 #else
-static inline void mmu_inval_dma_area(void *va, unsigned long len)
+static inline void dma_make_coherent(unsigned long pa, unsigned long len)
 {
        if (!sparc_leon3_snooping_enabled())
                leon_flush_dcache_all();
                printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
                goto err_nova;
        }
-       mmu_inval_dma_area((void *)va, len_total);
 
        // XXX The mmu_map_dma_area does this for us below, see comments.
        // sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
        release_resource(res);
        kfree(res);
 
-       /* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
        pgv = virt_to_page(p);
        mmu_unmap_dma_area(dev, ba, n);
 
                printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
                goto err_nova;
        }
-       mmu_inval_dma_area(va, len_total);
        sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
 
        *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
                                dma_addr_t ba)
 {
        struct resource *res;
-       void *pgp;
 
        if ((res = _sparc_find_resource(&_sparc_dvma,
            (unsigned long)p)) == NULL) {
                return;
        }
 
-       pgp = phys_to_virt(ba); /* bus_to_virt actually */
-       mmu_inval_dma_area(pgp, n);
+       dma_make_coherent(ba, n);
        sparc_unmapiorange((unsigned long)p, n);
 
        release_resource(res);
        kfree(res);
-
-       free_pages((unsigned long)pgp, get_order(n));
+       free_pages((unsigned long)phys_to_virt(ba), get_order(n));
 }
 
 /*
                             enum dma_data_direction dir, struct dma_attrs *attrs)
 {
        if (dir != PCI_DMA_TODEVICE)
-               mmu_inval_dma_area(phys_to_virt(ba), PAGE_ALIGN(size));
+               dma_make_coherent(ba, PAGE_ALIGN(size));
 }
 
 /* Map a set of buffers described by scatterlist in streaming
 
        /* IIep is write-through, not flushing. */
        for_each_sg(sgl, sg, nents, n) {
-               BUG_ON(page_address(sg_page(sg)) == NULL);
-               sg->dma_address = virt_to_phys(sg_virt(sg));
+               sg->dma_address = sg_phys(sg);
                sg->dma_length = sg->length;
        }
        return nents;
 
        if (dir != PCI_DMA_TODEVICE) {
                for_each_sg(sgl, sg, nents, n) {
-                       BUG_ON(page_address(sg_page(sg)) == NULL);
-                       mmu_inval_dma_area(page_address(sg_page(sg)),
-                                          PAGE_ALIGN(sg->length));
+                       dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
                }
        }
 }
                                      size_t size, enum dma_data_direction dir)
 {
        if (dir != PCI_DMA_TODEVICE) {
-               mmu_inval_dma_area(phys_to_virt(ba),
-                                  PAGE_ALIGN(size));
+               dma_make_coherent(ba, PAGE_ALIGN(size));
        }
 }
 
                                         size_t size, enum dma_data_direction dir)
 {
        if (dir != PCI_DMA_TODEVICE) {
-               mmu_inval_dma_area(phys_to_virt(ba),
-                                  PAGE_ALIGN(size));
+               dma_make_coherent(ba, PAGE_ALIGN(size));
        }
 }
 
 
        if (dir != PCI_DMA_TODEVICE) {
                for_each_sg(sgl, sg, nents, n) {
-                       BUG_ON(page_address(sg_page(sg)) == NULL);
-                       mmu_inval_dma_area(page_address(sg_page(sg)),
-                                          PAGE_ALIGN(sg->length));
+                       dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
                }
        }
 }
 
        if (dir != PCI_DMA_TODEVICE) {
                for_each_sg(sgl, sg, nents, n) {
-                       BUG_ON(page_address(sg_page(sg)) == NULL);
-                       mmu_inval_dma_area(page_address(sg_page(sg)),
-                                          PAGE_ALIGN(sg->length));
+                       dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
                }
        }
 }