dma_addr_t *dma_addrp, gfp_t gfp)
 {
        struct platform_device *op = to_platform_device(dev);
-       unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+       unsigned long len_total = PAGE_ALIGN(len);
        unsigned long va;
        struct resource *res;
        int order;
                goto err_nova;
        }
        mmu_inval_dma_area(va, len_total);
+
        // XXX The mmu_map_dma_area does this for us below, see comments.
        // sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
        /*
                return;
        }
 
-       n = (n + PAGE_SIZE-1) & PAGE_MASK;
+       n = PAGE_ALIGN(n);
        if ((res->end-res->start)+1 != n) {
                printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
                    (long)((res->end-res->start)+1), n);
 static void *pci32_alloc_coherent(struct device *dev, size_t len,
                                  dma_addr_t *pba, gfp_t gfp)
 {
-       unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+       unsigned long len_total = PAGE_ALIGN(len);
        unsigned long va;
        struct resource *res;
        int order;
                return NULL;
        }
        mmu_inval_dma_area(va, len_total);
-#if 0
-/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
-  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
-#endif
        sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
 
        *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
                return;
        }
 
-       n = (n + PAGE_SIZE-1) & PAGE_MASK;
+       n = PAGE_ALIGN(n);
        if ((res->end-res->start)+1 != n) {
                printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
                    (long)((res->end-res->start)+1), (long)n);
                        BUG_ON(page_address(sg_page(sg)) == NULL);
                        mmu_inval_dma_area(
                            (unsigned long) page_address(sg_page(sg)),
-                           (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+                           PAGE_ALIGN(sg->length));
                }
        }
 }
 {
        if (dir != PCI_DMA_TODEVICE) {
                mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-                   (size + PAGE_SIZE-1) & PAGE_MASK);
+                                  PAGE_ALIGN(size));
        }
 }
 
 {
        if (dir != PCI_DMA_TODEVICE) {
                mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-                   (size + PAGE_SIZE-1) & PAGE_MASK);
+                                  PAGE_ALIGN(size));
        }
 }
 
                        BUG_ON(page_address(sg_page(sg)) == NULL);
                        mmu_inval_dma_area(
                            (unsigned long) page_address(sg_page(sg)),
-                           (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+                           PAGE_ALIGN(sg->length));
                }
        }
 }
                        BUG_ON(page_address(sg_page(sg)) == NULL);
                        mmu_inval_dma_area(
                            (unsigned long) page_address(sg_page(sg)),
-                           (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+                           PAGE_ALIGN(sg->length));
                }
        }
 }
 static struct resource *_sparc_find_resource(struct resource *root,
                                             unsigned long hit)
 {
-        struct resource *tmp;
+       struct resource *tmp;
 
        for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
                if (tmp->start <= hit && tmp->end >= hit)
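
For reference, the conversion is behaviour-preserving: PAGE_ALIGN() expands to the
same round-up that the open-coded "(x + PAGE_SIZE-1) & PAGE_MASK" performed at each
of these call sites. A minimal userspace sketch of that equivalence (PAGE_SIZE,
PAGE_MASK, ALIGN and PAGE_ALIGN are re-declared here purely for illustration, with a
4 KiB page assumed; in the kernel they come from the page/mm headers):

	#include <assert.h>
	#include <stddef.h>

	/* Illustrative stand-ins for the kernel definitions (4 KiB pages assumed). */
	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
	#define PAGE_ALIGN(x)	ALIGN((x), PAGE_SIZE)

	int main(void)
	{
		/* PAGE_ALIGN(len) and the open-coded expression round up to
		 * the same page boundary for every length. */
		for (size_t len = 0; len < 3 * PAGE_SIZE; len++)
			assert(PAGE_ALIGN(len) ==
			       ((len + PAGE_SIZE - 1) & PAGE_MASK));
		return 0;
	}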