        }
 }
 
-dma_addr_t
-nv_device_map_page(struct nouveau_device *device, struct page *page)
-{
-       dma_addr_t ret;
-
-       if (nv_device_is_pci(device)) {
-               ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE,
-                                  PCI_DMA_BIDIRECTIONAL);
-               if (pci_dma_mapping_error(device->pdev, ret))
-                       ret = 0;
-       } else {
-               ret = page_to_phys(page);
-       }
-
-       return ret;
-}
-
-void
-nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
-{
-       if (nv_device_is_pci(device))
-               pci_unmap_page(device->pdev, addr, PAGE_SIZE,
-                              PCI_DMA_BIDIRECTIONAL);
-}
-
 int
 nv_device_get_irq(struct nouveau_device *device, bool stall)
 {
 
 resource_size_t
 nv_device_resource_len(struct nouveau_device *device, unsigned int bar);
 
-dma_addr_t
-nv_device_map_page(struct nouveau_device *device, struct page *page);
-
-void
-nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr);
-
 int
 nv_device_get_irq(struct nouveau_device *device, bool stall);
 
 
 
        priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (priv->r100c08_page) {
-               priv->r100c08 = nv_device_map_page(device, priv->r100c08_page);
+               priv->r100c08 = dma_map_page(nv_device_base(device),
+                                            priv->r100c08_page, 0, PAGE_SIZE,
+                                            DMA_BIDIRECTIONAL);
                if (!priv->r100c08)
                        nv_warn(priv, "failed 0x100c08 page map\n");
        } else {
        struct nv50_fb_priv *priv = (void *)object;
 
        if (priv->r100c08_page) {
-               nv_device_unmap_page(device, priv->r100c08);
+               dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
+                              DMA_BIDIRECTIONAL);
                __free_page(priv->r100c08_page);
        }
 
 
        struct nvc0_fb_priv *priv = (void *)object;
 
        if (priv->r100c10_page) {
-               nv_device_unmap_page(device, priv->r100c10);
+               dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
+                              DMA_BIDIRECTIONAL);
                __free_page(priv->r100c10_page);
        }
 
 
        priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (priv->r100c10_page) {
-               priv->r100c10 = nv_device_map_page(device, priv->r100c10_page);
+               priv->r100c10 = dma_map_page(nv_device_base(device),
+                                            priv->r100c10_page, 0, PAGE_SIZE,
+                                            DMA_BIDIRECTIONAL);
                if (!priv->r100c10)
                        return -EFAULT;
        }
 
        struct nouveau_drm *drm;
        struct nouveau_device *device;
        struct drm_device *dev;
+       struct device *pdev;
        unsigned i;
        int r;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
        drm = nouveau_bdev(ttm->bdev);
        device = nv_device(drm->device);
        dev = drm->dev;
+       pdev = nv_device_base(device);
 
 #if __OS_HAS_AGP
        if (drm->agp.stat == ENABLED) {
        }
 
        for (i = 0; i < ttm->num_pages; i++) {
-               ttm_dma->dma_address[i] = nv_device_map_page(device,
-                                                            ttm->pages[i]);
-               if (!ttm_dma->dma_address[i]) {
+               dma_addr_t addr;
+
+               addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+                                   DMA_BIDIRECTIONAL);
+
+               if (dma_mapping_error(pdev, addr)) {
-                       while (--i) {
-                               nv_device_unmap_page(device,
-                                                    ttm_dma->dma_address[i]);
+                       while (i--) {
+                               dma_unmap_page(pdev, ttm_dma->dma_address[i],
+                                              PAGE_SIZE, DMA_BIDIRECTIONAL);
                                ttm_dma->dma_address[i] = 0;
                        }
                        ttm_pool_unpopulate(ttm);
                        return -EFAULT;
                }
+
+               ttm_dma->dma_address[i] = addr;
        }
        return 0;
 }
        struct nouveau_drm *drm;
        struct nouveau_device *device;
        struct drm_device *dev;
+       struct device *pdev;
        unsigned i;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
        drm = nouveau_bdev(ttm->bdev);
        device = nv_device(drm->device);
        dev = drm->dev;
+       pdev = nv_device_base(device);
 
 #if __OS_HAS_AGP
        if (drm->agp.stat == ENABLED) {
 
        for (i = 0; i < ttm->num_pages; i++) {
                if (ttm_dma->dma_address[i]) {
-                       nv_device_unmap_page(device, ttm_dma->dma_address[i]);
+                       dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
+                                      DMA_BIDIRECTIONAL);
                }
        }
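
For reference, below is a minimal sketch (not part of the patch) of the generic DMA-API pattern these hunks convert to, written against a plain struct device * rather than the nouveau device wrappers; the helper names example_map_pages() and example_unmap_pages() are illustrative only. Note that dma_mapping_error() is the portable failure check (a dma_addr_t of 0 is not guaranteed to indicate an error), and that the unwind loop uses the "i--" form so that index 0 is unmapped as well.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map npages pages for bidirectional DMA, unwinding on failure. */
static int example_map_pages(struct device *dev, struct page **pages,
			     dma_addr_t *dma_addrs, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		dma_addr_t addr = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
					       DMA_BIDIRECTIONAL);

		/* dma_mapping_error() is the only reliable failure test. */
		if (dma_mapping_error(dev, addr)) {
			/* Unwind every mapping made so far, including [0]. */
			while (i--)
				dma_unmap_page(dev, dma_addrs[i], PAGE_SIZE,
					       DMA_BIDIRECTIONAL);
			return -EFAULT;
		}
		dma_addrs[i] = addr;
	}
	return 0;
}

/* Release mappings created by example_map_pages(). */
static void example_unmap_pages(struct device *dev, dma_addr_t *dma_addrs,
				unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++)
		dma_unmap_page(dev, dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
}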