dma_addr = le64_to_cpu(txd->buffer);
                        dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;
 
-                       pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
+                       dma_unmap_page(&gp->pdev->dev, dma_addr, dma_len,
+                                      DMA_TO_DEVICE);
                        entry = NEXT_TX(entry);
                }
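
The pci_* DMA helpers were thin compatibility shims over the generic DMA API (include/linux/pci-dma-compat.h in kernels that still carried it), so each hunk in this conversion is mechanical: gp->pdev becomes &gp->pdev->dev and the PCI_DMA_* direction constants become their DMA_* counterparts. Roughly, the wrapper being dropped here was:

    static inline void
    pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
                   size_t size, int direction)
    {
            dma_unmap_page(&hwdev->dev, dma_address, size,
                           (enum dma_data_direction)direction);
    }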
 
                                drops++;
                                goto drop_it;
                        }
-                       pci_unmap_page(gp->pdev, dma_addr,
-                                      RX_BUF_ALLOC_SIZE(gp),
-                                      PCI_DMA_FROMDEVICE);
+                       dma_unmap_page(&gp->pdev->dev, dma_addr,
+                                      RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE);
                        gp->rx_skbs[entry] = new_skb;
                        skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
-                       rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
+                       rxd->buffer = cpu_to_le64(dma_map_page(&gp->pdev->dev,
                                                               virt_to_page(new_skb->data),
                                                               offset_in_page(new_skb->data),
                                                               RX_BUF_ALLOC_SIZE(gp),
-                                                              PCI_DMA_FROMDEVICE));
+                                                              DMA_FROM_DEVICE));
                        skb_reserve(new_skb, RX_OFFSET);
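
Mapping via virt_to_page()/offset_in_page() on new_skb->data is, for lowmem kernel addresses like skb heads, interchangeable with dma_map_single() on the pointer itself; the patch keeps the page-based form so the conversion stays one-to-one. A hypothetical equivalent of the call above:

    rxd->buffer = cpu_to_le64(dma_map_single(&gp->pdev->dev,
                                             new_skb->data,
                                             RX_BUF_ALLOC_SIZE(gp),
                                             DMA_FROM_DEVICE));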
 
                        /* Trim the original skb for the netif. */
 
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
-                       pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_cpu(&gp->pdev->dev, dma_addr, len,
+                                               DMA_FROM_DEVICE);
                        skb_copy_from_linear_data(skb, copy_skb->data, len);
-                       pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_device(&gp->pdev->dev, dma_addr,
+                                                  len, DMA_FROM_DEVICE);
 
                        /* We'll reuse the original ring buffer. */
                        skb = copy_skb;
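
The sync pair in the copy path is an ownership hand-off: dma_sync_single_for_cpu() makes the NIC's writes visible before the CPU copies out of the ring buffer, and dma_sync_single_for_device() gives the buffer back so the descriptor can be recycled. A minimal sketch of the pattern, with placeholder names (dev, dma, len, bounce, ring_buf):

    dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
    memcpy(bounce, ring_buf, len);      /* CPU may now read the buffer */
    dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);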
                u32 len;
 
                len = skb->len;
-               mapping = pci_map_page(gp->pdev,
+               mapping = dma_map_page(&gp->pdev->dev,
                                       virt_to_page(skb->data),
                                       offset_in_page(skb->data),
-                                      len, PCI_DMA_TODEVICE);
+                                      len, DMA_TO_DEVICE);
                ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
                if (gem_intme(entry))
                        ctrl |= TXDCTRL_INTME;
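
Neither the old nor the new code checks the TX mapping for failure; with the generic API that check is dma_mapping_error() against the same struct device. A hypothetical sketch, not something this patch adds:

    mapping = dma_map_page(&gp->pdev->dev, virt_to_page(skb->data),
                           offset_in_page(skb->data), len, DMA_TO_DEVICE);
    if (dma_mapping_error(&gp->pdev->dev, mapping)) {
            dev_kfree_skb_any(skb);  /* hypothetical: drop on map failure */
            return NETDEV_TX_OK;
    }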
                 * Otherwise we could race with the device.
                 */
                first_len = skb_headlen(skb);
-               first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
+               first_mapping = dma_map_page(&gp->pdev->dev,
+                                            virt_to_page(skb->data),
                                             offset_in_page(skb->data),
-                                            first_len, PCI_DMA_TODEVICE);
+                                            first_len, DMA_TO_DEVICE);
                entry = NEXT_TX(entry);
 
                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                if (gp->rx_skbs[i] != NULL) {
                        skb = gp->rx_skbs[i];
                        dma_addr = le64_to_cpu(rxd->buffer);
-                       pci_unmap_page(gp->pdev, dma_addr,
+                       dma_unmap_page(&gp->pdev->dev, dma_addr,
                                       RX_BUF_ALLOC_SIZE(gp),
-                                      PCI_DMA_FROMDEVICE);
+                                      DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        gp->rx_skbs[i] = NULL;
                }
 
                                txd = &gb->txd[ent];
                                dma_addr = le64_to_cpu(txd->buffer);
-                               pci_unmap_page(gp->pdev, dma_addr,
+                               dma_unmap_page(&gp->pdev->dev, dma_addr,
                                               le64_to_cpu(txd->control_word) &
-                                              TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);
+                                              TXDCTRL_BUFSZ, DMA_TO_DEVICE);
 
                                if (frag != skb_shinfo(skb)->nr_frags)
                                        i++;
 
                gp->rx_skbs[i] = skb;
                skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
-               dma_addr = pci_map_page(gp->pdev,
+               dma_addr = dma_map_page(&gp->pdev->dev,
                                        virt_to_page(skb->data),
                                        offset_in_page(skb->data),
                                        RX_BUF_ALLOC_SIZE(gp),
-                                       PCI_DMA_FROMDEVICE);
+                                       DMA_FROM_DEVICE);
                rxd->buffer = cpu_to_le64(dma_addr);
                dma_wmb();
                rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
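
The dma_wmb() here is what makes the two descriptor writes safe: the freshly mapped buffer address must be globally visible before the status word hands ownership to the NIC. Annotated, the publish sequence is:

    rxd->buffer = cpu_to_le64(dma_addr);    /* 1: publish buffer address */
    dma_wmb();                              /* order write 1 before write 2 */
    rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); /* 2: give to NIC */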
                cancel_work_sync(&gp->reset_task);
 
                /* Free resources */
-               pci_free_consistent(pdev,
-                                   sizeof(struct gem_init_block),
-                                   gp->init_block,
-                                   gp->gblock_dvma);
+               dma_free_coherent(&pdev->dev, sizeof(struct gem_init_block),
+                                 gp->init_block, gp->gblock_dvma);
                iounmap(gp->regs);
                pci_release_regions(pdev);
                free_netdev(dev);
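
As with the map/unmap calls, pci_free_consistent() was only a wrapper, so this hunk changes no behavior; roughly:

    static inline void
    pci_free_consistent(struct pci_dev *hwdev, size_t size,
                        void *vaddr, dma_addr_t dma_handle)
    {
            dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
    }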
         */
        if (pdev->vendor == PCI_VENDOR_ID_SUN &&
            pdev->device == PCI_DEVICE_ID_SUN_GEM &&
-           !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+           !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        pr_err("No usable DMA configuration, aborting\n");
                        goto err_disable_device;
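
pci_set_dma_mask() likewise forwarded straight to dma_set_mask(), so the 64-bit-then-32-bit fallback keeps its meaning. Were the Sun-specific DAC check not needed, the idiom could hypothetically be collapsed with dma_set_mask_and_coherent(), which sets the streaming and coherent masks in one call:

    /* Hypothetical consolidation; not what this patch does. */
    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
            err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));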
         * PAGE_SIZE aligned.
         */
        gp->init_block = (struct gem_init_block *)
-               pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
-                                    &gp->gblock_dvma);
+               dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
+                                  &gp->gblock_dvma, GFP_KERNEL);
        if (!gp->init_block) {
                pr_err("Cannot allocate init block, aborting\n");
                err = -ENOMEM;
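
This is the one hunk where the conversion is not purely mechanical: pci_alloc_consistent() hard-coded GFP_ATOMIC, while the replacement passes GFP_KERNEL. That is safe because gem_probe() runs in process context and may sleep, but it is a deliberate behavioral relaxation rather than a one-to-one rewrite. The compat wrapper was roughly:

    static inline void *
    pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                         dma_addr_t *dma_handle)
    {
            return dma_alloc_coherent(&hwdev->dev, size, dma_handle,
                                      GFP_ATOMIC);
    }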