unsigned int *bytes_compl)
 {
        if (buffer->unmap_len) {
-               struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+               struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
                                         buffer->unmap_len);
                if (buffer->unmap_single)
-                       pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
-                                        PCI_DMA_TODEVICE);
+                       dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+                                        DMA_TO_DEVICE);
                else
-                       pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
-                                      PCI_DMA_TODEVICE);
+                       dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+                                      DMA_TO_DEVICE);
                buffer->unmap_len = 0;
                buffer->unmap_single = false;
        }
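The completion path above recovers the start of the original mapping from the
descriptor's end address, since a single mapping may have been split across
several descriptors.  A minimal sketch of that pattern against the generic DMA
API, using invented names (my_buf, my_unmap_buf) rather than the driver's
types:

#include <linux/dma-mapping.h>

struct my_buf {
	dma_addr_t dma_addr;	/* DMA address of this descriptor's data */
	unsigned int len;	/* bytes covered by this descriptor */
	unsigned int unmap_len;	/* length of the whole mapping, or 0 */
	bool unmap_single;	/* true: dma_map_single, false: dma_map_page */
};

static void my_unmap_buf(struct device *dma_dev, struct my_buf *buf)
{
	/* The mapping began unmap_len bytes before the end of this
	 * descriptor's range. */
	dma_addr_t start = buf->dma_addr + buf->len - buf->unmap_len;

	if (!buf->unmap_len)
		return;
	if (buf->unmap_single)
		dma_unmap_single(dma_dev, start, buf->unmap_len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_page(dma_dev, start, buf->unmap_len,
			       DMA_TO_DEVICE);
	buf->unmap_len = 0;
}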
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 {
        struct efx_nic *efx = tx_queue->efx;
-       struct pci_dev *pci_dev = efx->pci_dev;
+       struct device *dma_dev = &efx->pci_dev->dev;
        struct efx_tx_buffer *buffer;
        skb_frag_t *fragment;
        unsigned int len, unmap_len = 0, fill_level, insert_ptr;
        fill_level = tx_queue->insert_count - tx_queue->old_read_count;
        q_space = efx->txq_entries - 1 - fill_level;
 
-       /* Map for DMA.  Use pci_map_single rather than pci_map_page
+       /* Map for DMA.  Use dma_map_single rather than dma_map_page
         * since this is more efficient on machines with sparse
         * memory.
         */
        unmap_single = true;
-       dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+       dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
 
        /* Process all fragments */
        while (1) {
-               if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
-                       goto pci_err;
+               if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+                       goto dma_err;
 
                /* Store fields for marking in the per-fragment final
                 * descriptor */
                i++;
                /* Map for DMA */
                unmap_single = false;
-               dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
+               dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
                                            DMA_TO_DEVICE);
        }
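The enqueue path maps the linear head with dma_map_single() and each paged
fragment with skb_frag_dma_map(), and every returned address must be checked
with dma_mapping_error() before it is used.  A simplified sketch of that loop
(my_map_skb is invented; the descriptor bookkeeping and full unwind of the
real code are omitted):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int my_map_skb(struct device *dma_dev, struct sk_buff *skb)
{
	dma_addr_t dma_addr;
	int i;

	/* Linear head: dma_map_single() rather than dma_map_page(),
	 * which the comment above notes is cheaper on machines with
	 * sparse memory. */
	dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	/* Paged fragments: skb_frag_dma_map() already took a struct
	 * device *, so those call sites only lose the &pci_dev->dev
	 * dereference. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;	/* real code unmaps prior maps */
	}
	return 0;
}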
 
 
        return NETDEV_TX_OK;
 
- pci_err:
+ dma_err:
        netif_err(efx, tx_err, efx->net_dev,
                  " TX queue %d could not map skb with %d bytes %d "
                  "fragments for DMA\n", tx_queue->queue, skb->len,
        /* Free the fragment we were mid-way through pushing */
        if (unmap_len) {
                if (unmap_single)
-                       pci_unmap_single(pci_dev, unmap_addr, unmap_len,
-                                        PCI_DMA_TODEVICE);
+                       dma_unmap_single(dma_dev, unmap_addr, unmap_len,
+                                        DMA_TO_DEVICE);
                else
-                       pci_unmap_page(pci_dev, unmap_addr, unmap_len,
-                                      PCI_DMA_TODEVICE);
+                       dma_unmap_page(dma_dev, unmap_addr, unmap_len,
+                                      DMA_TO_DEVICE);
        }
 
        return rc;
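On a mapping error the locals unmap_addr, unmap_len and unmap_single still own
the in-flight mapping, because no descriptor has taken it over yet, so the
error path releases it directly.  A hedged sketch of just that step
(my_unwind_partial is invented):

#include <linux/dma-mapping.h>

static void my_unwind_partial(struct device *dma_dev, dma_addr_t unmap_addr,
			      unsigned int unmap_len, bool unmap_single)
{
	if (!unmap_len)
		return;		/* nothing was mid-push */
	if (unmap_single)
		dma_unmap_single(dma_dev, unmap_addr, unmap_len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_page(dma_dev, unmap_addr, unmap_len,
			       DMA_TO_DEVICE);
}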
  */
 static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
 {
-
-       struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+       struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
        struct efx_tso_header *tsoh;
        dma_addr_t dma_addr;
        u8 *base_kva, *kva;
 
-       base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
+       base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
        if (base_kva == NULL) {
                netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
                          "Unable to allocate page for TSO headers\n");
                return -ENOMEM;
        }
 
-       /* pci_alloc_consistent() allocates pages. */
+       /* dma_alloc_coherent() allocates pages. */
        EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
 
        for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
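efx_tsoh_block_alloc() relies on dma_alloc_coherent() returning page-aligned
memory for a page-sized request, which the EFX_BUG_ON_PARANOID above asserts;
the page is then sliced into fixed-size header slots whose DMA addresses are
plain offsets into the one coherent mapping.  A sketch under those assumptions
(my_alloc_header_page and HDR_SLOT_SIZE are invented; the driver uses
TSOH_STD_SIZE):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#define HDR_SLOT_SIZE 128	/* illustrative slot size */

static void *my_alloc_header_page(struct device *dma_dev,
				  dma_addr_t *dma_addr)
{
	/* GFP_ATOMIC: the TX path may run in softirq context and must
	 * not sleep, which pci_alloc_consistent() assumed implicitly. */
	void *base = dma_alloc_coherent(dma_dev, PAGE_SIZE, dma_addr,
					GFP_ATOMIC);

	if (!base)
		return NULL;
	/* Slot i lives at base + i * HDR_SLOT_SIZE on the CPU side and
	 * at *dma_addr + i * HDR_SLOT_SIZE on the device side; one
	 * mapping covers them all. */
	return base;
}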
 /* Free up a TSO header, and all others in the same page. */
 static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
                                struct efx_tso_header *tsoh,
-                               struct pci_dev *pci_dev)
+                               struct device *dma_dev)
 {
        struct efx_tso_header **p;
        unsigned long base_kva;
                        p = &(*p)->next;
        }
 
-       pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
+       dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
 }
 
 static struct efx_tso_header *
        if (unlikely(!tsoh))
                return NULL;
 
-       tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
+       tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
                                        TSOH_BUFFER(tsoh), header_len,
-                                       PCI_DMA_TODEVICE);
-       if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
-                                          tsoh->dma_addr))) {
+                                       DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
+                                      tsoh->dma_addr))) {
                kfree(tsoh);
                return NULL;
        }
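A failed dma_map_single() returns a sentinel address rather than a live
mapping, so the only cleanup owed for the heap-allocated header is kfree(), as
above; calling dma_unmap_single() on the sentinel would be a bug.  A sketch of
the idiom (my_map_header is invented):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static void *my_map_header(struct device *dma_dev, size_t len,
			   dma_addr_t *dma_addr)
{
	void *buf = kmalloc(len, GFP_ATOMIC);

	if (!buf)
		return NULL;
	*dma_addr = dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, *dma_addr))) {
		kfree(buf);	/* free the CPU side only; there is no
				 * mapping to undo */
		return NULL;
	}
	return buf;
}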
 static void
 efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
 {
-       pci_unmap_single(tx_queue->efx->pci_dev,
+       dma_unmap_single(&tx_queue->efx->pci_dev->dev,
                         tsoh->dma_addr, tsoh->unmap_len,
-                        PCI_DMA_TODEVICE);
+                        DMA_TO_DEVICE);
        kfree(tsoh);
 }
 
                        unmap_addr = (buffer->dma_addr + buffer->len -
                                      buffer->unmap_len);
                        if (buffer->unmap_single)
-                               pci_unmap_single(tx_queue->efx->pci_dev,
+                               dma_unmap_single(&tx_queue->efx->pci_dev->dev,
                                                 unmap_addr, buffer->unmap_len,
-                                                PCI_DMA_TODEVICE);
+                                                DMA_TO_DEVICE);
                        else
-                               pci_unmap_page(tx_queue->efx->pci_dev,
+                               dma_unmap_page(&tx_queue->efx->pci_dev->dev,
                                               unmap_addr, buffer->unmap_len,
-                                              PCI_DMA_TODEVICE);
+                                              DMA_TO_DEVICE);
                        buffer->unmap_len = 0;
                }
                buffer->len = 0;
        int hl = st->header_len;
        int len = skb_headlen(skb) - hl;
 
-       st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
-                                       len, PCI_DMA_TODEVICE);
-       if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+       st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
+                                       len, DMA_TO_DEVICE);
+       if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
                st->unmap_single = true;
                st->unmap_len = len;
                st->in_len = len;
                buffer->continuation = !end_of_packet;
 
                if (st->in_len == 0) {
-                       /* Transfer ownership of the pci mapping */
+                       /* Transfer ownership of the DMA mapping */
                        buffer->unmap_len = st->unmap_len;
                        buffer->unmap_single = st->unmap_single;
                        st->unmap_len = 0;
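The "transfer ownership" comment marks the invariant that keeps unmapping
single-shot: while a mapping is still being consumed across descriptors the
TSO state owns it, and once in_len reaches zero ownership moves to the final
descriptor, so only the completion path unmaps it.  A minimal sketch with
invented types:

struct my_tso_state {
	unsigned int unmap_len;
	bool unmap_single;
};

struct my_tx_buf {
	unsigned int unmap_len;
	bool unmap_single;
};

static void my_transfer_mapping(struct my_tso_state *st,
				struct my_tx_buf *buf)
{
	buf->unmap_len = st->unmap_len;		/* buffer now owns it */
	buf->unmap_single = st->unmap_single;
	st->unmap_len = 0;			/* state must not unmap */
}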
 
  mem_err:
        netif_err(efx, tx_err, efx->net_dev,
-                 "Out of memory for TSO headers, or PCI mapping error\n");
+                 "Out of memory for TSO headers, or DMA mapping error\n");
        dev_kfree_skb_any(skb);
 
  unwind:
        /* Free the DMA mapping we were in the process of writing out */
        if (state.unmap_len) {
                if (state.unmap_single)
-                       pci_unmap_single(efx->pci_dev, state.unmap_addr,
-                                        state.unmap_len, PCI_DMA_TODEVICE);
+                       dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
+                                        state.unmap_len, DMA_TO_DEVICE);
                else
-                       pci_unmap_page(efx->pci_dev, state.unmap_addr,
-                                      state.unmap_len, PCI_DMA_TODEVICE);
+                       dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
+                                      state.unmap_len, DMA_TO_DEVICE);
        }
 
        efx_enqueue_unwind(tx_queue);
 
        while (tx_queue->tso_headers_free != NULL)
                efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
-                                   tx_queue->efx->pci_dev);
+                                   &tx_queue->efx->pci_dev->dev);
 }
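For reference, the legacy wrappers removed throughout this patch translate
one-to-one onto the generic API; the table below summarizes the standard
kernel correspondences and is not part of the diff:

/*
 *   pci_map_single(pdev, ...)          -> dma_map_single(&pdev->dev, ...)
 *   pci_unmap_single(pdev, ...)        -> dma_unmap_single(&pdev->dev, ...)
 *   pci_map_page / pci_unmap_page      -> dma_map_page / dma_unmap_page
 *   pci_dma_mapping_error(pdev, addr)  -> dma_mapping_error(&pdev->dev, addr)
 *   pci_alloc_consistent(pdev, sz, h)  -> dma_alloc_coherent(&pdev->dev, sz,
 *                                                            h, GFP_ATOMIC)
 *   pci_free_consistent(pdev, sz, v, h)-> dma_free_coherent(&pdev->dev, sz,
 *                                                           v, h)
 *   PCI_DMA_TODEVICE                   -> DMA_TO_DEVICE
 */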