The wrappers in include/linux/pci-dma-compat.h should go away.
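These wrappers are thin inline aliases over the generic DMA API. Abridged, pci_map_single() in that header reads roughly as follows (a sketch of the wrapper shape, not the verbatim header):

    static inline dma_addr_t
    pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
    {
            /* forward to the generic API on the embedded struct device */
            return dma_map_single(&hwdev->dev, ptr, size,
                                  (enum dma_data_direction)direction);
    }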
The patch has been generated with the Coccinelle script below.
It has been compile-tested.
@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL
@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE
@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE
@@
@@
-    PCI_DMA_NONE
+    DMA_NONE
@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
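Applied to a driver holding a struct pci_dev *pdev, these rules turn a compat call into its generic equivalent, as in this hypothetical snippet (for illustration only, not taken from the diff that follows). Note that the GFP_ placeholder in the two alloc rules is deliberately incomplete, so the developer must pick the right flag (typically GFP_KERNEL or GFP_ATOMIC) by hand:

    /* before: compat wrapper, takes the struct pci_dev directly */
    dma_addr_t addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

    /* after: generic DMA API, takes the underlying struct device */
    dma_addr_t addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

A script like this is typically applied in-place with something along the lines of: spatch --sp-file <script.cocci> --in-place <target directory>.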
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
        if (!page)
                return -ENOMEM;
        mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
-                              PCI_DMA_FROMDEVICE);
+                              DMA_FROM_DEVICE);
        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
                return;
 
        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
-                      PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                      PAGE_SIZE, DMA_FROM_DEVICE);
 
        __free_page(page);
        rx_pg->page = NULL;
        mapping = dma_map_single(&bp->pdev->dev,
                                 get_l2_fhdr(data),
                                 bp->rx_buf_use_size,
-                                PCI_DMA_FROMDEVICE);
+                                DMA_FROM_DEVICE);
        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
                kfree(data);
                return -EIO;
                }
 
                dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
-                       skb_headlen(skb), PCI_DMA_TODEVICE);
+                       skb_headlen(skb), DMA_TO_DEVICE);
 
                tx_buf->skb = NULL;
                last = tx_buf->nr_frags;
                        dma_unmap_page(&bp->pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
-                               PCI_DMA_TODEVICE);
+                               DMA_TO_DEVICE);
                }
 
                sw_cons = BNX2_NEXT_TX_BD(sw_cons);
 
        dma_sync_single_for_device(&bp->pdev->dev,
                dma_unmap_addr(cons_rx_buf, mapping),
-               BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+               BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE);
 
        rxr->rx_prod_bseq += bp->rx_buf_use_size;
 
        }
 
        dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
-                        PCI_DMA_FROMDEVICE);
+                        DMA_FROM_DEVICE);
        skb = build_skb(data, 0);
        if (!skb) {
                kfree(data);
                        }
 
                        dma_unmap_page(&bp->pdev->dev, mapping_old,
-                                      PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                                      PAGE_SIZE, DMA_FROM_DEVICE);
 
                        frag_size -= frag_len;
                        skb->data_len += frag_len;
 
                dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
-                       PCI_DMA_FROMDEVICE);
+                       DMA_FROM_DEVICE);
 
                next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
                next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(tx_buf, mapping),
                                         skb_headlen(skb),
-                                        PCI_DMA_TODEVICE);
+                                        DMA_TO_DEVICE);
 
                        tx_buf->skb = NULL;
 
                                dma_unmap_page(&bp->pdev->dev,
                                        dma_unmap_addr(tx_buf, mapping),
                                        skb_frag_size(&skb_shinfo(skb)->frags[k]),
-                                       PCI_DMA_TODEVICE);
+                                       DMA_TO_DEVICE);
                        }
                        dev_kfree_skb(skb);
                }
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(rx_buf, mapping),
                                         bp->rx_buf_use_size,
-                                        PCI_DMA_FROMDEVICE);
+                                        DMA_FROM_DEVICE);
 
                        rx_buf->data = NULL;
 
                packet[i] = (unsigned char) (i & 0xff);
 
        map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
-                            PCI_DMA_TODEVICE);
+                            DMA_TO_DEVICE);
        if (dma_mapping_error(&bp->pdev->dev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
 
        udelay(5);
 
-       dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
+       dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
        dev_kfree_skb(skb);
 
        if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
 
        dma_sync_single_for_cpu(&bp->pdev->dev,
                dma_unmap_addr(rx_buf, mapping),
-               bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+               bp->rx_buf_use_size, DMA_FROM_DEVICE);
 
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
        } else
                mss = 0;
 
-       mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+       mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
+                                DMA_TO_DEVICE);
        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = NULL;
        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
-                        skb_headlen(skb), PCI_DMA_TODEVICE);
+                        skb_headlen(skb), DMA_TO_DEVICE);
 
        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                tx_buf = &txr->tx_buf_ring[ring_prod];
                dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
-                              PCI_DMA_TODEVICE);
+                              DMA_TO_DEVICE);
        }
 
        dev_kfree_skb_any(skb);
                persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
 
        /* Configure DMA attributes. */
-       if (pci_set_dma_mask(pdev, dma_mask) == 0) {
+       if (dma_set_mask(&pdev->dev, dma_mask) == 0) {
                dev->features |= NETIF_F_HIGHDMA;
-               rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
+               rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
                if (rc) {
                        dev_err(&pdev->dev,
                                "pci_set_consistent_dma_mask failed, aborting\n");
                        goto err_out_unmap;
                }
-       } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+       } else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
                dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                goto err_out_unmap;
        }
 
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
-                        skb_headlen(skb), PCI_DMA_TODEVICE);
+                        skb_headlen(skb), DMA_TO_DEVICE);
        prod = NEXT_TX(prod);
 
        /* unmap remaining mapped pages */
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
-                              PCI_DMA_TODEVICE);
+                              DMA_TO_DEVICE);
        }
 
 tx_free:
                }
 
                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
-                                skb_headlen(skb), PCI_DMA_TODEVICE);
+                                skb_headlen(skb), DMA_TO_DEVICE);
                last = tx_buf->nr_frags;
 
                for (j = 0; j < last; j++) {
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
-                               PCI_DMA_TODEVICE);
+                               DMA_TO_DEVICE);
                }
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        if (bp->flags & BNXT_FLAG_CHIP_P5) {
        }
 
        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
-                                    BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
+                                    BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                }
 
                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
-                                    PCI_DMA_FROMDEVICE,
+                                    DMA_FROM_DEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
 
                skb->data_len += frag_len;
                                dma_unmap_single(&pdev->dev,
                                        dma_unmap_addr(tx_buf, mapping),
                                        dma_unmap_len(tx_buf, len),
-                                       PCI_DMA_TODEVICE);
+                                       DMA_TO_DEVICE);
                                xdp_return_frame(tx_buf->xdpf);
                                tx_buf->action = 0;
                                tx_buf->xdpf = NULL;
                        dma_unmap_single(&pdev->dev,
                                         dma_unmap_addr(tx_buf, mapping),
                                         skb_headlen(skb),
-                                        PCI_DMA_TODEVICE);
+                                        DMA_TO_DEVICE);
 
                        last = tx_buf->nr_frags;
                        j += 2;
                                dma_unmap_page(
                                        &pdev->dev,
                                        dma_unmap_addr(tx_buf, mapping),
-                                       skb_frag_size(frag), PCI_DMA_TODEVICE);
+                                       skb_frag_size(frag), DMA_TO_DEVICE);
                        }
                        dev_kfree_skb(skb);
                }
                        continue;
 
                dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-                                    BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
+                                    BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
 
                rx_agg_buf->page = NULL;
 
                data[i] = (u8)(i & 0xff);
 
        map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
-                            PCI_DMA_TODEVICE);
+                            DMA_TO_DEVICE);
        if (dma_mapping_error(&bp->pdev->dev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
        rc = bnxt_poll_loopback(bp, cpr, pkt_size);
 
-       dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
+       dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
        dev_kfree_skb(skb);
        return rc;
 }
 
                        dma_unmap_single(&pdev->dev,
                                         dma_unmap_addr(tx_buf, mapping),
                                         dma_unmap_len(tx_buf, len),
-                                        PCI_DMA_TODEVICE);
+                                        DMA_TO_DEVICE);
                        xdp_return_frame(tx_buf->xdpf);
                        tx_buf->action = 0;
                        tx_buf->xdpf = NULL;
 
                        skb_tstamp_tx(skb, &timestamp);
                }
 
-               pci_unmap_single(tp->pdev,
-                                dma_unmap_addr(ri, mapping),
-                                skb_headlen(skb),
-                                PCI_DMA_TODEVICE);
+               dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
+                                skb_headlen(skb), DMA_TO_DEVICE);
 
                ri->skb = NULL;
 
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;
 
-                       pci_unmap_page(tp->pdev,
+                       dma_unmap_page(&tp->pdev->dev,
                                       dma_unmap_addr(ri, mapping),
                                       skb_frag_size(&skb_shinfo(skb)->frags[i]),
-                                      PCI_DMA_TODEVICE);
+                                      DMA_TO_DEVICE);
 
                        while (ri->fragmented) {
                                ri->fragmented = false;
        if (!ri->data)
                return;
 
-       pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
-                        map_sz, PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
+                        DMA_FROM_DEVICE);
        tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
        ri->data = NULL;
 }
        if (!data)
                return -ENOMEM;
 
-       mapping = pci_map_single(tp->pdev,
-                                data + TG3_RX_OFFSET(tp),
-                                data_size,
-                                PCI_DMA_FROMDEVICE);
-       if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
+       mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
+                                data_size, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
                tg3_frag_free(skb_size <= PAGE_SIZE, data);
                return -EIO;
        }
                        if (skb_size < 0)
                                goto drop_it;
 
-                       pci_unmap_single(tp->pdev, dma_addr, skb_size,
-                                        PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
+                                        DMA_FROM_DEVICE);
 
                        /* Ensure that the update to the data happens
                         * after the usage of the old DMA mapping.
                                goto drop_it_no_recycle;
 
                        skb_reserve(skb, TG3_RAW_IP_ALIGN);
-                       pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
+                                               DMA_FROM_DEVICE);
                        memcpy(skb->data,
                               data + TG3_RX_OFFSET(tp),
                               len);
-                       pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
+                                                  len, DMA_FROM_DEVICE);
                }
 
                skb_put(skb, len);
        skb = txb->skb;
        txb->skb = NULL;
 
-       pci_unmap_single(tnapi->tp->pdev,
-                        dma_unmap_addr(txb, mapping),
-                        skb_headlen(skb),
-                        PCI_DMA_TODEVICE);
+       dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
+                        skb_headlen(skb), DMA_TO_DEVICE);
 
        while (txb->fragmented) {
                txb->fragmented = false;
                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];
 
-               pci_unmap_page(tnapi->tp->pdev,
+               dma_unmap_page(&tnapi->tp->pdev->dev,
                               dma_unmap_addr(txb, mapping),
-                              skb_frag_size(frag), PCI_DMA_TODEVICE);
+                              skb_frag_size(frag), DMA_TO_DEVICE);
 
                while (txb->fragmented) {
                        txb->fragmented = false;
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
-               new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
-                                         PCI_DMA_TODEVICE);
+               new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
+                                         new_skb->len, DMA_TO_DEVICE);
                /* Make sure the mapping succeeded */
-               if (pci_dma_mapping_error(tp->pdev, new_addr)) {
+               if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
                        dev_kfree_skb_any(new_skb);
                        ret = -1;
                } else {
 
        len = skb_headlen(skb);
 
-       mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(tp->pdev, mapping))
+       mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
+                                DMA_TO_DEVICE);
+       if (dma_mapping_error(&tp->pdev->dev, mapping))
                goto drop;
 
 
        for (i = data_off; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);
 
-       map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(tp->pdev, map)) {
+       map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&tp->pdev->dev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }
                } else
                        goto out;
 
-               pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
-                                           PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
+                                       DMA_FROM_DEVICE);
 
                rx_data += TG3_RX_OFFSET(tp);
                for (i = data_off; i < rx_len; i++, val++) {
 
        /* Configure DMA attributes. */
        if (dma_mask > DMA_BIT_MASK(32)) {
-               err = pci_set_dma_mask(pdev, dma_mask);
+               err = dma_set_mask(&pdev->dev, dma_mask);
                if (!err) {
                        features |= NETIF_F_HIGHDMA;
-                       err = pci_set_consistent_dma_mask(pdev,
-                                                         persist_dma_mask);
+                       err = dma_set_coherent_mask(&pdev->dev,
+                                                   persist_dma_mask);
                        if (err < 0) {
                                dev_err(&pdev->dev, "Unable to obtain 64 bit "
                                        "DMA for consistent allocations\n");
                }
        }
        if (err || dma_mask == DMA_BIT_MASK(32)) {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting\n");