struct sk_buff *skb;
                struct fl_pg_chunk pg_chunk;
        };
-       DECLARE_PCI_UNMAP_ADDR(dma_addr);
+       DEFINE_DMA_UNMAP_ADDR(dma_addr);
 };
 
 struct rsp_desc {              /* response queue descriptor */
-        * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
+        * unmapping by checking if DEFINE_DMA_UNMAP_ADDR defines anything.
         */
        struct dummy {
-               DECLARE_PCI_UNMAP_ADDR(addr);
+               DEFINE_DMA_UNMAP_ADDR(addr);
        };
 
        return sizeof(struct dummy) != 0;
                put_page(d->pg_chunk.page);
                d->pg_chunk.page = NULL;
        } else {
-               pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
+               pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
                                 q->buf_size, PCI_DMA_FROMDEVICE);
                kfree_skb(d->skb);
                d->skb = NULL;
        if (unlikely(pci_dma_mapping_error(pdev, mapping)))
                return -ENOMEM;
 
-       pci_unmap_addr_set(sd, dma_addr, mapping);
+       dma_unmap_addr_set(sd, dma_addr, mapping);
 
        d->addr_lo = cpu_to_be32(mapping);
        d->addr_hi = cpu_to_be32((u64) mapping >> 32);
                                break;
                        }
                        mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
-                       pci_unmap_addr_set(sd, dma_addr, mapping);
+                       dma_unmap_addr_set(sd, dma_addr, mapping);
 
                        add_one_rx_chunk(mapping, d, q->gen);
                        pci_dma_sync_single_for_device(adap->pdev, mapping,
                if (likely(skb != NULL)) {
                        __skb_put(skb, len);
                        pci_dma_sync_single_for_cpu(adap->pdev,
-                                           pci_unmap_addr(sd, dma_addr), len,
+                                           dma_unmap_addr(sd, dma_addr), len,
                                            PCI_DMA_FROMDEVICE);
                        memcpy(skb->data, sd->skb->data, len);
                        pci_dma_sync_single_for_device(adap->pdev,
-                                           pci_unmap_addr(sd, dma_addr), len,
+                                           dma_unmap_addr(sd, dma_addr), len,
                                            PCI_DMA_FROMDEVICE);
                } else if (!drop_thres)
                        goto use_orig_buf;
                goto recycle;
 
 use_orig_buf:
-       pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+       pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
                         fl->buf_size, PCI_DMA_FROMDEVICE);
        skb = sd->skb;
        skb_put(skb, len);
        struct sk_buff *newskb, *skb;
        struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 
-       dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
+       dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
 
        newskb = skb = q->pg_skb;
        if (!skb && (len <= SGE_RX_COPY_THRES)) {
        fl->credits--;
 
        pci_dma_sync_single_for_cpu(adap->pdev,
-                                   pci_unmap_addr(sd, dma_addr),
+                                   dma_unmap_addr(sd, dma_addr),
                                    fl->buf_size - SGE_PG_RSVD,
                                    PCI_DMA_FROMDEVICE);