hldev = blockpool->hldev;
 
        list_for_each_safe(p, n, &blockpool->free_block_list) {
-               pci_unmap_single(hldev->pdev,
-                       ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
-                       ((struct __vxge_hw_blockpool_entry *)p)->length,
-                       PCI_DMA_BIDIRECTIONAL);
+               dma_unmap_single(&hldev->pdev->dev,
+                                ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+                                ((struct __vxge_hw_blockpool_entry *)p)->length,
+                                DMA_BIDIRECTIONAL);
 
                vxge_os_dma_free(hldev->pdev,
                        ((struct __vxge_hw_blockpool_entry *)p)->memblock,
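The whole conversion follows the pattern visible in this first hunk: the legacy pci_* helpers took a struct pci_dev * plus PCI_DMA_* direction flags, while the generic DMA API takes the underlying struct device * (reached via &pdev->dev) plus enum dma_data_direction values. A minimal sketch of the equivalence; example_unmap() is illustrative only, not driver code:

        #include <linux/pci.h>
        #include <linux/dma-mapping.h>

        static void example_unmap(struct pci_dev *pdev, dma_addr_t dma, size_t len)
        {
                /* old form: pci_unmap_single(pdev, dma, len, PCI_DMA_BIDIRECTIONAL); */
                dma_unmap_single(&pdev->dev, dma, len, DMA_BIDIRECTIONAL);
        }

The direction flags map one to one: PCI_DMA_BIDIRECTIONAL, PCI_DMA_TODEVICE and PCI_DMA_FROMDEVICE become DMA_BIDIRECTIONAL, DMA_TO_DEVICE and DMA_FROM_DEVICE.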
                        goto blockpool_create_exit;
                }
 
-               dma_addr = pci_map_single(hldev->pdev, memblock,
-                               VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
-               if (unlikely(pci_dma_mapping_error(hldev->pdev,
-                               dma_addr))) {
+               dma_addr = dma_map_single(&hldev->pdev->dev, memblock,
+                                         VXGE_HW_BLOCK_SIZE,
+                                         DMA_BIDIRECTIONAL);
+               if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) {
                        vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
                        __vxge_hw_blockpool_destroy(blockpool);
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
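dma_map_single() can fail, so every mapping converted here keeps its dma_mapping_error() check; on failure the block is freed and the half-built pool torn down before reporting VXGE_HW_ERR_OUT_OF_MEMORY. The general shape of the check, sketched with hypothetical buf/len names:

        dma_addr_t dma_addr;

        dma_addr = dma_map_single(&pdev->dev, buf, len, DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(&pdev->dev, dma_addr))) {
                /* dma_addr is not a usable handle here; free the buffer only */
                kfree(buf);
                return -ENOMEM;
        }

dma_mapping_error() takes the device and the returned handle; comparing the handle against 0 directly is not portable.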
                goto exit;
        }
 
-       dma_addr = pci_map_single(devh->pdev, block_addr, length,
-                               PCI_DMA_BIDIRECTIONAL);
+       dma_addr = dma_map_single(&devh->pdev->dev, block_addr, length,
+                                 DMA_BIDIRECTIONAL);
 
-       if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
+       if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_addr))) {
                vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
                blockpool->req_out--;
                goto exit;
                if (!memblock)
                        goto exit;
 
-               dma_object->addr = pci_map_single(devh->pdev, memblock, size,
-                                       PCI_DMA_BIDIRECTIONAL);
+               dma_object->addr = dma_map_single(&devh->pdev->dev, memblock,
+                                                 size, DMA_BIDIRECTIONAL);
 
-               if (unlikely(pci_dma_mapping_error(devh->pdev,
-                               dma_object->addr))) {
+               if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_object->addr))) {
                        vxge_os_dma_free(devh->pdev, memblock,
                                &dma_object->acc_handle);
                        memblock = NULL;
                if (blockpool->pool_size < blockpool->pool_max)
                        break;
 
-               pci_unmap_single(
-                       (blockpool->hldev)->pdev,
-                       ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
-                       ((struct __vxge_hw_blockpool_entry *)p)->length,
-                       PCI_DMA_BIDIRECTIONAL);
+               dma_unmap_single(&(blockpool->hldev)->pdev->dev,
+                                ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+                                ((struct __vxge_hw_blockpool_entry *)p)->length,
+                                DMA_BIDIRECTIONAL);
 
                vxge_os_dma_free(
                        (blockpool->hldev)->pdev,
        blockpool = &devh->block_pool;
 
        if (size != blockpool->block_size) {
-               pci_unmap_single(devh->pdev, dma_object->addr, size,
-                       PCI_DMA_BIDIRECTIONAL);
+               dma_unmap_single(&devh->pdev->dev, dma_object->addr, size,
+                                DMA_BIDIRECTIONAL);
                vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
        } else {
 
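A streaming unmap must mirror its map exactly: same device, same handle, same length, same direction. That is why each pool entry records everything needed for the eventual dma_unmap_single(); roughly, matching the __vxge_hw_blockpool_entry fields used above:

        /* sketch of what a pool entry must remember for a later unmap */
        struct mapped_block {
                void            *memblock;      /* CPU address, for freeing */
                dma_addr_t      dma_addr;       /* handle from dma_map_single() */
                size_t          length;         /* length passed at map time */
        };

The size != blockpool->block_size branch just above is the odd-sized case that cannot be recycled, so it is unmapped and freed immediately instead of being returned to the pool.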
 
        rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
 
        rx_priv->skb_data = rx_priv->skb->data;
-       dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
-                               rx_priv->data_size, PCI_DMA_FROMDEVICE);
+       dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data,
+                                 rx_priv->data_size, DMA_FROM_DEVICE);
 
-       if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
+       if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr))) {
                ring->stats.pci_map_fail++;
                return -EIO;
        }
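Receive buffers are mapped DMA_FROM_DEVICE because only the NIC writes into them. On a mapping failure the driver bumps its counter (still named pci_map_fail after the conversion) and returns -EIO so the descriptor is never posted. Condensed, with hypothetical skb/size names:

        dma_addr_t dma_addr;

        dma_addr = dma_map_single(&pdev->dev, skb->data, size, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&pdev->dev, dma_addr)))
                return -EIO;    /* caller keeps ownership of the skb */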
 static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
                                    struct vxge_rx_priv *rx_priv)
 {
-       pci_dma_sync_single_for_device(ring->pdev,
-               rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_device(&ring->pdev->dev, rx_priv->data_dma,
+                                  rx_priv->data_size, DMA_FROM_DEVICE);
 
        vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
        vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
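dma_sync_single_for_device() is the ownership handoff for a streaming mapping that is being re-used: once the CPU is done with the buffer, it must be given back to the device before the descriptor is re-posted. The paired calls, in sketch form with hypothetical dma/len names:

        /* device -> CPU: the CPU may now read the buffer */
        dma_sync_single_for_cpu(&pdev->dev, dma, len, DMA_FROM_DEVICE);
        /* ... inspect or copy packet data ... */
        /* CPU -> device: the NIC may now DMA into it again */
        dma_sync_single_for_device(&pdev->dev, dma, len, DMA_FROM_DEVICE);

On non-coherent platforms, skipping the for_device sync before re-posting risks data corruption.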
                                if (!vxge_rx_map(dtr, ring)) {
                                        skb_put(skb, pkt_length);
 
-                                       pci_unmap_single(ring->pdev, data_dma,
-                                               data_size, PCI_DMA_FROMDEVICE);
+                                       dma_unmap_single(&ring->pdev->dev,
+                                                        data_dma, data_size,
+                                                        DMA_FROM_DEVICE);
 
                                        vxge_hw_ring_rxd_pre_post(ringh, dtr);
                                        vxge_post(&dtr_cnt, &first_dtr, dtr,
                                skb_reserve(skb_up,
                                    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
 
-                               pci_dma_sync_single_for_cpu(ring->pdev,
-                                       data_dma, data_size,
-                                       PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_cpu(&ring->pdev->dev,
+                                                       data_dma, data_size,
+                                                       DMA_FROM_DEVICE);
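This is the copy-break path: for small packets the buffer stays mapped, is synced for the CPU, and the payload is copied into the freshly allocated skb_up; the still-mapped buffer is then recycled through vxge_re_pre_post() instead of paying for an unmap/remap. Roughly, under those assumptions:

        dma_sync_single_for_cpu(&pdev->dev, data_dma, data_size,
                                DMA_FROM_DEVICE);
        memcpy(skb_up->data, skb->data, pkt_length);    /* names from context */
        /* buffer is handed back to the NIC via dma_sync_single_for_device() */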
 
                                vxge_debug_mem(VXGE_TRACE,
                                        "%s: %s:%d  skb_up = %p",
                }
 
                /*  for unfragmented skb */
-               pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
-                               skb_headlen(skb), PCI_DMA_TODEVICE);
+               dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
+                                skb_headlen(skb), DMA_TO_DEVICE);
 
                for (j = 0; j < frg_cnt; j++) {
-                       pci_unmap_page(fifo->pdev,
-                                       txd_priv->dma_buffers[i++],
-                                       skb_frag_size(frag), PCI_DMA_TODEVICE);
+                       dma_unmap_page(&fifo->pdev->dev,
+                                      txd_priv->dma_buffers[i++],
+                                      skb_frag_size(frag), DMA_TO_DEVICE);
                        frag += 1;
                }
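TX completion is asymmetric by design: the linear head of the skb was mapped with dma_map_single() while each page fragment was mapped as a page, so cleanup must pair them with dma_unmap_single() and dma_unmap_page() respectively, walking the saved DMA handles in mapping order. A condensed sketch, assuming a dma_buffers[] array as in the driver:

        int i = 0, j;
        skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

        dma_unmap_single(&pdev->dev, dma_buffers[i++], skb_headlen(skb),
                         DMA_TO_DEVICE);
        for (j = 0; j < skb_shinfo(skb)->nr_frags; j++, frag++)
                dma_unmap_page(&pdev->dev, dma_buffers[i++],
                               skb_frag_size(frag), DMA_TO_DEVICE);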
 
 
        first_frg_len = skb_headlen(skb);
 
-       dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
-                               PCI_DMA_TODEVICE);
+       dma_pointer = dma_map_single(&fifo->pdev->dev, skb->data,
+                                    first_frg_len, DMA_TO_DEVICE);
 
-       if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
+       if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) {
                vxge_hw_fifo_txdl_free(fifo_hw, dtr);
                fifo->stats.pci_map_fail++;
                goto _exit0;
        j = 0;
        frag = &skb_shinfo(skb)->frags[0];
 
-       pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
-                       skb_headlen(skb), PCI_DMA_TODEVICE);
+       dma_unmap_single(&fifo->pdev->dev, txdl_priv->dma_buffers[j++],
+                        skb_headlen(skb), DMA_TO_DEVICE);
 
        for (; j < i; j++) {
-               pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
-                       skb_frag_size(frag), PCI_DMA_TODEVICE);
+               dma_unmap_page(&fifo->pdev->dev, txdl_priv->dma_buffers[j],
+                              skb_frag_size(frag), DMA_TO_DEVICE);
                frag += 1;
        }
 
        if (state != VXGE_HW_RXD_STATE_POSTED)
                return;
 
-       pci_unmap_single(ring->pdev, rx_priv->data_dma,
-               rx_priv->data_size, PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&ring->pdev->dev, rx_priv->data_dma,
+                        rx_priv->data_size, DMA_FROM_DEVICE);
 
        dev_kfree_skb(rx_priv->skb);
        rx_priv->skb_data = NULL;
        frag = &skb_shinfo(skb)->frags[0];
 
        /*  for unfragmented skb */
-       pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
-               skb_headlen(skb), PCI_DMA_TODEVICE);
+       dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
+                        skb_headlen(skb), DMA_TO_DEVICE);
 
        for (j = 0; j < frg_cnt; j++) {
-               pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
-                              skb_frag_size(frag), PCI_DMA_TODEVICE);
+               dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
+                              skb_frag_size(frag), DMA_TO_DEVICE);
                frag += 1;
        }
 
                goto _exit0;
        }
 
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                vxge_debug_ll_config(VXGE_TRACE,
                        "%s : using 64bit DMA", __func__);
 
                high_dma = 1;
 
-               if (pci_set_consistent_dma_mask(pdev,
-                                               DMA_BIT_MASK(64))) {
+               if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                        vxge_debug_init(VXGE_ERR,
                                "%s : unable to obtain 64bit DMA for "
                                "consistent allocations", __func__);
                        ret = -ENOMEM;
                        goto _exit1;
                }
-       } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+       } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                vxge_debug_ll_config(VXGE_TRACE,
                        "%s : using 32bit DMA", __func__);
        } else {
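The mask negotiation above stays 1:1 with the old pci_set_dma_mask()/pci_set_consistent_dma_mask() structure (the 64-bit branch also sets high_dma). For comparison only, a common modern pattern does the same negotiation with a single helper that sets both the streaming and the coherent mask and returns 0 on success:

        /* sketch: try 64-bit DMA, fall back to 32-bit */
        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                return -ENOMEM;
        }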