        int i;
 
        for (i = 0; i < RX_DESC_NUM; i++)
-               dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
+               dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
                                 priv->rx_buf_size, DMA_FROM_DEVICE);
 
        if (priv->tx_desc_base)
                       desc + RX_REG_OFFSET_DESC1);
 
                priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
-               priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+               priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
                                                     priv->rx_buf[i],
                                                     priv->rx_buf_size,
                                                     DMA_FROM_DEVICE);
-               if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+               if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
                        netdev_err(ndev, "DMA mapping error\n");
 
                moxart_desc_write(priv->rx_mapping[i],
                if (len > RX_BUF_SIZE)
                        len = RX_BUF_SIZE;
 
-               dma_sync_single_for_cpu(&ndev->dev,
+               dma_sync_single_for_cpu(&priv->pdev->dev,
                                        priv->rx_mapping[rx_head],
                                        priv->rx_buf_size, DMA_FROM_DEVICE);
                skb = netdev_alloc_skb_ip_align(ndev, len);
        unsigned int tx_tail = priv->tx_tail;
 
        while (tx_tail != tx_head) {
-               dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+               dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
                                 priv->tx_len[tx_tail], DMA_TO_DEVICE);
 
                ndev->stats.tx_packets++;
 
        len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
 
-       priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+       priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
                                                   len, DMA_TO_DEVICE);
-       if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+       if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
                netdev_err(ndev, "DMA mapping error\n");
                goto out_unlock;
        }
                len = ETH_ZLEN;
        }
 
-       dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+       dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
                                   priv->tx_buf_size, DMA_TO_DEVICE);
 
        txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
        priv->tx_buf_size = TX_BUF_SIZE;
        priv->rx_buf_size = RX_BUF_SIZE;
 
-       priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+       priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
                                                TX_DESC_NUM, &priv->tx_base,
                                                GFP_DMA | GFP_KERNEL);
        if (!priv->tx_desc_base) {
                goto init_fail;
        }
 
-       priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+       priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
                                                RX_DESC_NUM, &priv->rx_base,
                                                GFP_DMA | GFP_KERNEL);
        if (!priv->rx_desc_base) {