        if (!req->skb)
                return -ENOMEM;
 
-       req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data,
-                                         skb_data_area_size(req->skb), DMA_FROM_DEVICE);
+       req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
        if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
                dev_kfree_skb_any(req->skb);
                req->skb = NULL;
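
(The remainder of this error path is trimmed from the excerpt.) The change above
simply maps the caller-supplied `size` instead of deriving a length from the skb.
Reduced to a minimal sketch, the streaming-DMA receive pattern in play looks like
this (`dev` is a stand-in, not the driver's identifier):

        struct sk_buff *skb;
        dma_addr_t addr;

        skb = __dev_alloc_skb(size, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        addr = dma_map_single(dev, skb->data, size, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, addr)) {
                dev_kfree_skb_any(skb);   /* mapping failed, drop the buffer */
                return -ENOMEM;
        }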
 
                if (req->mapped_buff) {
                        dma_unmap_single(md_ctrl->dev, req->mapped_buff,
-                                        skb_data_area_size(skb), DMA_FROM_DEVICE);
+                                        queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
                        req->mapped_buff = 0;
                }
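
The DMA API requires dma_unmap_single() to be called with the same length that was
passed to dma_map_single(). Buffers on this ring are allocated and mapped at the
ring's fixed packet size (the `size` in the first hunk), so `queue->tr_ring->pkt_size`
is that length and no skb needs to be consulted at unmap time. The alternative would
be to record the length per request, roughly (with a hypothetical `mapped_len` field):

        req->mapped_buff = dma_map_single(dev, req->skb->data, size, DMA_FROM_DEVICE);
        req->mapped_len = size;           /* remember the mapped length */
        /* ... later, on the unmap side ... */
        dma_unmap_single(dev, req->mapped_buff, req->mapped_len, DMA_FROM_DEVICE);

The patch avoids the extra field because the ring already knows its buffer size.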
 
        list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
                if (req_cur->mapped_buff && req_cur->skb) {
                        dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
-                                        skb_data_area_size(req_cur->skb), tx_rx);
+                                        ring->pkt_size, tx_rx);
                        req_cur->mapped_buff = 0;
                }
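
Same substitution in the ring teardown. The `_safe` iterator is what lets the loop
body (trimmed here) unlink and free `req_cur` while walking the list, since it caches
the next entry up front. A generic sketch, with assumed names:

        list_for_each_entry_safe(cur, next, &head, entry) {
                list_del(&cur->entry);  /* safe: `next` was fetched before unlinking */
                kfree(cur);
        }

The final hunk makes the equivalent change in a DPMAIF receive-buffer helper; its
function signature is trimmed from the excerpt, so only the body from the opening
brace appears below.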
 {
        dma_addr_t data_bus_addr;
        struct sk_buff *skb;
-       size_t data_len;
 
        skb = __dev_alloc_skb(size, GFP_KERNEL);
        if (!skb)
                return false;
 
-       data_len = skb_data_area_size(skb);
-       data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, data_len, DMA_FROM_DEVICE);
+       data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
        if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
                dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
                dev_kfree_skb_any(skb);
                return false;
        }

        cur_skb->skb = skb;
        cur_skb->data_bus_addr = data_bus_addr;
-       cur_skb->data_len = data_len;
+       cur_skb->data_len = size;
 
        return true;
 }
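
Why mapping `size` is correct: __dev_alloc_skb(size, ...) guarantees at least `size`
bytes of data area but usually provides more (NET_SKB_PAD headroom plus allocator
round-up), so the skb_data_area_size() helper being removed here, roughly
skb_end_pointer(skb) - skb->data, can report a value larger than the requested
`size`. Mapping exactly `size` is sufficient (only `size` bytes are ever DMA'd into)
and lets the unmap sites use the rings' fixed packet size instead of the helper.
A sketch of the inequality:

        skb = __dev_alloc_skb(size, GFP_KERNEL);
        /* skb_end_pointer(skb) - skb->data >= size: the data area is
         * padded and rounded up, so it is not a stable map/unmap length.
         */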