}
                        /* Update the skb. */
                        if (merge) {
-                               skb_shinfo(skb)->frags[i - 1].size += copy;
+                               skb_frag_size_add(
+                                               &skb_shinfo(skb)->frags[i - 1],
+                                               copy);
                        } else {
                                skb_fill_page_desc(skb, i, page, off, copy);
                                if (off + copy < pg_size) {
 
                i = skb_shinfo(skb)->nr_frags;
                if (skb_can_coalesce(skb, i, page, offset)) {
-                       skb_shinfo(skb)->frags[i - 1].size += copy;
+                       skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, copy);
 
                sg = sg_next(sg);
                BUG_ON(!sg);
                frag = &skb_shinfo(skb)->frags[i];
-               sg_set_page(sg, frag->page.p, frag->size, frag->page_offset);
+               sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag),
+                               frag->page_offset);
        }
 }
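
For reference, the size accessors these hunks switch over to are trivial inline
wrappers, roughly as they stood in include/linux/skbuff.h at the time of this
series. The point of the conversion is that callers stop naming the struct
field directly, so the layout of skb_frag_t can later change (it was
subsequently converted to struct bio_vec) without touching every driver again:

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
        return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
        frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
        frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
        frag->size -= delta;
}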
 
 
                goto bail_txadd;
 
        for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(tx->skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];
 
                /* combine physically continuous fragments later? */
                ret = sdma_txadd_page(sde->dd,
 
 
                        dma_addr = skb_frag_dma_map(vp->gendev, frag,
                                                    0,
-                                                   frag->size,
+                                                   skb_frag_size(frag),
                                                    DMA_TO_DEVICE);
                        if (dma_mapping_error(vp->gendev, dma_addr)) {
                                for(i = i-1; i >= 0; i--)
 
        u32 thiscopy, remainder;
        struct sk_buff *skb = tcb->skb;
        u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
-       struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
+       skb_frag_t *frags = &skb_shinfo(skb)->frags[0];
        struct phy_device *phydev = adapter->netdev->phydev;
        dma_addr_t dma_addr;
        struct tx_ring *tx_ring = &adapter->tx_ring;
                                frag++;
                        }
                } else {
-                       desc[frag].len_vlan = frags[i - 1].size;
+                       desc[frag].len_vlan = skb_frag_size(&frags[i - 1]);
                        dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
                                                    &frags[i - 1],
                                                    0,
-                                                   frags[i - 1].size,
+                                                   desc[frag].len_vlan,
                                                    DMA_TO_DEVICE);
                        desc[frag].addr_lo = lower_32_bits(dma_addr);
                        desc[frag].addr_hi = upper_32_bits(dma_addr);
 
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_packet_data *packet;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        dma_addr_t skb_dma;
        unsigned int start_index, cur_index;
        unsigned int offset, tso, vlan, datalen, len;
 
                             struct xgbe_ring *ring, struct sk_buff *skb,
                             struct xgbe_packet_data *packet)
 {
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned int context_desc;
        unsigned int len;
        unsigned int i;
 
                                nr_frags = skb_shinfo(skb)->nr_frags;
 
                                for (i = 0; i < 2 && i < nr_frags; i++)
-                                       len += skb_shinfo(skb)->frags[i].size;
+                                       len += skb_frag_size(
+                                               &skb_shinfo(skb)->frags[i]);
 
                                /* HW requires header must reside in 3 buffer */
                                if (unlikely(hdr_len > len)) {
 
        tpd->len = cpu_to_le16(maplen);
 
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
-               struct skb_frag_struct *frag;
-
-               frag = &skb_shinfo(skb)->frags[f];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
 
                if (++txq->write_idx == txq->count)
                        txq->write_idx = 0;
 
        }
 
        for (f = 0; f < nr_frags; f++) {
-               struct skb_frag_struct *frag;
-
-               frag = &skb_shinfo(skb)->frags[f];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
 
                use_tpd = atl1c_get_tpd(adapter, type);
                memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
 
        }
 
        for (f = 0; f < nr_frags; f++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
                u16 i;
                u16 seg_num;
 
-               frag = &skb_shinfo(skb)->frags[f];
                buf_len = skb_frag_size(frag);
 
                seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
 
        }
 
        for (f = 0; f < nr_frags; f++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
                u16 i, nseg;
 
-               frag = &skb_shinfo(skb)->frags[f];
                buf_len = skb_frag_size(frag);
 
                nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
 
        flags = 0;
 
        for (i = 0; i < nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int len = skb_frag_size(frag);
 
                index = (index + 1) % BGMAC_TX_RING_SLOTS;
 
 {
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
 
        head_unmap->nvecs++;
 
        for (i = 0, vect_id = 0; i < vectors - 1; i++) {
-               const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                u32             size = skb_frag_size(frag);
 
                if (unlikely(size == 0)) {
 
        for (i = 0; i < nfrags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-               len = frag->size;
+               len = skb_frag_size(frag);
 
                paddr = skb_frag_dma_map(priv->device, frag, 0, len,
                                         DMA_TO_DEVICE);
 
 
        i = 1;
        while (frags--) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
                pci_unmap_page((lio->oct_dev)->pci_dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
-                              frag->size, DMA_TO_DEVICE);
+                              skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }
 
 
        i = 1;
        while (frags--) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
                pci_unmap_page((lio->oct_dev)->pci_dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
-                              frag->size, DMA_TO_DEVICE);
+                              skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }
 
 
        } else {
                int i, frags;
-               struct skb_frag_struct *frag;
+               skb_frag_t *frag;
                struct octnic_gather *g;
 
                spin_lock(&lio->glist_lock[q_idx]);
                        frag = &skb_shinfo(skb)->frags[i - 1];
 
                        g->sg[(i >> 2)].ptr[(i & 3)] =
-                               dma_map_page(&oct->pci_dev->dev,
-                                            frag->page.p,
-                                            frag->page_offset,
-                                            frag->size,
-                                            DMA_TO_DEVICE);
+                               skb_frag_dma_map(&oct->pci_dev->dev,
+                                                frag, 0, skb_frag_size(frag),
+                                                DMA_TO_DEVICE);
 
                        if (dma_mapping_error(&oct->pci_dev->dev,
                                              g->sg[i >> 2].ptr[i & 3])) {
                                        frag = &skb_shinfo(skb)->frags[j - 1];
                                        dma_unmap_page(&oct->pci_dev->dev,
                                                       g->sg[j >> 2].ptr[j & 3],
-                                                      frag->size,
+                                                      skb_frag_size(frag),
                                                       DMA_TO_DEVICE);
                                }
                                dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
                                return NETDEV_TX_BUSY;
                        }
 
-                       add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+                       add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+                                   (i & 3));
                        i++;
                }
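
The open-coded dma_map_page() calls above collapse into skb_frag_dma_map(),
which at the time was roughly the wrapper below; passing offset 0 together
with skb_frag_size(frag) therefore reproduces the old page_offset/size
arguments exactly:

static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
        return frag->page.p;
}

static inline dma_addr_t skb_frag_dma_map(struct device *dev,
                                          const skb_frag_t *frag,
                                          size_t offset, size_t size,
                                          enum dma_data_direction dir)
{
        return dma_map_page(dev, skb_frag_page(frag),
                            frag->page_offset + offset, size, dir);
}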
 
 
 
        i = 1;
        while (frags--) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
                pci_unmap_page((lio->oct_dev)->pci_dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
-                              frag->size, DMA_TO_DEVICE);
+                              skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }
 
 
        i = 1;
        while (frags--) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
                pci_unmap_page((lio->oct_dev)->pci_dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
-                              frag->size, DMA_TO_DEVICE);
+                              skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }
 
                ndata.reqtype = REQTYPE_NORESP_NET;
 
        } else {
-               struct skb_frag_struct *frag;
+               skb_frag_t *frag;
                struct octnic_gather *g;
                int i, frags;
 
                        frag = &skb_shinfo(skb)->frags[i - 1];
 
                        g->sg[(i >> 2)].ptr[(i & 3)] =
-                               dma_map_page(&oct->pci_dev->dev,
-                                            frag->page.p,
-                                            frag->page_offset,
-                                            frag->size,
-                                            DMA_TO_DEVICE);
+                               skb_frag_dma_map(&oct->pci_dev->dev,
+                                                frag, 0, skb_frag_size(frag),
+                                                DMA_TO_DEVICE);
                        if (dma_mapping_error(&oct->pci_dev->dev,
                                              g->sg[i >> 2].ptr[i & 3])) {
                                dma_unmap_single(&oct->pci_dev->dev,
                                        frag = &skb_shinfo(skb)->frags[j - 1];
                                        dma_unmap_page(&oct->pci_dev->dev,
                                                       g->sg[j >> 2].ptr[j & 3],
-                                                      frag->size,
+                                                      skb_frag_size(frag),
                                                       DMA_TO_DEVICE);
                                }
                                dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
                                return NETDEV_TX_BUSY;
                        }
 
-                       add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+                       add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+                                   (i & 3));
                        i++;
                }
 
 
                goto doorbell;
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               const struct skb_frag_struct *frag;
-
-               frag = &skb_shinfo(skb)->frags[i];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                qentry = nicvf_get_nxt_sqentry(sq, qentry);
                size = skb_frag_size(frag);
 
        struct port_info *pi = netdev_priv(qs->netdev);
        struct sk_buff *skb = NULL;
        struct cpl_rx_pkt *cpl;
-       struct skb_frag_struct *rx_frag;
+       skb_frag_t *rx_frag;
        int nr_frags;
        int offset = 0;
 
 
                        buflen = skb_headlen(skb);
                } else {
                        skb_frag = skb_si->frags + frag;
-                       buffer = page_address(skb_frag_page(skb_frag)) +
-                                skb_frag->page_offset;
-                       buflen = skb_frag->size;
+                       buffer = skb_frag_address(skb_frag);
+                       buflen = skb_frag_size(skb_frag);
                }
 
                if (frag == last_frag) {
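
skb_frag_address(), used above, folds the open-coded page_address() plus
page_offset computation into one helper; roughly, at the time:

static inline void *skb_frag_address(const skb_frag_t *frag)
{
        return page_address(skb_frag_page(frag)) + frag->page_offset;
}

(The _safe variant seen later in this series additionally checks for
page_address() returning NULL on an unmapped highmem page.)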
 
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                len = skb_frag_size(frag);
 
                busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
 
                              int active_offloads)
 {
        struct enetc_tx_swbd *tx_swbd;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        int len = skb_headlen(skb);
        union enetc_tx_bd temp_bd;
        union enetc_tx_bd *txbd;
 
                status = fec16_to_cpu(bdp->cbd_sc);
                status &= ~BD_ENET_TX_STATS;
                status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
-               frag_len = skb_shinfo(skb)->frags[frag].size;
+               frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
 
                /* Handle the last BD specially */
                if (frag == nr_frags - 1) {
                        ebdp->cbd_esc = cpu_to_fec32(estatus);
                }
 
-               bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+               bufaddr = skb_frag_address(this_frag);
 
                index = fec_enet_get_bd_index(bdp, &txq->bd);
                if (((unsigned long) bufaddr) & fep->tx_align ||
 
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               int len = frag->size;
+               int len = skb_frag_size(frag);
 
                addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
                ret = dma_mapping_error(priv->dev, addr);
 
        int frag_num;
        struct sk_buff *skb = *out_skb;
        struct sk_buff *new_skb = NULL;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
 
        size = skb_headlen(skb);
        buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
        struct hnae_ring *ring = ring_data->ring;
        struct device *dev = ring_to_dev(ring);
        struct netdev_queue *dev_queue;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        int buf_num;
        int seg_num;
        dma_addr_t dma;
 
        struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
        struct hns3_desc *desc = &ring->desc[ring->next_to_use];
        struct device *dev = ring_to_dev(ring);
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned int frag_buf_num;
        int k, sizeoflast;
        dma_addr_t dma;
 
                dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
        } else {
-               frag = (struct skb_frag_struct *)priv;
+               frag = (skb_frag_t *)priv;
                dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
        }
 
        bd_num = hns3_tx_bd_count(size);
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int frag_bd_num;
 
                size = skb_frag_size(frag);
                &tx_ring_data(priv, skb->queue_mapping);
        struct hns3_enet_ring *ring = ring_data->ring;
        struct netdev_queue *dev_queue;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        int next_to_use_head;
        int buf_num;
        int seg_num;
 
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        dma_addr_t dma_addr;
        int i, j;
 
 
                                       ctrl);
        /* skb fragments */
        for (i = 0; i < nr_frags; ++i) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                len = skb_frag_size(frag);
 
                if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
 
        }
 
        for (f = 0; f < nr_frags; f++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
 
-               frag = &skb_shinfo(skb)->frags[f];
                len = skb_frag_size(frag);
                offset = 0;
 
 
        }
 
        for (f = 0; f < nr_frags; f++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
 
-               frag = &skb_shinfo(skb)->frags[f];
                len = skb_frag_size(frag);
                offset = 0;
 
 
        struct sk_buff *skb = first->skb;
        struct fm10k_tx_buffer *tx_buffer;
        struct fm10k_tx_desc *tx_desc;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned char *data;
        dma_addr_t dma;
        unsigned int data_len, size;
         * otherwise try next time
         */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-               count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+               count += TXD_USE_COUNT(skb_frag_size(
+                                               &skb_shinfo(skb)->frags[f]));
 
        if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
                tx_ring->tx_stats.tx_busy++;
 
  **/
 bool __i40e_chk_linearize(struct sk_buff *skb)
 {
-       const struct skb_frag_struct *frag, *stale;
+       const skb_frag_t *frag, *stale;
        int nr_frags, sum;
 
        /* no need to check if number of frags is less than 7 */
 {
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        struct i40e_tx_buffer *tx_bi;
        struct i40e_tx_desc *tx_desc;
        u16 i = tx_ring->next_to_use;
 
  **/
 static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
 {
-       const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        int count = 0, size = skb_headlen(skb);
 
 
  **/
 bool __iavf_chk_linearize(struct sk_buff *skb)
 {
-       const struct skb_frag_struct *frag, *stale;
+       const skb_frag_t *frag, *stale;
        int nr_frags, sum;
 
        /* no need to check if number of frags is less than 7 */
 {
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        struct iavf_tx_buffer *tx_bi;
        struct iavf_tx_desc *tx_desc;
        u16 i = tx_ring->next_to_use;
 
  **/
 static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
 {
-       const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        int count = 0, size = skb_headlen(skb);
 
 
 {
        u64 td_offset, td_tag, td_cmd;
        u16 i = tx_ring->next_to_use;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned int data_len, size;
        struct ice_tx_desc *tx_desc;
        struct ice_tx_buf *tx_buf;
  */
 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
 {
-       const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int count = 0, size = skb_headlen(skb);
 
  */
 static bool __ice_chk_linearize(struct sk_buff *skb)
 {
-       const struct skb_frag_struct *frag, *stale;
+       const skb_frag_t *frag, *stale;
        int nr_frags, sum;
 
        /* no need to check if number of frags is less than 7 */
 
        struct sk_buff *skb = first->skb;
        struct igb_tx_buffer *tx_buffer;
        union e1000_adv_tx_desc *tx_desc;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        dma_addr_t dma;
        unsigned int data_len, size;
        u32 tx_flags = first->tx_flags;
         * otherwise try next time
         */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-               count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+               count += TXD_USE_COUNT(skb_frag_size(
+                                               &skb_shinfo(skb)->frags[f]));
 
        if (igb_maybe_stop_tx(tx_ring, count + 3)) {
                /* this is a hard error */
 
                goto dma_error;
 
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag;
 
                count++;
                i++;
 
        struct igc_tx_buffer *tx_buffer;
        union igc_adv_tx_desc *tx_desc;
        u32 tx_flags = first->tx_flags;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        u16 i = tx_ring->next_to_use;
        unsigned int data_len, size;
        dma_addr_t dma;
         * otherwise try next time
         */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-               count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+               count += TXD_USE_COUNT(skb_frag_size(
+                                               &skb_shinfo(skb)->frags[f]));
 
        if (igc_maybe_stop_tx(tx_ring, count + 3)) {
                /* this is a hard error */
 
        }
 
        for (f = 0; f < nr_frags; f++) {
-               const struct skb_frag_struct *frag;
-
-               frag = &skb_shinfo(skb)->frags[f];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
                len = skb_frag_size(frag);
                offset = 0;
 
 
 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
                            struct sk_buff *skb)
 {
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned char *va;
        unsigned int pull_len;
 
                                              skb_headlen(skb),
                                              DMA_FROM_DEVICE);
        } else {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 
                dma_sync_single_range_for_cpu(rx_ring->dev,
                                              IXGBE_CB(skb)->dma,
        struct sk_buff *skb = first->skb;
        struct ixgbe_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        dma_addr_t dma;
        unsigned int data_len, size;
        u32 tx_flags = first->tx_flags;
         * otherwise try next time
         */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-               count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+               count += TXD_USE_COUNT(skb_frag_size(
+                                               &skb_shinfo(skb)->frags[f]));
 
        if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
                tx_ring->tx_stats.tx_busy++;
 
        struct sk_buff *skb = first->skb;
        struct ixgbevf_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        dma_addr_t dma;
        unsigned int data_len, size;
        u32 tx_flags = first->tx_flags;
 
        bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
        int i, nr_frags = skb_shinfo(skb)->nr_frags;
        int mask = jme->tx_ring_mask;
-       const struct skb_frag_struct *frag;
        u32 len;
        int ret = 0;
 
        for (i = 0 ; i < nr_frags ; ++i) {
-               frag = &skb_shinfo(skb)->frags[i];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
                ctxdesc = txdesc + ((idx + i + 2) & (mask));
                ctxbi = txbi + ((idx + i + 2) & (mask));
 
                        jme_drop_tx_map(jme, idx, i);
                        goto out;
                }
-
        }
 
        len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
 
 
        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               void *addr = page_address(frag->page.p) + frag->page_offset;
+               void *addr = skb_frag_address(frag);
 
                tx_desc = mvneta_txq_next_desc_get(txq);
-               tx_desc->data_size = frag->size;
+               tx_desc->data_size = skb_frag_size(frag);
 
                tx_desc->buf_phys_addr =
                        dma_map_single(pp->dev->dev.parent, addr,
 
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               void *addr = page_address(frag->page.p) + frag->page_offset;
+               void *addr = skb_frag_address(frag);
 
                tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
                mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
-               mvpp2_txdesc_size_set(port, tx_desc, frag->size);
+               mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
 
                buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
-                                             frag->size, DMA_TO_DEVICE);
+                                             skb_frag_size(frag),
+                                             DMA_TO_DEVICE);
                if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
                        mvpp2_txq_desc_put(txq);
                        goto cleanup;
 
        txd = itxd;
        nr_frags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                unsigned int offset = 0;
                int frag_size = skb_frag_size(frag);
 
 static inline int mtk_cal_txd_req(struct sk_buff *skb)
 {
        int i, nfrags;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
 
        nfrags = 1;
        if (skb_is_gso(skb)) {
 
 
        /* Map fragments if any */
        for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
-               const struct skb_frag_struct *frag;
-
-               frag = &shinfo->frags[i_frag];
+               const skb_frag_t *frag = &shinfo->frags[i_frag];
                byte_count = skb_frag_size(frag);
                dma = skb_frag_dma_map(ddev, frag,
                                       0, byte_count,
 
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int fsz = skb_frag_size(frag);
 
                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
 
 }
 
 static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
-                                        const struct skb_frag_struct *fragment,
+                                        const skb_frag_t *fragment,
                                         unsigned int frame_length)
 {
        /* called only from within lan743x_tx_xmit_frame
                goto finish;
 
        for (j = 0; j < nr_frags; j++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
 
-               frag = &(skb_shinfo(skb)->frags[j]);
                if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
                        /* upon error no need to call
                         *      lan743x_tx_frame_end
 
 {
        u8 *va;
        struct vlan_ethhdr *veh;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        __wsum vsum;
 
        va = addr;
 {
        struct myri10ge_priv *mgp = ss->mgp;
        struct sk_buff *skb;
-       struct skb_frag_struct *rx_frags;
+       skb_frag_t *rx_frags;
        struct myri10ge_rx_buf *rx;
        int i, idx, remainder, bytes;
        struct pci_dev *pdev = mgp->pdev;
                return 0;
        }
        rx_frags = skb_shinfo(skb)->frags;
-       /* Fill skb_frag_struct(s) with data from our receive */
+       /* Fill skb_frag_t(s) with data from our receive */
        for (i = 0, remainder = len; remainder > 0; i++) {
                myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
                skb_fill_page_desc(skb, i, rx->info[idx].page,
 
        /* remove padding */
        rx_frags[0].page_offset += MXGEFW_PAD;
-       rx_frags[0].size -= MXGEFW_PAD;
+       skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
        len -= MXGEFW_PAD;
 
        skb->len = len;
        struct myri10ge_slice_state *ss;
        struct mcp_kreq_ether_send *req;
        struct myri10ge_tx_buf *tx;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        struct netdev_queue *netdev_queue;
        dma_addr_t bus;
        u32 low;
 
 static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 {
        struct nfp_net *nn = netdev_priv(netdev);
-       const struct skb_frag_struct *frag;
+       const skb_frag_t *frag;
        int f, nr_frags, wr_idx, md_bytes;
        struct nfp_net_tx_ring *tx_ring;
        struct nfp_net_r_vector *r_vec;
        todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
 
        while (todo--) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag;
                struct nfp_net_tx_buf *tx_buf;
                struct sk_buff *skb;
                int fidx, nr_frags;
 static void
 nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 {
-       const struct skb_frag_struct *frag;
+       const skb_frag_t *frag;
        struct netdev_queue *nd_q;
 
        while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
 
                struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
 {
        struct netxen_skb_frag *nf;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        int i, nr_frags;
        dma_addr_t map;
 
        struct pci_dev *pdev;
        int i, k;
        int delta = 0;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
 
        u32 producer;
        int frag_count;
 
                             struct qlcnic_cmd_buffer *pbuf)
 {
        struct qlcnic_skb_frag *nf;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        int i, nr_frags;
        dma_addr_t map;
 
 
        }
 
        for (i = 0; i < nr_frags; i++) {
-               struct skb_frag_struct *frag;
-
-               frag = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
-               tpbuf->length = frag->size;
-               tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
-                                              frag->page.p, frag->page_offset,
-                                              tpbuf->length, DMA_TO_DEVICE);
+               tpbuf->length = skb_frag_size(frag);
+               tpbuf->dma_addr = skb_frag_dma_map(adpt->netdev->dev.parent,
+                                                  frag, 0, tpbuf->length,
+                                                  DMA_TO_DEVICE);
                ret = dma_mapping_error(adpt->netdev->dev.parent,
                                        tpbuf->dma_addr);
                if (ret)
 
        struct xlgmac_desc_data *desc_data;
        unsigned int offset, datalen, len;
        struct xlgmac_pkt_info *pkt_info;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned int tso, vlan;
        dma_addr_t skb_dma;
        unsigned int i;
 
                               struct sk_buff *skb,
                               struct xlgmac_pkt_info *pkt_info)
 {
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned int context_desc;
        unsigned int len;
        unsigned int i;
 
        bdx_tx_db_inc_wptr(db);
 
        for (i = 0; i < nr_frags; i++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag;
 
                frag = &skb_shinfo(skb)->frags[i];
                db->wptr->len = skb_frag_size(frag);
 
        total_len += skb_headlen(skb);
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               struct skb_frag_struct *f = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 
                total_len += skb_frag_size(f);
-               sg_set_page(&urb->sg[i + s], f->page.p, f->size,
+               sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f),
                                f->page_offset);
        }
        urb->transfer_buffer_length = total_len;
 
 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
                    struct vmxnet3_rx_buf_info *rbi)
 {
-       struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
-               skb_shinfo(skb)->nr_frags;
+       skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
 
        BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                u32 buf_size;
 
                buf_offset = 0;
        int i;
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
        }
 
        if (nr_frags) {
                seq_printf(s, "    nr_frags = %d\n", nr_frags);
                for (i = 0; i < nr_frags; i++) {
-                       const struct skb_frag_struct *frag =
-                                       &skb_shinfo(skb)->frags[i];
+                       const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                        len = skb_frag_size(frag);
                        p = skb_frag_address_safe(frag);
 
                                     len);
                } else {
                        frag = &skb_shinfo(skb)->frags[f];
-                       len = frag->size;
+                       len = skb_frag_size(frag);
                        wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
                }
 
 
                        if (!headlen) {
                                pa = skb_frag_dma_map(dev, frag,
-                                                     frag->size - len, lenmss,
-                                                     DMA_TO_DEVICE);
+                                                     skb_frag_size(frag) - len,
+                                                     lenmss, DMA_TO_DEVICE);
                                vring->ctx[i].mapped_as = wil_mapped_as_page;
                        } else {
                                pa = dma_map_single(dev,
 
        /* middle segments */
        for (; f < nr_frags; f++) {
-               const struct skb_frag_struct *frag =
-                               &skb_shinfo(skb)->frags[f];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
                int len = skb_frag_size(frag);
 
                *_d = *d;
 
        /* Rest of the descriptors are from the SKB fragments */
        for (f = 0; f < nr_frags; f++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
-               int len = frag->size;
+               int len = skb_frag_size(frag);
 
                wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
                             len, descs_used);
 
                        int j;
                        skb->truesize += skb->data_len;
                        for (j = 0; j < i; j++)
-                               put_page(frags[j].page.p);
+                               put_page(skb_frag_page(&frags[j]));
                        return -ENOMEM;
                }
 
                        BUG();
 
                offset += len;
-               frags[i].page.p = page;
+               __skb_frag_set_page(&frags[i], page);
                frags[i].page_offset = 0;
                skb_frag_size_set(&frags[i], len);
        }
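
The store side gets the same treatment: __skb_frag_set_page() hides the
page.p member just as skb_frag_size_set() hides size. Roughly, at the time:

static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
        frag->page.p = page;
}

(The hunk above still writes page_offset directly; its accessors,
skb_frag_off() and skb_frag_off_set(), arrived in a separate series.)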
 
        int cnt, elements = 0;
 
        for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
 
                elements += qeth_get_elements_for_range(
                        (addr_t)skb_frag_address(frag),
 
 u32 fcoe_fc_crc(struct fc_frame *fp)
 {
        struct sk_buff *skb = fp_skb(fp);
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned char *data;
        unsigned long off, len, clen;
        u32 crc;
 
                hw_buffer.s.size = skb_headlen(skb);
                CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
+                       skb_frag_t *fs = skb_shinfo(skb)->frags + i;
 
                        hw_buffer.s.addr =
-                               XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) +
-                                              fs->page_offset));
+                               XKPHYS_TO_PHYS((u64)skb_frag_address(fs));
-                       hw_buffer.s.size = fs->size;
+                       hw_buffer.s.size = skb_frag_size(fs);
                        CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
                }
 
                        count = add_physinfo_entries(page_to_pfn(
                                  skb_frag_page(&skb_shinfo(skb)->frags[frag])),
                                  skb_shinfo(skb)->frags[frag].page_offset,
-                                 skb_shinfo(skb)->frags[frag].size, count,
-                                 frags_max, frags);
+                                 skb_frag_size(&skb_shinfo(skb)->frags[frag]),
+                                 count, frags_max, frags);
                        /* add_physinfo_entries only returns
                         * zero if the frags array is out of room
                         * That should never happen because we
 
                skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];
 
                sg_init_table(&ccmd->sg, 1);
-               sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag),
-                           dfrag->page_offset);
-               get_page(dfrag->page.p);
+               sg_set_page(&ccmd->sg, skb_frag_page(dfrag),
+                               skb_frag_size(dfrag), dfrag->page_offset);
+               get_page(skb_frag_page(dfrag));
 
                cmd->se_cmd.t_data_sg = &ccmd->sg;
                cmd->se_cmd.t_data_nents = 1;
                        pdu_cb->ddigest, pdu_cb->frags);
        for (i = 0; i < ssi->nr_frags; i++)
                pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
-                       skb, i, ssi->frags[i].page_offset, ssi->frags[i].size);
+                       skb, i, ssi->frags[i].page_offset,
+                       skb_frag_size(&ssi->frags[i]));
 }
 
 static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
                hpdu_cb->frags++;
                hpdu_cb->hfrag_idx = hfrag_idx;
 
-               len = hssi->frags[hfrag_idx].size;
+               len = skb_frag_size(&hssi->frags[hfrag_idx]);
                hskb->len += len;
                hskb->data_len += len;
                hskb->truesize += len;
 
                        get_page(skb_frag_page(&hssi->frags[dfrag_idx]));
 
-                       len += hssi->frags[dfrag_idx].size;
+                       len += skb_frag_size(&hssi->frags[dfrag_idx]);
 
                        hssi->nr_frags++;
                        hpdu_cb->frags++;