return err;
 }
 
+/* Create an SG frame descriptor based on a linear skb.
+ *
+ * This function is used on the Tx path when the skb headroom is not large
+ * enough for the HW requirements; instead of reallocating the skb, we
+ * create an SG frame descriptor with only one entry.
+ */
+static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
+                                 struct sk_buff *skb,
+                                 struct dpaa2_fd *fd)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       struct dpaa2_eth_sgt_cache *sgt_cache;
+       struct dpaa2_sg_entry *sgt;
+       struct dpaa2_eth_swa *swa;
+       dma_addr_t addr, sgt_addr;
+       void *sgt_buf = NULL;
+       int sgt_buf_size;
+       int err;
+
+       /* Prepare the HW SGT structure */
+       sgt_cache = this_cpu_ptr(priv->sgt_cache);
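+       /* The SGT buffer holds the annotation area plus one SG entry */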
+       sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
+
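+       /* Reuse a recycled SGT buffer from the per-CPU cache, if available */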
+       if (sgt_cache->count == 0)
+               sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
+                                 GFP_ATOMIC);
+       else
+               sgt_buf = sgt_cache->buf[--sgt_cache->count];
+       if (unlikely(!sgt_buf))
+               return -ENOMEM;
+
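+       /* Align the buffer start as required by the hardware */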
+       sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
+       sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+       addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
+       if (unlikely(dma_mapping_error(dev, addr))) {
+               err = -ENOMEM;
+               goto data_map_failed;
+       }
+
+       /* Fill in the HW SGT structure */
+       dpaa2_sg_set_addr(sgt, addr);
+       dpaa2_sg_set_len(sgt, skb->len);
+       dpaa2_sg_set_final(sgt, true);
+
+       /* Store the skb backpointer and the SGT buffer size in the
+        * software annotation area
+        */
+       swa = (struct dpaa2_eth_swa *)sgt_buf;
+       swa->type = DPAA2_ETH_SWA_SINGLE;
+       swa->single.skb = skb;
+       swa->single.sgt_size = sgt_buf_size;
+
+       /* Separately map the SGT buffer */
+       sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+       if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+               err = -ENOMEM;
+               goto sgt_map_failed;
+       }
+
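+       /* Initialize the FD to point at the SGT buffer, in SG format */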
+       dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+       dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+       dpaa2_fd_set_addr(fd, sgt_addr);
+       dpaa2_fd_set_len(fd, skb->len);
+       dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
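+       /* Request a hardware Tx timestamp if the skb asked for one */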
+       if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+               enable_tx_tstamp(fd, sgt_buf);
+
+       return 0;
+
+sgt_map_failed:
+       dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
+data_map_failed:
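+       /* Return the SGT buffer to the per-CPU cache, unless it is full */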
+       if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
+               kfree(sgt_buf);
+       else
+               sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+
+       return err;
+}
+
 /* Create a frame descriptor based on a linear skb */
 static int build_single_fd(struct dpaa2_eth_priv *priv,
                           struct sk_buff *skb,
                       const struct dpaa2_fd *fd, bool in_napi)
 {
        struct device *dev = priv->net_dev->dev.parent;
-       dma_addr_t fd_addr;
+       dma_addr_t fd_addr, sg_addr;
        struct sk_buff *skb = NULL;
        unsigned char *buffer_start;
        struct dpaa2_eth_swa *swa;
        u8 fd_format = dpaa2_fd_get_format(fd);
        u32 fd_len = dpaa2_fd_get_len(fd);
+       struct dpaa2_eth_sgt_cache *sgt_cache;
+       struct dpaa2_sg_entry *sgt;
 
        fd_addr = dpaa2_fd_get_addr(fd);
        buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
        swa = (struct dpaa2_eth_swa *)buffer_start;
                                         DMA_BIDIRECTIONAL);
                }
        } else if (fd_format == dpaa2_fd_sg) {
-               skb = swa->sg.skb;
+               if (swa->type == DPAA2_ETH_SWA_SG) {
+                       skb = swa->sg.skb;
+
+                       /* Unmap the scatterlist */
+                       dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
+                                    DMA_BIDIRECTIONAL);
+                       kfree(swa->sg.scl);
 
-               /* Unmap the scatterlist */
-               dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
-                            DMA_BIDIRECTIONAL);
-               kfree(swa->sg.scl);
+                       /* Unmap the SGT buffer */
+                       dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
+                                        DMA_BIDIRECTIONAL);
+               } else {
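+                       /* A linear skb sent as a single-entry SG frame */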
+                       skb = swa->single.skb;
 
-               /* Unmap the SGT buffer */
-               dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
-                                DMA_BIDIRECTIONAL);
+                       /* Unmap the SGT buffer */
+                       dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
+                                        DMA_BIDIRECTIONAL);
+
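+                       /* Unmap the skb data pointed to by the SG entry */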
+                       sgt = (struct dpaa2_sg_entry *)(buffer_start +
+                                                       priv->tx_data_offset);
+                       sg_addr = dpaa2_sg_get_addr(sgt);
+                       dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
+               }
        } else {
                netdev_dbg(priv->net_dev, "Invalid FD format\n");
                return;
        }
 
        /* Free SGT buffer allocated on tx */
-       if (fd_format != dpaa2_fd_single)
-               skb_free_frag(buffer_start);
+       if (fd_format != dpaa2_fd_single) {
+               sgt_cache = this_cpu_ptr(priv->sgt_cache);
+               if (swa->type == DPAA2_ETH_SWA_SG) {
+                       skb_free_frag(buffer_start);
+               } else {
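+                       /* Recycle the SGT buffer if the cache has room */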
+                       if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
+                               kfree(buffer_start);
+                       else
+                               sgt_cache->buf[sgt_cache->count++] = buffer_start;
+               }
+       }
 
        /* Move on with skb release */
        napi_consume_skb(skb, in_napi);
        percpu_extras = this_cpu_ptr(priv->percpu_extras);
 
        needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
-       if (skb_headroom(skb) < needed_headroom) {
-               struct sk_buff *ns;
-
-               ns = skb_realloc_headroom(skb, needed_headroom);
-               if (unlikely(!ns)) {
-                       percpu_stats->tx_dropped++;
-                       goto err_alloc_headroom;
-               }
-               percpu_extras->tx_reallocs++;
-
-               if (skb->sk)
-                       skb_set_owner_w(ns, skb->sk);
-
-               dev_kfree_skb(skb);
-               skb = ns;
-       }
 
        /* We'll be holding a back-reference to the skb until Tx Confirmation;
         * we don't want that overwritten by a concurrent Tx with a cloned skb.
                err = build_sg_fd(priv, skb, &fd);
                percpu_extras->tx_sg_frames++;
                percpu_extras->tx_sg_bytes += skb->len;
+       } else if (skb_headroom(skb) < needed_headroom) {
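+               /* Not enough headroom for HW requirements: send the linear
+                * skb as a single-entry SG frame instead of reallocating it
+                */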
+               err = build_sg_fd_single_buf(priv, skb, &fd);
+               percpu_extras->tx_sg_frames++;
+               percpu_extras->tx_sg_bytes += skb->len;
        } else {
                err = build_single_fd(priv, skb, &fd);
        }
        return NETDEV_TX_OK;
 
 err_build_fd:
-err_alloc_headroom:
        dev_kfree_skb(skb);
 
        return NETDEV_TX_OK;
        return 0;
 }
 
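+/* Free all SGT buffers held in the per-CPU caches */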
+static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
+{
+       struct dpaa2_eth_sgt_cache *sgt_cache;
+       u16 count;
+       int k, i;
+
+       for_each_online_cpu(k) {
+               sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
+               count = sgt_cache->count;
+
+               for (i = 0; i < count; i++)
+                       kfree(sgt_cache->buf[i]);
+               sgt_cache->count = 0;
+       }
+}
+
 static int pull_channel(struct dpaa2_eth_channel *ch)
 {
        int err;
        /* Empty the buffer pool */
        drain_pool(priv);
 
+       /* Empty the Scatter-Gather Buffer cache */
+       dpaa2_eth_sgt_cache_drain(priv);
+
        return 0;
 }
 
                goto err_alloc_percpu_extras;
        }
 
+       priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
+       if (!priv->sgt_cache) {
+               dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
+               err = -ENOMEM;
+               goto err_alloc_sgt_cache;
+       }
+
        err = netdev_init(net_dev);
        if (err)
                goto err_netdev_init;
 err_alloc_rings:
 err_csum:
 err_netdev_init:
+       free_percpu(priv->sgt_cache);
+err_alloc_sgt_cache:
        free_percpu(priv->percpu_extras);
 err_alloc_percpu_extras:
        free_percpu(priv->percpu_stats);
                fsl_mc_free_irqs(ls_dev);
 
        free_rings(priv);
+       free_percpu(priv->sgt_cache);
        free_percpu(priv->percpu_stats);
        free_percpu(priv->percpu_extras);