 #define XGMAC_HWFEAT_GMIISEL           BIT(1)
 #define XGMAC_HW_FEATURE1              0x00000120
 #define XGMAC_HWFEAT_TSOEN             BIT(18)
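+/* Encoded DMA address width: 0 = 32 bits, 1 = 40 bits, 2 = 48 bits */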
+#define XGMAC_HWFEAT_ADDR64            GENMASK(15, 14)
 #define XGMAC_HWFEAT_TXFIFOSIZE                GENMASK(10, 6)
 #define XGMAC_HWFEAT_RXFIFOSIZE                GENMASK(4, 0)
 #define XGMAC_HW_FEATURE2              0x00000124
 #define XGMAC_EN_LPI                   BIT(15)
 #define XGMAC_LPI_XIT_PKT              BIT(14)
 #define XGMAC_AAL                      BIT(12)
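+/* EAME: Enhanced Address Mode Enable, required for >32-bit DMA addressing */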
+#define XGMAC_EAME                     BIT(11)
 #define XGMAC_BLEN                     GENMASK(7, 1)
 #define XGMAC_BLEN256                  BIT(7)
 #define XGMAC_BLEN128                  BIT(6)
 
  *  This function fills descriptors and requests new descriptors according
  *  to the buffer length to fill.
  */
-static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
+static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
                                 int total_len, bool last_segment, u32 queue)
 {
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        struct dma_desc *desc;
        u32 buff_size;
        int tmp_len;

        tmp_len = total_len;
 
        while (tmp_len > 0) {
+               dma_addr_t curr_addr;
+
                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
                WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
                desc = tx_q->dma_tx + tx_q->cur_tx;
 
-               desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
+               curr_addr = des + (total_len - tmp_len);
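+               /* 32-bit datapaths carry the buffer address in des0 only;
+                * wider datapaths program des0/des1 through
+                * stmmac_set_desc_addr().
+                */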
+               if (priv->dma_cap.addr64 <= 32)
+                       desc->des0 = cpu_to_le32(curr_addr);
+               else
+                       stmmac_set_desc_addr(priv, desc, curr_addr);
+
                buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
                            TSO_MAX_BUFF_SIZE : tmp_len;
 
        struct stmmac_priv *priv = netdev_priv(dev);
        int nfrags = skb_shinfo(skb)->nr_frags;
        u32 queue = skb_get_queue_mapping(skb);
-       unsigned int first_entry, des;
+       unsigned int first_entry;
        struct stmmac_tx_queue *tx_q;
        int tmp_pay_len = 0;
        u32 pay_len, mss;
        u8 proto_hdr_len;
+       dma_addr_t des;
        int i;
 
        tx_q = &priv->tx_queue[queue];
        tx_q->tx_skbuff_dma[first_entry].buf = des;
        tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
 
-       first->des0 = cpu_to_le32(des);
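+       /* With >32-bit addressing, des1 holds the high address bits, so the
+        * header/payload split across buff1/buff2 of the first descriptor is
+        * not available; the whole payload goes through stmmac_tso_allocator().
+        */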
+       if (priv->dma_cap.addr64 <= 32) {
+               first->des0 = cpu_to_le32(des);
 
-       /* Fill start of payload in buff2 of first descriptor */
-       if (pay_len)
-               first->des1 = cpu_to_le32(des + proto_hdr_len);
+               /* Fill start of payload in buff2 of first descriptor */
+               if (pay_len)
+                       first->des1 = cpu_to_le32(des + proto_hdr_len);
 
-       /* If needed take extra descriptors to fill the remaining payload */
-       tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+               /* If needed, take extra descriptors to fill the remaining payload */
+               tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+       } else {
+               stmmac_set_desc_addr(priv, first, des);
+               tmp_pay_len = pay_len;
+       }
 
        stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
 
        int i, csum_insertion = 0, is_jumbo = 0;
        u32 queue = skb_get_queue_mapping(skb);
        int nfrags = skb_shinfo(skb)->nr_frags;
-       int entry;
-       unsigned int first_entry;
        struct dma_desc *desc, *first;
        struct stmmac_tx_queue *tx_q;
+       unsigned int first_entry;
        unsigned int enh_desc;
-       unsigned int des;
+       dma_addr_t des;
+       int entry;
 
        tx_q = &priv->tx_queue[queue];
 
                priv->tso = true;
                dev_info(priv->device, "TSO feature enabled\n");
        }
+
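+       /* Use the DMA width reported by the core; if the platform cannot
+        * satisfy that mask, fall back to 32-bit addressing.
+        */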
+       if (priv->dma_cap.addr64) {
+               ret = dma_set_mask_and_coherent(device,
+                               DMA_BIT_MASK(priv->dma_cap.addr64));
+               if (!ret) {
+                       dev_info(priv->device, "Using %d-bit DMA width\n",
+                                priv->dma_cap.addr64);
+               } else {
+                       ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
+                       if (ret) {
+                               dev_err(priv->device, "Failed to set DMA mask\n");
+                               goto error_hw_init;
+                       }
+
+                       priv->dma_cap.addr64 = 32;
+               }
+       }
+
        ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
        ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED