#include <net/dsa.h>
 #include <net/dst_metadata.h>
 #include <net/page_pool/helpers.h>
+#include <linux/genalloc.h>
 
 #include "mtk_eth_soc.h"
 #include "mtk_wed.h"
        return (void *)data;
 }
 
+/* mtk_dma_ring_alloc - allocate a descriptor ring from SRAM or coherent DRAM
+ * @eth:        driver state providing the SRAM gen_pool and the DMA device
+ * @size:       ring size in bytes
+ * @dma_handle: out parameter receiving the bus/phys address of the ring
+ * @use_sram:   prefer the on-chip SRAM pool when one was set up at probe time
+ *
+ * Returns the CPU virtual address of the ring, or NULL on failure.
+ *
+ * NOTE(review): if the SRAM pool exists but is exhausted, this returns NULL
+ * rather than falling back to dma_alloc_coherent(); a fallback would break
+ * the pairing with mtk_dma_ring_free(), whose @in_sram flag must mirror the
+ * decision made here (use_sram && eth->sram_pool).
+ */
+static void *mtk_dma_ring_alloc(struct mtk_eth *eth, size_t size,
+                               dma_addr_t *dma_handle, bool use_sram)
+{
+       void *dma_ring;
+
+       if (use_sram && eth->sram_pool) {
+               dma_ring = (void *)gen_pool_alloc(eth->sram_pool, size);
+               if (!dma_ring)
+                       return dma_ring;
+               /* SRAM is CPU-addressable memory; translate the pool's
+                * virtual address to the physical address the DMA engine
+                * needs.
+                */
+               *dma_handle = gen_pool_virt_to_phys(eth->sram_pool,
+                                                   (unsigned long)dma_ring);
+       } else {
+               dma_ring = dma_alloc_coherent(eth->dma_dev, size, dma_handle,
+                                             GFP_KERNEL);
+       }
+
+       return dma_ring;
+}
+
+/* mtk_dma_ring_free - release a ring obtained from mtk_dma_ring_alloc()
+ * @eth:        driver state providing the SRAM gen_pool and the DMA device
+ * @size:       ring size in bytes, as passed to the allocation
+ * @dma_ring:   CPU virtual address returned by mtk_dma_ring_alloc()
+ * @dma_handle: bus/phys address returned via the allocation's out parameter
+ * @in_sram:    must mirror the allocation path: true only when the ring was
+ *              actually carved from the SRAM pool, false for coherent DRAM
+ */
+static void mtk_dma_ring_free(struct mtk_eth *eth, size_t size, void *dma_ring,
+                             dma_addr_t dma_handle, bool in_sram)
+{
+       if (in_sram && eth->sram_pool)
+               gen_pool_free(eth->sram_pool, (unsigned long)dma_ring, size);
+       else
+               dma_free_coherent(eth->dma_dev, size, dma_ring, dma_handle);
+}
+
 /* the qdma core needs scratch memory to be setup */
 static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
        dma_addr_t dma_addr;
        int i, j, len;
 
-       if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
-               eth->scratch_ring = eth->sram_base;
-       else
-               eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
-                                                      cnt * soc->tx.desc_size,
-                                                      ð->phy_scratch_ring,
-                                                      GFP_KERNEL);
+       eth->scratch_ring = mtk_dma_ring_alloc(eth, cnt * soc->tx.desc_size,
+                                              ð->phy_scratch_ring, true);
 
        if (unlikely(!eth->scratch_ring))
                return -ENOMEM;
        if (!ring->buf)
                goto no_tx_mem;
 
-       if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
-               ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
-               ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
-       } else {
-               ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
-                                              &ring->phys, GFP_KERNEL);
-       }
-
+       ring->dma = mtk_dma_ring_alloc(eth, ring_size * sz, &ring->phys, true);
        if (!ring->dma)
                goto no_tx_mem;
 
                kfree(ring->buf);
                ring->buf = NULL;
        }
-       if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
-               dma_free_coherent(eth->dma_dev,
-                                 ring->dma_size * soc->tx.desc_size,
-                                 ring->dma, ring->phys);
+
+       if (ring->dma) {
+               mtk_dma_ring_free(eth, ring->dma_size * soc->tx.desc_size,
+                                 ring->dma, ring->phys, true);
                ring->dma = NULL;
        }
 
        const struct mtk_reg_map *reg_map = eth->soc->reg_map;
        const struct mtk_soc_data *soc = eth->soc;
        struct mtk_rx_ring *ring;
-       int rx_data_len, rx_dma_size, tx_ring_size;
+       int rx_data_len, rx_dma_size;
        int i;
 
-       if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
-               tx_ring_size = MTK_QDMA_RING_SIZE;
-       else
-               tx_ring_size = soc->tx.dma_size;
-
        if (rx_flag == MTK_RX_FLAGS_QDMA) {
                if (ring_no)
                        return -EINVAL;
                ring->page_pool = pp;
        }
 
-       if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
-           rx_flag != MTK_RX_FLAGS_NORMAL) {
-               ring->dma = dma_alloc_coherent(eth->dma_dev,
-                               rx_dma_size * eth->soc->rx.desc_size,
-                               &ring->phys, GFP_KERNEL);
-       } else {
-               struct mtk_tx_ring *tx_ring = ð->tx_ring;
-
-               ring->dma = tx_ring->dma + tx_ring_size *
-                           eth->soc->tx.desc_size * (ring_no + 1);
-               ring->phys = tx_ring->phys + tx_ring_size *
-                            eth->soc->tx.desc_size * (ring_no + 1);
-       }
-
+       ring->dma = mtk_dma_ring_alloc(eth,
+                                      rx_dma_size * eth->soc->rx.desc_size,
+                                      &ring->phys,
+                                      rx_flag == MTK_RX_FLAGS_NORMAL);
        if (!ring->dma)
                return -ENOMEM;
 
                ring->data = NULL;
        }
 
-       if (!in_sram && ring->dma) {
-               dma_free_coherent(eth->dma_dev,
-                                 ring->dma_size * eth->soc->rx.desc_size,
-                                 ring->dma, ring->phys);
+       if (ring->dma) {
+               mtk_dma_ring_free(eth, ring->dma_size * eth->soc->rx.desc_size,
+                                 ring->dma, ring->phys, in_sram);
                ring->dma = NULL;
        }
 
                        netdev_tx_reset_subqueue(eth->netdev[i], j);
        }
 
-       if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
-               dma_free_coherent(eth->dma_dev,
-                                 MTK_QDMA_RING_SIZE * soc->tx.desc_size,
-                                 eth->scratch_ring, eth->phy_scratch_ring);
+       if (eth->scratch_ring) {
+               mtk_dma_ring_free(eth, soc->tx.fq_dma_size * soc->tx.desc_size,
+                                 eth->scratch_ring, eth->phy_scratch_ring,
+                                 true);
                eth->scratch_ring = NULL;
                eth->phy_scratch_ring = 0;
        }
+
        mtk_tx_clean(eth);
-       mtk_rx_clean(eth, ð->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
+       mtk_rx_clean(eth, ð->rx_ring[0], true);
        mtk_rx_clean(eth, ð->rx_ring_qdma, false);
 
        if (eth->hwlro) {
        return 0;
 }
 
+/* mtk_setup_legacy_sram - build an SRAM gen_pool for old device trees
+ * @eth: driver state; eth->base must already be mapped
+ * @res: register MMIO resource of the ethernet block
+ *
+ * Fallback for device trees that do not describe the SRAM as a dedicated
+ * node from which of_gen_pool_get() could obtain a pool: carve the pool out
+ * of the register resource at a hard-coded offset.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mtk_setup_legacy_sram(struct mtk_eth *eth, struct resource *res)
+{
+       dev_warn(eth->dev, "legacy DT: using hard-coded SRAM offset.\n");
+
+       /* Reject resources too small to contain the hard-coded SRAM window. */
+       if (res->start + MTK_ETH_SRAM_OFFSET + MTK_ETH_NETSYS_V2_SRAM_SIZE - 1 >
+           res->end)
+               return -EINVAL;
+
+       eth->sram_pool = devm_gen_pool_create(eth->dev,
+                                             const_ilog2(MTK_ETH_SRAM_GRANULARITY),
+                                             NUMA_NO_NODE, dev_name(eth->dev));
+
+       if (IS_ERR(eth->sram_pool))
+               return PTR_ERR(eth->sram_pool);
+
+       /* SRAM is transparently CPU-accessible through the existing ioremap
+        * of eth->base, so register both its virtual and physical addresses
+        * with the pool.
+        */
+       return gen_pool_add_virt(eth->sram_pool,
+                                (unsigned long)eth->base + MTK_ETH_SRAM_OFFSET,
+                                res->start + MTK_ETH_SRAM_OFFSET,
+                                MTK_ETH_NETSYS_V2_SRAM_SIZE, NUMA_NO_NODE);
+}
+
 static int mtk_probe(struct platform_device *pdev)
 {
-       struct resource *res = NULL, *res_sram;
+       struct resource *res = NULL;
        struct device_node *mac_np;
        struct mtk_eth *eth;
        int err, i;
        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
                eth->ip_align = NET_IP_ALIGN;
 
-       if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
-               /* SRAM is actual memory and supports transparent access just like DRAM.
-                * Hence we don't require __iomem being set and don't need to use accessor
-                * functions to read from or write to SRAM.
-                */
-               if (mtk_is_netsys_v3_or_greater(eth)) {
-                       eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
-                       if (IS_ERR(eth->sram_base))
-                               return PTR_ERR(eth->sram_base);
-               } else {
-                       eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
-               }
-       }
-
        if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
                if (!err)
                        err = -EINVAL;
                        goto err_destroy_sgmii;
                }
+
                if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
-                       if (mtk_is_netsys_v3_or_greater(eth)) {
-                               res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-                               if (!res_sram) {
+                       eth->sram_pool = of_gen_pool_get(pdev->dev.of_node,
+                                                        "sram", 0);
+                       if (!eth->sram_pool) {
+                               if (!mtk_is_netsys_v3_or_greater(eth)) {
+                                       err = mtk_setup_legacy_sram(eth, res);
+                                       if (err)
+                                               goto err_destroy_sgmii;
+                               } else {
+                                       dev_err(&pdev->dev,
+                                               "Could not get SRAM pool\n");
                                        err = -EINVAL;
                                        goto err_destroy_sgmii;
                                }
-                               eth->phy_scratch_ring = res_sram->start;
-                       } else {
-                               eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
                        }
                }
        }