                reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
                if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
-                       mtk_ppe_check_skb(eth->ppe, skb, hash);
+                       mtk_ppe_check_skb(eth->ppe[0], skb, hash);
 
                if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
                        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
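
The RX path pulls the PPE "CPU reason" out of word 4 of the RX descriptor; when it reports that an unbound flow crossed the binding rate threshold, the skb is offered to the PPE so the flow can be bound. Since the reason field does not say which instance trapped the packet, the check is pointed at instance 0 for now. A minimal userspace sketch of the FIELD_GET()-style extraction; the mask below is an assumed layout for illustration, not the real MTK_RXD4_PPE_CPU_REASON definition:

#include <stdint.h>
#include <stdio.h>

#define PPE_CPU_REASON_MASK	0x001f0000u	/* assumed 5-bit field */

/* shift the masked field down by the position of its lowest set bit */
#define FIELD_GET_DEMO(mask, reg) (((reg) & (mask)) >> __builtin_ctz(mask))

int main(void)
{
	uint32_t rxd4 = 0x000e0000;	/* fake RX descriptor word 4 */

	printf("cpu reason = 0x%x\n",
	       (unsigned int)FIELD_GET_DEMO(PPE_CPU_REASON_MASK, rxd4));
	return 0;
}
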
        /* we run 2 netdevs on the same dma ring so we only bring it up once */
        if (!refcount_read(ð->dma_refcnt)) {
                const struct mtk_soc_data *soc = eth->soc;
-               u32 gdm_config = MTK_GDMA_TO_PDMA;
+               u32 gdm_config;
+               int i;
 
                err = mtk_start_dma(eth);
                if (err)
                        return err;
 
-               if (soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
-                       gdm_config = soc->reg_map->gdma_to_ppe;
+               for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+                       mtk_ppe_start(eth->ppe[i]);
 
+               gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
+                                                 : MTK_GDMA_TO_PDMA;
                mtk_gdm_config(eth, gdm_config);
 
                napi_enable(ð->tx_napi);
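
mtk_ppe_start() is made NULL-tolerant below (and mtk_ppe_stop() gets a matching guard), so the open and stop paths can sweep the whole fixed-size array without caring how many instances were actually allocated; GDM steering is then chosen by offload_version alone. A self-contained sketch of that pattern, with illustrative names rather than the driver's API:

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct ppe { int id; };

static void ppe_start(struct ppe *ppe)
{
	if (!ppe)	/* unpopulated slot: silently skip */
		return;
	printf("starting ppe%d\n", ppe->id);
}

int main(void)
{
	struct ppe ppe0 = { 0 };
	struct ppe *instances[2] = { &ppe0, NULL };	/* V1-like: one PPE */
	size_t i;

	for (i = 0; i < ARRAY_SIZE(instances); i++)
		ppe_start(instances[i]);	/* no per-slot checks at the call site */
	return 0;
}
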
 static int mtk_stop(struct net_device *dev)
 {
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;
+       int i;
 
        phylink_stop(mac->phylink);
 
 
        mtk_dma_free(eth);
 
-       if (eth->soc->offload_version)
-               mtk_ppe_stop(eth->ppe);
+       for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+               mtk_ppe_stop(eth->ppe[i]);
 
        return 0;
 }
        }
 
        if (eth->soc->offload_version) {
-               u32 ppe_addr = eth->soc->reg_map->ppe_base;
-
-               eth->ppe = mtk_ppe_init(eth, eth->base + ppe_addr, 2);
-               if (!eth->ppe) {
-                       err = -ENOMEM;
-                       goto err_free_dev;
+               u32 num_ppe;
+
+               num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+               num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
+               for (i = 0; i < num_ppe; i++) {
+                       u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+
+                       eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
+                                                  eth->soc->offload_version, i);
+                       if (!eth->ppe[i]) {
+                               err = -ENOMEM;
+                               goto err_free_dev;
+                       }
                }
 
                err = mtk_eth_offload_init(eth);
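
PPE instances are laid out at a 0x400 stride from reg_map->ppe_base: NETSYS_V2 parts get two, everything else one, clamped by min_t() to the array size. A quick sketch of the window arithmetic; the base offset is an assumed value for illustration, not taken from a real reg_map:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ppe_base = 0x2200;	/* assumed register offset */
	int i;

	/* instance i sits at ppe_base + i * 0x400 */
	for (i = 0; i < 2; i++)
		printf("ppe%d window at 0x%x\n", i, ppe_base + i * 0x400);
	return 0;
}
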
 
 
        int                             ip_align;
 
-       struct mtk_ppe                  *ppe;
+       struct mtk_ppe                  *ppe[2];
        struct rhashtable               flow_table;
 
        struct bpf_prog                 __rcu *prog;
 
 }
 
 struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
-                int version)
+                            int version, int index)
 {
        const struct mtk_soc_data *soc = eth->soc;
        struct device *dev = eth->dev;
        if (!ppe->foe_flow)
                return NULL;
 
-       mtk_ppe_debugfs_init(ppe);
+       mtk_ppe_debugfs_init(ppe, index);
 
        return ppe;
 }
                        ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
 }
 
-int mtk_ppe_start(struct mtk_ppe *ppe)
+void mtk_ppe_start(struct mtk_ppe *ppe)
 {
        u32 val;
 
+       if (!ppe)
+               return;
+
        mtk_ppe_init_foe_table(ppe);
        ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
 
        ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
 
        ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
-
-       return 0;
 }
 
 int mtk_ppe_stop(struct mtk_ppe *ppe)
        u32 val;
        int i;
 
+       if (!ppe)
+               return 0;
+
        for (i = 0; i < MTK_PPE_ENTRIES; i++)
                ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
                                                   MTK_FOE_STATE_INVALID);
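
Besides the new NULL guard, mtk_ppe_stop() still invalidates every FOE entry by rewriting ib1 with only the state field set. A userspace sketch of the FIELD_PREP()-style insertion; mask and field width are assumptions for illustration, not the real MTK_FOE_IB1_STATE definition:

#include <stdint.h>
#include <stdio.h>

#define FOE_IB1_STATE_MASK	0x30000000u	/* assumed 2-bit state field */
#define FOE_STATE_INVALID	0u

/* shift the value up to the field position, then clamp to the mask */
#define FIELD_PREP_DEMO(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	/* as in mtk_ppe_stop(), ib1 is overwritten wholesale: only the
	 * (zero) INVALID state survives, everything else is dropped */
	uint32_t ib1 = FIELD_PREP_DEMO(FOE_IB1_STATE_MASK, FOE_STATE_INVALID);

	printf("ib1 = 0x%08x\n", ib1);
	return 0;
}
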
 
        };
        u8 type;
        s8 wed_index;
+       u8 ppe_index;
        u16 hash;
        union {
                struct mtk_foe_entry data;
        struct device *dev;
        void __iomem *base;
        int version;
+       char dirname[5];
 
        struct mtk_foe_entry *foe_table;
        dma_addr_t foe_phys;
        void *acct_table;
 };
 
-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
-int mtk_ppe_start(struct mtk_ppe *ppe);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
+                            int version, int index);
+void mtk_ppe_start(struct mtk_ppe *ppe);
 int mtk_ppe_stop(struct mtk_ppe *ppe);
 
 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
 
 #endif
 
                           inode->i_private);
 }
 
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index)
 {
        static const struct file_operations fops_all = {
                .open = mtk_ppe_debugfs_foe_open_all,
                .llseek = seq_lseek,
                .release = single_release,
        };
-
        static const struct file_operations fops_bind = {
                .open = mtk_ppe_debugfs_foe_open_bind,
                .read = seq_read,
                .llseek = seq_lseek,
                .release = single_release,
        };
-
        struct dentry *root;
 
-       root = debugfs_create_dir("mtk_ppe", NULL);
+       snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index);
+
+       root = debugfs_create_dir(ppe->dirname, NULL);
        debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
        debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
 
 
        memcpy(&entry->data, &foe, sizeof(entry->data));
        entry->wed_index = wed_index;
 
-       err = mtk_foe_entry_commit(eth->ppe, entry);
+       err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
        if (err < 0)
                goto free;
 
        return 0;
 
 clear:
-       mtk_foe_entry_clear(eth->ppe, entry);
+       mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
 free:
        kfree(entry);
        if (wed_index >= 0)
        if (!entry)
                return -ENOENT;
 
-       mtk_foe_entry_clear(eth->ppe, entry);
+       mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
        rhashtable_remove_fast(ð->flow_table, &entry->node,
                               mtk_flow_ht_params);
        if (entry->wed_index >= 0)
        if (!entry)
                return -ENOENT;
 
-       idle = mtk_foe_entry_idle_time(eth->ppe, entry);
+       idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
        f->stats.lastused = jiffies - idle * HZ;
 
        return 0;
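
The offload hooks now dispatch through entry->ppe_index when committing, clearing, or polling a flow. Entries are kzalloc()'d, so the index defaults to instance 0 in this patch; the field leaves room for callers that later bind flows to the second PPE. A sketch of the dispatch pattern, with illustrative types rather than the driver's:

#include <stdio.h>

struct ppe { int id; };
struct flow_entry { unsigned char ppe_index; };

static void foe_entry_clear(struct ppe *ppe, struct flow_entry *e)
{
	printf("clearing entry %p on ppe%d\n", (void *)e, ppe->id);
}

int main(void)
{
	struct ppe ppe0 = { 0 }, ppe1 = { 1 };
	struct ppe *instances[2] = { &ppe0, &ppe1 };
	struct flow_entry entry = { 0 };	/* kzalloc()-like default: instance 0 */

	foe_entry_clear(instances[entry.ppe_index], &entry);
	return 0;
}
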
        struct flow_block_cb *block_cb;
        flow_setup_cb_t *cb;
 
-       if (!eth->ppe || !eth->ppe->foe_table)
+       if (!eth->soc->offload_version)
                return -EOPNOTSUPP;
 
        if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 
 int mtk_eth_offload_init(struct mtk_eth *eth)
 {
-       if (!eth->ppe || !eth->ppe->foe_table)
-               return 0;
-
        return rhashtable_init(ð->flow_table, &mtk_flow_ht_params);
 }