#include <linux/pinctrl/devinfo.h>
 #include <linux/phylink.h>
 #include <linux/jhash.h>
+#include <linux/bitfield.h>
 #include <net/dsa.h>
 
 #include "mtk_eth_soc.h"
                struct net_device *netdev;
                unsigned int pktlen;
                dma_addr_t dma_addr;
-               u32 hash;
+               u32 hash, reason;
                int mac;
 
                ring = mtk_get_rx_ring(eth);
                        skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
                }
 
+               reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+               if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+                       mtk_ppe_check_skb(eth->ppe, skb,
+                                         trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+
                if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
                    (trxd.rxd2 & RX_DMA_VTAG))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
        }
 
        if (eth->soc->offload_version) {
-               eth->ppe = mtk_ppe_init(eth->dev, eth->base + MTK_ETH_PPE_BASE, 2);
+               eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
                if (!eth->ppe) {
                        err = -ENOMEM;
                        goto err_free_dev;
 
 #include <linux/iopoll.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
+#include "mtk_eth_soc.h"
 #include "mtk_ppe.h"
 #include "mtk_ppe_regs.h"
 
+static DEFINE_SPINLOCK(ppe_lock);
+
 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
 {
        writel(val, ppe->base + reg);
        return ppe_m32(ppe, reg, val, 0);
 }
 
+/* Read the current PPE bind timestamp counter from the frame engine.
+ * NOTE(review): 0x0010 is presumably the FE global timer register —
+ * confirm against the SoC datasheet / a named register define.
+ */
+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+{
+       return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
 {
        int ret;
               FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
 }
 
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
-                        u16 timestamp)
+/* Return true if the software flow entry @entry describes the same flow
+ * as the (hardware-sourced) FOE entry @data.
+ *
+ * Only the UDP flag of ib1 and the type-dependent prefix of the entry
+ * data are compared; the compared length stops before the per-type
+ * ib2/_rsv word.  The "- 4" presumably accounts for the 4-byte field
+ * preceding the data union in struct mtk_foe_entry — verify layout.
+ */
+static bool
+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+{
+       int type, len;
+
+       /* TCP and UDP flows must never match each other */
+       if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+               return false;
+
+       /* wider IPv6 tuple for anything past the IPv4 DS-Lite types */
+       type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+       if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+               len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+       else
+               len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+       return !memcmp(&entry->data.data, &data->data, len - 4);
+}
+
+/* Refresh the software copy of @entry from its hardware FOE slot.
+ *
+ * Re-reads ib1 (state + bind timestamp) from the hardware table.  If the
+ * slot no longer holds this flow (hash collision recycled it), drop the
+ * association by resetting entry->hash to the 0xffff "no slot" marker.
+ */
+static void
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
        struct mtk_foe_entry *hwe;
-       u32 hash;
+       struct mtk_foe_entry foe;
+
+       spin_lock_bh(&ppe_lock);
+       /* 0xffff: entry currently has no hardware slot */
+       if (entry->hash == 0xffff)
+               goto out;
 
+       /* snapshot the hw entry before comparing/copying from it */
+       hwe = &ppe->foe_table[entry->hash];
+       memcpy(&foe, hwe, sizeof(foe));
+       if (!mtk_flow_entry_match(entry, &foe)) {
+               entry->hash = 0xffff;
+               goto out;
+       }
+
+       entry->data.ib1 = foe.ib1;
+
+out:
+       spin_unlock_bh(&ppe_lock);
+}
+
+/* Write @entry into hardware FOE table slot @hash with a fresh bind
+ * timestamp.  Caller must hold ppe_lock.
+ *
+ * The data portion is written before ib1 — with a wmb() in between — so
+ * the PPE never observes a bound ib1 word describing half-written data.
+ */
+static void
+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+                      u16 hash)
+{
+       struct mtk_foe_entry *hwe;
+       u16 timestamp;
+
+       /* stamp the entry with the current hardware timestamp so the
+        * binding starts with zero idle time
+        */
+       timestamp = mtk_eth_timestamp(ppe->eth);
        timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
        entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
        entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
 
-       hash = mtk_ppe_hash_entry(entry);
        hwe = &ppe->foe_table[hash];
-       if (!mtk_foe_entry_usable(hwe)) {
-               hwe++;
-               hash++;
-
-               if (!mtk_foe_entry_usable(hwe))
-                       return -ENOSPC;
-       }
-
        memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
        wmb();
        hwe->ib1 = entry->ib1;
+       /* make the entry visible to the device before flushing its cache */
        dma_wmb();
 
        mtk_ppe_cache_clear(ppe);
+}
 
-       return hash;
+/* Detach @entry from software tracking and invalidate its hardware FOE
+ * slot so the PPE stops offloading the flow.
+ */
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+       spin_lock_bh(&ppe_lock);
+       hlist_del_init(&entry->list);
+       if (entry->hash != 0xffff) {
+               /* Mark the hardware entry invalid, not bound: leaving the
+                * state at MTK_FOE_STATE_BIND would keep the stale entry
+                * active and the PPE would continue forwarding the flow.
+                */
+               ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
+               ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
+                                                             MTK_FOE_STATE_INVALID);
+               dma_wmb();
+       }
+       entry->hash = 0xffff;
+       spin_unlock_bh(&ppe_lock);
+}
+
+/* Register @entry for offload.
+ *
+ * The hardware entry is NOT written here: the entry is queued on the
+ * foe_flow bucket for its hash so __mtk_ppe_check_skb() can bind it to a
+ * hardware slot once the PPE reports an unbound hit for a matching packet.
+ * Always returns 0.
+ */
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+       u32 hash = mtk_ppe_hash_entry(&entry->data);
+
+       /* no hardware slot assigned yet */
+       entry->hash = 0xffff;
+       spin_lock_bh(&ppe_lock);
+       /* two hash slots share one bucket, hence hash / 2 */
+       hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
+       spin_unlock_bh(&ppe_lock);
+
+       return 0;
+}
+
+/* RX-path handler for packets whose flow hit an unbound FOE slot: find
+ * the queued software flow entry matching hardware slot @hash and bind
+ * it to that slot.
+ *
+ * Bucket entries that do not match (or duplicates after the first match)
+ * have their slot association dropped.  @skb is currently unused here.
+ */
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+       struct hlist_head *head = &ppe->foe_flow[hash / 2];
+       struct mtk_flow_entry *entry;
+       struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+       bool found = false;
+
+       /* unlocked fast path; racing with an add is benign — the next
+        * packet for the flow takes the slow path below
+        */
+       if (hlist_empty(head))
+               return;
+
+       spin_lock_bh(&ppe_lock);
+       hlist_for_each_entry(entry, head, list) {
+               if (found || !mtk_flow_entry_match(entry, hwe)) {
+                       if (entry->hash != 0xffff)
+                               entry->hash = 0xffff;
+                       continue;
+               }
+
+               entry->hash = hash;
+               __mtk_foe_entry_commit(ppe, &entry->data, hash);
+               found = true;
+       }
+       spin_unlock_bh(&ppe_lock);
+}
+
+/* Return the number of hardware timestamp ticks since @entry was last
+ * hit, refreshing the software copy from hardware first.  Handles one
+ * wraparound of the MTK_FOE_IB1_BIND_TIMESTAMP counter.
+ */
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+       u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
+       u16 timestamp;
+
+       /* sync ib1 (and hence the bind timestamp) from the hw table */
+       mtk_flow_entry_update(ppe, entry);
+       timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+
+       if (timestamp > now)
+               return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+       else
+               return now - timestamp;
 }
 
-struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base,
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
                 int version)
 {
+       struct device *dev = eth->dev;
        struct mtk_foe_entry *foe;
        struct mtk_ppe *ppe;
 
         * not coherent.
         */
        ppe->base = base;
+       ppe->eth = eth;
        ppe->dev = dev;
        ppe->version = version;
 
 
        MTK_PPE_CPU_REASON_INVALID                      = 0x1f,
 };
 
+/* Software-side tracking state for one offloaded flow. */
+struct mtk_flow_entry {
+       struct rhash_head node;         /* flow_table linkage, keyed on cookie */
+       struct hlist_node list;         /* ppe->foe_flow[hash / 2] bucket linkage */
+       unsigned long cookie;           /* flow_cls_offload cookie */
+       struct mtk_foe_entry data;      /* software copy of the FOE entry */
+       u16 hash;                       /* hw FOE slot index; 0xffff = unbound */
+       s8 wed_index;                   /* negative when no WED index is in use */
+};
+
 struct mtk_ppe {
+       struct mtk_eth *eth;            /* back-pointer for register access */
        struct device *dev;
        void __iomem *base;
        int version;
        struct mtk_foe_entry *foe_table;
        dma_addr_t foe_phys;
 
+       /* per-slot jiffies snapshot used to rate-limit check_skb work */
+       u16 foe_check_time[MTK_PPE_ENTRIES];
+       /* sw flow entries waiting for / holding a hw slot; 2 slots/bucket */
+       struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
+
        void *acct_table;
 };
 
-struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, int version);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
 int mtk_ppe_start(struct mtk_ppe *ppe);
 int mtk_ppe_stop(struct mtk_ppe *ppe);
 
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+
+/* Rate-limited wrapper around __mtk_ppe_check_skb(): at most one bind
+ * attempt per hardware slot every HZ / 10 jiffies, tracked per slot in
+ * foe_check_time.  u16 arithmetic keeps the comparison wrap-safe.
+ */
 static inline void
-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
 {
-       ppe->foe_table[hash].ib1 = 0;
-       dma_wmb();
+       u16 now, diff;
+
+       /* offload may be disabled for this SoC */
+       if (!ppe)
+               return;
+
+       now = (u16)jiffies;
+       diff = now - ppe->foe_check_time[hash];
+       if (diff < HZ / 10)
+               return;
+
+       ppe->foe_check_time[hash] = now;
+       __mtk_ppe_check_skb(ppe, skb, hash);
 }
 
 static inline int
 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
                           int bss, int wcid);
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
-                        u16 timestamp);
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
 
 #endif
 
        } pppoe;
 };
 
-struct mtk_flow_entry {
-       struct rhash_head node;
-       unsigned long cookie;
-       u16 hash;
-       s8 wed_index;
-};
-
 static const struct rhashtable_params mtk_flow_ht_params = {
        .head_offset = offsetof(struct mtk_flow_entry, node),
        .key_offset = offsetof(struct mtk_flow_entry, cookie),
        .automatic_shrinking = true,
 };
 
-static u32
-mtk_eth_timestamp(struct mtk_eth *eth)
-{
-       return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
-}
-
 static int
 mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
                       bool egress)
        int offload_type = 0;
        int wed_index = -1;
        u16 addr_type = 0;
-       u32 timestamp;
        u8 l4proto = 0;
        int err = 0;
-       int hash;
        int i;
 
        if (rhashtable_lookup(ð->flow_table, &f->cookie, mtk_flow_ht_params))
                return -ENOMEM;
 
        entry->cookie = f->cookie;
-       timestamp = mtk_eth_timestamp(eth);
-       hash = mtk_foe_entry_commit(eth->ppe, &foe, timestamp);
-       if (hash < 0) {
-               err = hash;
+       memcpy(&entry->data, &foe, sizeof(entry->data));
+       entry->wed_index = wed_index;
+
+       if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
                goto free;
-       }
 
-       entry->hash = hash;
-       entry->wed_index = wed_index;
        err = rhashtable_insert_fast(ð->flow_table, &entry->node,
                                     mtk_flow_ht_params);
        if (err < 0)
-               goto clear_flow;
+               goto clear;
 
        return 0;
-clear_flow:
-       mtk_foe_entry_clear(eth->ppe, hash);
+
+clear:
+       mtk_foe_entry_clear(eth->ppe, entry);
 free:
        kfree(entry);
        if (wed_index >= 0)
        if (!entry)
                return -ENOENT;
 
-       mtk_foe_entry_clear(eth->ppe, entry->hash);
+       mtk_foe_entry_clear(eth->ppe, entry);
        rhashtable_remove_fast(ð->flow_table, &entry->node,
                               mtk_flow_ht_params);
        if (entry->wed_index >= 0)
 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
 {
        struct mtk_flow_entry *entry;
-       int timestamp;
        u32 idle;
 
        entry = rhashtable_lookup(ð->flow_table, &f->cookie,
        if (!entry)
                return -ENOENT;
 
-       timestamp = mtk_foe_entry_timestamp(eth->ppe, entry->hash);
-       if (timestamp < 0)
-               return -ETIMEDOUT;
-
-       idle = mtk_eth_timestamp(eth) - timestamp;
+       idle = mtk_foe_entry_idle_time(eth->ppe, entry);
        f->stats.lastused = jiffies - idle * HZ;
 
        return 0;