*/
                list_del(&act->list);
        }
-       if (act->count)
+       if (act->count) {
+               spin_lock_bh(&act->count->cnt->lock);
+               if (!list_empty(&act->count_user))
+                       list_del(&act->count_user);
+               spin_unlock_bh(&act->count->cnt->lock);
                efx_tc_flower_put_counter_index(efx, act->count);
+       }
        if (act->encap_md) {
                list_del(&act->encap_user);
                efx_tc_flower_release_encap_md(efx, act->encap_md);
                                        goto release;
                                }
                                act->count = ctr;
+                               INIT_LIST_HEAD(&act->count_user);
                        }
 
                        if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DELIVER)) {
                                goto release;
                        }
                        act->count = ctr;
+                       INIT_LIST_HEAD(&act->count_user);
                }
 
                switch (fa->id) {
                                list_add_tail(&act->encap_user, &encap->users);
                                act->dest_mport = encap->dest_mport;
                                act->deliver = 1;
+                               if (act->count && !WARN_ON(!act->count->cnt)) {
+                                       /* This counter is used by an encap
+                                        * action, which needs a reference back
+                                        * so it can prod the neighbour entry
+                                        * whenever traffic is seen.
+                                        */
+                                       spin_lock_bh(&act->count->cnt->lock);
+                                       list_add_tail(&act->count_user,
+                                                     &act->count->cnt->users);
+                                       spin_unlock_bh(&act->count->cnt->lock);
+                               }
                                rc = efx_mae_alloc_action_set(efx, act);
                                if (rc) {
                                        NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (encap)");
 
  */
 
 #include "tc_counters.h"
+#include "tc_encap_actions.h"
 #include "mae_counter_format.h"
 #include "mae.h"
 #include "rx_common.h"
 {
        struct efx_tc_counter *cnt = ptr;
 
+       WARN_ON(!list_empty(&cnt->users));
+       /* We'd like to synchronize_rcu() here, but unfortunately we aren't
+        * removing the element from the hashtable (it's not clear that's a
+        * safe thing to do in an rhashtable_free_and_destroy free_fn), so
+        * threads could still be obtaining new pointers to *cnt if they can
+        * race against this function at all.
+        */
+       flush_work(&cnt->work);
+       EFX_WARN_ON_PARANOID(spin_is_locked(&cnt->lock));
        kfree(cnt);
 }
 
        rhashtable_free_and_destroy(&efx->tc->counter_ht, efx_tc_counter_free, NULL);
 }
 
+/* Work item: propagate counter activity back to the neighbour cache.
+ *
+ * Scheduled from the counter-update path after cnt->touched is refreshed;
+ * walks the list of action sets using this counter and, for each one that
+ * performs an encap, marks the neighbour entry backing that encap as
+ * recently used so the kernel does not expire it while traffic is still
+ * flowing through the offloaded rule.
+ */
+static void efx_tc_counter_work(struct work_struct *work)
+{
+       struct efx_tc_counter *cnt = container_of(work, struct efx_tc_counter, work);
+       struct efx_tc_encap_action *encap;
+       struct efx_tc_action_set *act;
+       unsigned long touched;
+       struct neighbour *n;
+
+       /* cnt->lock protects the users list against concurrent add/remove
+        * from the rule (de)configuration paths, which take the same lock
+        * around list_add_tail()/list_del() on count_user.
+        */
+       spin_lock_bh(&cnt->lock);
+       touched = READ_ONCE(cnt->touched);
+
+       list_for_each_entry(act, &cnt->users, count_user) {
+               encap = act->encap_md;
+               if (!encap)
+                       continue;
+               if (!encap->neigh) /* can't happen */
+                       continue;
+               /* Entry already marked used at or after the last counter
+                * update; nothing new to report.
+                */
+               if (time_after_eq(encap->neigh->used, touched))
+                       continue;
+               encap->neigh->used = touched;
+               /* We have passed traffic using this ARP entry, so
+                * indicate to the ARP cache that it's still active
+                */
+               if (encap->neigh->dst_ip)
+                       n = neigh_lookup(&arp_tbl, &encap->neigh->dst_ip,
+                                        encap->neigh->egdev);
+               else
+#if IS_ENABLED(CONFIG_IPV6)
+                       n = neigh_lookup(ipv6_stub->nd_tbl,
+                                        &encap->neigh->dst_ip6,
+                                        encap->neigh->egdev);
+#else
+                       /* dst_ip == 0 implies an IPv6 destination, but IPv6
+                        * is compiled out, so there is no entry to refresh.
+                        */
+                       n = NULL;
+#endif
+               if (!n)
+                       continue;
+
+               /* Poke the neighbour state machine so the entry is kept
+                * fresh, then drop the reference taken by neigh_lookup().
+                */
+               neigh_event_send(n, NULL);
+               neigh_release(n);
+       }
+       spin_unlock_bh(&cnt->lock);
+}
+
 /* Counter allocation */
 
 static struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx,
                return ERR_PTR(-ENOMEM);
 
        spin_lock_init(&cnt->lock);
+       INIT_WORK(&cnt->work, efx_tc_counter_work);
        cnt->touched = jiffies;
        cnt->type = type;
 
        rc = efx_mae_allocate_counter(efx, cnt);
        if (rc)
                goto fail1;
+       INIT_LIST_HEAD(&cnt->users);
        rc = rhashtable_insert_fast(&efx->tc->counter_ht, &cnt->linkage,
                                    efx_tc_counter_ht_params);
        if (rc)
                netif_warn(efx, hw, efx->net_dev,
                           "Failed to free MAE counter %u, rc %d\n",
                           cnt->fw_id, rc);
+       WARN_ON(!list_empty(&cnt->users));
        /* This doesn't protect counter updates coming in arbitrarily long
         * after we deleted the counter.  The RCU just ensures that we won't
         * free the counter while another thread has a pointer to it.
         * is handled by the generation count.
         */
        synchronize_rcu();
+       flush_work(&cnt->work);
        EFX_WARN_ON_PARANOID(spin_is_locked(&cnt->lock));
        kfree(cnt);
 }
                cnt->touched = jiffies;
        }
        spin_unlock_bh(&cnt->lock);
+       schedule_work(&cnt->work);
 out:
        rcu_read_unlock();
 }