 static LIST_HEAD(mirred_list);
 static DEFINE_SPINLOCK(mirred_list_lock);
 
+#define MIRRED_RECURSION_LIMIT    4
+static DEFINE_PER_CPU(unsigned int, mirred_rec_level);
+
 static bool tcf_mirred_is_act_redirect(int action)
 {
        return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
@@ ... @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
        struct sk_buff *skb2 = skb;
        bool m_mac_header_xmit;
        struct net_device *dev;
+       unsigned int rec_level;
        int retval, err = 0;
        bool use_reinsert;
        bool want_ingress;
        int m_eaction;
        int mac_len;
 
+       rec_level = __this_cpu_inc_return(mirred_rec_level);
+       if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
+               net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
+                                    netdev_name(skb->dev));
+               __this_cpu_dec(mirred_rec_level);
+               return TC_ACT_SHOT;
+       }
+
        tcf_lastuse_update(&m->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
 
@@ ... @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
                        res->ingress = want_ingress;
                        res->qstats = this_cpu_ptr(m->common.cpu_qstats);
                        skb_tc_reinsert(skb, res);
+                       __this_cpu_dec(mirred_rec_level);
                        return TC_ACT_CONSUMED;
                }
        }
@@ ... @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
                if (tcf_mirred_is_act_redirect(m_eaction))
                        retval = TC_ACT_SHOT;
        }
+       __this_cpu_dec(mirred_rec_level);
 
        return retval;
 }
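
The per-CPU counter added above bounds how many times act_mirred can re-enter itself on one CPU before the packet is dropped, and every return path decrements it so the next packet starts from zero. As a rough illustration of the same pattern outside the kernel, the sketch below uses a C11 thread-local counter in place of DEFINE_PER_CPU; the function names, the limit value, and the -1 return standing in for TC_ACT_SHOT are illustrative only and are not part of the patch.

/*
 * Userspace sketch of the recursion guard used in the patch: bump a
 * per-context counter on entry, refuse to go deeper than a fixed
 * limit, and decrement the counter on every exit path. A C11
 * thread-local variable stands in for the kernel's per-CPU counter;
 * all names here are illustrative.
 */
#include <stdio.h>

#define RECURSION_LIMIT 4

static _Thread_local unsigned int rec_level;

static int act(int depth)
{
	int ret = 0;

	if (++rec_level > RECURSION_LIMIT) {
		fprintf(stderr, "recursion limit exceeded at depth %d\n", depth);
		rec_level--;
		return -1;	/* analogous to returning TC_ACT_SHOT */
	}

	/* a mirred->mirred redirect loop would keep re-entering here */
	if (depth < 10)
		ret = act(depth + 1);

	rec_level--;		/* balance the increment on every path */
	return ret;
}

int main(void)
{
	/* the nested calls are cut off once the counter exceeds the limit */
	return act(0) == -1 ? 0 : 1;
}

The property this mirrors from the patch is that the counter is restored on every path out of the function, including the early drop and the reinsert case, so hitting the limit on one packet does not poison later packets handled by the same CPU.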