 #include <linux/module.h>
 #include <linux/rhashtable.h>
 #include <linux/workqueue.h>
+#include <linux/refcount.h>
 
 #include <linux/if_ether.h>
 #include <linux/in6.h>
        u32 in_hw_count;
        struct rcu_work rwork;
        struct net_device *hw_dev;
+       /* Flower classifier is unlocked, which means that its reference counter
+        * can be changed concurrently without any kind of external
+        * synchronization. Use atomic reference counter to be concurrency-safe.
+        */
+       refcount_t refcnt;
 };
 
 static const struct rhashtable_params mask_ht_params = {
        return rcu_dereference_raw(tp->root);
 }
 
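+/* Release one reference to a filter. The final put frees the filter,
+ * deferring destruction to a workqueue when tcf_exts_get_net() succeeds
+ * and destroying it immediately otherwise.
+ */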
+static void __fl_put(struct cls_fl_filter *f)
+{
+       if (!refcount_dec_and_test(&f->refcnt))
+               return;
+
+       if (tcf_exts_get_net(&f->exts))
+               tcf_queue_work(&f->rwork, fl_destroy_filter_work);
+       else
+               __fl_destroy_filter(f);
+}
+
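+/* Look up a filter by handle under RCU and take a reference to it. Returns
+ * NULL if no filter is found or its refcount has already dropped to zero.
+ */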
+static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
+{
+       struct cls_fl_filter *f;
+
+       rcu_read_lock();
+       f = idr_find(&head->handle_idr, handle);
+       if (f && !refcount_inc_not_zero(&f->refcnt))
+               f = NULL;
+       rcu_read_unlock();
+
+       return f;
+}
+
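+/* Return the next filter at or after *handle with a reference taken,
+ * skipping filters whose refcount has already dropped to zero.
+ */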
+static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
+                                               unsigned long *handle)
+{
+       struct cls_fl_head *head = fl_head_dereference(tp);
+       struct cls_fl_filter *f;
+
+       rcu_read_lock();
+       while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
+               /* don't return filters that are being deleted */
+               if (refcount_inc_not_zero(&f->refcnt))
+                       break;
+               ++(*handle);
+       }
+       rcu_read_unlock();
+
+       return f;
+}
+
 static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
                        struct netlink_ext_ack *extack)
 {
        if (!tc_skip_hw(f->flags))
                fl_hw_destroy_filter(tp, f, extack);
        tcf_unbind_filter(tp, &f->res);
-       if (async)
-               tcf_queue_work(&f->rwork, fl_destroy_filter_work);
-       else
-               __fl_destroy_filter(f);
+       __fl_put(f);
 
        return last;
 }
        tcf_queue_work(&head->rwork, fl_destroy_sleepable);
 }
 
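+/* Classifier ->put() op: drop a filter reference, e.g. one previously
+ * taken by fl_get().
+ */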
+static void fl_put(struct tcf_proto *tp, void *arg)
+{
+       struct cls_fl_filter *f = arg;
+
+       __fl_put(f);
+}
+
 static void *fl_get(struct tcf_proto *tp, u32 handle)
 {
        struct cls_fl_head *head = fl_head_dereference(tp);
 
-       return idr_find(&head->handle_idr, handle);
+       return __fl_get(head, handle);
 }
 
 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
        struct nlattr **tb;
        int err;
 
-       if (!tca[TCA_OPTIONS])
-               return -EINVAL;
+       if (!tca[TCA_OPTIONS]) {
+               err = -EINVAL;
+               goto errout_fold;
+       }
 
        mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
-       if (!mask)
-               return -ENOBUFS;
+       if (!mask) {
+               err = -ENOBUFS;
+               goto errout_fold;
+       }
 
        fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
        if (!fnew) {
                err = -ENOBUFS;
                goto errout_tb;
        }
+       refcount_set(&fnew->refcnt, 1);
 
        err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
        if (err < 0)
        if (!tc_in_hw(fnew->flags))
                fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
 
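+       /* Take an additional reference on fnew before linking it into the
+        * classifier data structures below.
+        */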
+       refcount_inc(&fnew->refcnt);
        if (fold) {
                fnew->handle = handle;
 
                        fl_hw_destroy_filter(tp, fold, NULL);
                tcf_unbind_filter(tp, &fold->res);
                tcf_exts_get_net(&fold->exts);
-               tcf_queue_work(&fold->rwork, fl_destroy_filter_work);
+               /* Caller holds reference to fold, so refcnt is always > 0
+                * after this.
+                */
+               refcount_dec(&fold->refcnt);
+               __fl_put(fold);
        } else {
                if (__fl_lookup(fnew->mask, &fnew->mkey)) {
                        err = -EEXIST;
        kfree(tb);
 errout_mask_alloc:
        kfree(mask);
+errout_fold:
+       if (fold)
+               __fl_put(fold);
        return err;
 }
 
                               f->mask->filter_ht_params);
        __fl_delete(tp, f, extack);
        *last = list_empty(&head->masks);
+       __fl_put(f);
+
        return 0;
 }
 
 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
                    bool rtnl_held)
 {
-       struct cls_fl_head *head = fl_head_dereference(tp);
        struct cls_fl_filter *f;
 
        arg->count = arg->skip;
 
-       while ((f = idr_get_next_ul(&head->handle_idr,
-                                   &arg->cookie)) != NULL) {
+       while ((f = fl_get_next_filter(tp, &arg->cookie)) != NULL) {
                if (arg->fn(tp, f, arg) < 0) {
+                       __fl_put(f);
                        arg->stop = 1;
                        break;
                }
-               arg->cookie = f->handle + 1;
+               __fl_put(f);
+               arg->cookie++;
                arg->count++;
        }
 }
        .init           = fl_init,
        .destroy        = fl_destroy,
        .get            = fl_get,
+       .put            = fl_put,
        .change         = fl_change,
        .delete         = fl_delete,
        .walk           = fl_walk,