u32 block_index;
 };
 
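+/* A qevent ("queue event") binds a filter block to a spot in a qdisc where
+ * an event of interest takes place, such as a packet being early-dropped or
+ * marked. The block's filters are run by tcf_qevent_handle() when the event
+ * occurs.
+ */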
+struct tcf_qevent {
+       struct tcf_block        *block;
+       struct tcf_block_ext_info info;
+       struct tcf_proto __rcu *filter_chain;
+};
+
 struct tcf_block_cb;
 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
 
                          void *cb_priv, u32 *flags, unsigned int *in_hw_count);
 unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
 
+#ifdef CONFIG_NET_CLS_ACT
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+                   enum flow_block_binder_type binder_type,
+                   struct nlattr *block_index_attr,
+                   struct netlink_ext_ack *extack);
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
+int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+                              struct netlink_ext_ack *extack);
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+                                 spinlock_t *root_lock, struct sk_buff **to_free, int *ret);
+int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
+#else
+static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+                                 enum flow_block_binder_type binder_type,
+                                 struct nlattr *block_index_attr,
+                                 struct netlink_ext_ack *extack)
+{
+       return 0;
+}
+
+static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+}
+
+static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+                                            struct netlink_ext_ack *extack)
+{
+       return 0;
+}
+
+static inline struct sk_buff *
+tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+                 spinlock_t *root_lock, struct sk_buff **to_free, int *ret)
+{
+       return skb;
+}
+
+static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
+{
+       return 0;
+}
+#endif
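+
+/* A sketch of how a qdisc would typically wire up a qevent, assuming a
+ * member "struct tcf_qevent qe" in its private data. TCA_FOO_QE_BLOCK and
+ * FLOW_BLOCK_BINDER_TYPE_FOO stand in for whatever netlink attribute and
+ * binder type the qdisc defines:
+ *
+ * in ->change(), before applying the new configuration:
+ *        err = tcf_qevent_validate_change(&q->qe, tb[TCA_FOO_QE_BLOCK], extack);
+ *
+ * in ->init():
+ *        err = tcf_qevent_init(&q->qe, sch, FLOW_BLOCK_BINDER_TYPE_FOO,
+ *                              tb[TCA_FOO_QE_BLOCK], extack);
+ *
+ * at the point in ->enqueue() where the event occurs:
+ *        skb = tcf_qevent_handle(&q->qe, sch, skb, root_lock, to_free, &ret);
+ *        if (!skb)
+ *                return NET_XMIT_CN | ret;
+ *
+ * in ->dump() and ->destroy() respectively:
+ *        if (tcf_qevent_dump(skb, TCA_FOO_QE_BLOCK, &q->qe))
+ *                goto nla_put_failure;
+ *        tcf_qevent_destroy(&q->qe, sch);
+ */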
+
 struct tc_cls_u32_knode {
        struct tcf_exts *exts;
        struct tcf_result *res;
 
 }
 EXPORT_SYMBOL(tcf_exts_num_actions);
 
+#ifdef CONFIG_NET_CLS_ACT
+static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
+                                       u32 *p_block_index,
+                                       struct netlink_ext_ack *extack)
+{
+       *p_block_index = nla_get_u32(block_index_attr);
+       if (!*p_block_index) {
+               NL_SET_ERR_MSG(extack, "Block number may not be zero");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
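+/* Bind the qevent to the block identified by block_index_attr, with the
+ * given binder type describing the attachment point. A NULL attribute is
+ * not an error: it means no qevent was configured, and all of the helpers
+ * below then treat the qevent as a no-op.
+ */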
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+                   enum flow_block_binder_type binder_type,
+                   struct nlattr *block_index_attr,
+                   struct netlink_ext_ack *extack)
+{
+       u32 block_index;
+       int err;
+
+       if (!block_index_attr)
+               return 0;
+
+       err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
+       if (err)
+               return err;
+
+       qe->info.binder_type = binder_type;
+       qe->info.chain_head_change = tcf_chain_head_change_dflt;
+       qe->info.chain_head_change_priv = &qe->filter_chain;
+       qe->info.block_index = block_index;
+
+       return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
+}
+EXPORT_SYMBOL(tcf_qevent_init);
+
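+/* Undo tcf_qevent_init(): drop the block reference, if one was taken. */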
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+       if (qe->info.block_index)
+               tcf_block_put_ext(qe->block, sch, &qe->info);
+}
+EXPORT_SYMBOL(tcf_qevent_destroy);
+
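+/* Validate a change request against an existing qevent: once the qdisc is
+ * created, the qevent block binding may be kept as is (by omitting the
+ * attribute), but not added or changed.
+ */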
+int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+                              struct netlink_ext_ack *extack)
+{
+       u32 block_index;
+       int err;
+
+       if (!block_index_attr)
+               return 0;
+
+       err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
+       if (err)
+               return err;
+
+       /* Bounce a block configured where none was before, as well as any
+        * change of the bound block: rebinding is not supported.
+        */
+       if (block_index != qe->info.block_index) {
+               NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(tcf_qevent_validate_change);
+
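+/* Run the qevent's filters on skb at the point where the event occurs.
+ * Returns skb if the packet should continue down its usual path, or NULL
+ * if the filters consumed it, in which case *ret carries the __NET_XMIT_*
+ * verdict for the caller to propagate.
+ */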
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+                                 spinlock_t *root_lock, struct sk_buff **to_free, int *ret)
+{
+       struct tcf_result cl_res;
+       struct tcf_proto *fl;
+
+       if (!qe->info.block_index)
+               return skb;
+
+       fl = rcu_dereference_bh(qe->filter_chain);
+
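+       /* Classification runs without the root lock: actions on the block
+        * (mirred, for one) may re-enter this qdisc tree, and holding the
+        * lock across that could deadlock.
+        */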
+       if (root_lock)
+               spin_unlock(root_lock);
+
+       switch (tcf_classify(skb, fl, &cl_res, false)) {
+       case TC_ACT_SHOT:
+               qdisc_qstats_drop(sch);
+               __qdisc_drop(skb, to_free);
+               *ret = __NET_XMIT_BYPASS;
+               skb = NULL;
+               break;
+       case TC_ACT_STOLEN:
+       case TC_ACT_QUEUED:
+       case TC_ACT_TRAP:
+               __qdisc_drop(skb, to_free);
+               *ret = __NET_XMIT_STOLEN;
+               skb = NULL;
+               break;
+       case TC_ACT_REDIRECT:
+               skb_do_redirect(skb);
+               *ret = __NET_XMIT_STOLEN;
+               skb = NULL;
+               break;
+       }
+
+       /* Retake the lock on every path, including the consumed-skb cases
+        * above; the caller expects the lock state to be unchanged.
+        */
+       if (root_lock)
+               spin_lock(root_lock);
+
+       return skb;
+}
+EXPORT_SYMBOL(tcf_qevent_handle);
+
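+/* Put the bound block index on the netlink message under attr_name, if a
+ * block was configured for this qevent.
+ */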
+int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
+{
+       if (!qe->info.block_index)
+               return 0;
+       return nla_put_u32(skb, attr_name, qe->info.block_index);
+}
+EXPORT_SYMBOL(tcf_qevent_dump);
+#endif
+
 static __net_init int tcf_net_init(struct net *net)
 {
        struct tcf_net *tn = net_generic(net, tcf_net_id);