 #define TCA_ACT_FLAGS_BIND     (1U << (TCA_ACT_FLAGS_USER_BITS + 1))
 #define TCA_ACT_FLAGS_REPLACE  (1U << (TCA_ACT_FLAGS_USER_BITS + 2))
 #define TCA_ACT_FLAGS_NO_RTNL  (1U << (TCA_ACT_FLAGS_USER_BITS + 3))
+#define TCA_ACT_FLAGS_AT_INGRESS       (1U << (TCA_ACT_FLAGS_USER_BITS + 4))
 
 /* Update lastuse only if needed, to avoid dirtying a cache line.
  * We use a temp variable to avoid fetching jiffies twice.
 
        FLOW_ACTION_MARK,
        FLOW_ACTION_PTYPE,
        FLOW_ACTION_PRIORITY,
+       FLOW_ACTION_RX_QUEUE_MAPPING,
        FLOW_ACTION_WAKE,
        FLOW_ACTION_QUEUE,
        FLOW_ACTION_SAMPLE,
                u32                     csum_flags;     /* FLOW_ACTION_CSUM */
                u32                     mark;           /* FLOW_ACTION_MARK */
                u16                     ptype;          /* FLOW_ACTION_PTYPE */
+               u16                     rx_queue;       /* FLOW_ACTION_RX_QUEUE_MAPPING */
                u32                     priority;       /* FLOW_ACTION_PRIORITY */
                struct {                                /* FLOW_ACTION_QUEUE */
                        u32             ctx;
 
        return priority;
 }
 
+/* Return the rx queue mapping set by skbedit action; params read under RCU */
+static inline u16 tcf_skbedit_rx_queue_mapping(const struct tc_action *a)
+{
+       u16 rx_queue;
+
+       rcu_read_lock();
+       rx_queue = rcu_dereference(to_skbedit(a)->params)->queue_mapping;
+       rcu_read_unlock();
+
+       return rx_queue;
+}
+
 /* Return true iff action is queue_mapping */
 static inline bool is_tcf_skbedit_queue_mapping(const struct tc_action *a)
 {
        return is_tcf_skbedit_with_flag(a, SKBEDIT_F_QUEUE_MAPPING);
 }
 
+/* Return true if action is on ingress traffic */
+static inline bool is_tcf_skbedit_ingress(u32 flags)
+{
+       return flags & TCA_ACT_FLAGS_AT_INGRESS;
+}
+
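+/* Return true iff action is queue_mapping on egress (tx) traffic */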
+static inline bool is_tcf_skbedit_tx_queue_mapping(const struct tc_action *a)
+{
+       return is_tcf_skbedit_queue_mapping(a) &&
+              !is_tcf_skbedit_ingress(a->tcfa_flags);
+}
+
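+/* Return true iff action is queue_mapping on ingress (rx) traffic */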
+static inline bool is_tcf_skbedit_rx_queue_mapping(const struct tc_action *a)
+{
+       return is_tcf_skbedit_queue_mapping(a) &&
+              is_tcf_skbedit_ingress(a->tcfa_flags);
+}
+
 /* Return true iff action is inheritdsfield */
 static inline bool is_tcf_skbedit_inheritdsfield(const struct tc_action *a)
 {
 
        }
 
        if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
+               if (is_tcf_skbedit_ingress(act_flags) &&
+                   !(act_flags & TCA_ACT_FLAGS_SKIP_SW)) {
+                       NL_SET_ERR_MSG_MOD(extack, "\"queue_mapping\" option on receive side is hardware only, use skip_sw");
+                       return -EOPNOTSUPP;
+               }
                flags |= SKBEDIT_F_QUEUE_MAPPING;
                queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
        }
                } else if (is_tcf_skbedit_priority(act)) {
                        entry->id = FLOW_ACTION_PRIORITY;
                        entry->priority = tcf_skbedit_priority(act);
-               } else if (is_tcf_skbedit_queue_mapping(act)) {
-                       NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used");
+               } else if (is_tcf_skbedit_tx_queue_mapping(act)) {
+                       NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used on transmit side");
                        return -EOPNOTSUPP;
+               } else if (is_tcf_skbedit_rx_queue_mapping(act)) {
+                       entry->id = FLOW_ACTION_RX_QUEUE_MAPPING;
+                       entry->rx_queue = tcf_skbedit_rx_queue_mapping(act);
                } else if (is_tcf_skbedit_inheritdsfield(act)) {
                        NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"inheritdsfield\" option is used");
                        return -EOPNOTSUPP;
                        fl_action->id = FLOW_ACTION_PTYPE;
                else if (is_tcf_skbedit_priority(act))
                        fl_action->id = FLOW_ACTION_PRIORITY;
+               else if (is_tcf_skbedit_rx_queue_mapping(act))
+                       fl_action->id = FLOW_ACTION_RX_QUEUE_MAPPING;
                else
                        return -EOPNOTSUPP;
        }
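
For illustration, a minimal sketch of how a driver's flow_offload parser
could consume the new action id. The example_flow struct, dest_rx_queue
field, and function name are hypothetical; flow_action_for_each() and
struct flow_action_entry are the existing core API:

	static int example_parse_actions(struct flow_rule *rule,
					 struct example_flow *flow)
	{
		struct flow_action_entry *act;
		int i;

		flow_action_for_each(i, act, &rule->action) {
			switch (act->id) {
			case FLOW_ACTION_RX_QUEUE_MAPPING:
				/* program HW to deliver packets matching
				 * this rule to the requested rx queue
				 */
				flow->dest_rx_queue = act->rx_queue;
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		return 0;
	}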
 
                tp->ops->put(tp, fh);
 }
 
+/* Return true iff the filter's parent handle selects the ingress qdisc */
+static bool is_qdisc_ingress(__u32 classid)
+{
+       return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
+}
+
 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
                          struct netlink_ext_ack *extack)
 {
                flags |= TCA_ACT_FLAGS_REPLACE;
        if (!rtnl_held)
                flags |= TCA_ACT_FLAGS_NO_RTNL;
+       if (is_qdisc_ingress(parent))
+               flags |= TCA_ACT_FLAGS_AT_INGRESS;
        err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
                              flags, extack);
        if (err == 0) {
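
Example usage (interface, match fields, and queue id below are
placeholders). The receive-side queue_mapping rewrite happens in hardware
only, so skip_sw is mandatory per the check added in act_skbedit above:

	tc filter add dev eth0 ingress protocol ip flower \
		dst_ip 192.168.1.1/32 ip_proto udp dst_port 6000 \
		skip_sw action skbedit queue_mapping 2

Without skip_sw, the command is rejected with -EOPNOTSUPP.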