* Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
  */
 
+#include <linux/ethtool_netlink.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
        u32 flags;
        u64 min_rate[TC_QOPT_MAX_QUEUE];
        u64 max_rate[TC_QOPT_MAX_QUEUE];
+       u32 fp[TC_QOPT_MAX_QUEUE];
 };
 
 static int mqprio_enable_offload(struct Qdisc *sch,
                return -EINVAL;
        }
 
+       mqprio_fp_to_offload(priv->fp, &mqprio);
+
        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
                                            &mqprio);
        if (err)
        return 0;
 }
 
+static const struct
+nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
+       /* NLA_POLICY_MAX() is an inclusive bound. The per-TC arrays
+        * (priv->fp[], the on-stack fp[]) have TC_QOPT_MAX_QUEUE
+        * elements, so the largest valid index is TC_QOPT_MAX_QUEUE - 1;
+        * an inclusive max of TC_QOPT_MAX_QUEUE would allow an
+        * out-of-bounds write in mqprio_parse_tc_entry().
+        */
+       [TCA_MQPRIO_TC_ENTRY_INDEX]     = NLA_POLICY_MAX(NLA_U32,
+                                                        TC_QOPT_MAX_QUEUE - 1),
+       /* adminStatus per IEEE 802.1Q frame preemption: express or
+        * preemptible traffic class.
+        */
+       [TCA_MQPRIO_TC_ENTRY_FP]        = NLA_POLICY_RANGE(NLA_U32,
+                                                          TC_FP_EXPRESS,
+                                                          TC_FP_PREEMPTIBLE),
+};
+
 static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
        [TCA_MQPRIO_MODE]       = { .len = sizeof(u16) },
        [TCA_MQPRIO_SHAPER]     = { .len = sizeof(u16) },
        [TCA_MQPRIO_MIN_RATE64] = { .type = NLA_NESTED },
        [TCA_MQPRIO_MAX_RATE64] = { .type = NLA_NESTED },
+       /* Nested per-TC entries; inner attrs validated against
+        * mqprio_tc_entry_policy in mqprio_parse_tc_entry().
+        */
+       [TCA_MQPRIO_TC_ENTRY]   = { .type = NLA_NESTED },
 };
 
+/* Parse one TCA_MQPRIO_TC_ENTRY nest: extract the TC index and, when
+ * present, that TC's frame preemption adminStatus.
+ * @fp:       per-TC frame preemption status, updated in place
+ * @opt:      the TCA_MQPRIO_TC_ENTRY attribute to parse
+ * @seen_tcs: bitmap of TC indices already parsed, for duplicate detection
+ * @extack:   extended ack for error reporting
+ * Returns 0 on success or a negative errno.
+ */
+static int mqprio_parse_tc_entry(u32 fp[TC_QOPT_MAX_QUEUE],
+                                struct nlattr *opt,
+                                unsigned long *seen_tcs,
+                                struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[TCA_MQPRIO_TC_ENTRY_MAX + 1];
+       int err, tc;
+
+       err = nla_parse_nested(tb, TCA_MQPRIO_TC_ENTRY_MAX, opt,
+                              mqprio_tc_entry_policy, extack);
+       if (err < 0)
+               return err;
+
+       if (NL_REQ_ATTR_CHECK(extack, opt, tb, TCA_MQPRIO_TC_ENTRY_INDEX)) {
+               NL_SET_ERR_MSG(extack, "TC entry index missing");
+               return -EINVAL;
+       }
+
+       tc = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_INDEX]);
+       /* Explicit bound check rather than trusting the nla_policy alone:
+        * an inclusive policy max of TC_QOPT_MAX_QUEUE would let
+        * tc == TC_QOPT_MAX_QUEUE through and overflow fp[] below.
+        */
+       if (tc >= TC_QOPT_MAX_QUEUE) {
+               NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_TC_ENTRY_INDEX],
+                                   "TC entry index out of range");
+               return -EINVAL;
+       }
+
+       if (*seen_tcs & BIT(tc)) {
+               NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_TC_ENTRY_INDEX],
+                                   "Duplicate tc entry");
+               return -EINVAL;
+       }
+
+       *seen_tcs |= BIT(tc);
+
+       /* FP adminStatus is optional; absent means "leave unchanged". */
+       if (tb[TCA_MQPRIO_TC_ENTRY_FP])
+               fp[tc] = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_FP]);
+
+       return 0;
+}
+
+/* Parse all TCA_MQPRIO_TC_ENTRY attributes inside TCA_OPTIONS and apply
+ * the resulting per-TC frame preemption state to the qdisc.
+ * The new state is staged in a local array and committed to priv->fp[]
+ * only after every entry parsed cleanly and the device was confirmed to
+ * support preemption, so a rejected request leaves the qdisc unchanged.
+ * Returns 0 on success or a negative errno.
+ */
+static int mqprio_parse_tc_entries(struct Qdisc *sch, struct nlattr *nlattr_opt,
+                                  int nlattr_opt_len,
+                                  struct netlink_ext_ack *extack)
+{
+       struct mqprio_sched *priv = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       bool have_preemption = false;
+       unsigned long seen_tcs = 0;
+       u32 fp[TC_QOPT_MAX_QUEUE];
+       struct nlattr *n;
+       int tc, rem;
+       int err;
+
+       /* Seed the staging array with the current state so TCs not
+        * mentioned in the request keep their existing adminStatus.
+        */
+       for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+               fp[tc] = priv->fp[tc];
+
+       nla_for_each_attr(n, nlattr_opt, nlattr_opt_len, rem) {
+               if (nla_type(n) != TCA_MQPRIO_TC_ENTRY)
+                       continue;
+
+               err = mqprio_parse_tc_entry(fp, n, &seen_tcs, extack);
+               if (err)
+                       return err;
+       }
+
+       for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+               if (fp[tc] == TC_FP_PREEMPTIBLE)
+                       have_preemption = true;
+
+       if (have_preemption && !ethtool_dev_mm_supported(dev)) {
+               NL_SET_ERR_MSG(extack, "Device does not support preemption");
+               return -EOPNOTSUPP;
+       }
+
+       /* All validation passed; commit the staged state. */
+       for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+               priv->fp[tc] = fp[tc];
+
+       return 0;
+}
+
 /* Parse the other netlink attributes that represent the payload of
  * TCA_OPTIONS, which are appended right after struct tc_mqprio_qopt.
  */
                priv->flags |= TC_MQPRIO_F_MAX_RATE;
        }
 
+       if (tb[TCA_MQPRIO_TC_ENTRY]) {
+               err = mqprio_parse_tc_entries(sch, nlattr_opt, nlattr_opt_len,
+                                             extack);
+               if (err)
+                       return err;
+       }
+
        return 0;
 }
 
        int i, err = -EOPNOTSUPP;
        struct tc_mqprio_qopt *qopt = NULL;
        struct tc_mqprio_caps caps;
-       int len;
+       int len, tc;
 
        BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
        BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);
        if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
 
+       for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+               priv->fp[tc] = TC_FP_EXPRESS;
+
        qdisc_offload_query_caps(dev, TC_SETUP_QDISC_MQPRIO,
                                 &caps, sizeof(caps));
 
        return -1;
 }
 
+/* Emit one TCA_MQPRIO_TC_ENTRY nest per traffic class, carrying the TC
+ * index and its frame preemption adminStatus.
+ * Returns 0 on success, -EMSGSIZE if the skb ran out of room.
+ */
+static int mqprio_dump_tc_entries(struct mqprio_sched *priv,
+                                 struct sk_buff *skb)
+{
+       struct nlattr *nest;
+       int tc;
+
+       for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
+               nest = nla_nest_start(skb, TCA_MQPRIO_TC_ENTRY);
+               if (!nest)
+                       return -EMSGSIZE;
+
+               if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_INDEX, tc) ||
+                   nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_FP, priv->fp[tc])) {
+                       /* Roll back the partially-built nest. */
+                       nla_nest_cancel(skb, nest);
+                       return -EMSGSIZE;
+               }
+
+               nla_nest_end(skb, nest);
+       }
+
+       return 0;
+}
+
 static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        struct net_device *dev = qdisc_dev(sch);
            (dump_rates(priv, &opt, skb) != 0))
                goto nla_put_failure;
 
+       if (mqprio_dump_tc_entries(priv, skb))
+               goto nla_put_failure;
+
        return nla_nest_end(skb, nla);
 nla_put_failure:
        nlmsg_trim(skb, nla);