u16 queue_mapping;
        unsigned int needed_headroom;
        u32 fd_len;
+       u8 prio = 0;
        int err, i;
 
        percpu_stats = this_cpu_ptr(priv->percpu_stats);
         * a queue affined to the same core that processed the Rx frame
         */
        queue_mapping = skb_get_queue_mapping(skb);
+
+       if (net_dev->num_tc) {
+               prio = netdev_txq_to_tc(net_dev, queue_mapping);
+               /* Hardware interprets priority level 0 as being the highest,
+                * so we need to do a reverse mapping to the netdev tc index
+                */
+               prio = net_dev->num_tc - prio - 1;
+               /* We have only one FQ array entry for all Tx hardware queues
+                * with the same flow id (but different priority levels)
+                */
+               queue_mapping %= dpaa2_eth_queue_count(priv);
+       }
        fq = &priv->fq[queue_mapping];
 
        fd_len = dpaa2_fd_get_len(&fd);
         * the Tx confirmation callback for this frame
         */
        for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-               err = priv->enqueue(priv, fq, &fd, 0);
+               err = priv->enqueue(priv, fq, &fd, prio);
                if (err != -EBUSY)
                        break;
        }
        struct net_device *net_dev = priv->net_dev;
        struct cpumask xps_mask;
        struct dpaa2_eth_fq *fq;
-       int i, num_queues;
+       int i, num_queues, netdev_queues;
        int err = 0;
 
        num_queues = dpaa2_eth_queue_count(priv);
+       netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
 
-       /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
-        * queues, so only process those
-        */
+       /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
+        * queues; each netdev tx queue i maps to fq entry (i % num_queues),
+        * so walk all netdev queues but wrap around the fq array
+        */
-       for (i = 0; i < num_queues; i++) {
-               fq = &priv->fq[i];
+       for (i = 0; i < netdev_queues; i++) {
+               fq = &priv->fq[i % num_queues];
 
                cpumask_clear(&xps_mask);
                cpumask_set_cpu(fq->target_cpu, &xps_mask);
        return err;
 }
 
+/* ndo_setup_tc callback: configure mqprio hardware offload.
+ * Each traffic class is mapped to a contiguous range of <num_queues>
+ * netdev Tx queues; priority-to-hw translation happens on the Tx path.
+ * Returns 0 on success or a negative errno.
+ */
+static int dpaa2_eth_setup_tc(struct net_device *net_dev,
+                             enum tc_setup_type type, void *type_data)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct tc_mqprio_qopt *mqprio = type_data;
+       u8 num_tc, num_queues;
+       int i, err;
+
+       if (type != TC_SETUP_QDISC_MQPRIO)
+               return -EINVAL;
+
+       mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+       num_queues = dpaa2_eth_queue_count(priv);
+       num_tc = mqprio->num_tc;
+
+       /* Requested tc configuration already in place, nothing to do */
+       if (num_tc == net_dev->num_tc)
+               return 0;
+
+       if (num_tc > dpaa2_eth_tc_count(priv)) {
+               netdev_err(net_dev, "Max %d traffic classes supported\n",
+                          dpaa2_eth_tc_count(priv));
+               return -EINVAL;
+       }
+
+       if (!num_tc) {
+               /* Tear down tc mapping, back to one txq per flow */
+               netdev_reset_tc(net_dev);
+               err = netif_set_real_num_tx_queues(net_dev, num_queues);
+               if (err)
+                       return err;
+               goto out;
+       }
+
+       netdev_set_num_tc(net_dev, num_tc);
+       /* netif_set_real_num_tx_queues() can fail (e.g. count above
+        * dev->num_tx_queues); propagate the error instead of ignoring it
+        */
+       err = netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
+       if (err)
+               return err;
+
+       /* tc i owns txqs [i * num_queues, (i + 1) * num_queues) */
+       for (i = 0; i < num_tc; i++)
+               netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
+
+out:
+       update_xps(priv);
+
+       return 0;
+}
+
static const struct net_device_ops dpaa2_eth_ops = {
        .ndo_open = dpaa2_eth_open,
        .ndo_start_xmit = dpaa2_eth_tx,
        .ndo_change_mtu = dpaa2_eth_change_mtu,
        .ndo_bpf = dpaa2_eth_xdp,
        .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+       /* mqprio (tc) hardware offload configuration */
+       .ndo_setup_tc = dpaa2_eth_setup_tc,
};
 
 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
        dev = &dpni_dev->dev;
 
        /* Net device */
-       net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
+       net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
        if (!net_dev) {
                dev_err(dev, "alloc_etherdev_mq() failed\n");
                return -ENOMEM;