static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
 {
-       int i;
+       int i, t;
        int err = 0;
 
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               priv->tx_cq[i]->moder_cnt = priv->tx_frames;
-               priv->tx_cq[i]->moder_time = priv->tx_usecs;
-               if (priv->port_up) {
-                       err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
-                       if (err)
-                               return err;
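+       /* Apply the same moderation settings to every TX ring type */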
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+               for (i = 0; i < priv->tx_ring_num[t]; i++) {
+                       priv->tx_cq[t][i]->moder_cnt = priv->tx_frames;
+                       priv->tx_cq[t][i]->moder_time = priv->tx_usecs;
+                       if (priv->port_up) {
+                               err = mlx4_en_set_cq_moder(priv,
+                                                          priv->tx_cq[t][i]);
+                               if (err)
+                                       return err;
+                       }
                }
        }
 
        switch (sset) {
        case ETH_SS_STATS:
                return bitmap_iterator_count(&it) +
-                       (priv->tx_ring_num * 2) +
+                       (priv->tx_ring_num[TX] * 2) +
                        (priv->rx_ring_num * 3);
        case ETH_SS_TEST:
                return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
                                        & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
                if (bitmap_iterator_test(&it))
                        data[index++] = ((unsigned long *)&priv->pkstats)[i];
 
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               data[index++] = priv->tx_ring[i]->packets;
-               data[index++] = priv->tx_ring[i]->bytes;
+       for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+               data[index++] = priv->tx_ring[TX][i]->packets;
+               data[index++] = priv->tx_ring[TX][i]->bytes;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                data[index++] = priv->rx_ring[i]->packets;
                                strcpy(data + (index++) * ETH_GSTRING_LEN,
                                       main_strings[strings]);
 
-               for (i = 0; i < priv->tx_ring_num; i++) {
+               for (i = 0; i < priv->tx_ring_num[TX]; i++) {
                        sprintf(data + (index++) * ETH_GSTRING_LEN,
                                "tx%d_packets", i);
                        sprintf(data + (index++) * ETH_GSTRING_LEN,
                                "tx%d_bytes", i);
 
        if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
                                        priv->rx_ring[0]->size) &&
-           tx_size == priv->tx_ring[0]->size)
+           tx_size == priv->tx_ring[TX][0]->size)
                return 0;
 
        tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
        param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
        param->rx_pending = priv->port_up ?
                priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
-       param->tx_pending = priv->tx_ring[0]->size;
+       param->tx_pending = priv->tx_ring[TX][0]->size;
 }
 
 static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
        channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
 
        channel->rx_count = priv->rx_ring_num;
-       channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
+       channel->tx_count = priv->tx_ring_num[TX] / MLX4_EN_NUM_UP;
 }
 
 static int mlx4_en_set_channels(struct net_device *dev,
        struct mlx4_en_port_profile new_prof;
        struct mlx4_en_priv *tmp;
        int port_up = 0;
+       int xdp_count;
        int err = 0;
 
        if (channel->other_count || channel->combined_count ||
            !channel->tx_count || !channel->rx_count)
                return -EINVAL;
 
-       if (channel->tx_count * MLX4_EN_NUM_UP <= priv->xdp_ring_num) {
-               en_err(priv, "Minimum %d tx channels required with XDP on\n",
-                      priv->xdp_ring_num / MLX4_EN_NUM_UP + 1);
-               return -EINVAL;
-       }
-
        tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;
 
        mutex_lock(&mdev->state_lock);
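+       /* With an XDP program loaded, one XDP TX ring is needed per RX channel */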
+       xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
+       if (channel->tx_count * MLX4_EN_NUM_UP + xdp_count > MAX_TX_RINGS) {
+               err = -EINVAL;
+               en_err(priv,
+                      "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
+                      channel->tx_count * MLX4_EN_NUM_UP + xdp_count,
+                      MAX_TX_RINGS);
+               goto out;
+       }
+
        memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
        new_prof.num_tx_rings_p_up = channel->tx_count;
-       new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+       new_prof.tx_ring_num[TX] = channel->tx_count * MLX4_EN_NUM_UP;
+       new_prof.tx_ring_num[TX_XDP] = xdp_count;
        new_prof.rx_ring_num = channel->rx_count;
 
        err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
        if (err)
                goto out;

        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }

        mlx4_en_safe_replace_resources(priv, tmp);
 
-       netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
-                                                       priv->xdp_ring_num);
+       netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
        if (dev->num_tc)
                mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
 
-       en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
+       en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
        en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
 
        if (port_up) {
                err = mlx4_en_start_port(dev);
                if (err)
                        en_err(priv, "Failed starting port\n");
        }
 
        err = mlx4_en_moderation_update(priv);
 out:
-       kfree(tmp);
        mutex_unlock(&mdev->state_lock);
+       kfree(tmp);
        return err;
 }
 
        int ret = 0;
 
        if (bf_enabled_new != bf_enabled_old) {
+               int t;
+
                if (bf_enabled_new) {
                        bool bf_supported = true;
 
-                       for (i = 0; i < priv->tx_ring_num; i++)
-                               bf_supported &= priv->tx_ring[i]->bf_alloced;
+                       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+                               for (i = 0; i < priv->tx_ring_num[t]; i++)
+                                       bf_supported &=
+                                               priv->tx_ring[t][i]->bf_alloced;
 
                        if (!bf_supported) {
                                en_err(priv, "BlueFlame is not supported\n");
                        priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
                }
 
-               for (i = 0; i < priv->tx_ring_num; i++)
-                       priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+               for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+                       for (i = 0; i < priv->tx_ring_num[t]; i++)
+                               priv->tx_ring[t][i]->bf_enabled =
+                                       bf_enabled_new;
 
                en_info(priv, "BlueFlame %s\n",
                        bf_enabled_new ?  "Enabled" : "Disabled");
 
        struct mlx4_en_cq *cq;
        int i;
 
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               cq = priv->tx_cq[i];
+       for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+               cq = priv->tx_cq[TX][i];
                napi_schedule(&cq->napi);
        }
 }
        if (netif_msg_timer(priv))
                en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
 
-       for (i = 0; i < priv->tx_ring_num; i++) {
+       for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+               struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];
+
                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
                        continue;
                en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
-                       i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
-                       priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
+                       i, tx_ring->qpn, tx_ring->cqn,
+                       tx_ring->cons, tx_ring->prod);
        }
 
        priv->port_stats.tx_timeout++;
 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 {
        struct mlx4_en_cq *cq;
-       int i;
+       int i, t;
 
        /* If we haven't received a specific coalescing setting
         * (module param), we set the moderation parameters as follows:
                priv->last_moder_bytes[i] = 0;
        }
 
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               cq = priv->tx_cq[i];
-               cq->moder_cnt = priv->tx_frames;
-               cq->moder_time = priv->tx_usecs;
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+               for (i = 0; i < priv->tx_ring_num[t]; i++) {
+                       cq = priv->tx_cq[t][i];
+                       cq->moder_cnt = priv->tx_frames;
+                       cq->moder_time = priv->tx_usecs;
+               }
        }
 
        /* Reset auto-moderation params */
 static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
                                      int tx_ring_idx)
 {
-       struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx];
-       int rr_index;
+       struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
+       int rr_index = tx_ring_idx;
 
-       rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx;
-       if (rr_index >= 0) {
-               tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
-               tx_ring->recycle_ring = priv->rx_ring[rr_index];
-               en_dbg(DRV, priv,
-                      "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n",
-                      tx_ring_idx, rr_index);
-       } else {
-               tx_ring->recycle_ring = NULL;
-       }
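+       /* XDP TX rings map 1:1 to RX rings, so rr_index is always in range */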
+       tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
+       tx_ring->recycle_ring = priv->rx_ring[rr_index];
+       en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
+              TX_XDP, tx_ring_idx, rr_index);
 }
 
 int mlx4_en_start_port(struct net_device *dev)
        struct mlx4_en_cq *cq;
        struct mlx4_en_tx_ring *tx_ring;
        int rx_index = 0;
-       int tx_index = 0;
        int err = 0;
-       int i;
+       int i, t;
        int j;
        u8 mc_list[16] = {0};
 
                goto rss_err;
 
        /* Configure tx cq's and rings */
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               /* Configure cq */
-               cq = priv->tx_cq[i];
-               err = mlx4_en_activate_cq(priv, cq, i);
-               if (err) {
-                       en_err(priv, "Failed allocating Tx CQ\n");
-                       goto tx_err;
-               }
-               err = mlx4_en_set_cq_moder(priv, cq);
-               if (err) {
-                       en_err(priv, "Failed setting cq moderation parameters\n");
-                       mlx4_en_deactivate_cq(priv, cq);
-                       goto tx_err;
-               }
-               en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
-               cq->buf->wqe_index = cpu_to_be16(0xffff);
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
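+               /* Regular TX rings are split into groups of
+                * num_tx_rings_p_up per user priority (UP); XDP rings
+                * all collapse into UP 0.
+                */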
+               u8 num_tx_rings_p_up = t == TX ?
+                       priv->num_tx_rings_p_up : priv->tx_ring_num[t];
 
-               /* Configure ring */
-               tx_ring = priv->tx_ring[i];
-               err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
-                       i / priv->num_tx_rings_p_up);
-               if (err) {
-                       en_err(priv, "Failed allocating Tx ring\n");
-                       mlx4_en_deactivate_cq(priv, cq);
-                       goto tx_err;
-               }
-               tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
-
-               mlx4_en_init_recycle_ring(priv, i);
+               for (i = 0; i < priv->tx_ring_num[t]; i++) {
+                       /* Configure cq */
+                       cq = priv->tx_cq[t][i];
+                       err = mlx4_en_activate_cq(priv, cq, i);
+                       if (err) {
+                               en_err(priv, "Failed allocating Tx CQ\n");
+                               goto tx_err;
+                       }
+                       err = mlx4_en_set_cq_moder(priv, cq);
+                       if (err) {
+                               en_err(priv, "Failed setting cq moderation parameters\n");
+                               mlx4_en_deactivate_cq(priv, cq);
+                               goto tx_err;
+                       }
+                       en_dbg(DRV, priv,
+                              "Resetting index of collapsed CQ:%d to -1\n", i);
+                       cq->buf->wqe_index = cpu_to_be16(0xffff);
+
+                       /* Configure ring */
+                       tx_ring = priv->tx_ring[t][i];
+                       err = mlx4_en_activate_tx_ring(priv, tx_ring,
+                                                      cq->mcq.cqn,
+                                                      i / num_tx_rings_p_up);
+                       if (err) {
+                               en_err(priv, "Failed allocating Tx ring\n");
+                               mlx4_en_deactivate_cq(priv, cq);
+                               goto tx_err;
+                       }
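+                       /* Regular rings attach to a netdev TX queue;
+                        * XDP rings recycle RX pages via a recycle ring
+                        * instead.
+                        */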
+                       if (t != TX_XDP) {
+                               tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+                               tx_ring->recycle_ring = NULL;
+                       } else {
+                               mlx4_en_init_recycle_ring(priv, i);
+                       }
 
-               /* Arm CQ for TX completions */
-               mlx4_en_arm_cq(priv, cq);
+                       /* Arm CQ for TX completions */
+                       mlx4_en_arm_cq(priv, cq);
 
-               /* Set initial ownership of all Tx TXBBs to SW (1) */
-               for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
-                       *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
-               ++tx_index;
+                       /* Set initial ownership of all Tx TXBBs to SW (1) */
+                       for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
+                               *((u32 *)(tx_ring->buf + j)) = 0xffffffff;
+               }
        }
 
        /* Configure port */
        return 0;
 
 tx_err:
-       while (tx_index--) {
-               mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
-               mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
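+       /* tx_err can also be reached from failures after the loops above
+        * have completed; in that case t == MLX4_EN_NUM_TX_TYPES and the
+        * unwind starts from the last ring of the last TX type.
+        */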
+       if (t == MLX4_EN_NUM_TX_TYPES) {
+               t--;
+               i = priv->tx_ring_num[t];
+       }
+       while (t >= 0) {
+               while (i--) {
+                       mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
+                       mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
+               }
+               if (!t--)
+                       break;
+               i = priv->tx_ring_num[t];
        }
        mlx4_en_destroy_drop_qp(priv);
 rss_err:
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_mc_list *mclist, *tmp;
        struct ethtool_flow_id *flow, *tmp_flow;
-       int i;
+       int i, t;
        u8 mc_list[16] = {0};
 
        if (!priv->port_up) {
                en_dbg(DRV, priv, "stop port called while port already down\n");
                return;
        }
        mlx4_en_destroy_drop_qp(priv);
 
        /* Free TX Rings */
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
-               mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+               for (i = 0; i < priv->tx_ring_num[t]; i++) {
+                       mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
+                       mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
+               }
        }
        msleep(10);
 
-       for (i = 0; i < priv->tx_ring_num; i++)
-               mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+               for (i = 0; i < priv->tx_ring_num[t]; i++)
+                       mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
 
        if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
                mlx4_en_delete_rss_steer_rules(priv);

static void mlx4_en_clear_stats(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_tx_ring **tx_ring;
        int i;
 
        if (!mlx4_is_slave(mdev->dev))
        memset(&priv->tx_priority_flowstats, 0,
               sizeof(priv->tx_priority_flowstats));
        memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
 
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               priv->tx_ring[i]->bytes = 0;
-               priv->tx_ring[i]->packets = 0;
-               priv->tx_ring[i]->tx_csum = 0;
-               priv->tx_ring[i]->tx_dropped = 0;
-               priv->tx_ring[i]->queue_stopped = 0;
-               priv->tx_ring[i]->wake_queue = 0;
-               priv->tx_ring[i]->tso_packets = 0;
-               priv->tx_ring[i]->xmit_more = 0;
+       tx_ring = priv->tx_ring[TX];
+       for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+               tx_ring[i]->bytes = 0;
+               tx_ring[i]->packets = 0;
+               tx_ring[i]->tx_csum = 0;
+               tx_ring[i]->tx_dropped = 0;
+               tx_ring[i]->queue_stopped = 0;
+               tx_ring[i]->wake_queue = 0;
+               tx_ring[i]->tso_packets = 0;
+               tx_ring[i]->xmit_more = 0;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                priv->rx_ring[i]->bytes = 0;
 
 static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
-       int i;
+       int i, t;
 
 #ifdef CONFIG_RFS_ACCEL
        priv->dev->rx_cpu_rmap = NULL;
 #endif
 
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               if (priv->tx_ring && priv->tx_ring[i])
-                       mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-               if (priv->tx_cq && priv->tx_cq[i])
-                       mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+               for (i = 0; i < priv->tx_ring_num[t]; i++) {
+                       if (priv->tx_ring[t] && priv->tx_ring[t][i])
+                               mlx4_en_destroy_tx_ring(priv,
+                                                       &priv->tx_ring[t][i]);
+                       if (priv->tx_cq[t] && priv->tx_cq[t][i])
+                               mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
+               }
        }
 
        for (i = 0; i < priv->rx_ring_num; i++) {
 static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
        struct mlx4_en_port_profile *prof = priv->prof;
-       int i;
+       int i, t;
        int node;
 
        /* Create tx Rings */
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               node = cpu_to_node(i % num_online_cpus());
-               if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
-                                     prof->tx_ring_size, i, TX, node))
-                       goto err;
-
-               if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
-                                          prof->tx_ring_size, TXBB_SIZE,
-                                          node, i))
-                       goto err;
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+               for (i = 0; i < priv->tx_ring_num[t]; i++) {
+                       node = cpu_to_node(i % num_online_cpus());
+                       if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
+                                             prof->tx_ring_size, i, t, node))
+                               goto err;
+
+                       if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
+                                                  prof->tx_ring_size,
+                                                  TXBB_SIZE, node, i))
+                               goto err;
+               }
        }
 
        /* Create rx Rings */
                if (priv->rx_cq[i])
                        mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
        }
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               if (priv->tx_ring[i])
-                       mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-               if (priv->tx_cq[i])
-                       mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+               for (i = 0; i < priv->tx_ring_num[t]; i++) {
+                       if (priv->tx_ring[t][i])
+                               mlx4_en_destroy_tx_ring(priv,
+                                                       &priv->tx_ring[t][i]);
+                       if (priv->tx_cq[t][i])
+                               mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
+               }
        }
        return -ENOMEM;
 }
                             struct mlx4_en_priv *src,
                             struct mlx4_en_port_profile *prof)
 {
+       int t;
+
        memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
               sizeof(dst->hwtstamp_config));
        dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
-       dst->tx_ring_num = prof->tx_ring_num;
        dst->rx_ring_num = prof->rx_ring_num;
        dst->flags = prof->flags;
        dst->mdev = src->mdev;
        dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
                                         DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
 
-       dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
-                               GFP_KERNEL);
-       if (!dst->tx_ring)
-               return -ENOMEM;
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+               dst->tx_ring_num[t] = prof->tx_ring_num[t];
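+               /* A TX type with zero rings (e.g. XDP disabled) needs no
+                * ring/CQ pointer arrays.
+                */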
+               if (!dst->tx_ring_num[t])
+                       continue;
 
-       dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
-                             GFP_KERNEL);
-       if (!dst->tx_cq) {
-               kfree(dst->tx_ring);
-               return -ENOMEM;
+               dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
+                                         MAX_TX_RINGS, GFP_KERNEL);
+               if (!dst->tx_ring[t])
+                       goto err_free_tx;
+
+               dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
+                                       MAX_TX_RINGS, GFP_KERNEL);
+               if (!dst->tx_cq[t]) {
+                       kfree(dst->tx_ring[t]);
+                       goto err_free_tx;
+               }
        }
+
        return 0;
+
+err_free_tx:
+       while (t--) {
+               kfree(dst->tx_ring[t]);
+               kfree(dst->tx_cq[t]);
+       }
+       return -ENOMEM;
 }
 
 static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
                                struct mlx4_en_priv *src)
 {
+       int t;
+
        memcpy(dst->rx_ring, src->rx_ring,
               sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
        memcpy(dst->rx_cq, src->rx_cq,
               sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
        memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
               sizeof(dst->hwtstamp_config));
-       dst->tx_ring_num = src->tx_ring_num;
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+               dst->tx_ring_num[t] = src->tx_ring_num[t];
+               dst->tx_ring[t] = src->tx_ring[t];
+               dst->tx_cq[t] = src->tx_cq[t];
+       }
        dst->rx_ring_num = src->rx_ring_num;
-       dst->tx_ring = src->tx_ring;
-       dst->tx_cq = src->tx_cq;
        memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
 }
 
                                struct mlx4_en_priv *tmp,
                                struct mlx4_en_port_profile *prof)
 {
+       int t;
+
        mlx4_en_copy_priv(tmp, priv, prof);
 
        if (mlx4_en_alloc_resources(tmp)) {
                en_warn(priv,
                        "%s: Resource allocation failed, using previous configuration\n",
                        __func__);
-               kfree(tmp->tx_ring);
-               kfree(tmp->tx_cq);
+               for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+                       kfree(tmp->tx_ring[t]);
+                       kfree(tmp->tx_cq[t]);
+               }
                return -ENOMEM;
        }
        return 0;
        struct mlx4_en_dev *mdev = priv->mdev;
        bool shutdown = mdev->dev->persist->interface_state &
                                            MLX4_INTERFACE_STATE_SHUTDOWN;
+       int t;
 
        en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
 
        mlx4_en_free_resources(priv);
 
-       kfree(priv->tx_ring);
-       kfree(priv->tx_cq);
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+               kfree(priv->tx_ring[t]);
+               kfree(priv->tx_cq[t]);
+       }
 
        if (!shutdown)
                free_netdev(dev);
        en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
                 dev->mtu, new_mtu);
 
-       if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
+       if (priv->tx_ring_num[TX_XDP] && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
                en_err(priv, "MTU size:%d requires frags but XDP running\n",
                       new_mtu);
                return -EOPNOTSUPP;
 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
+       struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
        struct mlx4_update_qp_params params;
        int err;
 

static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_port_profile new_prof;
        struct bpf_prog *old_prog;
+       struct mlx4_en_priv *tmp;
+       int tx_changed = 0;
        int xdp_ring_num;
        int port_up = 0;
        int err;
        int i;
 
-       xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0;
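+       /* One XDP TX ring per RX ring; no longer rounded up to a
+        * multiple of MLX4_EN_NUM_UP.
+        */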
+       xdp_ring_num = prog ? priv->rx_ring_num : 0;
 
        /* No need to reconfigure buffers when simply swapping the
         * program for a new one.
         */
-       if (priv->xdp_ring_num == xdp_ring_num) {
+       if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
                if (prog) {
                        prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
                        if (IS_ERR(prog))
                                return PTR_ERR(prog);
                return -EOPNOTSUPP;
        }
 
-       if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) {
-               en_err(priv,
-                      "Minimum %d tx channels required to run XDP\n",
-                      (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP);
-               return -EINVAL;
-       }
+       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
 
        if (prog) {
                prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
-               if (IS_ERR(prog))
-                       return PTR_ERR(prog);
+               if (IS_ERR(prog)) {
+                       err = PTR_ERR(prog);
+                       goto out;
+               }
        }
 
        mutex_lock(&mdev->state_lock);
+       memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+       new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
+
+       if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
+               tx_changed = 1;
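+               /* Round the XDP ring count up to a multiple of
+                * MLX4_EN_NUM_UP so the remaining TX rings stay evenly
+                * distributed among user priorities.
+                */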
+               new_prof.tx_ring_num[TX] =
+                       MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP);
+               en_warn(priv, "Reducing the number of TX rings so the total does not exceed the supported maximum\n");
+       }
+
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       if (err)
+               goto unlock_out;
+
        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }
 
-       priv->xdp_ring_num = xdp_ring_num;
-       netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
-                                                       priv->xdp_ring_num);
+       mlx4_en_safe_replace_resources(priv, tmp);
+       if (tx_changed)
+               netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
 
        for (i = 0; i < priv->rx_ring_num; i++) {
                old_prog = rcu_dereference_protected(
                                        priv->rx_ring[i]->xdp_prog,
                                        lockdep_is_held(&mdev->state_lock));
                }
        }
 
+unlock_out:
        mutex_unlock(&mdev->state_lock);
-       return 0;
+out:
+       kfree(tmp);
+       return err;
 }
 
 static bool mlx4_xdp_attached(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
-       return !!priv->xdp_ring_num;
+       return !!priv->tx_ring_num[TX_XDP];
 }
 
 static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
 {
        struct net_device *dev;
        struct mlx4_en_priv *priv;
-       int i;
+       int i, t;
        int err;
 
        dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
                                 MAX_TX_RINGS, MAX_RX_RINGS);
        if (dev == NULL)
                return -ENOMEM;
 
-       netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
+       netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
        netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
 
        SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
        priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
                        MLX4_WQE_CTRL_SOLICITED);
        priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
-       priv->tx_ring_num = prof->tx_ring_num;
        priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
        netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
 
-       priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
-                               GFP_KERNEL);
-       if (!priv->tx_ring) {
-               err = -ENOMEM;
-               goto out;
-       }
-       priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
-                             GFP_KERNEL);
-       if (!priv->tx_cq) {
-               err = -ENOMEM;
-               goto out;
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+               priv->tx_ring_num[t] = prof->tx_ring_num[t];
+               if (!priv->tx_ring_num[t])
+                       continue;
+
+               priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
+                                          MAX_TX_RINGS, GFP_KERNEL);
+               if (!priv->tx_ring[t]) {
+                       err = -ENOMEM;
+                       goto err_free_tx;
+               }
+               priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
+                                        MAX_TX_RINGS, GFP_KERNEL);
+               if (!priv->tx_cq[t]) {
+                       kfree(priv->tx_ring[t]);
+                       err = -ENOMEM;
+                       goto err_free_tx;
+               }
        }
        priv->rx_ring_num = prof->rx_ring_num;
        priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
        else
                dev->netdev_ops = &mlx4_netdev_ops;
        dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
-       netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+       netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
        dev->ethtool_ops = &mlx4_en_ethtool_ops;
        netif_carrier_off(dev);
        mlx4_en_set_default_moderation(priv);
 
-       en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+       en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
        en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
        mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
 
        return 0;
 
+err_free_tx:
+       while (t--) {
+               kfree(priv->tx_ring[t]);
+               kfree(priv->tx_cq[t]);
+       }
 out:
        mlx4_en_destroy_netdev(dev);
        return err;