/* Copyright (c) 2019 Mellanox Technologies. */
 
 #include "en/params.h"
+#include "en/txrx.h"
+#include "en_accel/tls_rxtx.h"
 
 static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
                                   struct mlx5e_xsk_param *xsk)
 
        return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
 }
+
+u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+       bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
+       u16 stop_room;
+
+       stop_room  = mlx5e_tls_get_stop_room(mdev, params);
+       stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+       if (is_mpwqe)
+               /* A MPWQE can take up to the maximum-sized WQE on top of the
+                * normal stop room: a new packet that breaks the active MPWQE
+                * session allocates its own WQEs right away.
+                */
+               stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+
+       return stop_room;
+}
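
For context on the reservations above: mlx5e_stop_room_for_wqe() (presumably defined in en/txrx.h, which this patch now includes in en/params.c; it is not part of this diff) reserves space for a WQE plus the NOP padding it may need so it does not cross a page boundary. A minimal sketch of that idea, with the 2*X - 1 formula stated as an assumption rather than a quote of the driver:

/* Sketch only, not part of this patch. A send WQE must not cross a page
 * boundary: if a WQE of X WQEBBs does not fit in what is left of the
 * current page, up to X - 1 WQEBBs of NOP padding are posted first, so
 * the worst-case reservation is (X - 1) + X WQEBBs.
 */
static inline u16 stop_room_for_wqe_sketch(u16 wqe_size_in_wqebbs)
{
        return wqe_size_in_wqebbs * 2 - 1;
}
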
+
+int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params)
+{
+       size_t sq_size = 1 << params->log_sq_size;
+       u16 stop_room;
+
+       stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
+       if (stop_room >= sq_size) {
+               netdev_err(priv->netdev, "Stop room %hu is bigger than the SQ size %zu\n",
+                          stop_room, sq_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
 
        u32                        sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param       wq;
        bool                       is_mpw;
+       u16                        stop_room;
 };
 
 struct mlx5e_channel_param {
                             struct mlx5e_params *params,
                             struct mlx5e_sq_param *param);
 
+u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params);
+
 #endif /* __MLX5_EN_PARAMS_H__ */
 
        (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
 
 static u8
-mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags,
+mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
                          unsigned int sync_len)
 {
        /* Given the MTU and sync_len, calculates an upper bound for the
         * number of DUMP WQEs needed for the TX resync of a record.
         */
-       return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu);
+       return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
 }
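
To make the bound concrete, a worked example with assumed values (MAX_SKB_FRAGS of 17, TLS_MAX_PAYLOAD_SIZE of 16384 bytes, and a hardware MTU of roughly 1500 bytes describe a typical configuration; they are not values taken from this patch):

/* Worked example, assumed values only:
 * nfrags   = MAX_SKB_FRAGS        = 17
 * sync_len = TLS_MAX_PAYLOAD_SIZE = 16384
 * hw_mtu  ~= 1500
 * num_dumps = 17 + DIV_ROUND_UP(16384, 1500) = 17 + 11 = 28 DUMP WQEs
 * for the worst-case TX resync of a single TLS record.
 */
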
 
-u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq)
+u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params)
 {
        u16 num_dumps, stop_room = 0;
 
-       num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
+       num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
 
        stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
        stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
 
        u32 tls_tisn;
 };
 
-u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq);
+u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params);
 
 bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
                              struct sk_buff *skb, int datalen,
 
        *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
 }
 
-u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
-       struct mlx5_core_dev *mdev = sq->channel->mdev;
-
        if (!mlx5_accel_is_tls_device(mdev))
                return 0;
 
        if (mlx5_accel_is_ktls_device(mdev))
-               return mlx5e_ktls_get_stop_room(sq);
+               return mlx5e_ktls_get_stop_room(params);
 
        /* FPGA */
        /* Resync SKB. */
 
 #include "en.h"
 #include "en/txrx.h"
 
-u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq);
+u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 
 bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
                             struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
 static inline void
 mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
                        struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {}
-static inline u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
        return 0;
 }
 
 
 #include "en.h"
 #include "en/port.h"
+#include "en/params.h"
 #include "en/xsk/pool.h"
 #include "lib/clock.h"
 
        new_channels.params.log_rq_mtu_frames = log_rq_size;
        new_channels.params.log_sq_size = log_sq_size;
 
+       err = mlx5e_validate_params(priv, &new_channels.params);
+       if (err)
+               goto unlock;
+
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->channels.params = new_channels.params;
                goto unlock;
 
        return 0;
 }
 
-static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size)
-{
-       int sq_size = 1 << log_sq_size;
-
-       sq->stop_room  = mlx5e_tls_get_stop_room(sq);
-       sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
-       if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state))
-               /* A MPWQE can take up to the maximum-sized WQE + all the normal
-                * stop room can be taken if a new packet breaks the active
-                * MPWQE session and allocates its WQEs right away.
-                */
-               sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
-
-       if (WARN_ON(sq->stop_room >= sq_size)) {
-               netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n",
-                          sq->stop_room, sq_size);
-               return -ENOSPC;
-       }
-
-       return 0;
-}
-
 static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
 static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
                             int txq_ix,
                set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
        if (param->is_mpw)
                set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
-       err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size);
-       if (err)
-               return err;
+       sq->stop_room = param->stop_room;
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        MLX5_SET(sqc, sqc, allow_swp, allow_swp);
        param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
+       param->stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
        mlx5e_build_tx_cq_param(priv, params, &param->cqp);
 }
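
The stop room computed in mlx5e_build_sq_param() is only consumed later: mlx5e_alloc_txqsq() copies param->stop_room into sq->stop_room, and the TX datapath keeps stopping the queue when fewer WQEBBs than that remain free. A rough sketch of that check follows (the wrapper name is made up, and the helper and field names are assumptions about the surrounding driver, not part of this diff):

/* Sketch only: stop the netdev txq once fewer than sq->stop_room WQEBBs
 * are free, so the next transmit is guaranteed to fit a maximal WQE plus
 * any NOP padding it may need.
 */
static void txq_maybe_stop_sketch(struct mlx5e_txqsq *sq)
{
        if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
                                             sq->stop_room)))
                netif_tx_stop_queue(sq->txq);
}
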
 
 
        new_channels.params = *params;
        new_channels.params.sw_mtu = new_mtu;
+       err = mlx5e_validate_params(priv, &new_channels.params);
+       if (err)
+               goto out;
 
        if (params->xdp_prog &&
            !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {