#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
 
+enum mlx5e_packet_merge_type {
+       MLX5E_PACKET_MERGE_NONE,
+       MLX5E_PACKET_MERGE_LRO,
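+       /* SHAMPO is the device's HW-GRO packet merge mode */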
+       MLX5E_PACKET_MERGE_SHAMPO,
+};
+
+struct mlx5e_packet_merge_param {
+       enum mlx5e_packet_merge_type type;
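+       /* Merge timeout in usecs, applied via the TIR's lro_timeout_period_usecs */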
+       u32 timeout;
+};
+
 struct mlx5e_params {
        u8  log_sq_size;
        u8  rq_wq_type;
        bool tunneled_offload_en;
        struct dim_cq_moder rx_cq_moderation;
        struct dim_cq_moder tx_cq_moderation;
-       bool lro_en;
+       struct mlx5e_packet_merge_param packet_merge;
        u8  tx_min_inline_mode;
        bool vlan_strip_disable;
        bool scatter_fcs_en;
        bool rx_dim_enabled;
        bool tx_dim_enabled;
-       u32 packet_merge_timeout;
        u32 pflags;
        struct bpf_prog *xdp_prog;
        struct mlx5e_xsk *xsk;
 
        u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
                                 mlx5e_rx_get_linear_frag_sz(params, NULL));
 
-       return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
+       return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE &&
+               linear_frag_sz <= PAGE_SIZE;
 }
 
 bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
                mlx5e_rx_is_linear_skb(params, xsk) :
                mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
 
-       return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
-}
-
-struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params)
-{
-       struct mlx5e_lro_param lro_param;
-
-       lro_param = (struct mlx5e_lro_param) {
-               .enabled = params->lro_en,
-               .timeout = params->packet_merge_timeout,
-       };
-
-       return lro_param;
+       return is_linear_skb || params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO ?
+               mlx5e_get_linear_rq_headroom(params, xsk) : 0;
 }
 
 u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 
 static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
+       bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
        bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
                MLX5_CAP_GEN(mdev, relaxed_ordering_write);
 
-       return ro && params->lro_en ?
+       return ro && lro_en ?
                MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
 }
 
 
        u16 chunk_size;
 };
 
-struct mlx5e_lro_param {
-       bool enabled;
-       u32 timeout;
-};
-
 struct mlx5e_cq_param {
        u32                        cqc[MLX5_ST_SZ_DW(cqc)];
        struct mlx5_wq_param       wq;
 u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
                          struct mlx5e_params *params,
                          struct mlx5e_xsk_param *xsk);
-struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params);
 
 /* Build queue parameters */
 
 
 
 static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
                                enum mlx5_traffic_types tt,
-                               const struct mlx5e_lro_param *init_lro_param,
+                               const struct mlx5e_packet_merge_param *init_pkt_merge_param,
                                bool inner)
 {
        struct mlx5e_rss_params_traffic_type rss_tt;
        rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
        mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
                                    rqtn, rss->inner_ft_support);
-       mlx5e_tir_builder_build_lro(builder, init_lro_param);
+       mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
        rss_tt = mlx5e_rss_get_tt_config(rss, tt);
        mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
 
 }
 
 static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
-                                const struct mlx5e_lro_param *init_lro_param,
+                                const struct mlx5e_packet_merge_param *init_pkt_merge_param,
                                 bool inner)
 {
        enum mlx5_traffic_types tt, max_tt;
        int err;
 
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
-               err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner);
+               err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
                if (err)
                        goto err_destroy_tirs;
        }
 
 int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
                   bool inner_ft_support, u32 drop_rqn,
-                  const struct mlx5e_lro_param *init_lro_param)
+                  const struct mlx5e_packet_merge_param *init_pkt_merge_param)
 {
        int err;
 
        if (err)
                goto err_out;
 
-       err = mlx5e_rss_create_tirs(rss, init_lro_param, false);
+       err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
        if (err)
                goto err_destroy_rqt;
 
        if (inner_ft_support) {
-               err = mlx5e_rss_create_tirs(rss, init_lro_param, true);
+               err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, true);
                if (err)
                        goto err_destroy_tirs;
        }
  */
 int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
                          enum mlx5_traffic_types tt,
-                         const struct mlx5e_lro_param *init_lro_param,
+                         const struct mlx5e_packet_merge_param *init_pkt_merge_param,
                          bool inner, u32 *tirn)
 {
        struct mlx5e_tir *tir;
        if (!tir) { /* TIR doesn't exist, create one */
                int err;
 
-               err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner);
+               err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
                if (err)
                        return err;
                tir = rss_get_tir(rss, tt, inner);
                               mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
 }
 
-int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param)
+int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
+                                    struct mlx5e_packet_merge_param *pkt_merge_param)
 {
        struct mlx5e_tir_builder *builder;
        enum mlx5_traffic_types tt;
        if (!builder)
                return -ENOMEM;
 
-       mlx5e_tir_builder_build_lro(builder, lro_param);
+       mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
 
        final_err = 0;
 
 
 void mlx5e_rss_free(struct mlx5e_rss *rss);
 int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
                   bool inner_ft_support, u32 drop_rqn,
-                  const struct mlx5e_lro_param *init_lro_param);
+                  const struct mlx5e_packet_merge_param *init_pkt_merge_param);
 int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
                           bool inner_ft_support, u32 drop_rqn);
 int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
                       bool inner);
 int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
                          enum mlx5_traffic_types tt,
-                         const struct mlx5e_lro_param *init_lro_param,
+                         const struct mlx5e_packet_merge_param *init_pkt_merge_param,
                          bool inner, u32 *tirn);
 
 void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns);
 void mlx5e_rss_disable(struct mlx5e_rss *rss);
 
-int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param);
+int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
+                                    struct mlx5e_packet_merge_param *pkt_merge_param);
 int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
 int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
                       const u8 *key, const u8 *hfunc,
 
 /* API for rx_res_rss_* */
 
 static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
-                                    const struct mlx5e_lro_param *init_lro_param,
+                                    const struct mlx5e_packet_merge_param *init_pkt_merge_param,
                                     unsigned int init_nch)
 {
        bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
                return -ENOMEM;
 
        err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
-                            init_lro_param);
+                            init_pkt_merge_param);
        if (err)
                goto err_rss_free;
 
 }
 
 static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
-                                     const struct mlx5e_lro_param *init_lro_param)
+                                     const struct mlx5e_packet_merge_param *init_pkt_merge_param)
 {
        bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
        struct mlx5e_tir_builder *builder;
                mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
                                            mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
                                            inner_ft_support);
-               mlx5e_tir_builder_build_lro(builder, init_lro_param);
+               mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
                mlx5e_tir_builder_build_direct(builder);
 
                err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
                mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
                                            mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
                                            inner_ft_support);
-               mlx5e_tir_builder_build_lro(builder, init_lro_param);
+               mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
                mlx5e_tir_builder_build_direct(builder);
 
                err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
 
 int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
                      enum mlx5e_rx_res_features features, unsigned int max_nch,
-                     u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
+                     u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
                      unsigned int init_nch)
 {
        int err;
        res->max_nch = max_nch;
        res->drop_rqn = drop_rqn;
 
-       err = mlx5e_rx_res_rss_init_def(res, init_lro_param, init_nch);
+       err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
        if (err)
                goto err_out;
 
-       err = mlx5e_rx_res_channels_init(res, init_lro_param);
+       err = mlx5e_rx_res_channels_init(res, init_pkt_merge_param);
        if (err)
                goto err_rss_destroy;
 
        return err;
 }
 
-int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param)
+int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
+                                       struct mlx5e_packet_merge_param *pkt_merge_param)
 {
        struct mlx5e_tir_builder *builder;
        int err, final_err;
        if (!builder)
                return -ENOMEM;
 
-       mlx5e_tir_builder_build_lro(builder, lro_param);
+       mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
 
        final_err = 0;
 
                if (!rss)
                        continue;
 
-               err = mlx5e_rss_lro_set_param(rss, lro_param);
+               err = mlx5e_rss_packet_merge_set_param(rss, pkt_merge_param);
                if (err)
                        final_err = final_err ? : err;
        }
        for (ix = 0; ix < res->max_nch; ix++) {
                err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
                if (err) {
-                       mlx5_core_warn(res->mdev, "Failed to update LRO state of direct TIR %#x for channel %u: err = %d\n",
+                       mlx5_core_warn(res->mdev, "Failed to update packet merge state of direct TIR %#x for channel %u: err = %d\n",
                                       mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
                        if (!final_err)
                                final_err = err;
 
 struct mlx5e_rx_res *mlx5e_rx_res_alloc(void);
 int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
                      enum mlx5e_rx_res_features features, unsigned int max_nch,
-                     u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
+                     u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
                      unsigned int init_nch);
 void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
 void mlx5e_rx_res_free(struct mlx5e_rx_res *res);
 u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
 int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt,
                                     u8 rx_hash_fields);
-int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param);
+int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
+                                       struct mlx5e_packet_merge_param *pkt_merge_param);
 
 int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch);
 int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx);
 
        MLX5_SET(tirc, tirc, tunneled_offload_en, inner_ft_support);
 }
 
-void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
-                                const struct mlx5e_lro_param *lro_param)
+void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
+                                         const struct mlx5e_packet_merge_param *pkt_merge_param)
 {
        void *tirc = mlx5e_tir_builder_get_tirc(builder);
        const unsigned int rough_max_l2_l3_hdr_sz = 256;
 
        if (builder->modify)
-               MLX5_SET(modify_tir_in, builder->in, bitmask.lro, 1);
+               MLX5_SET(modify_tir_in, builder->in, bitmask.packet_merge, 1);
 
-       if (!lro_param->enabled)
+       if (pkt_merge_param->type == MLX5E_PACKET_MERGE_NONE)
                return;
 
        MLX5_SET(tirc, tirc, packet_merge_mask,
                 MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO |
                 MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
                 (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
-       MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout);
+       MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
 }
 
 static int mlx5e_hfunc_to_hw(u8 hfunc)
 
 };
 
 struct mlx5e_tir_builder;
-struct mlx5e_lro_param;
+struct mlx5e_packet_merge_param;
 
 struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify);
 void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder);
 void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn);
 void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
                                 u32 rqtn, bool inner_ft_support);
-void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
-                                const struct mlx5e_lro_param *lro_param);
+void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
+                                         const struct mlx5e_packet_merge_param *pkt_merge_param);
 void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
                                 const struct mlx5e_rss_params_hash *rss_hash,
                                 const struct mlx5e_rss_params_traffic_type *rss_tt,
 
                        return -EOPNOTSUPP;
                if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
                        return -EINVAL;
-       } else if (priv->channels.params.lro_en) {
-               netdev_warn(netdev, "Can't set legacy RQ with LRO, disable LRO first\n");
+       } else if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
+               netdev_warn(netdev, "Can't set legacy RQ with HW-GRO/LRO, disable them first\n");
                return -EINVAL;
        }
 
 
                         u32 rss_context, u32 *tirn)
 {
        if (fs->flow_type & FLOW_RSS) {
-               struct mlx5e_lro_param lro_param;
+               struct mlx5e_packet_merge_param pkt_merge_param;
                struct mlx5e_rss *rss;
                u32 flow_type;
                int err;
                if (tt < 0)
                        return -EINVAL;
 
-               lro_param = mlx5e_get_lro_param(&priv->channels.params);
-               err = mlx5e_rss_obtain_tirn(rss, tt, &lro_param, false, tirn);
+               pkt_merge_param = priv->channels.params.packet_merge;
+               err = mlx5e_rss_obtain_tirn(rss, tt, &pkt_merge_param, false, tirn);
                if (err)
                        return err;
                eth_rule->rss = rss;
 
        chs->num = 0;
 }
 
-static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
+static int mlx5e_modify_tirs_packet_merge(struct mlx5e_priv *priv)
 {
        struct mlx5e_rx_res *res = priv->rx_res;
-       struct mlx5e_lro_param lro_param;
 
-       lro_param = mlx5e_get_lro_param(&priv->channels.params);
-
-       return mlx5e_rx_res_lro_set_param(res, &lro_param);
+       return mlx5e_rx_res_packet_merge_set_param(res, &priv->channels.params.packet_merge);
 }
 
-static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);
+static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_packet_merge);
 
 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
                         struct mlx5e_params *params, u16 mtu)
        }
 
        new_params = *cur_params;
-       new_params.lro_en = enable;
 
-       if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
-               if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
-                   mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
-                       reset = false;
+       if (enable)
+               new_params.packet_merge.type = MLX5E_PACKET_MERGE_LRO;
+       else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)
+               new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
+       else
+               goto out;
+
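+       /* Enabling LRO on top of HW-GRO (SHAMPO) always requires a channel
+        * reset; in the remaining cases a striding RQ whose linear-SKB
+        * decision does not change can skip the reset.
+        */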
+       if (!(cur_params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO &&
+             new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)) {
+               if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+                       if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
+                           mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
+                               reset = false;
+               }
        }
 
        err = mlx5e_safe_switch_params(priv, &new_params,
-                                      mlx5e_modify_tirs_lro_ctx, NULL, reset);
+                                      mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
 out:
        mutex_unlock(&priv->state_lock);
        return err;
                goto out;
        }
 
-       if (params->lro_en)
+       if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
                reset = false;
 
        if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
        struct net_device *netdev = priv->netdev;
        struct mlx5e_params new_params;
 
-       if (priv->channels.params.lro_en) {
-               netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+       if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
+               netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n");
                return -EINVAL;
        }
 
            params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
                /* No XSK params: checking the availability of striding RQ in general. */
                if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
-                       params->lro_en = !slow_pci_heuristic(mdev);
+                       params->packet_merge.type = slow_pci_heuristic(mdev) ?
+                               MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
        }
-       params->packet_merge_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+       params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
 
        /* CQ moderation params */
        rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        enum mlx5e_rx_res_features features;
-       struct mlx5e_lro_param lro_param;
        int err;
 
        priv->rx_res = mlx5e_rx_res_alloc();
        features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP;
        if (priv->channels.params.tunneled_offload_en)
                features |= MLX5E_RX_RES_FEATURE_INNER_FT;
-       lro_param = mlx5e_get_lro_param(&priv->channels.params);
        err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
-                               priv->max_nch, priv->drop_rq.rqn, &lro_param,
+                               priv->max_nch, priv->drop_rq.rqn,
+                               &priv->channels.params.packet_merge,
                                priv->channels.params.num_channels);
        if (err)
                goto err_close_drop_rq;
 
 static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
-       struct mlx5e_lro_param lro_param;
        int err;
 
        priv->rx_res = mlx5e_rx_res_alloc();
                return err;
        }
 
-       lro_param = mlx5e_get_lro_param(&priv->channels.params);
        err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
-                               priv->max_nch, priv->drop_rq.rqn, &lro_param,
+                               priv->max_nch, priv->drop_rq.rqn,
+                               &priv->channels.params.packet_merge,
                                priv->channels.params.num_channels);
        if (err)
                goto err_close_drop_rq;
 
                MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
 
-       params->lro_en = false;
+       params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
        params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
        params->tunneled_offload_en = false;
 }
 static int mlx5i_init_rx(struct mlx5e_priv *priv)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
-       struct mlx5e_lro_param lro_param;
        int err;
 
        priv->rx_res = mlx5e_rx_res_alloc();
                goto err_destroy_q_counters;
        }
 
-       lro_param = mlx5e_get_lro_param(&priv->channels.params);
        err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
-                               priv->max_nch, priv->drop_rq.rqn, &lro_param,
+                               priv->max_nch, priv->drop_rq.rqn,
+                               &priv->channels.params.packet_merge,
                                priv->channels.params.num_channels);
        if (err)
                goto err_close_drop_rq;
 
        u8         reserved_at_3c[0x1];
        u8         hash[0x1];
        u8         reserved_at_3e[0x1];
-       u8         lro[0x1];
+       u8         packet_merge[0x1];
 };
 
 struct mlx5_ifc_modify_tir_out_bits {