u8             map_dir;   /* dma map direction */
        } buff;
 
-       struct mlx5e_channel  *channel;
        struct device         *pdev;
        struct net_device     *netdev;
        struct mlx5e_rq_stats *stats;
        struct mlx5e_page_cache page_cache;
        struct hwtstamp_config *tstamp;
        struct mlx5_clock      *clock;
+       struct mlx5e_icosq    *icosq;
+       struct mlx5e_priv     *priv;
 
        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
        mlx5e_fp_post_rx_wqes  post_wqes;
 
        return err;
 }
 
-int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel)
+/* Recover a stuck completion EQ: log its state, poll it once with the
+ * IRQ disabled to drain pending EQEs, and account the rearm in @stats.
+ * The netdev and per-channel stats are now passed explicitly so callers
+ * no longer need a struct mlx5e_channel back-pointer.
+ * Returns 0 if at least one EQE was recovered, -EIO otherwise.
+ */
+int mlx5e_health_channel_eq_recover(struct net_device *dev, struct mlx5_eq_comp *eq,
+                                   struct mlx5e_ch_stats *stats)
 {
        u32 eqe_count;
 
-       netdev_err(channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+       netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
                   eq->core.eqn, eq->core.cons_index, eq->core.irqn);
 
+       /* Drain manually; zero drained EQEs means the EQ was not stalled
+        * on unhandled events, so rearming cannot recover anything.
+        */
        eqe_count = mlx5_eq_poll_irq_disabled(eq);
        if (!eqe_count)
                return -EIO;
 
-       netdev_err(channel->netdev, "Recovered %d eqes on EQ 0x%x\n",
+       netdev_err(dev, "Recovered %d eqes on EQ 0x%x\n",
                   eqe_count, eq->core.eqn);
 
-       channel->stats->eq_rearm++;
+       stats->eq_rearm++;
        return 0;
 }
 
 
 };
 
 int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn);
-int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel);
+int mlx5e_health_channel_eq_recover(struct net_device *dev, struct mlx5_eq_comp *eq,
+                                   struct mlx5e_ch_stats *stats);
 int mlx5e_health_recover_channels(struct mlx5e_priv *priv);
 int mlx5e_health_report(struct mlx5e_priv *priv,
                        struct devlink_health_reporter *reporter, char *err_str,
 
 
+/* Devlink health "recover" callback for an RX timeout. @ctx is the
+ * struct mlx5e_rq that timed out. Attempts to recover the RQ's EQ; on
+ * failure, the RQ's ICOSQ (when present) is marked not-enabled so later
+ * teardown skips it.
+ */
 static int mlx5e_rx_reporter_timeout_recover(void *ctx)
 {
-       struct mlx5e_icosq *icosq;
        struct mlx5_eq_comp *eq;
        struct mlx5e_rq *rq;
        int err;
 
        rq = ctx;
-       icosq = &rq->channel->icosq;
        eq = rq->cq.mcq.eq;
-       err = mlx5e_health_channel_eq_recover(eq, rq->channel);
-       if (err)
-               clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
+
+       /* rq->icosq is optional; only disable it when it exists. */
+       err = mlx5e_health_channel_eq_recover(rq->netdev, eq, rq->cq.ch_stats);
+       if (err && rq->icosq)
+               clear_bit(MLX5E_SQ_STATE_ENABLED, &rq->icosq->state);
 
        return err;
 }
 static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
                                                   struct devlink_fmsg *fmsg)
 {
-       struct mlx5e_priv *priv = rq->channel->priv;
-       struct mlx5e_icosq *icosq;
-       u8 icosq_hw_state;
        u16 wqe_counter;
        int wqes_sz;
        u8 hw_state;
        u16 wq_head;
        int err;
 
-       icosq = &rq->channel->icosq;
-       err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state);
-       if (err)
-               return err;
-
-       err = mlx5_core_query_sq_state(priv->mdev, icosq->sqn, &icosq_hw_state);
+       err = mlx5e_query_rq_state(rq->mdev, rq->rqn, &hw_state);
        if (err)
                return err;
 
        if (err)
                return err;
 
-       err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->channel->ix);
+       err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
        if (err)
                return err;
 
        if (err)
                return err;
 
-       err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg);
-       if (err)
-               return err;
+       if (rq->icosq) {
+               struct mlx5e_icosq *icosq = rq->icosq;
+               u8 icosq_hw_state;
+
+               err = mlx5_core_query_sq_state(rq->mdev, icosq->sqn, &icosq_hw_state);
+               if (err)
+                       return err;
+
+               err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg);
+               if (err)
+                       return err;
+       }
 
        err = devlink_fmsg_obj_nest_end(fmsg);
        if (err)
 
+/* Report an RX timeout on @rq to the devlink health reporter. Builds an
+ * error string naming the channel index, the (optional) ICOSQ, the RQ
+ * and its CQ, then triggers the timeout recover flow via
+ * mlx5e_health_report().
+ */
 void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
 {
-       struct mlx5e_icosq *icosq = &rq->channel->icosq;
-       struct mlx5e_priv *priv = rq->channel->priv;
+       char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
        char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+       struct mlx5e_icosq *icosq = rq->icosq;
+       struct mlx5e_priv *priv = rq->priv;
        struct mlx5e_err_ctx err_ctx = {};
 
        err_ctx.ctx = rq;
        err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
        err_ctx.dump = mlx5e_rx_reporter_dump_rq;
+
+       /* icosq is optional; when absent, icosq_str stays empty ({} init
+        * above) and the ICOSQ part is simply omitted from err_str.
+        */
+       if (icosq)
+               snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
        snprintf(err_str, sizeof(err_str),
-                "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x",
-                icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
+                "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
+                rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
 
        mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
 }
 
 void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
 {
-       struct mlx5e_priv *priv = rq->channel->priv;
        char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+       struct mlx5e_priv *priv = rq->priv;
        struct mlx5e_err_ctx err_ctx = {};
 
        err_ctx.ctx = rq;
 
        sq = to_ctx->sq;
        eq = sq->cq.mcq.eq;
        priv = sq->channel->priv;
-       err = mlx5e_health_channel_eq_recover(eq, sq->channel);
+       err = mlx5e_health_channel_eq_recover(sq->channel->netdev, eq, sq->channel->stats);
        if (!err) {
                to_ctx->status = 0; /* this sq recovered */
                return err;
 
        rq->wq_type = params->rq_wq_type;
        rq->pdev    = c->pdev;
        rq->netdev  = c->netdev;
+       rq->priv    = c->priv;
        rq->tstamp  = c->tstamp;
        rq->clock   = &mdev->clock;
-       rq->channel = c;
+       rq->icosq   = &c->icosq;
        rq->ix      = c->ix;
        rq->mdev    = mdev;
        rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        int i;
 
        old_prog = rcu_dereference_protected(rq->xdp_prog,
-                                            lockdep_is_held(&rq->channel->priv->state_lock));
+                                            lockdep_is_held(&rq->priv->state_lock));
        if (old_prog)
                bpf_prog_put(old_prog);
 
 
 static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
 {
-       struct mlx5e_channel *c = rq->channel;
-       struct mlx5e_priv *priv = c->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_core_dev *mdev = rq->mdev;
 
        void *in;
        void *rqc;
 
 static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
 {
-       struct mlx5e_channel *c = rq->channel;
-       struct mlx5_core_dev *mdev = c->mdev;
+       struct mlx5_core_dev *mdev = rq->mdev;
        void *in;
        void *rqc;
        int inlen;
 int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
 {
        unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
-       struct mlx5e_channel *c = rq->channel;
 
        u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
 
                msleep(20);
        } while (time_before(jiffies, exp_time));
 
-       netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
-                   c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
+       netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
+                   rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
 
        mlx5e_reporter_rx_timeout(rq);
        return -ETIMEDOUT;
 void mlx5e_activate_rq(struct mlx5e_rq *rq)
 {
        set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-       mlx5e_trigger_irq(&rq->channel->icosq);
+       mlx5e_trigger_irq(rq->icosq);
 }
 
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
        cancel_work_sync(&rq->dim.work);
-       cancel_work_sync(&rq->channel->icosq.recover_work);
+       cancel_work_sync(&rq->icosq->recover_work);
        cancel_work_sync(&rq->recover_work);
        mlx5e_destroy_rq(rq);
        mlx5e_free_rx_descs(rq);
        struct bpf_prog *old_prog;
 
        old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
-                                      lockdep_is_held(&rq->channel->priv->state_lock));
+                                      lockdep_is_held(&rq->priv->state_lock));
        if (old_prog)
                bpf_prog_put(old_prog);
 }
 
 {
        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
        struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
-       struct mlx5e_icosq *sq = &rq->channel->icosq;
+       struct mlx5e_icosq *sq = rq->icosq;
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_umr_wqe *umr_wqe;
        u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
 
 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 {
-       struct mlx5e_icosq *sq = &rq->channel->icosq;
        struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
        u8  umr_completed = rq->mpwqe.umr_completed;
+       struct mlx5e_icosq *sq = rq->icosq;
        int alloc_err = 0;
        u8  missing, i;
        u16 head;
 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
        struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
+       struct mlx5e_priv *priv = rq->priv;
 
        if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
            !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
                mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
-               queue_work(rq->channel->priv->wq, &rq->recover_work);
+               queue_work(priv->wq, &rq->recover_work);
        }
 }
 
 
 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
 {
+       struct net_device *netdev = rq->netdev;
        struct mlx5_core_dev *mdev = rq->mdev;
-       struct mlx5e_channel *c = rq->channel;
+       struct mlx5e_priv *priv = rq->priv;
 
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                rq->post_wqes = mlx5e_post_rx_mpwqes;
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
-               rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
+               rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
 #ifdef CONFIG_MLX5_EN_IPSEC
                if (MLX5_IPSEC_DEV(mdev)) {
-                       netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
+                       netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
                        return -EINVAL;
                }
 #endif
                if (!rq->handle_rx_cqe) {
-                       netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n");
+                       netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
                        return -EINVAL;
                }
                break;
 
 #ifdef CONFIG_MLX5_EN_IPSEC
                if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
-                   c->priv->ipsec)
+                   priv->ipsec)
                        rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
                else
 #endif
-                       rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe;
+                       rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
                if (!rq->handle_rx_cqe) {
-                       netdev_err(c->netdev, "RX handler of RQ is not set\n");
+                       netdev_err(netdev, "RX handler of RQ is not set\n");
                        return -EINVAL;
                }
        }