&err_ctx);
 }
 
+static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq)
+{
+       struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
+       u32 eqe_count;
+
+       netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+                  eq->core.eqn, eq->core.cons_index, eq->core.irqn);
+
+       eqe_count = mlx5_eq_poll_irq_disabled(eq);
+       if (!eqe_count) {
+               clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+               return 1;
+       }
+
+       netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n",
+                  eqe_count, eq->core.eqn);
+       sq->channel->stats->eq_rearm++;
+       return 0;
+}
+
+void mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq)
+{
+       struct mlx5e_tx_err_ctx err_ctx;
+       char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
+
+       err_ctx.sq       = sq;
+       err_ctx.recover  = mlx5e_tx_reporter_timeout_recover;
+       sprintf(err_str,
+               "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
+               sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+               jiffies_to_usecs(jiffies - sq->txq->trans_start));
+       devlink_health_report(sq->channel->priv->tx_reporter, err_str,
+                             &err_ctx);
+}
+
 /* state lock cannot be grabbed within this function.
  * It can cause a dead lock or a read-after-free.
  */
 
        return features;
 }
 
-static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
-                                       struct mlx5e_txqsq *sq)
-{
-       struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
-       u32 eqe_count;
-
-       netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
-                  eq->core.eqn, eq->core.cons_index, eq->core.irqn);
-
-       eqe_count = mlx5_eq_poll_irq_disabled(eq);
-       if (!eqe_count)
-               return false;
-
-       netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->core.eqn);
-       sq->channel->stats->eq_rearm++;
-       return true;
-}
-
 static void mlx5e_tx_timeout_work(struct work_struct *work)
 {
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               tx_timeout_work);
-       struct net_device *dev = priv->netdev;
-       bool reopen_channels = false;
-       int i, err;
+       int i;
+
+       if (!priv->tx_reporter)
+               return;
 
        rtnl_lock();
        mutex_lock(&priv->state_lock);
                goto unlock;
 
        for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
-               struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
+               struct netdev_queue *dev_queue =
+                       netdev_get_tx_queue(priv->netdev, i);
                struct mlx5e_txqsq *sq = priv->txq2sq[i];
 
                if (!netif_xmit_stopped(dev_queue))
                        continue;
 
-               netdev_err(dev,
-                          "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
-                          i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
-                          jiffies_to_usecs(jiffies - dev_queue->trans_start));
-
-               /* If we recover a lost interrupt, most likely TX timeout will
-                * be resolved, skip reopening channels
-                */
-               if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
-                       clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-                       reopen_channels = true;
-               }
+               mlx5e_tx_reporter_timeout(sq);
        }
 
-       if (!reopen_channels)
-               goto unlock;
-
-       mlx5e_close_locked(dev);
-       err = mlx5e_open_locked(dev);
-       if (err)
-               netdev_err(priv->netdev,
-                          "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
-                          err);
-
 unlock:
        mutex_unlock(&priv->state_lock);
        rtnl_unlock();