return count_eqe;
 }
 
-static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, unsigned long *flags)
+static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
+                                  unsigned long *flags)
        __acquires(&eq->lock)
 {
-       if (in_irq())
+       if (!recovery)
                spin_lock(&eq->lock);
        else
                spin_lock_irqsave(&eq->lock, *flags);
 }
 
-static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, unsigned long *flags)
+static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
+                                    unsigned long *flags)
        __releases(&eq->lock)
 {
-       if (in_irq())
+       if (!recovery)
                spin_unlock(&eq->lock);
        else
         spin_unlock_irqrestore(&eq->lock, *flags);
 }
 
@@ -223,11 +225,13 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
        struct mlx5_eqe *eqe;
        unsigned long flags;
        int num_eqes = 0;
+       bool recovery;
 
        dev = eq->dev;
        eqt = dev->priv.eq_table;
 
-       mlx5_eq_async_int_lock(eq_async, &flags);
+       recovery = action == ASYNC_EQ_RECOVER;
+       mlx5_eq_async_int_lock(eq_async, recovery, &flags);
 
        eqe = next_eqe_sw(eq);
        if (!eqe)
                goto out;
 
@@ -246,8 +250,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
 
 out:
        eq_update_ci(eq, 1);
-       mlx5_eq_async_int_unlock(eq_async, &flags);
+       mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
 
-       return unlikely(action == ASYNC_EQ_RECOVER) ? num_eqes : 0;
+       return unlikely(recovery) ? num_eqes : 0;
 }
 
 void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)