#include <linux/random.h>
 #include <linux/io-mapping.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/eq.h>
 #include <linux/debugfs.h>
 
 #include "mlx5_core.h"
+#include "lib/eq.h"
 
 enum {
        CMD_IF_REV = 5,
        return MLX5_GET(mbox_in, in->first.data, opcode);
 }
 
+static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
+
 static void cb_timeout_handler(struct work_struct *work)
 {
        struct delayed_work *dwork = container_of(work, struct delayed_work,
                up(&cmd->sem);
 }
 
+/* Notifier callback hooked on the EQ notifier chain for CMD events.
+ * Extracts the completion vector from the EQE payload and forwards it
+ * to the command completion handler.  Always returns NOTIFY_OK so the
+ * rest of the chain keeps running.
+ */
+static int cmd_comp_notifier(struct notifier_block *nb,
+                            unsigned long type, void *data)
+{
+       struct mlx5_core_dev *dev;
+       struct mlx5_cmd *cmd;
+       struct mlx5_eqe *eqe;
+
+       /* Recover the owning mlx5_cmd from the embedded notifier block,
+        * then the device from the embedded cmd struct.
+        */
+       cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
+       dev = container_of(cmd, struct mlx5_core_dev, cmd);
+       eqe = data;
+
+       /* forced = false: this is a genuine FW completion event, not a
+        * driver-triggered (error-flow) completion.
+        */
+       mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
+
+       return NOTIFY_OK;
+}
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
 {
+       /* Register the CMD-event notifier before switching the command
+        * interface to event mode, so no completion EQE can arrive while
+        * nobody is listening.
+        */
+       MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
+       mlx5_eq_notifier_register(dev, &dev->cmd.nb);
        mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
 }
 
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
 {
        mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
+       /* Mirror of mlx5_cmd_use_events(): switch to polling first, then
+        * drop the notifier, so completion events arriving during the
+        * transition are still delivered.
+        */
+       mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
 }
 
 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
        }
 }
 
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
+static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 {
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_work_ent *ent;
                }
        }
 }
-EXPORT_SYMBOL(mlx5_cmd_comp_handler);
+
+/* Force completion of all in-flight commands with a driver-generated
+ * completion (used on the fatal-error path, where FW will not answer).
+ * Builds a vector of occupied command slots from the allocation
+ * bitmask, tags it with MLX5_TRIGGERED_CMD_COMP so the handler can
+ * distinguish it from a real FW completion, and invokes the completion
+ * handler with forced = true.
+ */
+void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+{
+       unsigned long flags;
+       u64 vector;
+
+       /* wait for pending handlers to complete */
+       mlx5_eq_synchronize_cmd_irq(dev);
+       spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+       /* 1 << log_sz slots exist in total; bits clear in cmd.bitmask are
+        * presumably the allocated (in-flight) slots — confirm against the
+        * alloc/free paths, which are outside this hunk.
+        */
+       vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+       if (!vector)
+               goto no_trig;
+
+       vector |= MLX5_TRIGGERED_CMD_COMP;
+       /* Release alloc_lock before invoking the completion handler. */
+       spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+       mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
+       mlx5_cmd_comp_handler(dev, vector, true);
+       return;
+
+no_trig:
+       spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+}
 
 static int status_to_err(u8 status)
 {
 
                    &dev->iseg->cmdq_addr_l_sz);
 }
 
-static void trigger_cmd_completions(struct mlx5_core_dev *dev)
-{
-       unsigned long flags;
-       u64 vector;
-
-       /* wait for pending handlers to complete */
-       mlx5_eq_synchronize_cmd_irq(dev);
-       spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
-       vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
-       if (!vector)
-               goto no_trig;
-
-       vector |= MLX5_TRIGGERED_CMD_COMP;
-       spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
-
-       mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
-       mlx5_cmd_comp_handler(dev, vector, true);
-       return;
-
-no_trig:
-       spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
-}
-
 static int in_fatal(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_health *health = &dev->priv.health;
        mlx5_core_err(dev, "start\n");
        if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
                dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
-               trigger_cmd_completions(dev);
+               mlx5_cmd_trigger_completions(dev);
        }
 
        mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);