struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
                                                 roce.nb);
 
-       if ((event != NETDEV_UNREGISTER) && (event != NETDEV_REGISTER))
-               return NOTIFY_DONE;
+       switch (event) {
+       case NETDEV_REGISTER:
+       case NETDEV_UNREGISTER:
+               write_lock(&ibdev->roce.netdev_lock);
+               if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
+                       ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
+                                            NULL : ndev;
+               write_unlock(&ibdev->roce.netdev_lock);
+               break;
 
-       write_lock(&ibdev->roce.netdev_lock);
-       if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
-               ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev;
-       write_unlock(&ibdev->roce.netdev_lock);
+       case NETDEV_UP:
+       case NETDEV_DOWN:
+               if (ndev == ibdev->roce.netdev && ibdev->ib_active) {
+                       struct ib_event ibev = {0};
+
+                       ibev.device = &ibdev->ib_dev;
+                       ibev.event = (event == NETDEV_UP) ?
+                                    IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+                       ibev.element.port_num = 1;
+                       ib_dispatch_event(&ibev);
+               }
+               break;
+
+       default:
+               break;
+       }
 
        return NOTIFY_DONE;
 }
                break;
 
        case MLX5_DEV_EVENT_PORT_UP:
-               ibev.event = IB_EVENT_PORT_ACTIVE;
-               port = (u8)param;
-               break;
-
        case MLX5_DEV_EVENT_PORT_DOWN:
        case MLX5_DEV_EVENT_PORT_INITIALIZED:
-               ibev.event = IB_EVENT_PORT_ERR;
                port = (u8)param;
+
+               /* In RoCE, port up/down events are handled in
+                * mlx5_netdev_event().
+                */
+               if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+                       IB_LINK_LAYER_ETHERNET)
+                       return;
+
+               ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
+                            IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
                break;
 
        case MLX5_DEV_EVENT_LID_CHANGE:
                       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
+/* Unregister the RoCE netdev notifier if it is currently registered.
+ * notifier_call doubles as the "registered" flag and is cleared here,
+ * making this helper safe to call from multiple teardown paths
+ * (error unwind and device removal) without double-unregistering.
+ */
+static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
+{
+       if (dev->roce.nb.notifier_call) {
+               unregister_netdevice_notifier(&dev->roce.nb);
+               dev->roce.nb.notifier_call = NULL;
+       }
+}
+
 static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
 {
        int err;
 
+       /* Track netdev register/unregister and link up/down events so the
+        * RoCE port state follows the underlying Ethernet device
+        * (handled in mlx5_netdev_event()).
+        */
        dev->roce.nb.notifier_call = mlx5_netdev_event;
        err = register_netdevice_notifier(&dev->roce.nb);
-       if (err)
+       if (err) {
+               /* Clear the callback so mlx5_remove_roce_notifier() can
+                * tell that registration never succeeded.
+                */
+               dev->roce.nb.notifier_call = NULL;
                return err;
+       }
 
        err = mlx5_nic_vport_enable_roce(dev->mdev);
        if (err)
        return 0;
 
 err_unregister_netdevice_notifier:
+       /* Use the idempotent helper so the error path matches the
+        * removal path and also resets notifier_call.
+        */
-       unregister_netdevice_notifier(&dev->roce.nb);
+       mlx5_remove_roce_notifier(dev);
        return err;
 }
 
 static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
 {
        mlx5_nic_vport_disable_roce(dev->mdev);
-       unregister_netdevice_notifier(&dev->roce.nb);
+       /* The netdev notifier is intentionally NOT unregistered here
+        * anymore; callers tear it down separately via
+        * mlx5_remove_roce_notifier() so the teardown ordering relative
+        * to ib_unregister_device() can be controlled by the caller.
+        */
 }
 
 static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
        destroy_dev_resources(&dev->devr);
 
 err_disable_roce:
-       if (ll == IB_LINK_LAYER_ETHERNET)
+       if (ll == IB_LINK_LAYER_ETHERNET) {
                mlx5_disable_roce(dev);
+               mlx5_remove_roce_notifier(dev);
+       }
 
 err_free_port:
        kfree(dev->port);
        struct mlx5_ib_dev *dev = context;
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
 
+       mlx5_remove_roce_notifier(dev);
        ib_unregister_device(&dev->ib_dev);
        mlx5_ib_dealloc_q_counters(dev);
        destroy_umrc_res(dev);