net/mlx5: Expose SFs IRQs
Author:     Shay Drory <shayd@nvidia.com>
AuthorDate: Sun, 14 Apr 2024 08:26:07 +0000 (11:26 +0300)
Commit:     Saeed Mahameed <saeedm@nvidia.com>
CommitDate: Thu, 11 Jul 2024 21:17:14 +0000 (14:17 -0700)
Expose the sysfs files for the IRQs that the mlx5 PCI SFs are using.
These entries are similar to those of PCI PFs and VFs, which appear
under the 'msi_irqs' directory.
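
For illustration (the device names below are hypothetical examples, not
taken from this patch), the resulting layout would look like:

    /sys/bus/pci/devices/0000:08:00.2/msi_irqs/50        (PF/VF, existing)
    /sys/bus/auxiliary/devices/mlx5_core.sf.2/irqs/53    (SF, added here)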

Reviewed-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
v8->v9:
- add Przemek's Reviewed-by
v6->v7:
- remove unneeded changes to the mlx5 sfnum SF sysfs
v5->v6:
- fail IRQ creation in case auxiliary_device_sysfs_irq_add() failed
  (Parav and Przemek)
v2->v3:
- fix mlx5 sfnum SF sysfs

drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 5693986ae65628ee00e190a9d40e5b7a0e519399..5661f047702e05296bede785ff47e1bd21229abc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -714,7 +714,7 @@ err2:
 err1:
        mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
-       mlx5_ctrl_irq_release(table->ctrl_irq);
+       mlx5_ctrl_irq_release(dev, table->ctrl_irq);
        return err;
 }
 
@@ -730,7 +730,7 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
        cleanup_async_eq(dev, &table->cmd_eq, "cmd");
        mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
-       mlx5_ctrl_irq_release(table->ctrl_irq);
+       mlx5_ctrl_irq_release(dev, table->ctrl_irq);
 }
 
 struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
@@ -918,7 +918,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
        af_desc.is_managed = 1;
        cpumask_copy(&af_desc.mask, cpu_online_mask);
        cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
-       irq = mlx5_irq_affinity_request(pool, &af_desc);
+       irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
        if (IS_ERR(irq))
                return PTR_ERR(irq);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 612e666ec2635614f2e24443f319efd822b369b5..f7b01b3f0cba360449533c03c148e2bbc3d53e0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -112,15 +112,18 @@ irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req
 
 /**
  * mlx5_irq_affinity_request - request an IRQ according to the given mask.
+ * @dev: mlx5 core device which is requesting the IRQ.
  * @pool: IRQ pool to request from.
  * @af_desc: affinity descriptor for this IRQ.
  *
  * This function returns a pointer to IRQ, or ERR_PTR in case of error.
  */
 struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+                         struct irq_affinity_desc *af_desc)
 {
        struct mlx5_irq *least_loaded_irq, *new_irq;
+       int ret;
 
        mutex_lock(&pool->lock);
        least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask);
@@ -153,6 +156,16 @@ out:
                              mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ);
 unlock:
        mutex_unlock(&pool->lock);
+       if (mlx5_irq_pool_is_sf_pool(pool)) {
+               ret = auxiliary_device_sysfs_irq_add(mlx5_sf_coredev_to_adev(dev),
+                                                    mlx5_irq_get_irq(least_loaded_irq));
+               if (ret) {
+                       mlx5_core_err(dev, "Failed to create sysfs entry for irq %d, ret = %d\n",
+                                     mlx5_irq_get_irq(least_loaded_irq), ret);
+                       mlx5_irq_put(least_loaded_irq);
+                       least_loaded_irq = ERR_PTR(ret);
+               }
+       }
        return least_loaded_irq;
 }
 
@@ -164,6 +177,9 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i
        cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
        synchronize_irq(pci_irq_vector(pool->dev->pdev,
                                       mlx5_irq_get_index(irq)));
+       if (mlx5_irq_pool_is_sf_pool(pool))
+               auxiliary_device_sysfs_irq_remove(mlx5_sf_coredev_to_adev(dev),
+                                                 mlx5_irq_get_irq(irq));
        if (mlx5_irq_put(irq))
                if (pool->irqs_per_cpu)
                        cpu_put(pool, cpu);
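
The hunks above pair sysfs creation with the request path and sysfs
removal with the release path. A minimal sketch of the expected caller
pattern, mirroring comp_irq_request_sf() in eq.c (names as in this
patch; af_desc setup and error handling beyond the request elided):

	struct mlx5_irq *irq;

	irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
	if (IS_ERR(irq))
		return PTR_ERR(irq);	/* IRQ or its sysfs entry was not created */
	/* ... use the IRQ ... */
	mlx5_irq_affinity_irq_release(dev, irq);	/* removes the sysfs entry, then drops the ref */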
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index c38342b9f3208fc61aeaae31ae47efb159de38a1..e764b720d9b20def22335d4a8fccfe2a96e15482 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -320,6 +320,12 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
        return dev->coredev_type == MLX5_COREDEV_SF;
 }
 
+static inline struct auxiliary_device *
+mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev)
+{
+       return container_of(mdev->device, struct auxiliary_device, dev);
+}
+
 int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
 void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
 int mlx5_init_one(struct mlx5_core_dev *dev);
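
A note on the helper above: it is only valid for SF core devices. For an
SF, mdev->device is the embedded struct device of its auxiliary device,
so container_of() simply walks back to the containing auxiliary_device:

	/* Valid only when mdev->coredev_type == MLX5_COREDEV_SF; there,
	 * mdev->device points at &adev->dev, so container_of() recovers adev.
	 */
	struct auxiliary_device *adev = mlx5_sf_coredev_to_adev(mdev);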
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 1088114e905d19adee66f0683bb89739e53b1cac..0881e961d8b177e5bcb3e40451aaf5fa81d74840 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -25,7 +25,7 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
 int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
 
 struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
-void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
+void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq);
 struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
                                  struct irq_affinity_desc *af_desc,
                                  struct cpu_rmap **rmap);
@@ -36,13 +36,15 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
 int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
 struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
 int mlx5_irq_get_index(struct mlx5_irq *irq);
+int mlx5_irq_get_irq(const struct mlx5_irq *irq);
 
 struct mlx5_irq_pool;
 #ifdef CONFIG_MLX5_SF
 struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
                                                    struct cpumask *used_cpus, u16 vecidx);
-struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
-                                          struct irq_affinity_desc *af_desc);
+struct mlx5_irq *
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+                         struct irq_affinity_desc *af_desc);
 void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq);
 #else
 static inline
@@ -53,7 +55,8 @@ struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
 }
 
 static inline struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+                         struct irq_affinity_desc *af_desc)
 {
        return ERR_PTR(-EOPNOTSUPP);
 }
@@ -61,6 +64,7 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *
 static inline
 void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
 {
+       mlx5_irq_release_vector(irq);
 }
 #endif
 #endif /* __MLX5_IRQ_H__ */
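
The !CONFIG_MLX5_SF stub above is no longer empty for a reason visible in
pci_irq.c below: mlx5_ctrl_irq_release() now funnels through
mlx5_irq_affinity_irq_release() unconditionally, so with SF support
compiled out the inline stub must still free the underlying vector:

	/* CONFIG_MLX5_SF=n call chain after this patch:
	 * mlx5_ctrl_irq_release(dev, ctrl_irq)
	 *   -> mlx5_irq_affinity_irq_release(dev, ctrl_irq)   [inline stub]
	 *        -> mlx5_irq_release_vector(ctrl_irq)
	 */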
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index fb8787e30d3faea62d2ce7833cf8cce78aba7be7..ac7c3a76b4cf8853624c9b0a64719d11da73e34e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -367,6 +367,11 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
        return irq->mask;
 }
 
+int mlx5_irq_get_irq(const struct mlx5_irq *irq)
+{
+       return irq->map.virq;
+}
+
 int mlx5_irq_get_index(struct mlx5_irq *irq)
 {
        return irq->map.index;
@@ -440,11 +445,12 @@ static void _mlx5_irq_release(struct mlx5_irq *irq)
 
 /**
  * mlx5_ctrl_irq_release - release a ctrl IRQ back to the system.
+ * @dev: mlx5 device that is releasing the IRQ.
  * @ctrl_irq: ctrl IRQ to be released.
  */
-void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
+void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
 {
-       _mlx5_irq_release(ctrl_irq);
+       mlx5_irq_affinity_irq_release(dev, ctrl_irq);
 }
 
 /**
@@ -473,7 +479,7 @@ struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
                /* Allocate the IRQ in index 0. The vector was already allocated */
                irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
        } else {
-               irq = mlx5_irq_affinity_request(pool, &af_desc);
+               irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
        }
 
        return irq;
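
Putting the pieces together, the control-IRQ lifetime on an SF now
carries the sysfs entry with it. A hedged sketch of the flow (function
names as in this patch, error handling elided):

	struct mlx5_irq *ctrl_irq;

	ctrl_irq = mlx5_ctrl_irq_request(dev);
	/* SF pool: -> mlx5_irq_affinity_request(dev, pool, &af_desc)
	 *            -> auxiliary_device_sysfs_irq_add(adev, virq)
	 */
	if (IS_ERR(ctrl_irq))
		return PTR_ERR(ctrl_irq);

	/* ... */

	mlx5_ctrl_irq_release(dev, ctrl_irq);
	/* -> mlx5_irq_affinity_irq_release(dev, ctrl_irq)
	 *      -> auxiliary_device_sysfs_irq_remove(adev, virq)
	 *      -> mlx5_irq_put(ctrl_irq)
	 */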