net/mlx5: HWS, use lock classes for bwc locks
author     Cosmin Ratiu <cratiu@nvidia.com>
           Tue, 15 Oct 2024 09:32:04 +0000 (12:32 +0300)
committer  Paolo Abeni <pabeni@redhat.com>
           Thu, 17 Oct 2024 10:14:07 +0000 (12:14 +0200)
The HWS BWC API uses one lock per queue and usually acquires only one
of them, except when making changes that require locking all queues in
order. Naturally, lockdep isn't too happy about acquiring the same lock
class multiple times, so inform it that each queue lock is a different
class to avoid false positives.

Fixes: 2ca62599aa0b ("net/mlx5: HWS, added send engine and context handling")
Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
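
The locking scheme described in the commit message relies on lockdep's
dynamic lock class keys. Below is a minimal, self-contained sketch of
that pattern, assuming hypothetical names (demo_ctx, demo_locks_init,
demo_locks_destroy) that are not taken from the driver. The sketch also
ties each registered key to its mutex with lockdep_set_class(); without
such an association, all mutexes initialized from the same mutex_init()
call site share one static class.

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_ctx {
	size_t nqueues;
	struct mutex *locks;		/* one lock per queue */
	struct lock_class_key *keys;	/* one lockdep class per lock */
};

static int demo_locks_init(struct demo_ctx *ctx)
{
	size_t i;

	ctx->locks = kcalloc(ctx->nqueues, sizeof(*ctx->locks), GFP_KERNEL);
	if (!ctx->locks)
		return -ENOMEM;

	ctx->keys = kcalloc(ctx->nqueues, sizeof(*ctx->keys), GFP_KERNEL);
	if (!ctx->keys) {
		kfree(ctx->locks);
		return -ENOMEM;
	}

	for (i = 0; i < ctx->nqueues; i++) {
		mutex_init(&ctx->locks[i]);
		/* Give this mutex its own dynamically registered class
		 * instead of the static class shared by every mutex
		 * initialized at this call site.
		 */
		lockdep_register_key(&ctx->keys[i]);
		lockdep_set_class(&ctx->locks[i], &ctx->keys[i]);
	}

	return 0;
}

static void demo_locks_destroy(struct demo_ctx *ctx)
{
	size_t i;

	for (i = 0; i < ctx->nqueues; i++) {
		mutex_destroy(&ctx->locks[i]);
		lockdep_unregister_key(&ctx->keys[i]);
	}
	kfree(ctx->keys);
	kfree(ctx->locks);
}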

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
index e5a7ce60433401f8d463085001b6d1c44025e6ff..8ab548aa402be2921bb7e099f5003ede81fc9d2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
@@ -46,6 +46,7 @@ struct mlx5hws_context {
        struct mlx5hws_send_engine *send_queue;
        size_t queues;
        struct mutex *bwc_send_queue_locks; /* protect BWC queues */
+       struct lock_class_key *bwc_lock_class_keys;
        struct list_head tbl_list;
        struct mlx5hws_context_debug_info debug_info;
        struct xarray peer_ctx_xa;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
index e101dc46d99ef49866b7ab7e9126d4adcf27d818..6d443e6ee8d9e9d3a2ced7f50743a4fae5e7b13d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
@@ -947,8 +947,12 @@ static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
        if (!mlx5hws_context_bwc_supported(ctx))
                return;
 
-       for (i = 0; i < bwc_queues; i++)
+       for (i = 0; i < bwc_queues; i++) {
                mutex_destroy(&ctx->bwc_send_queue_locks[i]);
+               lockdep_unregister_key(ctx->bwc_lock_class_keys + i);
+       }
+
+       kfree(ctx->bwc_lock_class_keys);
        kfree(ctx->bwc_send_queue_locks);
 }
 
@@ -977,10 +981,22 @@ static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
        if (!ctx->bwc_send_queue_locks)
                return -ENOMEM;
 
-       for (i = 0; i < bwc_queues; i++)
+       ctx->bwc_lock_class_keys = kcalloc(bwc_queues,
+                                          sizeof(*ctx->bwc_lock_class_keys),
+                                          GFP_KERNEL);
+       if (!ctx->bwc_lock_class_keys)
+               goto err_lock_class_keys;
+
+       for (i = 0; i < bwc_queues; i++) {
                mutex_init(&ctx->bwc_send_queue_locks[i]);
+               lockdep_register_key(ctx->bwc_lock_class_keys + i);
+       }
 
        return 0;
+
+err_lock_class_keys:
+       kfree(ctx->bwc_send_queue_locks);
+       return -ENOMEM;
 }
 
 int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
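
For context, here is a hedged sketch of the "locking all queues in
order" path the commit message mentions, reusing the hypothetical
demo_ctx above; demo_lock_all_queues() and demo_unlock_all_queues() are
illustrative names, not the driver's API. Once every mutex carries its
own class, lockdep records an ordered chain of distinct classes rather
than flagging the second acquisition of a shared class as possible
recursive locking.

static void demo_lock_all_queues(struct demo_ctx *ctx)
{
	size_t i;

	/* Always acquire in index order so lockdep sees one
	 * consistent chain of per-lock classes.
	 */
	for (i = 0; i < ctx->nqueues; i++)
		mutex_lock(&ctx->locks[i]);
}

static void demo_unlock_all_queues(struct demo_ctx *ctx)
{
	size_t i;

	/* Release in reverse order of acquisition. */
	for (i = ctx->nqueues; i > 0; i--)
		mutex_unlock(&ctx->locks[i - 1]);
}

The alternative annotation, mutex_lock_nested() with a per-queue
subclass, is limited to MAX_LOCKDEP_SUBCLASSES (8) subclasses, so it
cannot cover an arbitrary number of queues; per-lock classes sidestep
that limit.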