Multiple flow counters can share a single Hardware Steering (HWS)
action when used in HWS rules. Since counter bulks are not created
exclusively for Hardware Steering, but also serve statistics gathering
and other steering modes, it is more efficient to create the HWS action
lazily, when a Hardware Steering rule first needs it, rather than
unconditionally creating one for every bulk of flow counters. Manage
the action's lifetime with a reference count: the first user of a bulk
creates the HWS action and the last user destroys it.

Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Reviewed-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20250109160546.1733647-8-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
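
Illustrative usage sketch (not part of this patch): a hypothetical HWS
rule path sharing a bulk's counter action through the new helpers. Only
mlx5_fc_get_hws_action(), mlx5_fc_put_hws_action() and
mlx5_fc_get_base_id() come from this patch; the example_* callers, the
error code and the rule-building step are assumptions.

	static int example_rule_init(struct mlx5hws_context *ctx,
				     struct mlx5_fc *counter)
	{
		struct mlx5hws_action *action;

		/* First user of the bulk creates the shared HWS action;
		 * later users only take a reference.
		 */
		action = mlx5_fc_get_hws_action(ctx, counter);
		if (!action)
			return -ENOMEM;

		/* ... build the HWS rule with 'action', indexing into the
		 * bulk at counter->id - mlx5_fc_get_base_id(counter) ...
		 */
		return 0;
	}

	static void example_rule_cleanup(struct mlx5_fc *counter)
	{
		/* Last reference destroys the bulk's shared HWS action. */
		mlx5_fc_put_hws_action(counter);
	}
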
const struct mlx5_flow_cmds *cmds;
};
+enum mlx5_fc_type {
+	MLX5_FC_TYPE_ACQUIRED = 0,
+	MLX5_FC_TYPE_LOCAL,
+};
+
+struct mlx5_fc_cache {
+	u64 packets;
+	u64 bytes;
+	u64 lastuse;
+};
+
+struct mlx5_fc {
+	u32 id;
+	bool aging;
+	enum mlx5_fc_type type;
+	struct mlx5_fc_bulk *bulk;
+	struct mlx5_fc_cache cache;
+	/* last{packets,bytes} are used for calculating deltas since last reading. */
+	u64 lastpackets;
+	u64 lastbytes;
+};
+
+struct mlx5_fc_bulk_hws_data {
+	struct mlx5hws_action *hws_action;
+	struct mutex lock; /* protects hws_action */
+	refcount_t hws_action_refcount;
+};
+
+struct mlx5_fc_bulk {
+	struct mlx5_fs_bulk fs_bulk;
+	u32 base_id;
+	struct mlx5_fc_bulk_hws_data hws_data;
+	struct mlx5_fc fcs[];
+};
+
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
-enum mlx5_fc_type {
-	MLX5_FC_TYPE_ACQUIRED = 0,
-	MLX5_FC_TYPE_LOCAL,
-};
-
-struct mlx5_fc_cache {
-	u64 packets;
-	u64 bytes;
-	u64 lastuse;
-};
-
-struct mlx5_fc {
-	u32 id;
-	bool aging;
-	enum mlx5_fc_type type;
-	struct mlx5_fc_bulk *bulk;
-	struct mlx5_fc_cache cache;
-	/* last{packets,bytes} are used for calculating deltas since last reading. */
-	u64 lastpackets;
-	u64 lastbytes;
-};
-
struct mlx5_fc_stats {
struct xarray counters;
fc_stats->sampling_interval);
}
-/* Flow counter bluks */
-
-struct mlx5_fc_bulk {
-	struct mlx5_fs_bulk fs_bulk;
-	u32 base_id;
-	struct mlx5_fc fcs[];
-};
+/* Flow counter bulks */
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
u32 id)
counter->id = id;
}
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter)
+{
+	return counter->bulk->base_id;
+}
+
static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
void *pool_ctx)
{
for (i = 0; i < bulk_len; i++)
mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);
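+	/* The HWS action is created lazily, on first use by an HWS rule. */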
+	refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+	mutex_init(&fc_bulk->hws_data.lock);
return &fc_bulk->fs_bulk;
fs_bulk_cleanup:
}
return true;
}
+
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+					      struct mlx5_fc *counter)
+{
+	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+	struct mlx5_fc_bulk *fc_bulk = counter->bulk;
+	struct mlx5_fc_bulk_hws_data *fc_bulk_hws;
+
+	fc_bulk_hws = &fc_bulk->hws_data;
+	/* try to avoid locking if not necessary */
+	if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount))
+		return fc_bulk_hws->hws_action;
+
+	mutex_lock(&fc_bulk_hws->lock);
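+	/* Re-check: the action may have been created meanwhile. */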
+	if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) {
+		mutex_unlock(&fc_bulk_hws->lock);
+		return fc_bulk_hws->hws_action;
+	}
+	fc_bulk_hws->hws_action =
+		mlx5hws_action_create_counter(ctx, fc_bulk->base_id, flags);
+	if (!fc_bulk_hws->hws_action) {
+		mutex_unlock(&fc_bulk_hws->lock);
+		return NULL;
+	}
+	refcount_set(&fc_bulk_hws->hws_action_refcount, 1);
+	mutex_unlock(&fc_bulk_hws->lock);
+
+	return fc_bulk_hws->hws_action;
+}
+
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
+{
+	struct mlx5_fc_bulk_hws_data *fc_bulk_hws = &counter->bulk->hws_data;
+
+	/* try to avoid locking if not necessary */
+	if (refcount_dec_not_one(&fc_bulk_hws->hws_action_refcount))
+		return;
+
+	mutex_lock(&fc_bulk_hws->lock);
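+	/* Only the last reference destroys the action. */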
+	if (!refcount_dec_and_test(&fc_bulk_hws->hws_action_refcount)) {
+		mutex_unlock(&fc_bulk_hws->lock);
+		return;
+	}
+	mlx5hws_action_destroy(fc_bulk_hws->hws_action);
+	fc_bulk_hws->hws_action = NULL;
+	mutex_unlock(&fc_bulk_hws->lock);
+}
struct mlx5_fs_hws_mh *mh_data);
bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
struct mlx5hws_action_mh_pattern *pattern);
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+					      struct mlx5_fc *counter);
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter);
#endif /* __MLX5_FS_HWS_POOLS_H__ */