net/mlx5e: Present succeeded IPsec SA bytes and packets
author    Leon Romanovsky <leonro@nvidia.com>
          Thu, 27 Jun 2024 18:02:39 +0000 (21:02 +0300)
committer David S. Miller <davem@davemloft.net>
          Fri, 28 Jun 2024 11:58:11 +0000 (12:58 +0100)
IPsec SA statistics present the successfully decrypted and encrypted
packets and bytes, not the total handled by this SA. So update the
calculation logic to take failures into account.

Fixes: 6fb7f9408779 ("net/mlx5e: Connect mlx5 IPsec statistics with XFRM core")
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
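
The net effect of the patch is that only traffic not already counted by one of the failure flow counters (auth, trailer, replay) is added to the SA lifetime (curlft) statistics. A minimal userspace sketch of that accounting, assuming the raw counter values have already been read via mlx5_fc_query_cached(); struct sa_counters and account_success() are illustrative names, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the values read from the hardware flow
 * counters: one total counter on the SA rule plus one counter per
 * failure type (auth, trailer, replay). */
struct sa_counters {
	uint64_t total_packets, total_bytes;
	uint64_t auth_packets, auth_bytes;
	uint64_t trailer_packets, trailer_bytes;
	uint64_t replay_packets, replay_bytes;
};

/* Only traffic that was not dropped for an integrity, trailer or
 * replay error counts toward the SA lifetime statistics. */
static void account_success(const struct sa_counters *c,
			    uint64_t *success_packets,
			    uint64_t *success_bytes)
{
	*success_packets = c->total_packets - c->auth_packets -
			   c->trailer_packets - c->replay_packets;
	*success_bytes = c->total_bytes - c->auth_bytes -
			 c->trailer_bytes - c->replay_bytes;
}

int main(void)
{
	struct sa_counters c = {
		.total_packets = 1000, .total_bytes = 1500000,
		.auth_packets = 3, .auth_bytes = 4500,
		.replay_packets = 2, .replay_bytes = 3000,
	};
	uint64_t pkts, bytes;

	account_success(&c, &pkts, &bytes);
	printf("curlft += %llu packets / %llu bytes\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}

For egress SAs the failure counters stay at zero, so the subtraction reduces to the previous behaviour of adding the full rule counter.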
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c

index c54fd01ea635a2c0bcc585a69c7110b338ea972a..2a10428d820aef8eea61a53a1857d7c41869a7e1 100644 (file)
@@ -989,6 +989,10 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
        struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
        struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
        struct net *net = dev_net(x->xso.dev);
+       u64 trailer_packets = 0, trailer_bytes = 0;
+       u64 replay_packets = 0, replay_bytes = 0;
+       u64 auth_packets = 0, auth_bytes = 0;
+       u64 success_packets, success_bytes;
        u64 packets, bytes, lastuse;
 
        lockdep_assert(lockdep_is_held(&x->lock) ||
@@ -999,26 +1003,32 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
                return;
 
        if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
-               mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
-               x->stats.integrity_failed += packets;
-               XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
-
-               mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
-               XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
+               mlx5_fc_query_cached(ipsec_rule->auth.fc, &auth_bytes,
+                                    &auth_packets, &lastuse);
+               x->stats.integrity_failed += auth_packets;
+               XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_packets);
+
+               mlx5_fc_query_cached(ipsec_rule->trailer.fc, &trailer_bytes,
+                                    &trailer_packets, &lastuse);
+               XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, trailer_packets);
        }
 
        if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
                return;
 
-       mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
-       x->curlft.packets += packets;
-       x->curlft.bytes += bytes;
-
        if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
-               mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
-               x->stats.replay += packets;
-               XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
+               mlx5_fc_query_cached(ipsec_rule->replay.fc, &replay_bytes,
+                                    &replay_packets, &lastuse);
+               x->stats.replay += replay_packets;
+               XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_packets);
        }
+
+       mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
+       success_packets = packets - auth_packets - trailer_packets - replay_packets;
+       x->curlft.packets += success_packets;
+
+       success_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;
+       x->curlft.bytes += success_bytes;
 }
 
 static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,