u64 packets;
u64 bytes;
struct u64_stats_sync syncp;
+
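+ /* Rx only: GSO frames that needed the checksum fixup in
+  * checksum_setup(); unused in the Tx instance of this struct.
+  */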
+ u64 rx_gso_checksum_fixup;
};
struct netfront_info;
/* Statistics */
struct netfront_stats __percpu *rx_stats;
struct netfront_stats __percpu *tx_stats;
-
- atomic_t rx_gso_checksum_fixup;
};
struct netfront_rx_info {
return cons;
}
-static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
+static int checksum_setup(struct netfront_info *info, struct sk_buff *skb)
{
bool recalculate_partial_csum = false;
/*
 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 * peers can fail to set NETRXF_csum_blank when sending a GSO
 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 * recalculate the partial checksum.
 */
if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
- struct netfront_info *np = netdev_priv(dev);
- atomic_inc(&np->rx_gso_checksum_fixup);
+ struct netfront_stats *rx_stats = this_cpu_ptr(info->rx_stats);
+
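+ /* A plain, lockless increment is safe here: checksum_setup() is
+  * only reached from the NAPI Rx path, where preemption is
+  * disabled, so this CPU's counter cannot be updated concurrently.
+  */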
+ rx_stats->rx_gso_checksum_fixup++;
skb->ip_summed = CHECKSUM_PARTIAL;
recalculate_partial_csum = true;
}
skb->protocol = eth_type_trans(skb, queue->info->netdev);
skb_reset_network_header(skb);
- if (checksum_setup(queue->info->netdev, skb)) {
+ if (checksum_setup(queue->info, skb)) {
kfree_skb(skb);
packets_dropped++;
queue->info->netdev->stats.rx_errors++;
} xennet_stats[] = {
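+ /* Offsets now refer to the struct netfront_stats totals built in
+  * xennet_get_ethtool_stats(), not to struct netfront_info.
+  */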
{
"rx_gso_checksum_fixup",
- offsetof(struct netfront_info, rx_gso_checksum_fixup)
+ offsetof(struct netfront_stats, rx_gso_checksum_fixup)
},
};
static void xennet_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 * data)
{
- void *np = netdev_priv(dev);
- int i;
+ struct netfront_info *np = netdev_priv(dev);
+ struct netfront_stats tot_stats;
+ void *temp = &tot_stats; /* byte-addressable alias for offsetof() lookups */
+ int i, cpu;
+
+ memset(&tot_stats, 0, sizeof(tot_stats));
+
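+ /* Fold each CPU's private counter into one total; iterating
+  * possible CPUs keeps counts from CPUs that have gone offline.
+  */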
+ for_each_possible_cpu(cpu) {
+ struct netfront_stats *s = per_cpu_ptr(np->rx_stats, cpu);
+
+ tot_stats.rx_gso_checksum_fixup += s->rx_gso_checksum_fixup;
+ }
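+
+ /* The u64 reads are not serialized against writers via syncp, so
+  * a torn read is theoretically possible on 32-bit; presumably
+  * acceptable for a best-effort diagnostic counter.
+  */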
for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
- data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
+ data[i] = *((u64 *)(temp + xennet_stats[i].offset));
}
static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)