static void fbnic_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats64)
{
u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
+ u64 rx_over = 0, rx_missed = 0, rx_length = 0;
u64 tx_bytes, tx_packets, tx_dropped = 0;
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
- u64 rx_over = 0, rx_missed = 0;
+
unsigned int start, i;
fbnic_get_hw_stats(fbd);
stats64->rx_missed_errors = rx_missed;
for (i = 0; i < fbn->num_rx_queues; i++) {
+ struct fbnic_ring *xdpr = fbn->tx[FBNIC_MAX_TXQS + i];
struct fbnic_ring *rxr = fbn->rx[i];
if (!rxr)
continue;

stats = &rxr->stats;
do {
start = u64_stats_fetch_begin(&stats->syncp);
rx_bytes = stats->bytes;
rx_packets = stats->packets;
rx_dropped = stats->dropped;
+ rx_length = stats->rx.length_errors;
} while (u64_stats_fetch_retry(&stats->syncp, start));
stats64->rx_bytes += rx_bytes;
stats64->rx_packets += rx_packets;
stats64->rx_dropped += rx_dropped;
+ stats64->rx_errors += rx_length;
+ stats64->rx_length_errors += rx_length;
+
+ if (!xdpr)
+ continue;
+
+ stats = &xdpr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->tx_bytes += tx_bytes;
+ stats64->tx_packets += tx_packets;
+ stats64->tx_dropped += tx_dropped;
}
}
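A note on the fetch loops above: the per-queue counters are u64s written from the datapath, and the u64_stats_* helpers let a reader take a consistent snapshot without locking the hot path (they compile away on 64-bit kernels and fall back to a seqcount on 32-bit). A minimal sketch of the writer/reader pairing, with illustrative "demo" names rather than fbnic's structures:

	#include <linux/u64_stats_sync.h>

	struct demo_stats {
		u64 bytes;
		u64 packets;
		struct u64_stats_sync syncp;
	};

	/* Writer (datapath): bracket updates so 32-bit readers can
	 * detect a torn snapshot.
	 */
	static void demo_stats_add(struct demo_stats *s, unsigned int len)
	{
		u64_stats_update_begin(&s->syncp);
		s->bytes += len;
		s->packets++;
		u64_stats_update_end(&s->syncp);
	}

	/* Reader: retry until no writer interleaved with the reads. */
	static void demo_stats_read(struct demo_stats *s, u64 *bytes,
				    u64 *packets)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			*bytes = s->bytes;
			*packets = s->packets;
		} while (u64_stats_fetch_retry(&s->syncp, start));
	}
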
static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
struct netdev_queue_stats_tx *tx)
{
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *txr = fbn->tx[idx];
struct fbnic_queue_stats *stats;
u64 stop, wake, csum, lso;
+ struct fbnic_ring *xdpr;
unsigned int start;
u64 bytes, packets;
tx->hw_gso_wire_packets = lso;
tx->stop = stop;
tx->wake = wake;
+
+ xdpr = fbn->tx[FBNIC_MAX_TXQS + idx];
+ if (xdpr) {
+ stats = &xdpr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ tx->bytes += bytes;
+ tx->packets += packets;
+ }
}
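For context on where this callback plugs in: per-queue Tx/Rx stats are surfaced through the netdev qstats netlink API via struct netdev_stat_ops. A sketch of the registration, reusing the callback names visible in this patch; the Rx counterpart and the setup helper are assumptions, not quoted from the patch:

	#include <net/netdev_queues.h>

	static const struct netdev_stat_ops fbnic_stat_ops = {
		.get_queue_stats_rx	= fbnic_get_queue_stats_rx, /* assumed */
		.get_queue_stats_tx	= fbnic_get_queue_stats_tx,
		.get_base_stats		= fbnic_get_base_stats,
	};

	/* Hypothetical setup helper: once stat_ops is set, the core
	 * exposes these counters through the netdev netlink family.
	 */
	static void demo_register_stat_ops(struct net_device *netdev)
	{
		netdev->stat_ops = &fbnic_stat_ops;
	}
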
static void fbnic_clean_twq1(struct fbnic_napi_vector *nv, bool pp_allow_direct,
struct fbnic_ring *ring, bool discard,
unsigned int hw_head)
{
+ u64 total_bytes = 0, total_packets = 0;
unsigned int head = ring->head;
- u64 total_bytes = 0;
while (hw_head != head) {
struct page *page;
u64 twd;

twd = le64_to_cpu(ring->desc[head]);
page = ring->tx_buf[head];
+ /* TYPE_AL is 2, TYPE_LAST_AL is 3. So this trick gives
+ * us one increment per packet, with no branches.
+ */
+ total_packets += FIELD_GET(FBNIC_TWD_TYPE_MASK, twd) -
+ FBNIC_TWD_TYPE_AL;
total_bytes += FIELD_GET(FBNIC_TWD_LEN_MASK, twd);
page_pool_put_page(nv->page_pool, page, -1, pp_allow_direct);

head++;
head &= ring->size_mask;
}

if (!total_bytes)
return;

ring->head = head;
+
+ if (discard) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+ return;
+ }
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.bytes += total_bytes;
+ ring->stats.packets += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
}
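The comment's arithmetic is worth spelling out: every descriptor of a packet is TYPE_AL (2) except the final one, which is TYPE_LAST_AL (3), so summing type - TYPE_AL across the cleaned range adds exactly one per completed packet (0 for each middle fragment, 1 for each last fragment). A standalone sketch with made-up mask values; the real FBNIC_TWD_* definitions differ:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define DEMO_TWD_TYPE_MASK	GENMASK_ULL(63, 62)
	#define DEMO_TWD_TYPE_AL	2ULL	/* mid-packet descriptor */
	#define DEMO_TWD_TYPE_LAST_AL	3ULL	/* final descriptor */

	/* AL contributes 2 - 2 = 0, LAST_AL contributes 3 - 2 = 1, so
	 * the sum equals the number of completed packets with no branch
	 * per descriptor.
	 */
	static u64 demo_count_packets(const u64 *twd, unsigned int n)
	{
		u64 packets = 0;

		while (n--)
			packets += FIELD_GET(DEMO_TWD_TYPE_MASK, *twd++) -
				   DEMO_TWD_TYPE_AL;

		return packets;
	}
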
static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
frag = &shinfo->frags[0];
}
- if (fbnic_desc_unused(ring) < nsegs)
+ if (fbnic_desc_unused(ring) < nsegs) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped++;
+ u64_stats_update_end(&ring->stats.syncp);
return -FBNIC_XDP_CONSUME;
+ }
page = virt_to_page(pkt->buff.data_hard_start);
offset = offset_in_page(pkt->buff.data);
static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt, int budget)
{
unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
+ u64 csum_complete = 0, csum_none = 0, length_errors = 0;
s32 head0 = -1, head1 = -1, pkt_tail = -1;
- u64 csum_complete = 0, csum_none = 0;
struct fbnic_ring *rcq = &qt->cmpl;
struct fbnic_pkt_buff *pkt;
__le64 *raw_rcd, done;
if (!skb) {
alloc_failed++;
dropped++;
+ } else if (skb == ERR_PTR(-FBNIC_XDP_LEN_ERR)) {
+ length_errors++;
} else {
dropped++;
}
rcq->stats.rx.alloc_failed += alloc_failed;
rcq->stats.rx.csum_complete += csum_complete;
rcq->stats.rx.csum_none += csum_none;
+ rcq->stats.rx.length_errors += length_errors;
u64_stats_update_end(&rcq->stats.syncp);
if (pkt_tail >= 0)
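The skb tests above rely on the kernel's ERR_PTR convention: a pointer return can encode a negative errno, so one value distinguishes allocation failure (NULL), an oversized frame, and any other error. A minimal sketch of that classification; ERR_PTR() and IS_ERR() are the real <linux/err.h> helpers, while the error constant and function name are illustrative:

	#include <linux/err.h>
	#include <linux/skbuff.h>
	#include <linux/types.h>

	#define DEMO_XDP_LEN_ERR	1	/* stand-in for FBNIC_XDP_LEN_ERR */

	static void demo_classify_skb(const struct sk_buff *skb, u64 *alloc_failed,
				      u64 *length_errors, u64 *dropped)
	{
		if (!skb) {
			(*alloc_failed)++;	/* no buffer could be built */
			(*dropped)++;
		} else if (skb == ERR_PTR(-DEMO_XDP_LEN_ERR)) {
			(*length_errors)++;	/* frame exceeded XDP limits */
		} else if (IS_ERR(skb)) {
			(*dropped)++;		/* any other failure */
		}
	}
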
fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
+ fbn->rx_stats.rx.length_errors += stats->rx.length_errors;
/* Remember to add new stats here */
- BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 3);
+ BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 4);
}
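The BUILD_BUG_ON above is a compile-time tripwire: the rx counter struct now holds four u64s, so adding a fifth changes sizeof()/8 and breaks the build until every aggregation site is updated (hence the "Remember to add new stats here" comment). The same idiom reduced to essentials, with demo names:

	#include <linux/build_bug.h>
	#include <linux/types.h>

	struct demo_rx_stats {
		u64 alloc_failed;
		u64 csum_complete;
		u64 csum_none;
		u64 length_errors;
	};

	static void demo_aggregate(struct demo_rx_stats *dst,
				   const struct demo_rx_stats *src)
	{
		dst->alloc_failed += src->alloc_failed;
		dst->csum_complete += src->csum_complete;
		dst->csum_none += src->csum_none;
		dst->length_errors += src->length_errors;

		/* Fails to build if a counter is added to the struct
		 * without updating the copies above.
		 */
		BUILD_BUG_ON(sizeof(struct demo_rx_stats) / 8 != 4);
	}
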
void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6);
}
+static void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *xdpr)
+{
+ struct fbnic_queue_stats *stats = &xdpr->stats;
+
+ if (!(xdpr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ /* Capture stats from queues before disassociating them. XDP-TX
+ * frames arrive on an Rx queue and leave on this ring, so their
+ * bytes/packets count toward both the Rx and Tx totals, while
+ * drops (frames we failed to transmit) count toward Rx only.
+ */
+ fbn->rx_stats.bytes += stats->bytes;
+ fbn->rx_stats.packets += stats->packets;
+ fbn->rx_stats.dropped += stats->dropped;
+ fbn->tx_stats.bytes += stats->bytes;
+ fbn->tx_stats.packets += stats->packets;
+}
+
static void fbnic_remove_xdp_ring(struct fbnic_net *fbn,
struct fbnic_ring *xdpr)
{
if (!(xdpr->flags & FBNIC_RING_F_STATS))
return;
+ fbnic_aggregate_ring_xdp_counters(fbn, xdpr);
+
/* Remove pointer to the Tx ring */
WARN_ON(fbn->tx[xdpr->q_idx] && fbn->tx[xdpr->q_idx] != xdpr);
fbn->tx[xdpr->q_idx] = NULL;