@@ ... @@ bnxt_start_xmit
                if (vlan_tag_flags)
                        ptp->tx_hdr_off += VLAN_HLEN;
                lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+               tx_buf->is_ts_pkt = 1;
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        } else {
                atomic_inc(&bp->ptp_cfg->tx_avail);
        }
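The hunk above records the PTP-stamp decision in the software TX buffer descriptor itself, in addition to setting SKBTX_IN_PROGRESS on the skb. That requires a new per-buffer flag; a minimal sketch of the field the diff relies on, assuming the usual bnxt.h layout (neighboring fields abbreviated, not the verbatim upstream struct):

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

struct bnxt_sw_tx_bd {
        struct sk_buff          *skb;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        unsigned short          nr_frags;
        u8                      is_push;
        u8                      is_ts_pkt;      /* new: set when TX_BD_FLAGS_STAMP is requested */
        /* ... remaining fields unchanged ... */
};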
@@ ... @@ bnxt_start_xmit
        dev_kfree_skb_any(skb);
tx_kick_pending:
        if (BNXT_TX_PTP_IS_SET(lflags)) {
+               txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
                atomic64_inc(&bp->ptp_cfg->stats.ts_err);
                atomic_inc(&bp->ptp_cfg->tx_avail);
        }
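If the transmit path bails out after the stamp flag was committed to lflags, the error path must undo the bookkeeping: clear is_ts_pkt on the producer slot it had claimed, count a timestamp error, and return the PTP slot to tx_avail. The guard is the driver's BNXT_TX_PTP_IS_SET() helper, which is essentially a test of the stamp bit in the BD flags, along these lines (a sketch; the real definition lives in bnxt.h):

/* Sketch of the guard used above; assumes the macro simply tests the
 * TX_BD_FLAGS_STAMP bit that the transmit path set in lflags.
 */
#define BNXT_TX_PTP_IS_SET(lflags) ((lflags) & cpu_to_le32(TX_BD_FLAGS_STAMP))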
@@ ... @@ __bnxt_tx_int
        while (RING_TX(bp, cons) != hw_cons) {
                struct bnxt_sw_tx_bd *tx_buf;
                struct sk_buff *skb;
+               bool is_ts_pkt;
                int j, last;

                tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
...
                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

+               is_ts_pkt = tx_buf->is_ts_pkt;
+               tx_buf->is_ts_pkt = 0;
                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), DMA_TO_DEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
                        dma_unmap_page(&pdev->dev,
                                       dma_unmap_addr(tx_buf, mapping),
                                       skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                       DMA_TO_DEVICE);
                }
-               if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+               if (unlikely(is_ts_pkt)) {
                        if (BNXT_CHIP_P5(bp)) {
                                /* PTP worker takes ownership of the skb */
                                if (!bnxt_get_tx_ts_p5(bp, skb)) {
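The last hunk is the point of the change: at completion time the driver stops testing skb_shinfo(skb)->tx_flags, which is skb state the driver does not own and may not reflect this descriptor's packet, and instead consumes its own per-descriptor flag. Reduced to a standalone sketch, the capture-and-clear idiom looks like this (bnxt_tx_ts_complete and handle_ptp_tx_ts are hypothetical names, not driver functions):

/* Hypothetical reduction of the completion-side idiom: read the
 * driver-owned flag once, clear the slot so it is clean for reuse,
 * then branch only on the captured value.
 */
static void bnxt_tx_ts_complete(struct bnxt_sw_tx_bd *tx_buf)
{
        bool is_ts_pkt = tx_buf->is_ts_pkt;     /* capture before reuse */

        tx_buf->is_ts_pkt = 0;                  /* slot ready for next xmit */
        if (unlikely(is_ts_pkt))
                handle_ptp_tx_ts();             /* hypothetical: hand skb to PTP worker */
}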