}
if (xdp_flags & ENA_XDP_REDIRECT)
- xdp_do_flush_map();
+ xdp_do_flush();
return work_done;
rx_ring->stats.bytes += rx_byte_cnt;
if (xdp_redirect_frm_cnt)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_tx_frm_cnt)
enetc_update_tx_ring_tail(tx_ring);
rxq->bd.cur = bdp;
if (xdp_result & FEC_ENET_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
return pkt_received;
}
void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
{
if (xdp_res & I40E_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & I40E_XDP_TX) {
struct i40e_ring *xdp_ring =
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];
if (xdp_res & ICE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & ICE_XDP_TX) {
if (static_branch_unlikely(&ice_xdp_locking_key))
}
if (xdp_xmit & IXGBE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & IXGBE_XDP_TX) {
struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
}
if (xdp_xmit & IXGBE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & IXGBE_XDP_TX) {
struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
if (ps.xdp_redirect)
- xdp_do_flush_map();
+ xdp_do_flush();
if (ps.rx_packets)
mvneta_update_stats(pp, &ps);
}
if (xdp_ret & MVPP2_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (ps.rx_packets) {
struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
net_dim(&eth->rx_dim, dim_sample);
if (xdp_flush)
- xdp_do_flush_map();
+ xdp_do_flush();
return done;
}
mlx5e_xmit_xdp_doorbell(xdpsq);
if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
- xdp_do_flush_map();
+ xdp_do_flush();
__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
}
}
nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
if (xdp_redir)
- xdp_do_flush_map();
+ xdp_do_flush();
if (tx_ring->wr_ptr_add)
nfp_net_tx_xmit_more_flush(tx_ring);
spent = efx_process_channel(channel, budget);
- xdp_do_flush_map();
+ xdp_do_flush();
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
spent = efx_process_channel(channel, budget);
- xdp_do_flush_map();
+ xdp_do_flush();
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
u16 pkts)
{
if (xdp_res & NETSEC_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & NETSEC_XDP_TX)
netsec_xdp_ring_tx_db(priv, pkts);
* particular hardware is sharing a common queue, so the
* incoming device might change per packet.
*/
- xdp_do_flush_map();
+ xdp_do_flush();
break;
default:
bpf_warn_invalid_xdp_action(ndev, prog, act);
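
Every hunk above is the same mechanical substitution: by the time of this change, xdp_do_flush_map() survived in include/linux/filter.h only as a compatibility alias for xdp_do_flush() (roughly: #define xdp_do_flush_map xdp_do_flush), so swapping the callers changes no behavior. A minimal sketch of the end-of-poll pattern most of these drivers share is below; the mydrv_* names and MYDRV_XDP_* flags are placeholders for illustration, not taken from any driver in this patch.

#include <linux/bits.h>
#include <linux/filter.h>	/* xdp_do_flush() */

/* Hypothetical per-poll result bits; each driver above has its own
 * spelling (I40E_XDP_REDIR, ICE_XDP_TX, NETSEC_XDP_REDIR, ...).
 */
#define MYDRV_XDP_REDIR	BIT(0)
#define MYDRV_XDP_TX	BIT(1)

struct mydrv_ring;					/* placeholder ring type */
void mydrv_xdp_ring_update_tail(struct mydrv_ring *ring);

static void mydrv_finalize_xdp_rx(struct mydrv_ring *xdp_ring, u32 xdp_res)
{
	/* One flush per NAPI poll: drain the per-CPU bulk queues that
	 * xdp_do_redirect() filled for every redirected frame.
	 */
	if (xdp_res & MYDRV_XDP_REDIR)
		xdp_do_flush();

	/* One tail/doorbell update for all XDP_TX frames queued this poll. */
	if (xdp_res & MYDRV_XDP_TX)
		mydrv_xdp_ring_update_tail(xdp_ring);
}

The cpsw hunk at the end is the one outlier: because several ports share a single RX queue, it flushes per packet, before the incoming device can change, rather than once per poll, as its comment explains.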