if (!ring) {
data[i++] = 0;
data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+ data[i++] = 0;
+ data[i++] = 0;
+ data[i++] = 0;
+#endif
continue;
}
data[i + 1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
+#ifdef BP_EXTENDED_STATS
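+ /* keep this order in sync with the tx_queue_*_bp_* ethtool strings */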
+ data[i] = ring->stats.yields;
+ data[i + 1] = ring->stats.misses;
+ data[i + 2] = ring->stats.cleaned;
+ i += 3;
+#endif
}
/* populate Rx queue data */
if (!ring) {
data[i++] = 0;
data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+ data[i++] = 0;
+ data[i++] = 0;
+ data[i++] = 0;
+#endif
continue;
}
data[i + 1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
+#ifdef BP_EXTENDED_STATS
+ data[i] = ring->stats.yields;
+ data[i + 1] = ring->stats.misses;
+ data[i + 2] = ring->stats.cleaned;
+ i += 3;
+#endif
}
}
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "tx_queue_%u_bp_napi_yield", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bp_misses", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bp_cleaned", i);
+ p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < adapter->num_rx_queues; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "rx_queue_%u_bp_poll_yield", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bp_misses", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bp_cleaned", i);
+ p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
}
break;
}
#include "vf.h"
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
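+/* BP_EXTENDED_STATS exposes the extra per-ring yields/misses/cleaned counters */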
+#define BP_EXTENDED_STATS
+#endif
+
#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR)
struct ixgbevf_stats {
u64 packets;
u64 bytes;
+#ifdef BP_EXTENDED_STATS
+ u64 yields;
+ u64 misses;
+ u64 cleaned;
+#endif
};
struct ixgbevf_tx_queue_stats {
#endif /* CONFIG_NET_RX_BUSY_POLL */
};
+#ifdef CONFIG_NET_RX_BUSY_POLL
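+/* The helpers below arbitrate ownership of a q_vector between the NAPI
+ * poll routine and busy-polling sockets: a q_vector is idle, owned by
+ * NAPI, owned by a poller, or disabled, and the *_YIELD bits record that
+ * the other side tried to take it while it was busy.
+ */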
+static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
+{
+ spin_lock_init(&q_vector->lock);
+ q_vector->state = IXGBEVF_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
+{
+ bool rc = true;
+
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBEVF_QV_LOCKED) {
+ WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+ q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
+ rc = false;
+#ifdef BP_EXTENDED_STATS
+ q_vector->tx.ring->stats.yields++;
+#endif
+ } else {
+ /* we don't care if someone yielded */
+ q_vector->state = IXGBEVF_QV_STATE_NAPI;
+ }
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
+{
+ bool rc = false;
+
+ spin_lock_bh(&q_vector->lock);
+ WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
+ IXGBEVF_QV_STATE_NAPI_YIELD));
+
+ if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+ rc = true;
+ /* reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* called from ixgbevf_low_latency_poll() */
+static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
+{
+ bool rc = true;
+
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBEVF_QV_LOCKED) {
+ q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
+ rc = false;
+#ifdef BP_EXTENDED_STATS
+ q_vector->rx.ring->stats.yields++;
+#endif
+ } else {
+ /* preserve yield marks */
+ q_vector->state |= IXGBEVF_QV_STATE_POLL;
+ }
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
+{
+ bool rc = false;
+
+ spin_lock_bh(&q_vector->lock);
+ WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+
+ if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+ rc = true;
+ /* reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
+{
+ WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
+ return q_vector->state & IXGBEVF_QV_USER_PEND;
+}
+
+/* disables the q_vector; returns false if it is currently owned */
+static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
+{
+ bool rc = true;
+
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBEVF_QV_OWNED)
+ rc = false;
+ q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
/* microsecond values for various ITR rates shifted by 2 to fit itr register
* with the first 3 bits reserved 0
*/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb)
{
+#ifdef CONFIG_NET_RX_BUSY_POLL
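+ /* record the NAPI id on the skb so busy-polling sockets can find this queue */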
+ skb_mark_napi_id(skb, &q_vector->napi);
+
+ if (ixgbevf_qv_busy_polling(q_vector)) {
+ netif_receive_skb(skb);
+ /* exit early if we busy polled */
+ return;
+ }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
napi_gro_receive(&q_vector->napi, skb);
}
if (budget <= 0)
return budget;
+#ifdef CONFIG_NET_RX_BUSY_POLL
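+ /* bail out if a busy-polling socket already owns this q_vector */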
+ if (!ixgbevf_qv_lock_napi(q_vector))
+ return budget;
+#endif
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling
clean_complete &= (cleaned < per_ring_budget);
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ ixgbevf_qv_unlock_napi(q_vector);
+#endif
+
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *ring;
+ int found = 0;
+
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ return LL_FLUSH_FAILED;
+
+ if (!ixgbevf_qv_lock_poll(q_vector))
+ return LL_FLUSH_BUSY;
+
+ ixgbevf_for_each_ring(ring, q_vector->rx) {
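+ /* clean a small budget of 4 buffers per ring on each busy-poll pass */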
+ found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
+#ifdef BP_EXTENDED_STATS
+ if (found)
+ ring->stats.cleaned += found;
+ else
+ ring->stats.misses++;
+#endif
+ if (found)
+ break;
+ }
+
+ ixgbevf_qv_unlock_poll(q_vector);
+
+ return found;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
/**
* ixgbevf_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
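+ /* reset the busy-poll lock and state before enabling NAPI */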
+ ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
+#endif
napi_enable(&q_vector->napi);
}
}
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
napi_disable(&q_vector->napi);
+#ifdef CONFIG_NET_RX_BUSY_POLL
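+ /* a busy-polling socket may still own the q_vector; wait for it to let go */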
+ while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
+ pr_info("QV %d locked\n", q_idx);
+ usleep_range(1000, 20000);
+ }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
}
}
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = ixgbevf_busy_poll_recv,
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbevf_netpoll,
#endif