Revert "ixgbevf: get rid of custom busy polling code"
author     Jack Vogel <jack.vogel@oracle.com>
           Thu, 27 Jul 2017 18:44:14 +0000 (11:44 -0700)
committer  Jack Vogel <jack.vogel@oracle.com>
           Wed, 2 Aug 2017 21:39:59 +0000 (14:39 -0700)
This reverts commit 1975e69c708706b84d9462ce7c0135d33310c28a ("ixgbevf: get
rid of custom busy polling code"). It caused a performance regression in
this tree, because the net/core NAPI busy-polling support that upstream
relies on instead is not present here.

Orabug: 26494997
Signed-off-by: Jack Vogel <jack.vogel@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 08fa3030276ed712ea15bad3bad838a368d2cc8f..2c8d9c09882057277049baebc94b1d2117a25a21 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -432,6 +432,11 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+                       data[i++] = 0;
+                       data[i++] = 0;
+                       data[i++] = 0;
+#endif
                        continue;
                }
 
@@ -441,6 +446,12 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
+#ifdef BP_EXTENDED_STATS
+               data[i] = ring->stats.yields;
+               data[i + 1] = ring->stats.misses;
+               data[i + 2] = ring->stats.cleaned;
+               i += 3;
+#endif
        }
 
        /* populate Rx queue data */
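Note that the packets/bytes reads above are snapshotted inside a
u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop, which retries
until the writer-side seqcount is stable and so guarantees untorn 64-bit
values on 32-bit hosts. The restored bp_* reads land after that loop,
outside the protection; this matches the pre-revert driver, which treated
the busy-poll counters as best-effort.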
@@ -449,6 +460,11 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+                       data[i++] = 0;
+                       data[i++] = 0;
+                       data[i++] = 0;
+#endif
                        continue;
                }
 
@@ -458,6 +474,12 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
+#ifdef BP_EXTENDED_STATS
+               data[i] = ring->stats.yields;
+               data[i + 1] = ring->stats.misses;
+               data[i + 2] = ring->stats.cleaned;
+               i += 3;
+#endif
        }
 }
 
@@ -485,12 +507,28 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+                       sprintf(p, "tx_queue_%u_bp_napi_yield", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_queue_%u_bp_misses", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_queue_%u_bp_cleaned", i);
+                       p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
                }
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+                       sprintf(p, "rx_queue_%u_bp_poll_yield", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_queue_%u_bp_misses", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_queue_%u_bp_cleaned", i);
+                       p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
                }
                break;
        }
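For reference, here is a minimal userspace sketch (not part of this patch,
error handling trimmed) that dumps the restored per-queue bp_* counters. It
is the moral equivalent of `ethtool -S <iface> | grep bp_`, using only the
standard ETHTOOL_GDRVINFO/ETHTOOL_GSTRINGS/ETHTOOL_GSTATS ioctls:

/* dump_bp_stats.c - print ixgbevf busy-poll counters; illustrative only */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "eth0";
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ethtool_drvinfo drv = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strs;
	struct ethtool_stats *stats;
	struct ifreq ifr;
	unsigned int i, n;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&drv;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GDRVINFO");
		return 1;
	}
	n = drv.n_stats;

	strs = calloc(1, sizeof(*strs) + n * ETH_GSTRING_LEN);
	stats = calloc(1, sizeof(*stats) + n * sizeof(__u64));
	strs->cmd = ETHTOOL_GSTRINGS;
	strs->string_set = ETH_SS_STATS;
	strs->len = n;
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;

	ifr.ifr_data = (void *)strs;
	ioctl(fd, SIOCETHTOOL, &ifr);
	ifr.ifr_data = (void *)stats;
	ioctl(fd, SIOCETHTOOL, &ifr);

	/* names and values share one index, exactly as in
	 * ixgbevf_get_strings()/ixgbevf_get_ethtool_stats() above
	 */
	for (i = 0; i < n; i++) {
		const char *name = (char *)strs->data + i * ETH_GSTRING_LEN;
		if (strstr(name, "_bp_"))
			printf("%-32s %llu\n", name,
			       (unsigned long long)stats->data[i]);
	}
	return 0;
}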
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 581f44bbd7b3bca08dde63f1b6ec1418453e03b9..9ac851d56e93d61034fb55ae461c744be473592a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
 
 #include "vf.h"
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#define BP_EXTENDED_STATS
+#endif
+
 #define IXGBE_MAX_TXD_PWR      14
 #define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR)
 
@@ -68,6 +73,11 @@ struct ixgbevf_rx_buffer {
 struct ixgbevf_stats {
        u64 packets;
        u64 bytes;
+#ifdef BP_EXTENDED_STATS
+       u64 yields;
+       u64 misses;
+       u64 cleaned;
+#endif
 };
 
 struct ixgbevf_tx_queue_stats {
@@ -207,6 +217,109 @@ struct ixgbevf_q_vector {
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 };
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
+{
+       spin_lock_init(&q_vector->lock);
+       q_vector->state = IXGBEVF_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
+{
+       int rc = true;
+
+       spin_lock_bh(&q_vector->lock);
+       if (q_vector->state & IXGBEVF_QV_LOCKED) {
+               WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+               q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
+               rc = false;
+#ifdef BP_EXTENDED_STATS
+               q_vector->tx.ring->stats.yields++;
+#endif
+       } else {
+               /* we don't care if someone yielded */
+               q_vector->state = IXGBEVF_QV_STATE_NAPI;
+       }
+       spin_unlock_bh(&q_vector->lock);
+       return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
+{
+       int rc = false;
+
+       spin_lock_bh(&q_vector->lock);
+       WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
+                                  IXGBEVF_QV_STATE_NAPI_YIELD));
+
+       if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+               rc = true;
+       /* reset state to idle, unless QV is disabled */
+       q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+       spin_unlock_bh(&q_vector->lock);
+       return rc;
+}
+
+/* called from ixgbevf_low_latency_poll() */
+static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
+{
+       int rc = true;
+
+       spin_lock_bh(&q_vector->lock);
+       if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
+               q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
+               rc = false;
+#ifdef BP_EXTENDED_STATS
+               q_vector->rx.ring->stats.yields++;
+#endif
+       } else {
+               /* preserve yield marks */
+               q_vector->state |= IXGBEVF_QV_STATE_POLL;
+       }
+       spin_unlock_bh(&q_vector->lock);
+       return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
+{
+       int rc = false;
+
+       spin_lock_bh(&q_vector->lock);
+       WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
+
+       if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+               rc = true;
+       /* reset state to idle, unless QV is disabled */
+       q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+       spin_unlock_bh(&q_vector->lock);
+       return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
+{
+       WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
+       return q_vector->state & IXGBEVF_QV_USER_PEND;
+}
+
+/* false if QV is currently owned */
+static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
+{
+       int rc = true;
+
+       spin_lock_bh(&q_vector->lock);
+       if (q_vector->state & IXGBEVF_QV_OWNED)
+               rc = false;
+       q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
+       spin_unlock_bh(&q_vector->lock);
+       return rc;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 /* microsecond values for various ITR rates shifted by 2 to fit itr register
  * with the first 3 bits reserved 0
  */
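The helpers above implement a small ownership state machine over
q_vector->state (the field and its spinlock sit in the
CONFIG_NET_RX_BUSY_POLL block of struct ixgbevf_q_vector, just outside this
hunk's context). The flag definitions are likewise not visible here; as
they appeared in the pre-revert driver they are, approximately:

#define IXGBEVF_QV_STATE_IDLE		0
#define IXGBEVF_QV_STATE_NAPI		1  /* NAPI owns this q_vector */
#define IXGBEVF_QV_STATE_POLL		2  /* a socket poller owns it */
#define IXGBEVF_QV_STATE_DISABLED	4  /* q_vector is being torn down */
#define IXGBEVF_QV_OWNED  (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
#define IXGBEVF_QV_STATE_NAPI_YIELD	8  /* NAPI yielded this q_vector */
#define IXGBEVF_QV_STATE_POLL_YIELD	16 /* poller yielded this q_vector */
#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)

This layout explains the unlock helpers' `state &= IXGBEVF_QV_STATE_DISABLED`:
every bit except DISABLED is cleared, so a vector that was disabled while
locked stays unlockable afterwards.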
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9c81c52790ebda4b20bfa613346fbb49702284d6..db919958ecce96a7d11912a55c6a4958f8117b0e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -456,6 +456,16 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
                           struct sk_buff *skb)
 {
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       skb_mark_napi_id(skb, &q_vector->napi);
+
+       if (ixgbevf_qv_busy_polling(q_vector)) {
+               netif_receive_skb(skb);
+               /* exit early if we busy polled */
+               return;
+       }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
        napi_gro_receive(&q_vector->napi, skb);
 }
 
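The restored receive hook does two things: skb_mark_napi_id() stamps the
skb with this NAPI context's id, which the socket layer later copies into
the receiving socket so busy polling knows which queue to spin on; and a
frame that arrives while a socket is actively busy polling is handed up
via netif_receive_skb() instead of napi_gro_receive(), since GRO could sit
on the frame until the next NAPI flush and defeat the latency point of
busy polling.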
@@ -1018,6 +1028,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 
        if (budget <= 0)
                return budget;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       if (!ixgbevf_qv_lock_napi(q_vector))
+               return budget;
+#endif
 
        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling
@@ -1034,6 +1048,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
                clean_complete &= (cleaned < per_ring_budget);
        }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       ixgbevf_qv_unlock_napi(q_vector);
+#endif
+
        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
@@ -1068,6 +1086,40 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
+{
+       struct ixgbevf_q_vector *q_vector =
+                       container_of(napi, struct ixgbevf_q_vector, napi);
+       struct ixgbevf_adapter *adapter = q_vector->adapter;
+       struct ixgbevf_ring  *ring;
+       int found = 0;
+
+       if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+               return LL_FLUSH_FAILED;
+
+       if (!ixgbevf_qv_lock_poll(q_vector))
+               return LL_FLUSH_BUSY;
+
+       ixgbevf_for_each_ring(ring, q_vector->rx) {
+               found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
+#ifdef BP_EXTENDED_STATS
+               if (found)
+                       ring->stats.cleaned += found;
+               else
+                       ring->stats.misses++;
+#endif
+               if (found)
+                       break;
+       }
+
+       ixgbevf_qv_unlock_poll(q_vector);
+
+       return found;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
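ixgbevf_busy_poll_recv() deliberately polls each RX ring with a tiny
budget of 4 frames and stops at the first ring that yields anything. Its
return value feeds the core busy-poll loop: in kernels of this vintage,
<net/busy_poll.h> defines LL_FLUSH_FAILED (-1), returned here when the
device is going down, and LL_FLUSH_BUSY (-2), returned when NAPI already
owns the q_vector.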
 /**
  * ixgbevf_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -1925,6 +1977,9 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
 
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+               ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
+#endif
                napi_enable(&q_vector->napi);
        }
 }
@@ -1938,6 +1993,12 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
                napi_disable(&q_vector->napi);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+               while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
+                       pr_info("QV %d locked\n", q_idx);
+                       usleep_range(1000, 20000);
+               }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
        }
 }
 
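The busy-wait restored in ixgbevf_napi_disable_all() is the teardown half
of the locking scheme: a socket may still be inside ixgbevf_busy_poll_recv()
on another CPU, so ixgbevf_qv_disable() is retried until the q_vector is
unowned. The IXGBEVF_QV_STATE_DISABLED bit it sets is part of
IXGBEVF_QV_LOCKED, so any further lock attempt fails and the vector can be
torn down safely.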
@@ -3931,6 +3992,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
        .ndo_tx_timeout         = ixgbevf_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       .ndo_busy_poll          = ixgbevf_busy_poll_recv,
+#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbevf_netpoll,
 #endif
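The .ndo_busy_poll hook is what the core socket busy-poll path invokes once
a socket opts in. A hypothetical userspace sketch of opting in follows (the
port number and spin budget are illustrative; the net.core.busy_read sysctl
enables it globally instead):

/* busy_poll_rx.c - opt a UDP socket into busy polling; illustrative only */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>

#ifndef SO_BUSY_POLL
#define SO_BUSY_POLL 46		/* asm-generic value, for older libcs */
#endif

int main(void)
{
	int busy_us = 50;	/* spin up to ~50us in the driver per recv */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(7000),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	char buf[2048];

	/* may require CAP_NET_ADMIN on kernels of this era */
	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
		       &busy_us, sizeof(busy_us)) < 0)
		perror("SO_BUSY_POLL");

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}

	/* each blocking recv may now spin on the NIC queue before sleeping */
	for (;;)
		recv(fd, buf, sizeof(buf), 0);
}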