ixgbe/ixgbevf: use napi_complete_done()
Author:     Jesse Brandeburg <jesse.brandeburg@intel.com>
AuthorDate: Thu, 24 Sep 2015 23:35:47 +0000 (16:35 -0700)
Commit:     Chuck Anderson <chuck.anderson@oracle.com>
CommitDate: Sat, 25 Feb 2017 05:47:43 +0000 (21:47 -0800)
Orabug: 24568240

As per Eric Dumazet's previous patches:
see commit 24d2e4a50737 ("tg3: use napi_complete_done()")

Quoting verbatim:
Using napi_complete_done() instead of napi_complete() allows
us to use /sys/class/net/ethX/gro_flush_timeout

GRO layer can aggregate more packets if the flush is delayed a bit,
without having to set too big coalescing parameters that impact
latencies.
</end quote>
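
For reference, the poll-handler shape this change converges on looks
roughly like the following. This is a generic sketch, not code from this
patch: struct my_priv, my_clean_rx() and my_irq_enable() are hypothetical
driver hooks, and only the napi_* calls are real kernel API.

#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

/* Hypothetical driver hooks, stubbed out for illustration. */
static int my_clean_rx(struct my_priv *priv, int budget) { return 0; }
static void my_irq_enable(struct my_priv *priv) { }

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_clean_rx(priv, budget); /* packets cleaned */

	/* Budget exhausted: more work may remain, stay in polling mode. */
	if (work_done == budget)
		return budget;

	/* All work done: report how much, so the core can delay the GRO
	 * flush by up to gro_flush_timeout instead of flushing right away.
	 */
	napi_complete_done(napi, work_done);
	my_irq_enable(priv);
	return work_done;
}

gro_flush_timeout is specified in nanoseconds and defaults to 0, which
preserves the old flush-immediately behaviour of napi_complete().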

Tested configuration: low latency via
    ethtool -C ethx adaptive-rx off rx-usecs 10 adaptive-tx off tx-usecs 15
workload: streaming rx using netperf TCP_MAERTS

igb:
MIGRATED TCP MAERTS TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 10.0.0.1 () port 0 AF_INET : demo
...
Interim result:  941.48 10^6bits/s over 1.000 seconds ending at 1440193171.589

Alignment      Offset         Bytes    Bytes       Recvs   Bytes    Sends
Local  Remote  Local  Remote  Xfered   Per                 Per
Recv   Send    Recv   Send             Recv (avg)          Send (avg)
    8       8      0       0 1176930056  1475.36    797726   16384.00  71905

MIGRATED TCP MAERTS TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 10.0.0.1 () port 0 AF_INET : demo
...
Interim result:  941.49 10^6bits/s over 0.997 seconds ending at 1440193142.763

Alignment      Offset         Bytes    Bytes       Recvs   Bytes    Sends
Local  Remote  Local  Remote  Xfered   Per                 Per
Recv   Send    Recv   Send             Recv (avg)          Send (avg)
    8       8      0       0 1175182320  50476.00     23282   16384.00  71816

i40e:
Hard to test because the traffic is incoming so fast (24Gb/s) that GRO
always receives 87kB, even at the highest interrupt rate.

Other drivers were only compile tested.

Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
(partial backport of commit 32b3e08fff60494cd1d281a39b51583edfd2b18f for ixgbe/ixgbevf)
Signed-off-by: Brian Maly <brian.maly@oracle.com>
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cd96c184f4e1f78dd1f444a6056856a11310b64f..7147f74dd7acff8af0d474c64e4824c8afcb8867 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2846,7 +2846,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                                container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *ring;
-       int per_ring_budget;
+       int per_ring_budget, work_done = 0;
        bool clean_complete = true;
 
 #ifdef CONFIG_IXGBE_DCA
@@ -2868,9 +2868,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
        else
                per_ring_budget = budget;
 
-       ixgbe_for_each_ring(ring, q_vector->rx)
-               clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
-                                  per_ring_budget) < per_ring_budget);
+       ixgbe_for_each_ring(ring, q_vector->rx) {
+               int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
+                                                per_ring_budget);
+
+               work_done += cleaned;
+               clean_complete &= (cleaned < per_ring_budget);
+       }
 
        ixgbe_qv_unlock_napi(q_vector);
        /* If all work not completed, return budget and keep polling */
@@ -2878,7 +2882,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                return budget;
 
        /* all work done, exit the polling mode */
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
        if (adapter->rx_itr_setting & 1)
                ixgbe_set_itr(q_vector);
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index eadf70357232b3e3f07c8d6ed0d3121640f3bd45..64a710b324f8d80f79c0ea4161376ff631305788 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1018,7 +1018,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *ring;
-       int per_ring_budget;
+       int per_ring_budget, work_done = 0;
        bool clean_complete = true;
 
        ixgbevf_for_each_ring(ring, q_vector->tx)
@@ -1039,10 +1039,12 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
        else
                per_ring_budget = budget;
 
-       ixgbevf_for_each_ring(ring, q_vector->rx)
-               clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
-                                                       per_ring_budget)
-                                  < per_ring_budget);
+       ixgbevf_for_each_ring(ring, q_vector->rx) {
+               int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
+                                                  per_ring_budget);
+               work_done += cleaned;
+               clean_complete &= (cleaned < per_ring_budget);
+       }
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
        ixgbevf_qv_unlock_napi(q_vector);
@@ -1052,7 +1054,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
        if (!clean_complete)
                return budget;
        /* all work done, exit the polling mode */
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
        if (adapter->rx_itr_setting == 1)
                ixgbevf_set_itr(q_vector);
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
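
To see what napi_complete_done() will be told, the per-ring budget split
and work_done accumulation from the hunks above can be simulated in
isolation. A standalone sketch in plain C with made-up packet counts
(the pending[] values are hypothetical; the splitting rule mirrors the
one used in ixgbe_poll() when a vector serves more than one rx ring):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int budget = 64, rx_count = 3;
	int pending[] = { 30, 5, 30 };	/* packets waiting per ring */
	int per_ring_budget, work_done = 0, i;
	bool clean_complete = true;

	/* Split the budget across the rings, giving each at least 1. */
	per_ring_budget = budget / rx_count;
	if (per_ring_budget < 1)
		per_ring_budget = 1;

	for (i = 0; i < rx_count; i++) {
		/* Stand-in for ixgbe_clean_rx_irq(): a ring can clean
		 * at most its slice of the budget.
		 */
		int cleaned = pending[i] < per_ring_budget ?
			      pending[i] : per_ring_budget;

		work_done += cleaned;
		/* A ring that used its whole slice may have more work. */
		clean_complete &= (cleaned < per_ring_budget);
	}

	printf("per_ring_budget=%d work_done=%d clean_complete=%d\n",
	       per_ring_budget, work_done, clean_complete);
	return 0;
}

This prints per_ring_budget=21 work_done=47 clean_complete=0: two rings
exhausted their slice, so the real poll routine would return the budget
and keep polling; once every ring cleans less than its slice, the
accumulated work_done is what gets handed to napi_complete_done().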