qede: Update receive statistic once per NAPI
author    Mintz, Yuval <Yuval.Mintz@cavium.com>
          Fri, 7 Apr 2017 08:04:57 +0000 (11:04 +0300)
committer Chuck Anderson <chuck.anderson@oracle.com>
          Wed, 26 Jul 2017 03:46:59 +0000 (20:46 -0700)
Orabug: 25933053
Orabug: 26439680

Currently, each time an ingress packet is passed to the networking stack,
the driver increments a per-queue SW statistic.
As we want to have additional fields in the first cache line of the
Rx-queue struct, change the flow so that this statistic is updated once
per NAPI run. We will later push the statistic to a different cache line.
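
For illustration, a minimal sketch of the accumulate-then-flush pattern
this patch adopts, under hypothetical names (my_rxq, my_poll, my_have_work
and my_process_one_cqe are made up for the example, not the driver's
actual symbols):

    struct my_rxq {
    	unsigned long rcv_pkts;		/* SW statistic in the queue struct */
    };

    /* Hypothetical helpers: the first returns 1 if a packet was handed
     * to the stack for this completion, the second reports whether more
     * completions are pending.
     */
    int my_process_one_cqe(struct my_rxq *rxq);
    int my_have_work(const struct my_rxq *rxq);

    int my_poll(struct my_rxq *rxq, int budget)
    {
    	int work_done = 0, rcv_pkts = 0;

    	while (my_have_work(rxq) && work_done < budget) {
    		rcv_pkts += my_process_one_cqe(rxq);	/* count on the stack */
    		work_done++;
    	}

    	rxq->rcv_pkts += rcv_pkts;	/* one shared-struct write per NAPI run */
    	return work_done;
    }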

Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
drivers/net/ethernet/qlogic/qede/qede_fp.c

index 3c805c7c86e7e147a8be0188264d38174cd3e432..a68be1fadb6d38145653389f31467246e782cc1d 100644
@@ -561,7 +561,6 @@ static inline void qede_skb_receive(struct qede_dev *edev,
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
        napi_gro_receive(&fp->napi, skb);
-       rxq->rcv_pkts++;
 }
 
 static void qede_set_gro_params(struct qede_dev *edev,
@@ -821,9 +820,9 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
                       "Strange - TPA cont with more than a single len_list entry\n");
 }
 
-static void qede_tpa_end(struct qede_dev *edev,
-                        struct qede_fastpath *fp,
-                        struct eth_fast_path_rx_tpa_end_cqe *cqe)
+static int qede_tpa_end(struct qede_dev *edev,
+                       struct qede_fastpath *fp,
+                       struct eth_fast_path_rx_tpa_end_cqe *cqe)
 {
        struct qede_rx_queue *rxq = fp->rxq;
        struct qede_agg_info *tpa_info;
@@ -871,11 +870,12 @@ static void qede_tpa_end(struct qede_dev *edev,
 
        tpa_info->state = QEDE_AGG_STATE_NONE;
 
-       return;
+       return 1;
 err:
        tpa_info->state = QEDE_AGG_STATE_NONE;
        dev_kfree_skb_any(tpa_info->skb);
        tpa_info->skb = NULL;
+       return 0;
 }
 
 static u8 qede_check_notunn_csum(u16 flag)
@@ -1052,8 +1052,7 @@ static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
                qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
                return 0;
        case ETH_RX_CQE_TYPE_TPA_END:
-               qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
-               return 1;
+               return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
        default:
                return 0;
        }
@@ -1158,8 +1157,8 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
        struct qede_rx_queue *rxq = fp->rxq;
        struct qede_dev *edev = fp->edev;
+       int work_done = 0, rcv_pkts = 0;
        u16 hw_comp_cons, sw_comp_cons;
-       int work_done = 0;
 
        hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
        sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -1173,12 +1172,14 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 
        /* Loop to complete all indicated BDs */
        while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
-               qede_rx_process_cqe(edev, fp, rxq);
+               rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
                qed_chain_recycle_consumed(&rxq->rx_comp_ring);
                sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
                work_done++;
        }
 
+       rxq->rcv_pkts += rcv_pkts;
+
        /* Allocate replacement buffers */
        while (rxq->num_rx_buffers - rxq->filled_buffers)
                if (qede_alloc_rx_buffer(rxq, false))
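
As the commit message notes, the point of removing the per-packet write is
cache-line layout: a later change is expected to move the statistic off the
hot first cache line of the Rx-queue struct. A hedged sketch of that kind of
layout, using the kernel's ____cacheline_aligned annotation from
<linux/cache.h>; the struct and field names here are made up, not the
driver's actual definitions:

    #include <linux/cache.h>
    #include <linux/types.h>

    struct my_rx_queue {
    	/* Hot fields read in the fast path stay on the first cache line */
    	__le16 *hw_cons_ptr;
    	u16 sw_rx_cons;
    	u16 sw_rx_prod;

    	/* Write-mostly SW statistics pushed onto their own cache line */
    	u64 rcv_pkts ____cacheline_aligned;
    };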