u8 flags;
 };
 
+struct enic_wq_stats {
+       u64 packets;            /* pkts queued for Tx */
+       u64 stopped;            /* Tx ring almost full, queue stopped */
+       u64 wake;               /* Tx ring no longer full, queue woken up */
+       u64 tso;                /* non-encap tso pkt */
+       u64 encap_tso;          /* encap tso pkt */
+       u64 encap_csum;         /* encap HW csum */
+       u64 csum_partial;       /* skb->ip_summed = CHECKSUM_PARTIAL */
+       u64 csum_none;          /* HW csum not required */
+       u64 bytes;              /* bytes queued for Tx */
+       u64 add_vlan;           /* HW adds vlan tag */
+       u64 cq_work;            /* Tx completions processed */
+       u64 cq_bytes;           /* Tx bytes processed */
+       u64 null_pkt;           /* skb length <= 0 */
+       u64 skb_linear_fail;    /* linearize failures */
+       u64 desc_full_awake;    /* TX ring full while queue awake */
+};
+
+struct enic_rq_stats {
+       u64 packets;                    /* pkts received */
+       u64 bytes;                      /* bytes received */
+       u64 l4_rss_hash;                /* hashed on l4 */
+       u64 l3_rss_hash;                /* hashed on l3 */
+       u64 csum_unnecessary;           /* HW verified csum */
+       u64 csum_unnecessary_encap;     /* HW verified csum on encap packet */
+       u64 vlan_stripped;              /* HW stripped vlan */
+       u64 napi_complete;              /* napi complete, intr re-enabled */
+       u64 napi_repoll;                /* napi poll again */
+       u64 bad_fcs;                    /* bad pkts */
+       u64 pkt_truncated;              /* truncated pkts */
+       u64 no_skb;                     /* out of skbs */
+       u64 desc_skip;                  /* Rx pkt went into later buffer */
+};
+
 /* Per-instance private data structure */
 struct enic {
        struct net_device *netdev;
        /* work queue cache line section */
        ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
        spinlock_t wq_lock[ENIC_WQ_MAX];
+       struct enic_wq_stats wq_stats[ENIC_WQ_MAX];
        unsigned int wq_count;
        u16 loop_enable;
        u16 loop_tag;
 
        /* receive queue cache line section */
        ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
+       struct enic_rq_stats rq_stats[ENIC_RQ_MAX];
        unsigned int rq_count;
        struct vxlan_offload vxlan;
-       u64 rq_truncated_pkts;
-       u64 rq_bad_fcs;
        struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
 
        /* interrupt resource cache line section */
 
 static void enic_wq_free_buf(struct vnic_wq *wq,
        struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
 {
+       struct enic *enic = vnic_dev_priv(wq->vdev);
+
+       enic->wq_stats[wq->index].cq_work++;
+       enic->wq_stats[wq->index].cq_bytes += buf->len;
        enic_free_wq_buf(wq, buf);
 }
 
 
        if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
            vnic_wq_desc_avail(&enic->wq[q_number]) >=
-           (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
+           (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
                netif_wake_subqueue(enic->netdev, q_number);
+               enic->wq_stats[q_number].wake++;
+       }
 
        spin_unlock(&enic->wq_lock[q_number]);
 
        if (!eop)
                err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 
+       /* The enic_queue_wq_desc() above does not do HW checksum */
+       enic->wq_stats[wq->index].csum_none++;
+       enic->wq_stats[wq->index].packets++;
+       enic->wq_stats[wq->index].bytes += skb->len;
+
        return err;
 }
 
        if (!eop)
                err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 
+       enic->wq_stats[wq->index].csum_partial++;
+       enic->wq_stats[wq->index].packets++;
+       enic->wq_stats[wq->index].bytes += skb->len;
+
        return err;
 }
 
        unsigned int offset = 0;
        unsigned int hdr_len;
        dma_addr_t dma_addr;
+       unsigned int pkts;
        unsigned int len;
        skb_frag_t *frag;
 
        if (skb->encapsulation) {
                hdr_len = skb_inner_tcp_all_headers(skb);
                enic_preload_tcp_csum_encap(skb);
+               enic->wq_stats[wq->index].encap_tso++;
        } else {
                hdr_len = skb_tcp_all_headers(skb);
                enic_preload_tcp_csum(skb);
+               enic->wq_stats[wq->index].tso++;
        }
 
        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
        }
 
        if (eop)
-               return 0;
+               goto tso_out_stats;
 
        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
         * for additional data fragments
                }
        }
 
+tso_out_stats:
+       /* Calculate how many packets TSO will send for this skb */
+       len = skb->len - hdr_len;
+       pkts = len / mss;
+       if ((len % mss) > 0)
+               pkts++;
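+       /* Each TSO segment re-sends the protocol headers, so the wire byte
+        * count is the payload plus one hdr_len per segment.
+        */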
+       enic->wq_stats[wq->index].packets += pkts;
+       enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len));
+
        return 0;
 }
 
        if (!eop)
                err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 
+       enic->wq_stats[wq->index].encap_csum++;
+       enic->wq_stats[wq->index].packets++;
+       enic->wq_stats[wq->index].bytes += skb->len;
+
        return err;
 }
 
                /* VLAN tag from trunking driver */
                vlan_tag_insert = 1;
                vlan_tag = skb_vlan_tag_get(skb);
+               enic->wq_stats[wq->index].add_vlan++;
        } else if (enic->loop_enable) {
                vlan_tag = enic->loop_tag;
                loopback = 1;
        else if (skb->encapsulation)
                err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
                                              vlan_tag, loopback);
-       else if (skb->ip_summed == CHECKSUM_PARTIAL)
+       else if (skb->ip_summed == CHECKSUM_PARTIAL)
                err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
                                                vlan_tag, loopback);
        else
        unsigned int txq_map;
        struct netdev_queue *txq;
 
+       txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+       wq = &enic->wq[txq_map];
+
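+       /* Resolve the wq before the zero-length check below so that an early
+        * drop is still counted against the right queue's stats.
+        */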
        if (skb->len <= 0) {
                dev_kfree_skb_any(skb);
+               enic->wq_stats[wq->index].null_pkt++;
                return NETDEV_TX_OK;
        }
 
-       txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
-       wq = &enic->wq[txq_map];
        txq = netdev_get_tx_queue(netdev, txq_map);
 
        /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
            skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
            skb_linearize(skb)) {
                dev_kfree_skb_any(skb);
+               enic->wq_stats[wq->index].skb_linear_fail++;
                return NETDEV_TX_OK;
        }
 
                /* This is a hard error, log it */
                netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
                spin_unlock(&enic->wq_lock[txq_map]);
+               enic->wq_stats[wq->index].desc_full_awake++;
                return NETDEV_TX_BUSY;
        }
 
        if (enic_queue_wq_skb(enic, wq, skb))
                goto error;
 
-       if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
+       if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
                netif_tx_stop_queue(txq);
+               enic->wq_stats[wq->index].stopped++;
+       }
        skb_tx_timestamp(skb);
        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                vnic_wq_doorbell(wq);
 {
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *stats;
+       u64 pkt_truncated = 0;
+       u64 bad_fcs = 0;
        int err;
+       int i;
 
        err = enic_dev_stats_dump(enic, &stats);
        /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
        net_stats->rx_bytes = stats->rx.rx_bytes_ok;
        net_stats->rx_errors = stats->rx.rx_errors;
        net_stats->multicast = stats->rx.rx_multicast_frames_ok;
-       net_stats->rx_over_errors = enic->rq_truncated_pkts;
-       net_stats->rx_crc_errors = enic->rq_bad_fcs;
+
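+       /* Aggregate the per-RQ error counters; stop at the first RQ that was
+        * never configured.
+        */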
+       for (i = 0; i < ENIC_RQ_MAX; i++) {
+               struct enic_rq_stats *rqs = &enic->rq_stats[i];
+
+               if (!enic->rq[i].ctrl)
+                       break;
+               pkt_truncated += rqs->pkt_truncated;
+               bad_fcs += rqs->bad_fcs;
+       }
+       net_stats->rx_over_errors = pkt_truncated;
+       net_stats->rx_crc_errors = bad_fcs;
        net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
 }
 
                return 0;
        }
        skb = netdev_alloc_skb_ip_align(netdev, len);
-       if (!skb)
+       if (!skb) {
+               enic->rq_stats[rq->index].no_skb++;
                return -ENOMEM;
+       }
 
        dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
                                  DMA_FROM_DEVICE);
        struct net_device *netdev = enic->netdev;
        struct sk_buff *skb;
        struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+       struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index];
 
        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
        u32 rss_hash;
        bool outer_csum_ok = true, encap = false;
 
-       if (skipped)
+       rqstats->packets++;
+       if (skipped) {
+               rqstats->desc_skip++;
                return;
+       }
 
        skb = buf->os_buf;
 
 
                if (!fcs_ok) {
                        if (bytes_written > 0)
-                               enic->rq_bad_fcs++;
+                               rqstats->bad_fcs++;
                        else if (bytes_written == 0)
-                               enic->rq_truncated_pkts++;
+                               rqstats->pkt_truncated++;
                }
 
                dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
 
                /* Good receive
                 */
-
+               rqstats->bytes += bytes_written;
                if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
                        buf->os_buf = NULL;
                        dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
                        case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
                        case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
                                skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+                               rqstats->l4_rss_hash++;
                                break;
                        case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
                        case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
                        case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
                                skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+                               rqstats->l3_rss_hash++;
                                break;
                        }
                }
                    (ipv4_csum_ok || ipv6)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->csum_level = encap;
+                       if (encap)
+                               rqstats->csum_unnecessary_encap++;
+                       else
+                               rqstats->csum_unnecessary++;
                }
 
-               if (vlan_stripped)
+               if (vlan_stripped) {
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
-
+                       rqstats->vlan_stripped++;
+               }
                skb_mark_napi_id(skb, &enic->napi[rq->index]);
                if (!(netdev->features & NETIF_F_GRO))
                        netif_receive_skb(skb);
 
                /* Buffer overflow
                 */
-
+               rqstats->pkt_truncated++;
                dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_set_int_moderation(enic, &enic->rq[0]);
                vnic_intr_unmask(&enic->intr[intr]);
+               enic->rq_stats[0].napi_complete++;
+       } else {
+               enic->rq_stats[0].napi_repoll++;
        }
 
        return rq_work_done;
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_set_int_moderation(enic, &enic->rq[rq]);
                vnic_intr_unmask(&enic->intr[intr]);
+               enic->rq_stats[rq].napi_complete++;
+       } else {
+               enic->rq_stats[rq].napi_repoll++;
        }
 
        return work_done;