        return stats.packets;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 {
        struct sk_buff *skb;
        unsigned int len;
                bytes += skb->len;
                packets++;
 
-               dev_consume_skb_any(skb);
+               napi_consume_skb(skb, in_napi);
        }
 
        /* Avoid overhead when no packets have been processed
                return;
 
        if (__netif_tx_trylock(txq)) {
-               free_old_xmit_skbs(sq);
+               free_old_xmit_skbs(sq, true);
                __netif_tx_unlock(txq);
        }
 
        struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
 
        __netif_tx_lock(txq, raw_smp_processor_id());
-       free_old_xmit_skbs(sq);
+       free_old_xmit_skbs(sq, true);
        __netif_tx_unlock(txq);
 
        virtqueue_napi_complete(napi, sq->vq, 0);
        bool use_napi = sq->napi.weight;
 
        /* Free up any pending old buffers before queueing new ones. */
-       free_old_xmit_skbs(sq);
+       free_old_xmit_skbs(sq, false);
 
        if (use_napi && kick)
                virtqueue_enable_cb_delayed(sq->vq);
                if (!use_napi &&
                    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                        /* More just got used, free them then recheck. */
-                       free_old_xmit_skbs(sq);
+                       free_old_xmit_skbs(sq, false);
                        if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
                                netif_start_subqueue(dev, qnum);
                                virtqueue_disable_cb(sq->vq);