virtnet_rq_free_buf(vi, rq, buf);
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+static void free_old_xmit(struct send_queue *sq, bool in_napi)
 {
        struct virtnet_sq_free_stats stats = {0};
 
                                virtqueue_napi_schedule(&sq->napi, sq->vq);
                } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                        /* More just got used, free them then recheck. */
-                       free_old_xmit_skbs(sq, false);
+                       free_old_xmit(sq, false);
                        if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
                                netif_start_subqueue(dev, qnum);
                                virtqueue_disable_cb(sq->vq);
 
                do {
                        virtqueue_disable_cb(sq->vq);
-                       free_old_xmit_skbs(sq, true);
+                       free_old_xmit(sq, true);
                } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
 
                if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
        txq = netdev_get_tx_queue(vi->dev, index);
        __netif_tx_lock(txq, raw_smp_processor_id());
        virtqueue_disable_cb(sq->vq);
-       free_old_xmit_skbs(sq, true);
+       free_old_xmit(sq, true);
 
        if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
                netif_tx_wake_queue(txq);
                if (use_napi)
                        virtqueue_disable_cb(sq->vq);
 
-               free_old_xmit_skbs(sq, false);
+               free_old_xmit(sq, false);
 
        } while (use_napi && kick &&
               unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
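
For context on the call sites touched above: each caller brackets the reap
with virtqueue_disable_cb() / virtqueue_enable_cb_delayed() so that
completions landing in the race window are not lost, retrying the reap
whenever virtqueue_enable_cb_delayed() reports more work arrived. The
userspace sketch below models only that retry shape; mock_vq and its
helpers are invented stand-ins for illustration, not the virtio API, and
the real free_old_xmit() does more (per-queue stats, skb/xdp frame
freeing) than the simple reclaim shown here.

/* Minimal mock of the disable-cb / reap / re-enable-cb retry loop used
 * around free_old_xmit(). Everything here is a hypothetical stand-in. */
#include <stdbool.h>
#include <stdio.h>

struct mock_vq {
	int used;        /* buffers the device has finished with */
	int num_free;    /* free descriptor slots */
	bool cb_enabled; /* completion callback (interrupt) state */
};

/* Stand-in for virtqueue_disable_cb(): suppress completion callbacks. */
static void vq_disable_cb(struct mock_vq *vq)
{
	vq->cb_enabled = false;
}

/* Stand-in for virtqueue_enable_cb_delayed(): re-arm callbacks and
 * report whether the queue stayed idle; returning false means more
 * buffers completed in the window, matching the kernel helper's
 * "while (unlikely(!...))" retry idiom. */
static bool vq_enable_cb_delayed(struct mock_vq *vq)
{
	vq->cb_enabled = true;
	return vq->used == 0;
}

/* Stand-in for free_old_xmit(): reclaim all completed tx buffers. */
static void reap_used(struct mock_vq *vq)
{
	vq->num_free += vq->used;
	vq->used = 0;
}

int main(void)
{
	struct mock_vq vq = { .used = 3, .num_free = 5 };

	/* Same loop shape as virtnet_poll_cleantx() in the hunk above. */
	do {
		vq_disable_cb(&vq);
		reap_used(&vq);
		/* Simulate the device completing one more buffer inside
		 * the race window, forcing a second pass. */
		if (vq.num_free == 8)
			vq.used = 1;
	} while (!vq_enable_cb_delayed(&vq));

	printf("free slots: %d, callbacks re-armed: %d\n",
	       vq.num_free, vq.cb_enabled);
	return 0;
}

The 2 + MAX_SKB_FRAGS checks that follow each reap gate restarting the tx
queue on having descriptors for a worst-case skb: the virtio header and
the linear part, plus up to MAX_SKB_FRAGS fragments.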