        };
 };
 
-static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 
 static bool is_xdp_frame(void *ptr)
        return p;
 }
 
+static void virtnet_rq_free_buf(struct virtnet_info *vi,
+                               struct receive_queue *rq, void *buf)
+{
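+       /* Free an rx buffer without touching its dma mapping: mergeable
+        * and small buffers are plain page references, big-packet chains
+        * go back to the receive queue's page list.
+        */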
+       if (vi->mergeable_rx_bufs)
+               put_page(virt_to_head_page(buf));
+       else if (vi->big_packets)
+               give_pages(rq, buf);
+       else
+               put_page(virt_to_head_page(buf));
+}
+
 static void enable_delayed_refill(struct virtnet_info *vi)
 {
        spin_lock_bh(&vi->refill_lock);
        return buf;
 }
 
-static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
-{
-       void *buf;
-
-       buf = virtqueue_detach_unused_buf(rq->vq);
-       if (buf && rq->do_dma)
-               virtnet_rq_unmap(rq, buf, 0);
-
-       return buf;
-}
-
 static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
 {
        struct virtnet_rq_dma *dma;
        }
 }
 
+static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
+{
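+       /* Unmap the buffer if it was premapped by the driver, then free
+        * it; used to recycle every unused rx buffer when an rx queue is
+        * resized or drained.
+        */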
+       struct virtnet_info *vi = vq->vdev->priv;
+       struct receive_queue *rq;
+       int i = vq2rxq(vq);
+
+       rq = &vi->rq[i];
+
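+       /* rq->do_dma is set when rx buffers are dma-mapped by the driver
+        * itself rather than by the virtio core.
+        */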
+       if (rq->do_dma)
+               virtnet_rq_unmap(rq, buf, 0);
+
+       virtnet_rq_free_buf(vi, rq, buf);
+}
+
 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 {
        unsigned int len;
        if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                DEV_STATS_INC(dev, rx_length_errors);
-               virtnet_rq_free_unused_buf(rq->vq, buf);
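+               /* the dma unmap, if any, already happened when the buffer
+                * was dequeued, so a plain free is enough here
+                */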
+               virtnet_rq_free_buf(vi, rq, buf);
                return;
        }
 
        if (running)
                napi_disable(&rq->napi);
 
-       err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
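+       /* virtqueue_resize() recycles each unused buffer through this
+        * callback, which therefore has to unmap premapped buffers too
+        */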
+       err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
        if (err)
                netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
 
                xdp_return_frame(ptr_to_xdp(buf));
 }
 
-static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
-{
-       struct virtnet_info *vi = vq->vdev->priv;
-       int i = vq2rxq(vq);
-
-       if (vi->mergeable_rx_bufs)
-               put_page(virt_to_head_page(buf));
-       else if (vi->big_packets)
-               give_pages(&vi->rq[i], buf);
-       else
-               put_page(virt_to_head_page(buf));
-}
-
 static void free_unused_bufs(struct virtnet_info *vi)
 {
        void *buf;
        }
 
        for (i = 0; i < vi->max_queue_pairs; i++) {
-               struct receive_queue *rq = &vi->rq[i];
+               struct virtqueue *vq = vi->rq[i].vq;
 
-               while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
-                       virtnet_rq_free_unused_buf(rq->vq, buf);
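+               /* unmap (if premapped) and free every buffer still left
+                * in the rx ring
+                */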
+               while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+                       virtnet_rq_unmap_free_buf(vq, buf);
                cond_resched();
        }
 }