return NULL;
 }
 
+/* TODO: build xdp in big mode */
+static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
+                                     struct virtnet_info *vi,
+                                     struct receive_queue *rq,
+                                     struct xdp_buff *xdp,
+                                     void *buf,
+                                     unsigned int len,
+                                     unsigned int frame_sz,
+                                     u16 *num_buf,
+                                     unsigned int *xdp_frags_truesize,
+                                     struct virtnet_rq_stats *stats)
+{
+       struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+       unsigned int headroom, tailroom, room;
+       unsigned int truesize, cur_frag_size;
+       struct skb_shared_info *shinfo;
+       unsigned int xdp_frags_truesz = 0;
+       struct page *page;
+       skb_frag_t *frag;
+       int offset;
+       void *ctx;
+
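+       /* Map the first buffer into the xdp_buff: data_hard_start points
+        * at the reserved XDP headroom, and the virtio-net header stays
+        * inside that headroom so the XDP program only sees packet data.
+        */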
+       xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
+       xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
+                        VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
+
+       if (*num_buf > 1) {
+               /* If we want to build a multi-buffer xdp, we need
+                * to set the XDP_FLAGS_HAS_FRAGS bit in the flags
+                * of the xdp_buff.
+                */
+               if (!xdp_buff_has_frags(xdp))
+                       xdp_buff_set_frags_flag(xdp);
+
+               shinfo = xdp_get_shared_info_from_buff(xdp);
+               shinfo->nr_frags = 0;
+               shinfo->xdp_frags_size = 0;
+       }
+
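+       /* Each of the remaining buffers becomes one frag, and the
+        * shared info can hold at most MAX_SKB_FRAGS frags.
+        */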
+       if ((*num_buf - 1) > MAX_SKB_FRAGS)
+               return -EINVAL;
+
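+       /* Consume the rest of the buffers belonging to this packet
+        * from the virtqueue; *num_buf drops as each one is taken.
+        */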
+       while ((--*num_buf) >= 1) {
+               buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+               if (unlikely(!buf)) {
+                       pr_debug("%s: rx error: %d buffers out of %d missing\n",
+                                dev->name, *num_buf,
+                                virtio16_to_cpu(vi->vdev, hdr->num_buffers));
+                       dev->stats.rx_length_errors++;
+                       return -EINVAL;
+               }
+
+               stats->bytes += len;
+               page = virt_to_head_page(buf);
+               offset = buf - page_address(page);
+
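+               /* The ctx encodes the buffer's truesize and headroom;
+                * "room" is the part of truesize reserved for headroom
+                * plus the skb_shared_info tailroom, so it never holds
+                * packet data.
+                */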
+               truesize = mergeable_ctx_to_truesize(ctx);
+               headroom = mergeable_ctx_to_headroom(ctx);
+               tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+               room = SKB_DATA_ALIGN(headroom + tailroom);
+
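+               /* The received length must fit in the usable part of
+                * the buffer, and the XDP core expects each frag's
+                * truesize to stay within PAGE_SIZE.
+                */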
+               cur_frag_size = truesize;
+               xdp_frags_truesz += cur_frag_size;
+               if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
+                       put_page(page);
+                       pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+                                dev->name, len, (unsigned long)(truesize - room));
+                       dev->stats.rx_length_errors++;
+                       return -EINVAL;
+               }
+
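+               /* Attach this buffer as a frag of the xdp_buff's
+                * shared info and flag the xdp_buff if the page came
+                * from the pfmemalloc reserves.
+                */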
+               frag = &shinfo->frags[shinfo->nr_frags++];
+               __skb_frag_set_page(frag, page);
+               skb_frag_off_set(frag, offset);
+               skb_frag_size_set(frag, len);
+               if (page_is_pfmemalloc(page))
+                       xdp_buff_set_frag_pfmemalloc(xdp);
+
+               shinfo->xdp_frags_size += len;
+       }
+
+       *xdp_frags_truesize = xdp_frags_truesz;
+       return 0;
+}
+
 static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         struct virtnet_info *vi,
                                         struct receive_queue *rq,
        unsigned int truesize = mergeable_ctx_to_truesize(ctx);
        unsigned int headroom = mergeable_ctx_to_headroom(ctx);
        unsigned int metasize = 0;
+       unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+       unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
        unsigned int frame_sz;
        int err;
 
        head_skb = NULL;
        stats->bytes += len - vi->hdr_len;
 
-       if (unlikely(len > truesize)) {
+       if (unlikely(len > truesize - room)) {
                pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
-                        dev->name, len, (unsigned long)ctx);
+                        dev->name, len, (unsigned long)(truesize - room));
                dev->stats.rx_length_errors++;
                goto err_skb;
        }
                if (unlikely(hdr->hdr.gso_type))
                        goto err_xdp;
 
-               /* Buffers with headroom use PAGE_SIZE as alloc size,
-                * see add_recvbuf_mergeable() + get_mergeable_buf_len()
+               /* The XDP core currently assumes the frag size is PAGE_SIZE,
+                * but buffers with headroom may add a hole to the truesize,
+                * which can make their length exceed PAGE_SIZE. So we disable
+                * the hole mechanism for xdp. See add_recvbuf_mergeable().
                 */
-               frame_sz = headroom ? PAGE_SIZE : truesize;
+               frame_sz = truesize;
 
                /* This happens when rx buffer size is underestimated
                 * or headroom is not enough because of the buffer
                page = virt_to_head_page(buf);
 
                truesize = mergeable_ctx_to_truesize(ctx);
-               if (unlikely(len > truesize)) {
+               headroom = mergeable_ctx_to_headroom(ctx);
+               tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+               room = SKB_DATA_ALIGN(headroom + tailroom);
+               if (unlikely(len > truesize - room)) {
                        pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
-                                dev->name, len, (unsigned long)ctx);
+                                dev->name, len, (unsigned long)(truesize - room));
                        dev->stats.rx_length_errors++;
                        goto err_skb;
                }
        }
 
        sg_init_one(rq->sg, buf, len);
-       ctx = mergeable_len_to_ctx(len, headroom);
+       ctx = mergeable_len_to_ctx(len + room, headroom);
        err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
        if (err < 0)
                put_page(virt_to_head_page(buf));