__be16 h_vlan_TCI;
 };
 
-bool tun_is_xdp_buff(void *ptr)
+bool tun_is_xdp_frame(void *ptr)
 {
        return (unsigned long)ptr & TUN_XDP_FLAG;
 }
-EXPORT_SYMBOL(tun_is_xdp_buff);
+EXPORT_SYMBOL(tun_is_xdp_frame);
 
 void *tun_xdp_to_ptr(void *ptr)
 {
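The helpers renamed above (together with tun_xdp_to_ptr()/tun_ptr_to_xdp()) implement low-bit pointer tagging: sk_buff and xdp_frame pointers are at least word-aligned, so bit 0 is free to record which of the two types a tx_ring entry holds. A minimal user-space sketch of the same scheme, assuming the kernel's TUN_XDP_FLAG value of 0x1UL (the struct definitions are stand-ins):

    #include <assert.h>
    #include <stdio.h>

    /* Stand-ins for the kernel types; only their alignment matters. */
    struct xdp_frame { int dummy; };
    struct sk_buff { int dummy; };

    #define TUN_XDP_FLAG 0x1UL

    static int is_xdp_frame(void *ptr)
    {
            return (unsigned long)ptr & TUN_XDP_FLAG;
    }

    static void *xdp_to_ptr(struct xdp_frame *xdpf)
    {
            /* Producer side: set the tag bit before queueing. */
            return (void *)((unsigned long)xdpf | TUN_XDP_FLAG);
    }

    static struct xdp_frame *ptr_to_xdp(void *ptr)
    {
            /* Consumer side: clear the tag bit to recover the pointer. */
            return (struct xdp_frame *)((unsigned long)ptr & ~TUN_XDP_FLAG);
    }

    int main(void)
    {
            struct xdp_frame frame;
            struct sk_buff skb;
            void *entries[2] = { xdp_to_ptr(&frame), &skb };

            for (int i = 0; i < 2; i++) {
                    if (is_xdp_frame(entries[i]))
                            assert(ptr_to_xdp(entries[i]) == &frame);
                    else
                            assert((struct sk_buff *)entries[i] == &skb);
            }
            printf("tag round-trip ok\n");
            return 0;
    }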
@@ ... @@ static void tun_ptr_free(void *ptr)
 {
        if (!ptr)
                return;
-       if (tun_is_xdp_buff(ptr)) {
-               struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+       if (tun_is_xdp_frame(ptr)) {
+               struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
 
-               put_page(virt_to_head_page(xdp->data));
+               xdp_return_frame(xdpf->data, &xdpf->mem);
        } else {
                __skb_array_destroy_skb(ptr);
        }
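The old path open-coded put_page(virt_to_head_page(xdp->data)), which hard-wired the assumption that every XDP buffer is page-backed. xdp_return_frame() instead picks the free routine from the xdp_mem_info carried in the frame itself. A hedged sketch of that dispatch idea (types and cases simplified; not the kernel implementation):

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for enum xdp_mem_type / struct xdp_mem_info. */
    enum mem_type { MEM_PAGE_ORDER0, MEM_PAGE_SHARED };
    struct mem_info { enum mem_type type; };

    /* Illustrative only: the consumer no longer hard-codes put_page();
     * the mem info travelling with the frame selects the return path. */
    static void return_frame(void *data, const struct mem_info *mem)
    {
            switch (mem->type) {
            case MEM_PAGE_ORDER0:
                    printf("would put_page() the page backing %p\n", data);
                    break;
            case MEM_PAGE_SHARED:
                    printf("would page_frag_free() fragment %p\n", data);
                    break;
            }
    }

    int main(void)
    {
            struct mem_info mem = { .type = MEM_PAGE_SHARED };
            void *data = malloc(64);

            return_frame(data, &mem);
            free(data);
            return 0;
    }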
@@ ... @@
 static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
 {
        struct tun_struct *tun = netdev_priv(dev);
-       struct xdp_buff *buff = xdp->data_hard_start;
-       int headroom = xdp->data - xdp->data_hard_start;
+       struct xdp_frame *frame;
        struct tun_file *tfile;
        u32 numqueues;
        int ret = 0;
 
-       /* Assure headroom is available and buff is properly aligned */
-       if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
-               return -ENOSPC;
-
-       *buff = *xdp;
+       frame = convert_to_xdp_frame(xdp);
+       if (unlikely(!frame))
+               return -EOVERFLOW;
 
        rcu_read_lock();
 
@@ ... @@ static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
        /* Encode the XDP flag into the lowest bit so the consumer can
         * distinguish an XDP frame from an sk_buff.
         */
-       if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
+       if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(frame))) {
                this_cpu_inc(tun->pcpu_stats->tx_dropped);
                ret = -ENOSPC;
        }
 
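convert_to_xdp_frame() is what lets the driver drop its open-coded headroom check and the *buff = *xdp copy: the xdp_frame metadata is written into the packet's own headroom at data_hard_start, and the conversion fails (now surfaced as -EOVERFLOW) when that headroom is too small. A rough user-space sketch of the layout, with an illustrative field subset:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel structs (illustrative field subset). */
    struct xdp_buff {
            void *data;
            void *data_end;
            void *data_hard_start;
    };

    struct xdp_frame {
            void *data;
            uint16_t len;
            uint16_t headroom;
    };

    /* Sketch of the conversion: store the frame struct at the start of
     * the buffer's own headroom, so no separate allocation is needed. */
    static struct xdp_frame *to_xdp_frame(struct xdp_buff *xdp)
    {
            struct xdp_frame *frame = xdp->data_hard_start;
            size_t headroom = (char *)xdp->data - (char *)xdp->data_hard_start;

            if (headroom < sizeof(*frame))
                    return NULL;    /* caller reports -EOVERFLOW */

            frame->data = xdp->data;
            frame->len = (char *)xdp->data_end - (char *)xdp->data;
            frame->headroom = headroom - sizeof(*frame);
            return frame;
    }

    int main(void)
    {
            char page[256];
            struct xdp_buff xdp = {
                    .data_hard_start = page,
                    .data = page + 64,              /* 64B headroom */
                    .data_end = page + 64 + 100,    /* 100B payload */
            };
            struct xdp_frame *frame = to_xdp_frame(&xdp);

            printf("len=%u headroom=%u\n",
                   (unsigned)frame->len, (unsigned)frame->headroom);
            return 0;
    }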
@@ ... @@
 static ssize_t tun_put_user_xdp(struct tun_struct *tun,
                                struct tun_file *tfile,
-                               struct xdp_buff *xdp,
+                               struct xdp_frame *xdp_frame,
                                struct iov_iter *iter)
 {
        int vnet_hdr_sz = 0;
-       size_t size = xdp->data_end - xdp->data;
+       size_t size = xdp_frame->len;
        struct tun_pcpu_stats *stats;
        size_t ret;
 
@@ ... @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
                iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
        }
 
-       ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
+       ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
 
        stats = get_cpu_ptr(tun->pcpu_stats);
        u64_stats_update_begin(&stats->syncp);
@@ ... @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
                        return err;
        }
 
-       if (tun_is_xdp_buff(ptr)) {
-               struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+       if (tun_is_xdp_frame(ptr)) {
+               struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
 
-               ret = tun_put_user_xdp(tun, tfile, xdp, to);
-               put_page(virt_to_head_page(xdp->data));
+               ret = tun_put_user_xdp(tun, tfile, xdpf, to);
+               xdp_return_frame(xdpf->data, &xdpf->mem);
        } else {
                struct sk_buff *skb = ptr;
 
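Once frames are queued in this form, the consumer side needs nothing but the tag bit and the precomputed xdp_frame->len; the buffer geometry (data_end - data) is no longer consulted, as the tun_ptr_peek_len() hunk below shows. A small stand-alone sketch of that peek pattern (XDP_TAG and both structs are stand-ins):

    #include <stdio.h>

    #define XDP_TAG 0x1UL

    struct xdp_frame { unsigned int len; };
    struct sk_buff { unsigned int data_len; };

    /* Report the length of a queued entry without dequeuing it,
     * dispatching on the tag bit like tun_ptr_peek_len() does. */
    static int peek_len(void *ptr)
    {
            if (!ptr)
                    return 0;
            if ((unsigned long)ptr & XDP_TAG) {
                    struct xdp_frame *xdpf =
                            (void *)((unsigned long)ptr & ~XDP_TAG);
                    return xdpf->len;       /* precomputed at convert time */
            }
            return ((struct sk_buff *)ptr)->data_len;
    }

    int main(void)
    {
            struct xdp_frame f = { .len = 1500 };
            struct sk_buff s = { .data_len = 64 };
            void *tagged = (void *)((unsigned long)&f | XDP_TAG);

            printf("xdp entry: %d bytes\n", peek_len(tagged));
            printf("skb entry: %d bytes\n", peek_len(&s));
            return 0;
    }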
@@ ... @@
 static int tun_ptr_peek_len(void *ptr)
 {
        if (likely(ptr)) {
-               if (tun_is_xdp_buff(ptr)) {
-                       struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+               if (tun_is_xdp_frame(ptr)) {
+                       struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
 
-                       return xdp->data_end - xdp->data;
+                       return xdpf->len;
                }
                return __skb_array_len_with_tag(ptr);
        } else {