VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GUEST_USO4,
        VIRTIO_NET_F_GUEST_USO6,
-       VIRTIO_NET_F_GUEST_HDRLEN
+       VIRTIO_NET_F_GUEST_HDRLEN,
+       VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED,
+       VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED,
 };
 
+/* Guest offload feature bits that correspond to hardware GRO (GRO_HW);
+ * this patch extends the mask with the mapped UDP tunnel GSO bits.
+ * NOTE(review): exact GRO_HW toggling semantics depend on the users of
+ * this mask elsewhere in the file — confirm against the full source.
+ */
#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
-                               (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
-                               (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
-                               (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
-                               (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
-                               (1ULL << VIRTIO_NET_F_GUEST_USO6))
+                       (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
+                       (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
+                       (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
+                       (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
+                       (1ULL << VIRTIO_NET_F_GUEST_USO6) | \
+                       (1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED) | \
+                       (1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED))
 
 struct virtnet_stat_desc {
        char desc[ETH_GSTRING_LEN];
        /* Work struct for delayed refilling if we run low on memory. */
        struct delayed_work refill;
 
+       /* UDP tunnel support */
+       bool tx_tnl;
+
+       bool rx_tnl;
+
+       bool rx_tnl_csum;
+
        /* Is delayed refill enabled? */
        bool refill_enabled;
 
                struct virtio_net_hdr hdr;
                struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
                struct virtio_net_hdr_v1_hash hash_v1_hdr;
+               struct virtio_net_hdr_v1_hash_tunnel tnl_hdr;
        };
 };
 
        if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
                virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
 
-       if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       hdr->hdr.flags = flags;
+       if (virtio_net_handle_csum_offload(skb, &hdr->hdr, vi->rx_tnl_csum)) {
+               net_warn_ratelimited("%s: bad csum: flags: %x, gso_type: %x rx_tnl_csum %d\n",
+                                    dev->name, hdr->hdr.flags,
+                                    hdr->hdr.gso_type, vi->rx_tnl_csum);
+               goto frame_err;
+       }
 
-       if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
-                                 virtio_is_little_endian(vi->vdev))) {
-               net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
+       if (virtio_net_hdr_tnl_to_skb(skb, &hdr->tnl_hdr, vi->rx_tnl,
+                                     vi->rx_tnl_csum,
+                                     virtio_is_little_endian(vi->vdev))) {
+               net_warn_ratelimited("%s: bad gso: type: %x, size: %u, flags %x tunnel %d tnl csum %d\n",
                                     dev->name, hdr->hdr.gso_type,
-                                    hdr->hdr.gso_size);
+                                    hdr->hdr.gso_size, hdr->hdr.flags,
+                                    vi->rx_tnl, vi->rx_tnl_csum);
                goto frame_err;
        }
 
 
 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
 {
-       struct virtio_net_hdr_mrg_rxbuf *hdr;
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
        struct virtnet_info *vi = sq->vq->vdev->priv;
+       struct virtio_net_hdr_v1_hash_tunnel *hdr;
        int num_sg;
        unsigned hdr_len = vi->hdr_len;
        bool can_push;
        /* Even if we can, don't push here yet as this would skew
         * csum_start offset below. */
        if (can_push)
-               hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
+               hdr = (struct virtio_net_hdr_v1_hash_tunnel *)(skb->data -
+                                                              hdr_len);
        else
-               hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
+               hdr = &skb_vnet_common_hdr(skb)->tnl_hdr;
 
-       if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
-                                   virtio_is_little_endian(vi->vdev), false,
-                                   0))
+       if (virtio_net_hdr_tnl_from_skb(skb, hdr, vi->tx_tnl,
+                                       virtio_is_little_endian(vi->vdev), 0))
                return -EPROTO;
 
        if (vi->mergeable_rx_bufs)
-               hdr->num_buffers = 0;
+               hdr->hash_hdr.hdr.num_buffers = 0;
 
        sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
        if (can_push) {
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
                        dev->hw_features |= NETIF_F_GSO_UDP_L4;
 
+               if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO)) {
+                       dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+                       dev->hw_enc_features = dev->hw_features;
+               }
+               if (dev->hw_features & NETIF_F_GSO_UDP_TUNNEL &&
+                   virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM)) {
+                       dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+                       dev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+               }
+
                dev->features |= NETIF_F_GSO_ROBUST;
 
                if (gso)
-                       dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+                       dev->features |= dev->hw_features;
                /* (!csum && gso) case will be fixed by register_netdev() */
        }
 
                dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops;
        }
 
-       if (vi->has_rss_hash_report)
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO) ||
+           virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO))
+               vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash_tunnel);
+       else if (vi->has_rss_hash_report)
                vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
        else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
                 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
        else
                vi->hdr_len = sizeof(struct virtio_net_hdr);
 
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM))
+               vi->rx_tnl_csum = true;
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO))
+               vi->rx_tnl = true;
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO))
+               vi->tx_tnl = true;
+
        if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
            virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                vi->any_header_sg = true;
 
static unsigned int features[] = {
        VIRTNET_FEATURES,
+       /* UDP tunnel GSO offload feature bits. When negotiated, the
+        * corresponding virtio_has_feature() checks latch the driver's
+        * rx_tnl / rx_tnl_csum / tx_tnl flags and select the larger
+        * virtio_net_hdr_v1_hash_tunnel header. Absent from
+        * features_legacy[], so they are only offered to modern devices.
+        */
+       VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO,
+       VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM,
+       VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO,
+       VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM,
 };
 
 static unsigned int features_legacy[] = {