return 0;
 }
 
-static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
-                                       const struct virtio_net_hdr *hdr,
-                                       bool little_endian)
+static inline int __virtio_net_hdr_to_skb(struct sk_buff *skb,
+                                         const struct virtio_net_hdr *hdr,
+                                         bool little_endian, u8 hdr_gso_type)
 {
        unsigned int nh_min_len = sizeof(struct iphdr);
        unsigned int gso_type = 0;
        unsigned int p_off = 0;
        unsigned int ip_proto;
 
-       if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
-               switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+       if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+               switch (hdr_gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        gso_type = SKB_GSO_TCPV4;
                        ip_proto = IPPROTO_TCP;
                        return -EINVAL;
                }
 
-               if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+               if (hdr_gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        gso_type |= SKB_GSO_TCP_ECN;
 
                if (hdr->gso_size == 0)
 
                                if (!protocol)
                                        virtio_net_hdr_set_proto(skb, hdr);
-                               else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
+                               else if (!virtio_net_hdr_match_proto(protocol,
+                                                                hdr_gso_type))
                                        return -EINVAL;
                                else
                                        skb->protocol = protocol;
                }
        }
 
-       if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+       if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
                unsigned int nh_off = p_off;
                struct skb_shared_info *shinfo = skb_shinfo(skb);
        return 0;
 }
 
+/* Parse a plain (non-tunnel) virtio header into @skb.
+ *
+ * Thin compatibility wrapper: forwards hdr->gso_type unmodified to
+ * __virtio_net_hdr_to_skb(), preserving the historical entry point for
+ * callers that do not use the UDP-tunnel extensions.
+ */
+static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+                                       const struct virtio_net_hdr *hdr,
+                                       bool little_endian)
+{
+       return __virtio_net_hdr_to_skb(skb, hdr, little_endian, hdr->gso_type);
+}
+
 static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
                                          struct virtio_net_hdr *hdr,
                                          bool little_endian,
        return 0;
 }
 
+/* Minimum L3 (network) header length for the given address family:
+ * sizeof(struct ipv6hdr) for IPv6, sizeof(struct iphdr) for IPv4.
+ */
+static inline unsigned int virtio_l3min(bool is_ipv6)
+{
+       return is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr);
+}
+
+/* Populate @skb GSO/offload state from an extended virtio header that may
+ * describe a GSO-over-UDP-tunnel packet.
+ *
+ * @skb: target socket buffer
+ * @vhdr: extended header carrying the tunnel offsets
+ * @tnl_hdr_negotiated: UDP-tunnel GSO feature was negotiated
+ * @tnl_csum_negotiated: UDP-tunnel outer-csum feature was negotiated
+ * @little_endian: header field endianness for the non-tunnel path
+ *
+ * Falls back to virtio_net_hdr_to_skb() when no tunnel bit is set.
+ * Returns 0 on success, -EINVAL when the header is malformed or requests
+ * features that were not negotiated.
+ */
+static inline int
+virtio_net_hdr_tnl_to_skb(struct sk_buff *skb,
+                         const struct virtio_net_hdr_v1_hash_tunnel *vhdr,
+                         bool tnl_hdr_negotiated,
+                         bool tnl_csum_negotiated,
+                         bool little_endian)
+{
+       const struct virtio_net_hdr *hdr = (const struct virtio_net_hdr *)vhdr;
+       unsigned int inner_nh, outer_th, inner_th;
+       unsigned int inner_l3min, outer_l3min;
+       u8 gso_inner_type, gso_tunnel_type;
+       bool outer_isv6, inner_isv6;
+       int ret;
+
+       gso_tunnel_type = hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL;
+       if (!gso_tunnel_type)
+               return virtio_net_hdr_to_skb(skb, hdr, little_endian);
+
+       /* Tunnel not supported/negotiated, but the hdr asks for it. */
+       if (!tnl_hdr_negotiated)
+               return -EINVAL;
+
+       /* Either ipv4 or ipv6. */
+       if (gso_tunnel_type == VIRTIO_NET_HDR_GSO_UDP_TUNNEL)
+               return -EINVAL;
+
+       /* The UDP tunnel must carry a GSO packet, but no UFO. */
+       gso_inner_type = hdr->gso_type & ~(VIRTIO_NET_HDR_GSO_ECN |
+                                          VIRTIO_NET_HDR_GSO_UDP_TUNNEL);
+       if (!gso_inner_type || gso_inner_type == VIRTIO_NET_HDR_GSO_UDP)
+               return -EINVAL;
+
+       /* Rely on csum being present. */
+       if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+               return -EINVAL;
+
+       /* Validate offsets. */
+       outer_isv6 = gso_tunnel_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
+       inner_isv6 = gso_inner_type == VIRTIO_NET_HDR_GSO_TCPV6;
+       inner_l3min = virtio_l3min(inner_isv6);
+       outer_l3min = ETH_HLEN + virtio_l3min(outer_isv6);
+
+       /* For tunneled GSO, csum_start points at the inner transport hdr. */
+       inner_th = __virtio16_to_cpu(little_endian, hdr->csum_start);
+       inner_nh = le16_to_cpu(vhdr->inner_nh_offset);
+       outer_th = le16_to_cpu(vhdr->outer_th_offset);
+       /* Enforce outer mac+L3 < outer UDP < inner L3 < inner transport. */
+       if (outer_th < outer_l3min ||
+           inner_nh < outer_th + sizeof(struct udphdr) ||
+           inner_th < inner_nh + inner_l3min)
+               return -EINVAL;
+
+       /* Let the basic parsing deal with plain GSO features.
+        * NOTE(review): endianness is hard-coded to 'true' here instead of
+        * @little_endian — presumably tunnel negotiation implies VERSION_1
+        * (always little endian); confirm against the negotiation code.
+        */
+       ret = __virtio_net_hdr_to_skb(skb, hdr, true,
+                                     hdr->gso_type & ~gso_tunnel_type);
+       if (ret)
+               return ret;
+
+       /* In case of USO, the inner protocol is still unknown and
+        * `inner_isv6` is just a guess, additional parsing is needed.
+        * The previous validation ensures that accessing an ipv4 inner
+        * network header is safe.
+        */
+       if (gso_inner_type == VIRTIO_NET_HDR_GSO_UDP_L4) {
+               struct iphdr *iphdr = (struct iphdr *)(skb->data + inner_nh);
+
+               inner_isv6 = iphdr->version == 6;
+               inner_l3min = virtio_l3min(inner_isv6);
+               if (inner_th < inner_nh + inner_l3min)
+                       return -EINVAL;
+       }
+
+       skb_set_inner_protocol(skb, inner_isv6 ? htons(ETH_P_IPV6) :
+                                                htons(ETH_P_IP));
+       if (hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM) {
+               /* Outer checksum offload requires its own negotiation. */
+               if (!tnl_csum_negotiated)
+                       return -EINVAL;
+
+               skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+       } else {
+               skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+       }
+
+       /* Offsets in the header are relative to the packet start; skb
+        * header fields are relative to skb->head, hence the headroom add.
+        */
+       skb->inner_transport_header = inner_th + skb_headroom(skb);
+       skb->inner_network_header = inner_nh + skb_headroom(skb);
+       skb->inner_mac_header = inner_nh + skb_headroom(skb);
+       skb->transport_header = outer_th + skb_headroom(skb);
+       skb->encapsulation = 1;
+       return 0;
+}
+
+/* Checksum-related fields validation for the driver.
+ *
+ * Non-tunnel packets: honor DATA_VALID by marking the skb's checksum as
+ * already verified; a set UDP_TUNNEL_CSUM flag additionally requires the
+ * tunnel-csum feature and bumps csum_level to cover the inner checksum.
+ * Tunnel packets: DATA_VALID is rejected (GSO over UDP tunnel must use
+ * NEEDS_CSUM instead).
+ *
+ * Returns 0 on success, -EINVAL on invalid flag combinations.
+ */
+static inline int virtio_net_handle_csum_offload(struct sk_buff *skb,
+                                                struct virtio_net_hdr *hdr,
+                                                bool tnl_csum_negotiated)
+{
+       if (!(hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL)) {
+               if (!(hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID))
+                       return 0;
+
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               if (!(hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM))
+                       return 0;
+
+               /* tunnel csum packets are invalid when the related
+                * feature has not been negotiated
+                */
+               if (!tnl_csum_negotiated)
+                       return -EINVAL;
+               skb->csum_level = 1;
+               return 0;
+       }
+
+       /* DATA_VALID is mutually exclusive with NEEDS_CSUM, and GSO
+        * over UDP tunnel requires the latter
+        */
+       if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID)
+               return -EINVAL;
+       return 0;
+}
+
+/*
+ * Build an extended virtio header from @skb, including the UDP-tunnel
+ * offsets when the skb carries tunneled GSO state.
+ *
+ * vlan_hlen always refers to the outermost MAC header. That also
+ * means it refers to the only MAC header, if the packet does not carry
+ * any encapsulation.
+ *
+ * Returns 0 on success, -EINVAL when the skb requests tunnel features
+ * that were not negotiated or the base conversion fails.
+ */
+static inline int
+virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
+                           struct virtio_net_hdr_v1_hash_tunnel *vhdr,
+                           bool tnl_hdr_negotiated,
+                           bool little_endian,
+                           int vlan_hlen)
+{
+       struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)vhdr;
+       unsigned int inner_nh, outer_th;
+       int tnl_gso_type;
+       int ret;
+
+       tnl_gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL |
+                                                   SKB_GSO_UDP_TUNNEL_CSUM);
+       if (!tnl_gso_type)
+               return virtio_net_hdr_from_skb(skb, hdr, little_endian, false,
+                                              vlan_hlen);
+
+       /* Tunnel support not negotiated but skb ask for it. */
+       if (!tnl_hdr_negotiated)
+               return -EINVAL;
+
+       /* Let the basic parsing deal with plain GSO features.
+        * The tunnel bits are masked out of gso_type only for the duration
+        * of the call and restored right after, so the skb is unchanged on
+        * return despite the const qualifier.
+        * NOTE(review): endianness hard-coded to 'true' — presumably tunnel
+        * negotiation implies VERSION_1; confirm.
+        */
+       skb_shinfo(skb)->gso_type &= ~tnl_gso_type;
+       ret = virtio_net_hdr_from_skb(skb, hdr, true, false, vlan_hlen);
+       skb_shinfo(skb)->gso_type |= tnl_gso_type;
+       if (ret)
+               return ret;
+
+       /* Encode the outer L3 family in the gso_type tunnel bits. */
+       if (skb->protocol == htons(ETH_P_IPV6))
+               hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
+       else
+               hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4;
+
+       if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+               hdr->flags |= VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM;
+
+       /* Convert skb header offsets (relative to skb->head) back to
+        * packet-relative offsets for the wire header.
+        */
+       inner_nh = skb->inner_network_header - skb_headroom(skb);
+       outer_th = skb->transport_header - skb_headroom(skb);
+       vhdr->inner_nh_offset = cpu_to_le16(inner_nh);
+       vhdr->outer_th_offset = cpu_to_le16(outer_th);
+       return 0;
+}
+
 #endif /* _LINUX_VIRTIO_NET_H */
 
                                         * with the same MAC.
                                         */
 #define VIRTIO_NET_F_SPEED_DUPLEX 63   /* Device set linkspeed and duplex */
+#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO 65 /* Driver can receive
+                                             * GSO-over-UDP-tunnel packets
+                                             */
+#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM 66 /* Driver handles
+                                                  * GSO-over-UDP-tunnel
+                                                  * packets with partial csum
+                                                  * for the outer header
+                                                  */
+#define VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO 67 /* Device can receive
+                                            * GSO-over-UDP-tunnel packets
+                                            */
+#define VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM 68 /* Device handles
+                                                 * GSO-over-UDP-tunnel
+                                                 * packets with partial csum
+                                                 * for the outer header
+                                                 */
+
+/* Offloads bits corresponding to VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO{,_CSUM}
+ * features
+ */
+#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED       46
+#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED  47
 
 #ifndef VIRTIO_NET_NO_LEGACY
 #define VIRTIO_NET_F_GSO       6       /* Host handles pkts w/ any GSO type */
 #define VIRTIO_NET_HDR_F_NEEDS_CSUM    1       /* Use csum_start, csum_offset */
 #define VIRTIO_NET_HDR_F_DATA_VALID    2       /* Csum is valid */
 #define VIRTIO_NET_HDR_F_RSC_INFO      4       /* rsc info in csum_ fields */
+#define VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM 8     /* UDP tunnel csum offload */
        __u8 flags;
 #define VIRTIO_NET_HDR_GSO_NONE                0       /* Not a GSO frame */
 #define VIRTIO_NET_HDR_GSO_TCPV4       1       /* GSO frame, IPv4 TCP (TSO) */
 #define VIRTIO_NET_HDR_GSO_UDP         3       /* GSO frame, IPv4 UDP (UFO) */
 #define VIRTIO_NET_HDR_GSO_TCPV6       4       /* GSO frame, IPv6 TCP */
 #define VIRTIO_NET_HDR_GSO_UDP_L4      5       /* GSO frame, IPv4& IPv6 UDP (USO) */
+#define VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4 0x20 /* UDPv4 tunnel present */
+#define VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6 0x40 /* UDPv6 tunnel present */
+#define VIRTIO_NET_HDR_GSO_UDP_TUNNEL (VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4 | \
+                                      VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6)
 #define VIRTIO_NET_HDR_GSO_ECN         0x80    /* TCP has ECN set */
        __u8 gso_type;
        __virtio16 hdr_len;     /* Ethernet + IP + tcp/udp hdrs */
        __le16 padding;
 };
 
+/* Extended header for GSO-over-UDP-tunnel: the v1 hash header followed by
+ * the two tunnel offsets, both little endian and relative to packet start.
+ */
+struct virtio_net_hdr_v1_hash_tunnel {
+       struct virtio_net_hdr_v1_hash hash_hdr;
+       __le16 outer_th_offset; /* outer transport (UDP) header offset */
+       __le16 inner_nh_offset; /* inner network header offset */
+};
+
 #ifndef VIRTIO_NET_NO_LEGACY
 /* This header comes first in the scatter-gather list.
  * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must