#define TUN_FASYNC     IFF_ATTACH_QUEUE
 
 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
-                     IFF_MULTI_QUEUE)
+                     IFF_VNET_LE | IFF_MULTI_QUEUE)
 #define GOODCOPY_LEN 128
 
 #define FLT_EXACT_COUNT 8
        u32 flow_count;
 };
 
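+/* Read/write the __virtio16 fields of the vnet header in the endianness
+ * negotiated for this device: little-endian when IFF_VNET_LE is set,
+ * native (legacy virtio) endianness otherwise.
+ */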
+static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
+{
+       return __virtio16_to_cpu(tun->flags & IFF_VNET_LE, val);
+}
+
+static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
+{
+       return __cpu_to_virtio16(tun->flags & IFF_VNET_LE, val);
+}
+
 static inline u32 tun_hashfn(u32 rxhash)
 {
        return rxhash & 0x3ff;
                        return -EFAULT;
 
                if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
-                   gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
-                       gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
+                   tun16_to_cpu(tun, gso.csum_start) +
+                   tun16_to_cpu(tun, gso.csum_offset) + 2 >
+                   tun16_to_cpu(tun, gso.hdr_len))
+                       gso.hdr_len = cpu_to_tun16(tun,
+                                        tun16_to_cpu(tun, gso.csum_start) +
+                                        tun16_to_cpu(tun, gso.csum_offset) + 2);
 
-               if (gso.hdr_len > len)
+               if (tun16_to_cpu(tun, gso.hdr_len) > len)
                        return -EINVAL;
                offset += tun->vnet_hdr_sz;
        }
        if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
                align += NET_IP_ALIGN;
                if (unlikely(len < ETH_HLEN ||
-                            (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
+                            (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
                        return -EINVAL;
        }
 
                 * enough room for skb expand head in case it is used.
                 * The rest of the buffer is mapped from userspace.
                 */
-               copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
+               copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
                if (copylen > good_linear)
                        copylen = good_linear;
                linear = copylen;
 
        if (!zerocopy) {
                copylen = len;
-               if (gso.hdr_len > good_linear)
+               if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
                        linear = good_linear;
                else
-                       linear = gso.hdr_len;
+                       linear = tun16_to_cpu(tun, gso.hdr_len);
        }
 
        skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
        }
 
        if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-               if (!skb_partial_csum_set(skb, gso.csum_start,
-                                         gso.csum_offset)) {
+               if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
+                                         tun16_to_cpu(tun, gso.csum_offset))) {
                        tun->dev->stats.rx_frame_errors++;
                        kfree_skb(skb);
                        return -EINVAL;
                if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
-               skb_shinfo(skb)->gso_size = gso.gso_size;
+               skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
                if (skb_shinfo(skb)->gso_size == 0) {
                        tun->dev->stats.rx_frame_errors++;
                        kfree_skb(skb);
                        struct skb_shared_info *sinfo = skb_shinfo(skb);
 
                        /* This is a hint as to how much should be linear. */
-                       gso.hdr_len = skb_headlen(skb);
-                       gso.gso_size = sinfo->gso_size;
+                       gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb));
+                       gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size);
                        if (sinfo->gso_type & SKB_GSO_TCPV4)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                        else if (sinfo->gso_type & SKB_GSO_TCPV6)
                        else {
                                pr_err("unexpected GSO type: "
                                       "0x%x, gso_size %d, hdr_len %d\n",
-                                      sinfo->gso_type, gso.gso_size,
-                                      gso.hdr_len);
+                                      sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+                                      tun16_to_cpu(tun, gso.hdr_len));
                                print_hex_dump(KERN_ERR, "tun: ",
                                               DUMP_PREFIX_NONE,
                                               16, 1, skb->head,
-                                              min((int)gso.hdr_len, 64), true);
+                                              min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
                                WARN_ON_ONCE(1);
                                return -EINVAL;
                        }
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       gso.csum_start = skb_checksum_start_offset(skb) +
-                                        vlan_hlen;
-                       gso.csum_offset = skb->csum_offset;
+                       gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
+                                                     vlan_hlen);
+                       gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
                } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                        gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
                } /* else everything is zero */
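
Untested userspace sketch, not part of this patch: because IFF_VNET_LE is added
to TUN_FEATURES above, a process should be able to request little-endian vnet
headers at TUNSETIFF time. This assumes the rest of the series exports
IFF_VNET_LE in include/uapi/linux/if_tun.h; only the existing TUNSETIFF and
TUNSETVNETHDRSZ ioctls are used, everything else is illustrative.

/* Open a tap device whose virtio_net_hdr fields are little-endian. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>	/* assumed to define IFF_VNET_LE after this series */

int main(void)
{
	struct ifreq ifr;
	int hdr_sz = 12;	/* sizeof(struct virtio_net_hdr_mrg_rxbuf) */
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0) {
		perror("open /dev/net/tun");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
	/* Tap device, no packet info, vnet header, little-endian header fields. */
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR | IFF_VNET_LE;

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		perror("TUNSETIFF");
		return 1;
	}
	if (ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz) < 0) {
		perror("TUNSETVNETHDRSZ");
		return 1;
	}

	/* reads/writes on fd now carry struct virtio_net_hdr with LE fields */
	close(fd);
	return 0;
}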