static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
{
struct tun_xdp_hdr *hdr = xdp->data_hard_start;
- struct virtio_net_hdr *gso = &hdr->gso;
+ struct virtio_net_hdr *gso = NULL;
int buflen = hdr->buflen;
int vnet_hdr_len = 0;
struct tap_dev *tap;
struct sk_buff *skb;
int err, depth;
- if (q->flags & IFF_VNET_HDR)
+ if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ if (xdp->data != xdp->data_hard_start + sizeof(*hdr) + vnet_hdr_len) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ gso = (void *)&hdr[1];
+ if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+ tap16_to_cpu(q, gso->csum_start) +
+ tap16_to_cpu(q, gso->csum_offset) + 2 >
+ tap16_to_cpu(q, gso->hdr_len))
+ gso->hdr_len = cpu_to_tap16(q,
+ tap16_to_cpu(q, gso->csum_start) +
+ tap16_to_cpu(q, gso->csum_offset) + 2);
+
+ if (tap16_to_cpu(q, gso->hdr_len) > xdp->data_end - xdp->data) {
+ err = -EINVAL;
+ goto err;
+ }
+ }
skb = build_skb(xdp->data_hard_start, buflen);
if (!skb) {
err = -ENOMEM;
@@ ... @@
skb_reset_mac_header(skb);
skb->protocol = eth_hdr(skb)->h_proto;
- if (vnet_hdr_len) {
+ if (gso) {
err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
if (err)
goto err_kfree;
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
static int tun_xdp_one(struct tun_struct *tun, struct tun_file *tfile, struct xdp_buff *xdp, int *flush, struct tun_page *tpage)
{
unsigned int datasize = xdp->data_end - xdp->data;
struct tun_xdp_hdr *hdr = xdp->data_hard_start;
+ void *tun_hdr = &hdr[1];
struct virtio_net_hdr *gso = NULL;
struct bpf_prog *xdp_prog;
struct sk_buff *skb = NULL;
bool skb_xdp = false;
struct page *page;
- if (tun->flags & IFF_VNET_HDR)
- gso = &hdr->gso;
+ if (tun->flags & IFF_VNET_HDR) {
+ gso = tun_hdr;
+ tun_hdr += sizeof(*gso);
+
+ if (tun_hdr > xdp->data) {
+ atomic_long_inc(&tun->rx_frame_errors);
+ return -EINVAL;
+ }
+
+ if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+ tun16_to_cpu(tun, gso->csum_start) +
+ tun16_to_cpu(tun, gso->csum_offset) + 2 >
+ tun16_to_cpu(tun, gso->hdr_len))
+ gso->hdr_len = cpu_to_tun16(tun,
+ tun16_to_cpu(tun, gso->csum_start) +
+ tun16_to_cpu(tun, gso->csum_offset) + 2);
+
+ if (tun16_to_cpu(tun, gso->hdr_len) > datasize)
+ return -EINVAL;
+ }
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
@@ ... @@
}
skb_reserve(skb, xdp->data - xdp->data_hard_start);
- skb_put(skb, xdp->data_end - xdp->data);
+ skb_put(skb, datasize);
if (gso && virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
atomic_long_inc(&tun->rx_frame_errors);
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, struct iov_iter *from)
{
struct vhost_virtqueue *vq = &nvq->vq;
struct vhost_net *net = container_of(vq->dev, struct vhost_net,
dev);
struct socket *sock = vhost_vq_get_backend(vq);
struct page_frag *alloc_frag = &net->page_frag;
- struct virtio_net_hdr *gso;
struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
struct tun_xdp_hdr *hdr;
size_t len = iov_iter_count(from);
@@ ... @@
return -ENOMEM;
buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
- copied = copy_page_from_iter(alloc_frag->page,
- alloc_frag->offset +
- offsetof(struct tun_xdp_hdr, gso),
- sock_hlen, from);
- if (copied != sock_hlen)
- return -EFAULT;
-
hdr = buf;
- gso = &hdr->gso;
-
- if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
- vhost16_to_cpu(vq, gso->csum_start) +
- vhost16_to_cpu(vq, gso->csum_offset) + 2 >
- vhost16_to_cpu(vq, gso->hdr_len)) {
- gso->hdr_len = cpu_to_vhost16(vq,
- vhost16_to_cpu(vq, gso->csum_start) +
- vhost16_to_cpu(vq, gso->csum_offset) + 2);
-
- if (vhost16_to_cpu(vq, gso->hdr_len) > len)
- return -EINVAL;
+ if (sock_hlen) {
+ copied = copy_page_from_iter(alloc_frag->page,
+ alloc_frag->offset +
+ sizeof(struct tun_xdp_hdr),
+ sock_hlen, from);
+ if (copied != sock_hlen)
+ return -EFAULT;
+
+ len -= sock_hlen;
}
- len -= sock_hlen;
copied = copy_page_from_iter(alloc_frag->page,
alloc_frag->offset + pad,
len, from);
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
struct tun_xdp_hdr {
int buflen;
- struct virtio_net_hdr gso;
};
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
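Illustration, not part of the patch: after this change struct tun_xdp_hdr carries only buflen, and when IFF_VNET_HDR is negotiated the virtio-net header sits immediately after it in the XDP frame (vnet_hdr_sz bytes on the tap/tun side, sock_hlen bytes written by vhost_net_build_xdp() at offset sizeof(struct tun_xdp_hdr)). The sketch below shows the assumed layout and the &hdr[1] lookup the consumers now use; example_tun_vnet_hdr() is a made-up name for this note, not a helper added by the patch.

#include <linux/if_tun.h>
#include <linux/virtio_net.h>
#include <net/xdp.h>

/*
 * Sketch only, assuming the layout introduced above:
 *
 *   data_hard_start                                       data
 *   |<- struct tun_xdp_hdr ->|<- vnet hdr (variable) ->|<- packet ... ->|
 *
 * The vnet header is no longer a member of struct tun_xdp_hdr; it starts
 * right after it, and its length is whatever the queue negotiated.
 */
static struct virtio_net_hdr *example_tun_vnet_hdr(struct xdp_buff *xdp)
{
        struct tun_xdp_hdr *hdr = xdp->data_hard_start;

        return (struct virtio_net_hdr *)&hdr[1];
}

The hdr_len clamp kept at all three call sites is the usual virtio-net one: for example, with csum_start 34 (Ethernet plus IPv4) and csum_offset 16 (TCP checksum), hdr_len is raised to at least 34 + 16 + 2 = 52 so that the 16-bit checksum field still falls inside the declared header.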