From: David S. Miller <davem@davemloft.net>
Date: Sun, 22 Oct 2017 12:36:53 +0000 (+0100)
Subject: Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
X-Git-Tag: v4.15-rc1~84^2~358
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=f8ddadc4db6c7b7029b6d0e0d9af24f74ad27ca2;p=linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

There were quite a few overlapping sets of changes here.

Daniel's bug fix for off-by-ones in the new BPF branch instructions,
along with the added allowances for "data_end > ptr + x" forms,
collided with the metadata additions.
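
For reference, the newly allowed "data_end > ptr + x" form looks
roughly like this in an XDP C program (an illustrative sketch only,
not code taken from this merge; 'ctx' is assumed to be a
struct xdp_md pointer):

	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* data_end on the left-hand side of the comparison: on the
	 * true branch the verifier can prove that reading up to 8
	 * bytes at 'data' stays within the packet.
	 */
	if (data_end > data + 8) {
		/* in-bounds access of data[0..7] */
	}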

Along with those three changes came verifier test cases, which in
their final form I tried to group together properly.  If I had just
trimmed git's conflict markers as-is, this would have split up the
meta tests unnecessarily.

In the socketmap code, a set of preemption disabling changes
overlapped with the rename of bpf_compute_data_end() to
bpf_compute_data_pointers().
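
The resolved form in smap_verdict_func() keeps both sides, as can
be seen in the sockmap hunk below:

	/* Recompute the data pointers ('net-next' rename), then run
	 * the program with preemption disabled ('net' fix).
	 */
	bpf_compute_data_pointers(skb);
	preempt_disable();
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	preempt_enable();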

Changes were made in 'net' to the mv88e6060.c driver's set_addr
method, which got removed in 'net-next'.

The hyperv transport socket layer had a locking change in 'net'
which overlapped with a change of socket state macro usage
in 'net-next'.
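
The merged hvs_close_connection() keeps the new lock_sock() from
'net' together with the TCP_* state macro from 'net-next'
(excerpted from the hunk below):

	lock_sock(sk);

	sk->sk_state = TCP_CLOSE;	/* TCP_* macro, not the old SS_* */
	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;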

Signed-off-by: David S. Miller <davem@davemloft.net>
---

f8ddadc4db6c7b7029b6d0e0d9af24f74ad27ca2
diff --cc include/linux/filter.h
index 2d2db394b0ca,818a0b26249e..cdd78a7beaae
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@@ -731,24 -725,10 +731,24 @@@ int xdp_do_redirect(struct net_device *
  		    struct bpf_prog *prog);
  void xdp_do_flush_map(void);
  
 +/* Drivers not supporting XDP metadata can use this helper, which
 + * rejects any room expansion for metadata as a result.
 + */
 +static __always_inline void
 +xdp_set_data_meta_invalid(struct xdp_buff *xdp)
 +{
 +	xdp->data_meta = xdp->data + 1;
 +}
 +
 +static __always_inline bool
 +xdp_data_meta_unsupported(const struct xdp_buff *xdp)
 +{
 +	return unlikely(xdp->data_meta > xdp->data);
 +}
 +
  void bpf_warn_invalid_xdp_action(u32 act);
 -void bpf_warn_invalid_xdp_redirect(u32 ifindex);
  
- struct sock *do_sk_redirect_map(void);
+ struct sock *do_sk_redirect_map(struct sk_buff *skb);
  
  #ifdef CONFIG_BPF_JIT
  extern int bpf_jit_enable;
diff --cc kernel/bpf/devmap.c
index e5d3de7cff2e,e745d6a88224..ebdef54bf7df
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@@ -81,9 -78,12 +81,12 @@@ static struct bpf_map *dev_map_alloc(un
  	int err = -EINVAL;
  	u64 cost;
  
+ 	if (!capable(CAP_NET_ADMIN))
+ 		return ERR_PTR(-EPERM);
+ 
  	/* check sanity of attributes */
  	if (attr->max_entries == 0 || attr->key_size != 4 ||
 -	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
 +	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
  		return ERR_PTR(-EINVAL);
  
  	dtab = kzalloc(sizeof(*dtab), GFP_USER);
diff --cc kernel/bpf/sockmap.c
index 86ec846f2d5e,2b6eb35ae5d3..eef843c3b419
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@@ -39,10 -39,8 +39,11 @@@
  #include <linux/workqueue.h>
  #include <linux/list.h>
  #include <net/strparser.h>
+ #include <net/tcp.h>
  
 +#define SOCK_CREATE_FLAG_MASK \
 +	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 +
  struct bpf_stab {
  	struct bpf_map map;
  	struct sock **sock_map;
@@@ -104,9 -102,16 +105,16 @@@ static int smap_verdict_func(struct sma
  		return SK_DROP;
  
  	skb_orphan(skb);
+ 	/* We need to ensure that BPF metadata for maps is also cleared
+ 	 * when we orphan the skb so that we don't have the possibility
+ 	 * to reference a stale map.
+ 	 */
+ 	TCP_SKB_CB(skb)->bpf.map = NULL;
  	skb->sk = psock->sock;
 -	bpf_compute_data_end(skb);
 +	bpf_compute_data_pointers(skb);
+ 	preempt_disable();
  	rc = (*prog->bpf_func)(skb, prog->insnsi);
+ 	preempt_enable();
  	skb->sk = NULL;
  
  	return rc;
@@@ -490,9 -486,12 +489,12 @@@ static struct bpf_map *sock_map_alloc(u
  	int err = -EINVAL;
  	u64 cost;
  
+ 	if (!capable(CAP_NET_ADMIN))
+ 		return ERR_PTR(-EPERM);
+ 
  	/* check sanity of attributes */
  	if (attr->max_entries == 0 || attr->key_size != 4 ||
 -	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
 +	    attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
  		return ERR_PTR(-EINVAL);
  
  	if (attr->value_size > KMALLOC_MAX_SIZE)
diff --cc kernel/bpf/verifier.c
index 545b8c45a578,c48ca2a34b5e..d906775e12c1
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@@ -1006,7 -1116,12 +1006,13 @@@ static int check_mem_access(struct bpf_
  		/* ctx accesses must be at a fixed offset, so that we can
  		 * determine what type of data were returned.
  		 */
- 		if (!tnum_is_const(reg->var_off)) {
+ 		if (reg->off) {
 -			verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
++			verbose(env,
++				"dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
+ 				regno, reg->off, off - reg->off);
+ 			return -EACCES;
+ 		}
+ 		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
  			char tn_buf[48];
  
  			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
@@@ -2341,12 -2431,14 +2346,15 @@@ static int check_alu_op(struct bpf_veri
  
  static void find_good_pkt_pointers(struct bpf_verifier_state *state,
  				   struct bpf_reg_state *dst_reg,
- 				   enum bpf_reg_type type)
++				   enum bpf_reg_type type,
+ 				   bool range_right_open)
  {
  	struct bpf_reg_state *regs = state->regs, *reg;
+ 	u16 new_range;
  	int i;
  
- 	if (dst_reg->off < 0)
+ 	if (dst_reg->off < 0 ||
+ 	    (dst_reg->off == 0 && range_right_open))
  		/* This doesn't give us any range */
  		return;
  
@@@ -2403,16 -2501,16 +2417,16 @@@
  	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
  	 */
  	for (i = 0; i < MAX_BPF_REG; i++)
 -		if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
 +		if (regs[i].type == type && regs[i].id == dst_reg->id)
  			/* keep the maximum range already checked */
- 			regs[i].range = max_t(u16, regs[i].range, dst_reg->off);
+ 			regs[i].range = max(regs[i].range, new_range);
  
  	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
  		if (state->stack_slot_type[i] != STACK_SPILL)
  			continue;
  		reg = &state->spilled_regs[i / BPF_REG_SIZE];
 -		if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
 -			reg->range = max(reg->range, new_range);
 +		if (reg->type == type && reg->id == dst_reg->id)
- 			reg->range = max_t(u16, reg->range, dst_reg->off);
++			reg->range = max_t(u16, reg->range, new_range);
  	}
  }
  
@@@ -2776,46 -2874,49 +2790,78 @@@ static int check_cond_jmp_op(struct bpf
  	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
  		   dst_reg->type == PTR_TO_PACKET &&
  		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
- 		find_good_pkt_pointers(this_branch, dst_reg, PTR_TO_PACKET);
+ 		/* pkt_data' > pkt_end */
 -		find_good_pkt_pointers(this_branch, dst_reg, false);
++		find_good_pkt_pointers(this_branch, dst_reg,
++				       PTR_TO_PACKET, false);
+ 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
+ 		   dst_reg->type == PTR_TO_PACKET_END &&
+ 		   regs[insn->src_reg].type == PTR_TO_PACKET) {
+ 		/* pkt_end > pkt_data' */
 -		find_good_pkt_pointers(other_branch, &regs[insn->src_reg], true);
++		find_good_pkt_pointers(other_branch, &regs[insn->src_reg],
++				       PTR_TO_PACKET, true);
+ 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
+ 		   dst_reg->type == PTR_TO_PACKET &&
+ 		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+ 		/* pkt_data' < pkt_end */
 -		find_good_pkt_pointers(other_branch, dst_reg, true);
++		find_good_pkt_pointers(other_branch, dst_reg, PTR_TO_PACKET,
++				       true);
  	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
+ 		   dst_reg->type == PTR_TO_PACKET_END &&
+ 		   regs[insn->src_reg].type == PTR_TO_PACKET) {
+ 		/* pkt_end < pkt_data' */
 -		find_good_pkt_pointers(this_branch, &regs[insn->src_reg], false);
++		find_good_pkt_pointers(this_branch, &regs[insn->src_reg],
++				       PTR_TO_PACKET, false);
+ 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
  		   dst_reg->type == PTR_TO_PACKET &&
  		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
- 		find_good_pkt_pointers(other_branch, dst_reg, PTR_TO_PACKET);
+ 		/* pkt_data' >= pkt_end */
 -		find_good_pkt_pointers(this_branch, dst_reg, true);
++		find_good_pkt_pointers(this_branch, dst_reg,
++				       PTR_TO_PACKET, true);
  	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
  		   dst_reg->type == PTR_TO_PACKET_END &&
  		   regs[insn->src_reg].type == PTR_TO_PACKET) {
+ 		/* pkt_end >= pkt_data' */
 -		find_good_pkt_pointers(other_branch, &regs[insn->src_reg], false);
 +		find_good_pkt_pointers(other_branch, &regs[insn->src_reg],
- 				       PTR_TO_PACKET);
++				       PTR_TO_PACKET, false);
+ 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
+ 		   dst_reg->type == PTR_TO_PACKET &&
+ 		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+ 		/* pkt_data' <= pkt_end */
 -		find_good_pkt_pointers(other_branch, dst_reg, false);
++		find_good_pkt_pointers(other_branch, dst_reg,
++				       PTR_TO_PACKET, false);
  	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
  		   dst_reg->type == PTR_TO_PACKET_END &&
  		   regs[insn->src_reg].type == PTR_TO_PACKET) {
+ 		/* pkt_end <= pkt_data' */
 -		find_good_pkt_pointers(this_branch, &regs[insn->src_reg], true);
 +		find_good_pkt_pointers(this_branch, &regs[insn->src_reg],
- 				       PTR_TO_PACKET);
++				       PTR_TO_PACKET, true);
 +	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
 +		   dst_reg->type == PTR_TO_PACKET_META &&
 +		   reg_is_init_pkt_pointer(&regs[insn->src_reg], PTR_TO_PACKET)) {
- 		find_good_pkt_pointers(this_branch, dst_reg, PTR_TO_PACKET_META);
++		find_good_pkt_pointers(this_branch, dst_reg,
++				       PTR_TO_PACKET_META, false);
 +	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
 +		   dst_reg->type == PTR_TO_PACKET_META &&
 +		   reg_is_init_pkt_pointer(&regs[insn->src_reg], PTR_TO_PACKET)) {
- 		find_good_pkt_pointers(other_branch, dst_reg, PTR_TO_PACKET_META);
++		find_good_pkt_pointers(other_branch, dst_reg,
++				       PTR_TO_PACKET_META, false);
 +	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
 +		   reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
 +		   regs[insn->src_reg].type == PTR_TO_PACKET_META) {
 +		find_good_pkt_pointers(other_branch, &regs[insn->src_reg],
- 				       PTR_TO_PACKET_META);
++				       PTR_TO_PACKET_META, false);
 +	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
 +		   reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
 +		   regs[insn->src_reg].type == PTR_TO_PACKET_META) {
 +		find_good_pkt_pointers(this_branch, &regs[insn->src_reg],
- 				       PTR_TO_PACKET_META);
++				       PTR_TO_PACKET_META, false);
  	} else if (is_pointer_value(env, insn->dst_reg)) {
 -		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
 +		verbose(env, "R%d pointer comparison prohibited\n",
 +			insn->dst_reg);
  		return -EACCES;
  	}
 -	if (log_level)
 -		print_verifier_state(this_branch);
 +	if (env->log.level)
 +		print_verifier_state(env, this_branch);
  	return 0;
  }
  
diff --cc net/core/filter.c
index ccf62f44140a,aa0265997f93..b79c44cc8145
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@@ -3894,15 -3682,8 +3895,14 @@@ static bool sk_skb_is_valid_access(int 
  				   enum bpf_access_type type,
  				   struct bpf_insn_access_aux *info)
  {
 +	switch (off) {
 +	case bpf_ctx_range(struct __sk_buff, tc_classid):
 +	case bpf_ctx_range(struct __sk_buff, data_meta):
 +		return false;
 +	}
 +
  	if (type == BPF_WRITE) {
  		switch (off) {
- 		case bpf_ctx_range(struct __sk_buff, mark):
  		case bpf_ctx_range(struct __sk_buff, tc_index):
  		case bpf_ctx_range(struct __sk_buff, priority):
  			break;
@@@ -3912,6 -3693,9 +3912,8 @@@
  	}
  
  	switch (off) {
+ 	case bpf_ctx_range(struct __sk_buff, mark):
 -	case bpf_ctx_range(struct __sk_buff, tc_classid):
+ 		return false;
  	case bpf_ctx_range(struct __sk_buff, data):
  		info->reg_type = PTR_TO_PACKET;
  		break;
diff --cc net/core/rtnetlink.c
index 20b550d07fe3,5ace48926b19..04680a53c8dd
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@@ -4377,11 -4287,14 +4381,14 @@@ static int rtnetlink_event(struct notif
  	case NETDEV_CHANGENAME:
  	case NETDEV_FEAT_CHANGE:
  	case NETDEV_BONDING_FAILOVER:
+ 	case NETDEV_POST_TYPE_CHANGE:
  	case NETDEV_NOTIFY_PEERS:
+ 	case NETDEV_CHANGEUPPER:
  	case NETDEV_RESEND_IGMP:
  	case NETDEV_CHANGEINFODATA:
+ 	case NETDEV_CHANGE_TX_QUEUE_LEN:
  		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
 -				   GFP_KERNEL);
 +				   GFP_KERNEL, NULL);
  		break;
  	default:
  		break;
diff --cc net/vmw_vsock/hyperv_transport.c
index bbac023e70d1,e21991fe883a..5583df708b8c
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@@ -310,7 -310,9 +310,9 @@@ static void hvs_close_connection(struc
  	struct sock *sk = get_per_channel_state(chan);
  	struct vsock_sock *vsk = vsock_sk(sk);
  
+ 	lock_sock(sk);
+ 
 -	sk->sk_state = SS_UNCONNECTED;
 +	sk->sk_state = TCP_CLOSE;
  	sock_set_flag(sk, SOCK_DONE);
  	vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
  
@@@ -344,8 -348,10 +348,9 @@@ static void hvs_open_connection(struct 
  	if (!sk)
  		return;
  
+ 	lock_sock(sk);
 -
 -	if ((conn_from_host && sk->sk_state != VSOCK_SS_LISTEN) ||
 -	    (!conn_from_host && sk->sk_state != SS_CONNECTING))
 +	if ((conn_from_host && sk->sk_state != TCP_LISTEN) ||
 +	    (!conn_from_host && sk->sk_state != TCP_SYN_SENT))
  		goto out;
  
  	if (conn_from_host) {
@@@ -395,11 -401,9 +400,9 @@@
  
  		vsock_insert_connected(vnew);
  
- 		lock_sock(sk);
  		vsock_enqueue_accept(sk, new);
- 		release_sock(sk);
  	} else {
 -		sk->sk_state = SS_CONNECTED;
 +		sk->sk_state = TCP_ESTABLISHED;
  		sk->sk_socket->state = SS_CONNECTED;
  
  		vsock_insert_connected(vsock_sk(sk));
diff --cc tools/testing/selftests/bpf/bpf_helpers.h
index 609514f74482,b2e02bdcd098..abfa4c5c8527
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@@ -67,10 -65,7 +67,10 @@@ static int (*bpf_xdp_adjust_meta)(void 
  static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
  			     int optlen) =
  	(void *) BPF_FUNC_setsockopt;
 +static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
 +			     int optlen) =
 +	(void *) BPF_FUNC_getsockopt;
- static int (*bpf_sk_redirect_map)(void *map, int key, int flags) =
+ static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
  	(void *) BPF_FUNC_sk_redirect_map;
  static int (*bpf_sock_map_update)(void *map, void *key, void *value,
  				  unsigned long long flags) =
diff --cc tools/testing/selftests/bpf/test_verifier.c
index cc91d0159f43,64ae21f64489..1b93941bdfea
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@@ -6645,325 -6657,500 +6657,819 @@@ static struct bpf_test tests[] = 
  		.errstr = "BPF_END uses reserved fields",
  		.result = REJECT,
  	},
 +	{
 +		"meta access, test1",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_meta)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data)),
 +			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = ACCEPT,
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
 +	{
 +		"meta access, test2",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_meta)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data)),
 +			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
 +			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
 +			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = REJECT,
 +		.errstr = "invalid access to packet, off=-8",
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
 +	{
 +		"meta access, test3",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_meta)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_end)),
 +			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = REJECT,
 +		.errstr = "invalid access to packet",
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
 +	{
 +		"meta access, test4",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_meta)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_end)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
 +				    offsetof(struct xdp_md, data)),
 +			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = REJECT,
 +		.errstr = "invalid access to packet",
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
 +	{
 +		"meta access, test5",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_meta)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
 +				    offsetof(struct xdp_md, data)),
 +			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
 +			BPF_MOV64_IMM(BPF_REG_2, -8),
 +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +				     BPF_FUNC_xdp_adjust_meta),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = REJECT,
 +		.errstr = "R3 !read_ok",
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
 +	{
 +		"meta access, test6",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_meta)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data)),
 +			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
 +			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = REJECT,
 +		.errstr = "invalid access to packet",
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
 +	{
 +		"meta access, test7",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_meta)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data)),
 +			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
 +			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = ACCEPT,
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
 +	{
 +		"meta access, test8",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_meta)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data)),
 +			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = ACCEPT,
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
 +	{
 +		"meta access, test9",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_meta)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data)),
 +			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = REJECT,
 +		.errstr = "invalid access to packet",
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
 +	{
 +		"meta access, test10",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_meta)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_end)),
 +			BPF_MOV64_IMM(BPF_REG_5, 42),
 +			BPF_MOV64_IMM(BPF_REG_6, 24),
 +			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
 +			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
 +			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
 +			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
 +			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
 +			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
 +			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = REJECT,
 +		.errstr = "invalid access to packet",
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
 +	{
 +		"meta access, test11",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_meta)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data)),
 +			BPF_MOV64_IMM(BPF_REG_5, 42),
 +			BPF_MOV64_IMM(BPF_REG_6, 24),
 +			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
 +			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
 +			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
 +			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
 +			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
 +			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
 +			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = ACCEPT,
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
 +	{
 +		"meta access, test12",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_meta)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_end)),
 +			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
 +			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
 +			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
 +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = ACCEPT,
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
+ 	{
+ 		"arithmetic ops make PTR_TO_CTX unusable",
+ 		.insns = {
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+ 				      offsetof(struct __sk_buff, data) -
+ 				      offsetof(struct __sk_buff, mark)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ 				    offsetof(struct __sk_buff, mark)),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end mangling, bad access 1",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+ 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end mangling, bad access 2",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
+ 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_data' > pkt_end, good access",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = ACCEPT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_data' > pkt_end, bad access 1",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_data' > pkt_end, bad access 2",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end > pkt_data', good access",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = ACCEPT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end > pkt_data', bad access 1",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end > pkt_data', bad access 2",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_data' < pkt_end, good access",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = ACCEPT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_data' < pkt_end, bad access 1",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_data' < pkt_end, bad access 2",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end < pkt_data', good access",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = ACCEPT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end < pkt_data', bad access 1",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end < pkt_data', bad access 2",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_data' >= pkt_end, good access",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = ACCEPT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end >= pkt_data', good access",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = ACCEPT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
 +	{
 +		"bpf_exit with invalid return code. test1",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
 +			BPF_EXIT_INSN(),
 +		},
 +		.errstr = "R0 has value (0x0; 0xffffffff)",
 +		.result = REJECT,
 +		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
 +	},
 +	{
 +		"bpf_exit with invalid return code. test2",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
 +			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = ACCEPT,
 +		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
 +	},
 +	{
 +		"bpf_exit with invalid return code. test3",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
 +			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
 +			BPF_EXIT_INSN(),
 +		},
 +		.errstr = "R0 has value (0x0; 0x3)",
 +		.result = REJECT,
 +		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
 +	},
 +	{
 +		"bpf_exit with invalid return code. test4",
 +		.insns = {
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = ACCEPT,
 +		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
 +	},
 +	{
 +		"bpf_exit with invalid return code. test5",
 +		.insns = {
 +			BPF_MOV64_IMM(BPF_REG_0, 2),
 +			BPF_EXIT_INSN(),
 +		},
 +		.errstr = "R0 has value (0x2; 0x0)",
 +		.result = REJECT,
 +		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
 +	},
 +	{
 +		"bpf_exit with invalid return code. test6",
 +		.insns = {
 +			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
 +			BPF_EXIT_INSN(),
 +		},
 +		.errstr = "R0 is not a known value (ctx)",
 +		.result = REJECT,
 +		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
 +	},
 +	{
 +		"bpf_exit with invalid return code. test7",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
 +			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
 +			BPF_EXIT_INSN(),
 +		},
 +		.errstr = "R0 has unknown scalar value",
 +		.result = REJECT,
 +		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
 +	},
+ 	{
+ 		"XDP pkt read, pkt_end >= pkt_data', bad access 1",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end >= pkt_data', bad access 2",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_data' <= pkt_end, good access",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = ACCEPT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end <= pkt_data', good access",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = ACCEPT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end <= pkt_data', bad access 1",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
+ 	{
+ 		"XDP pkt read, pkt_end <= pkt_data', bad access 2",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.errstr = "R1 offset is outside of the packet",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ 	},
  };
  
  static int probe_filter_length(const struct bpf_insn *fp)