static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
 
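+/* Make sure the skb's linear data is private and writable before a BPF
+ * helper modifies it.  pskb_expand_head() may reallocate skb->head, so
+ * the data_end pointer cached in skb->cb has to be recomputed afterwards.
+ */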
+static inline int bpf_try_make_writable(struct sk_buff *skb,
+                                       unsigned int write_len)
+{
+       int err;
+
+       if (!skb_cloned(skb))
+               return 0;
+       if (skb_clone_writable(skb, write_len))
+               return 0;
+       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+       if (!err)
+               bpf_compute_data_end(skb);
+       return err;
+}
+
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
        struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
         */
        if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
                return -EFAULT;
-       if (unlikely(skb_try_make_writable(skb, offset + len)))
+       if (unlikely(bpf_try_make_writable(skb, offset + len)))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, len, sp->buff);
                return -EINVAL;
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
-       if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
+       if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
                return -EINVAL;
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
-       if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
+       if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        __be16 vlan_proto = (__force __be16) r2;
+       int ret;
 
        if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
                     vlan_proto != htons(ETH_P_8021AD)))
                vlan_proto = htons(ETH_P_8021Q);
 
-       return skb_vlan_push(skb, vlan_proto, vlan_tci);
+       ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
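+       /* skb_vlan_push() can change skb->data and skb->len, so refresh
+        * the data_end cached in skb->cb.
+        */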
+       bpf_compute_data_end(skb);
+       return ret;
 }
 
 const struct bpf_func_proto bpf_skb_vlan_push_proto = {
 static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       int ret;
 
-       return skb_vlan_pop(skb);
+       ret = skb_vlan_pop(skb);
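+       /* skb_vlan_pop() can likewise move skb->data; recompute data_end. */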
+       bpf_compute_data_end(skb);
+       return ret;
 }
 
 const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
 static bool sk_filter_is_valid_access(int off, int size,
                                      enum bpf_access_type type)
 {
-       if (off == offsetof(struct __sk_buff, tc_classid))
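+       /* tc_classid is tc-only; data/data_end rely on the pointer cached
+        * by bpf_compute_data_end(), which socket filters do not set up.
+        */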
+       switch (off) {
+       case offsetof(struct __sk_buff, tc_classid):
+       case offsetof(struct __sk_buff, data):
+       case offsetof(struct __sk_buff, data_end):
                return false;
+       }
 
        if (type == BPF_WRITE) {
                switch (off) {
                        *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
                break;
 
+       case offsetof(struct __sk_buff, data):
+               *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, data)),
+                                     dst_reg, src_reg,
+                                     offsetof(struct sk_buff, data));
+               break;
+
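+       /* __sk_buff->data_end has no sk_buff counterpart; load it from the
+        * bpf_skb_data_end area in skb->cb filled by bpf_compute_data_end().
+        */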
+       case offsetof(struct __sk_buff, data_end):
+               ctx_off -= offsetof(struct __sk_buff, data_end);
+               ctx_off += offsetof(struct sk_buff, cb);
+               ctx_off += offsetof(struct bpf_skb_data_end, data_end);
+               *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(void *)),
+                                     dst_reg, src_reg, ctx_off);
+               break;
+
        case offsetof(struct __sk_buff, tc_index):
 #ifdef CONFIG_NET_SCHED
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);