continue;
                }
 
+               if (insn->imm == BPF_FUNC_redirect_map) {
+                       u64 addr = (unsigned long)prog;
+                       struct bpf_insn r4_ld[] = {
+                               BPF_LD_IMM64(BPF_REG_4, addr),
+                               *insn,
+                       };
+                       cnt = ARRAY_SIZE(r4_ld);
+
+                       new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta    += cnt - 1;
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
+               }
 patch_call_imm:
                fn = prog->aux->ops->get_func_proto(insn->imm);
                /* all functions that have prototype and verifier allowed
 
        u32 flags;
        struct bpf_map *map;
        struct bpf_map *map_to_flush;
+       const struct bpf_prog *map_owner;
 };
 
 static DEFINE_PER_CPU(struct redirect_info, redirect_info);
 
        ri->ifindex = ifindex;
        ri->flags = flags;
-       ri->map = NULL;
 
        return TC_ACT_REDIRECT;
 }
                               struct bpf_prog *xdp_prog)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+       const struct bpf_prog *map_owner = ri->map_owner;
        struct bpf_map *map = ri->map;
        u32 index = ri->ifindex;
        struct net_device *fwd;
 
        ri->ifindex = 0;
        ri->map = NULL;
+       ri->map_owner = NULL;
+
+       /* A map/program mismatch here can only be caused by a
+        * deliberately malformed BPF program; we would never hit
+        * this case normally, so there is no need to report it
+        * via tracepoints either — just bail out.
+        */
+       if (unlikely(map_owner != xdp_prog))
+               return -EINVAL;
 
        fwd = __dev_map_lookup_elem(map, index);
        if (!fwd) {
 
        ri->ifindex = ifindex;
        ri->flags = flags;
+       ri->map = NULL;
+       ri->map_owner = NULL;
 
        return XDP_REDIRECT;
 }
        .arg2_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags)
+BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
+          const struct bpf_prog *, map_owner)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 
        ri->ifindex = ifindex;
        ri->flags = flags;
        ri->map = map;
+       ri->map_owner = map_owner;
 
        return XDP_REDIRECT;
 }
 
+/* Note, arg4 is hidden from users and populated by the verifier
+ * with the right pointer.
+ */
 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
        .func           = bpf_xdp_redirect_map,
        .gpl_only       = false,