                        goto patch_call_imm;
                }
 
-               if (insn->imm == BPF_FUNC_redirect_map) {
-                       /* Note, we cannot use prog directly as imm as subsequent
-                        * rewrites would still change the prog pointer. The only
-                        * stable address we can use is aux, which also works with
-                        * prog clones during blinding.
-                        */
-                       u64 addr = (unsigned long)prog->aux;
-                       struct bpf_insn r4_ld[] = {
-                               BPF_LD_IMM64(BPF_REG_4, addr),
-                               *insn,
-                       };
-                       cnt = ARRAY_SIZE(r4_ld);
-
-                       new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
-                       if (!new_prog)
-                               return -ENOMEM;
-
-                       delta    += cnt - 1;
-                       env->prog = prog = new_prog;
-                       insn      = new_prog->insnsi + i + delta;
-               }
 patch_call_imm:
                fn = env->ops->get_func_proto(insn->imm, env->prog);
                /* all functions that have prototype and verifier allowed
 
        }
 }
 
-static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
-                                  unsigned long aux)
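+/* Called when a redirect map is torn down: drop any stale per-CPU
+ * reference to @map from bpf_redirect_info so a later xdp_do_redirect()
+ * cannot dereference freed map memory.
+ */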
+void bpf_clear_redirect_map(struct bpf_map *map)
 {
-       return (unsigned long)xdp_prog->aux != aux;
+       struct bpf_redirect_info *ri;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               ri = per_cpu_ptr(&bpf_redirect_info, cpu);
+               /* Avoid polluting remote cacheline due to writes if
+                * not needed. Once we pass this test, we need the
+                * cmpxchg() to make sure it hasn't been changed in
+                * the meantime by remote CPU.
+                */
+               if (unlikely(READ_ONCE(ri->map) == map))
+                       cmpxchg(&ri->map, map, NULL);
+       }
 }
 
 static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
-                              struct bpf_prog *xdp_prog)
+                              struct bpf_prog *xdp_prog, struct bpf_map *map)
 {
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-       unsigned long map_owner = ri->map_owner;
-       struct bpf_map *map = ri->map;
        u32 index = ri->ifindex;
        void *fwd = NULL;
        int err;
 
        ri->ifindex = 0;
-       ri->map = NULL;
-       ri->map_owner = 0;
-
-       if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
-               err = -EFAULT;
-               map = NULL;
-               goto err;
-       }
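+       /* Clear the pointer with WRITE_ONCE(); bpf_clear_redirect_map()
+        * may READ_ONCE()/cmpxchg() it concurrently from another CPU on
+        * map teardown.
+        */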
+       WRITE_ONCE(ri->map, NULL);
 
        fwd = __xdp_map_lookup_elem(map, index);
        if (!fwd) {
                    struct bpf_prog *xdp_prog)
 {
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
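+       /* Set by the redirect_map helper; bpf_clear_redirect_map() resets
+        * it to NULL if the map has been torn down in the meantime.
+        */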
+       struct bpf_map *map = READ_ONCE(ri->map);
        struct net_device *fwd;
        u32 index = ri->ifindex;
        int err;
 
-       if (ri->map)
-               return xdp_do_redirect_map(dev, xdp, xdp_prog);
+       if (map)
+               return xdp_do_redirect_map(dev, xdp, xdp_prog, map);
 
        fwd = dev_get_by_index_rcu(dev_net(dev), index);
        ri->ifindex = 0;
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                       struct sk_buff *skb,
                                       struct xdp_buff *xdp,
-                                      struct bpf_prog *xdp_prog)
+                                      struct bpf_prog *xdp_prog,
+                                      struct bpf_map *map)
 {
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-       unsigned long map_owner = ri->map_owner;
-       struct bpf_map *map = ri->map;
        u32 index = ri->ifindex;
        void *fwd = NULL;
        int err = 0;
 
        ri->ifindex = 0;
-       ri->map = NULL;
-       ri->map_owner = 0;
+       WRITE_ONCE(ri->map, NULL);
 
-       if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
-               err = -EFAULT;
-               map = NULL;
-               goto err;
-       }
        fwd = __xdp_map_lookup_elem(map, index);
        if (unlikely(!fwd)) {
                err = -EINVAL;
                            struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
 {
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
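+       /* See xdp_do_redirect() for the READ_ONCE() pairing with
+        * bpf_clear_redirect_map().
+        */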
+       struct bpf_map *map = READ_ONCE(ri->map);
        u32 index = ri->ifindex;
        struct net_device *fwd;
        int err = 0;
 
-       if (ri->map)
-               return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog);
-
+       if (map)
+               return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
+                                                  map);
        ri->ifindex = 0;
        fwd = dev_get_by_index_rcu(dev_net(dev), index);
        if (unlikely(!fwd)) {
 
        ri->ifindex = ifindex;
        ri->flags = flags;
-       ri->map = NULL;
-       ri->map_owner = 0;
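+       /* Drop any stale map pointer so xdp_do_redirect() takes the
+        * plain ifindex path.
+        */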
+       WRITE_ONCE(ri->map, NULL);
 
        return XDP_REDIRECT;
 }
        .arg2_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
-          unsigned long, map_owner)
+BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
+          u64, flags)
 {
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
 
 
        ri->ifindex = ifindex;
        ri->flags = flags;
-       ri->map = map;
-       ri->map_owner = map_owner;
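+       /* Publish the map pointer with WRITE_ONCE(); it is read via
+        * READ_ONCE() in xdp_do_redirect() and cleared via cmpxchg() in
+        * bpf_clear_redirect_map() on map teardown.
+        */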
+       WRITE_ONCE(ri->map, map);
 
        return XDP_REDIRECT;
 }
 
-/* Note, arg4 is hidden from users and populated by the verifier
- * with the right pointer.
- */
 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
        .func           = bpf_xdp_redirect_map,
        .gpl_only       = false,