}
 EXPORT_SYMBOL_GPL(xdp_master_redirect);
 
-int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
-                   struct bpf_prog *xdp_prog)
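+/* XSKMAP redirects are handled separately from the other map types: AF_XDP
+ * sockets consume the xdp_buff directly, so no conversion to an xdp_frame
+ * takes place on this path.
+ */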
+static __always_inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri,
+                                                 struct net_device *dev,
+                                                 struct xdp_buff *xdp,
+                                                 struct bpf_prog *xdp_prog)
 {
-       struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
        enum bpf_map_type map_type = ri->map_type;
        void *fwd = ri->tgt_value;
        u32 map_id = ri->map_id;
-       struct xdp_frame *xdpf;
-       struct bpf_map *map;
        int err;
 
        ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
        ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
-       if (map_type == BPF_MAP_TYPE_XSKMAP) {
-               err = __xsk_map_redirect(fwd, xdp);
-               goto out;
-       }
+       err = __xsk_map_redirect(fwd, xdp);
+       if (unlikely(err))
+               goto err;
+
+       _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
+       return 0;
+err:
+       _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
+       return err;
+}
+
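+/* Redirect path shared by all frame-based targets (DEVMAP, DEVMAP_HASH,
+ * CPUMAP and plain device redirect). Taking a pre-converted xdp_frame lets
+ * callers that already hold a frame skip xdp_convert_buff_to_frame().
+ */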
+static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
+                                                  struct net_device *dev,
+                                                  struct xdp_frame *xdpf,
+                                                  struct bpf_prog *xdp_prog)
+{
+       enum bpf_map_type map_type = ri->map_type;
+       void *fwd = ri->tgt_value;
+       u32 map_id = ri->map_id;
+       struct bpf_map *map;
+       int err;
+
+       ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+       ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
-       xdpf = xdp_convert_buff_to_frame(xdp);
        if (unlikely(!xdpf)) {
                err = -EOVERFLOW;
                goto err;
        }
 
        switch (map_type) {
        case BPF_MAP_TYPE_DEVMAP:
                fallthrough;
        case BPF_MAP_TYPE_DEVMAP_HASH:
                map = READ_ONCE(ri->map);
                if (unlikely(map)) {
                        WRITE_ONCE(ri->map, NULL);
                        err = dev_map_enqueue_multi(xdpf, dev, map,
                                                    ri->flags & BPF_F_EXCLUDE_INGRESS);
                } else {
                        err = dev_map_enqueue(fwd, xdpf, dev);
                }
                break;
        case BPF_MAP_TYPE_CPUMAP:
                err = cpu_map_enqueue(fwd, xdpf, dev);
                break;
        case BPF_MAP_TYPE_UNSPEC:
                if (map_id == INT_MAX) {
                        fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index);
                        if (unlikely(!fwd)) {
                                err = -EINVAL;
                                break;
                        }
                        err = dev_xdp_enqueue(fwd, xdpf, dev);
                        break;
                }
                fallthrough;
        default:
                err = -EBADRQC;
        }
 
-out:
        if (unlikely(err))
                goto err;
 
        _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
        return 0;
 err:
        _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
        return err;
 }
+
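+/* Generic redirect entry point: converts the xdp_buff to an xdp_frame for
+ * the frame-based map types, or hands the buff directly to the XSK path.
+ */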
+int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+                   struct bpf_prog *xdp_prog)
+{
+       struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+       enum bpf_map_type map_type = ri->map_type;
+
+       if (map_type == BPF_MAP_TYPE_XSKMAP)
+               return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
+
+       return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp),
+                                      xdp_prog);
+}
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
 
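+/* Variant of xdp_do_redirect() for callers that already have an xdp_frame.
+ * The xdp_buff is still needed for the XSKMAP case, which operates on buffs
+ * rather than frames.
+ */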
+int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
+                         struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
+{
+       struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+       enum bpf_map_type map_type = ri->map_type;
+
+       if (map_type == BPF_MAP_TYPE_XSKMAP)
+               return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
+
+       return __xdp_do_redirect_frame(ri, dev, xdpf, xdp_prog);
+}
+EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
+
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                       struct sk_buff *skb,
                                       struct xdp_buff *xdp,