func = (u8 *) __bpf_call_base + imm;
 
                        /* Save skb pointer if we need to re-cache skb data */
-                       if (bpf_helper_changes_skb_data(func))
+                       if (bpf_helper_changes_pkt_data(func))
                                PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
 
                        bpf_jit_emit_func_call(image, ctx, (u64)func);
                        PPC_MR(b2p[BPF_REG_0], 3);
 
                        /* refresh skb cache */
-                       if (bpf_helper_changes_skb_data(func)) {
+                       if (bpf_helper_changes_pkt_data(func)) {
                                /* reload skb pointer to r3 */
                                PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
                                bpf_jit_emit_skb_loads(image, ctx);
 
                EMIT2(0x0d00, REG_14, REG_W1);
                /* lgr %b0,%r2: load return value into %b0 */
                EMIT4(0xb9040000, BPF_REG_0, REG_2);
-               if (bpf_helper_changes_skb_data((void *)func)) {
+               if (bpf_helper_changes_pkt_data((void *)func)) {
                        jit->seen |= SEEN_SKB_CHANGE;
                        /* lg %b1,ST_OFF_SKBP(%r15) */
                        EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
 
                        func = (u8 *) __bpf_call_base + imm32;
                        jmp_offset = func - (image + addrs[i]);
                        if (seen_ld_abs) {
-                               reload_skb_data = bpf_helper_changes_skb_data(func);
+                               reload_skb_data = bpf_helper_changes_pkt_data(func);
                                if (reload_skb_data) {
                                        EMIT1(0x57); /* push %rdi */
                                        jmp_offset += 22; /* pop, mov, sub, mov */
 
        int err;
        int i;
 
+       if (prog && prog->xdp_adjust_head) {
+               en_err(priv, "Does not support bpf_xdp_adjust_head()\n");
+               return -EOPNOTSUPP;
+       }
+
        xdp_ring_num = prog ? priv->rx_ring_num : 0;
 
        /* No need to reconfigure buffers when simply swapping the
 
        bool reset, was_opened;
        int i;
 
+       if (prog && prog->xdp_adjust_head) {
+               netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n");
+               return -EOPNOTSUPP;
+       }
+
        mutex_lock(&priv->state_lock);
 
        if ((netdev->features & NETIF_F_LRO) && prog) {
 
        };
        int err;
 
+       if (prog && prog->xdp_adjust_head) {
+               nn_err(nn, "Does not support bpf_xdp_adjust_head()\n");
+               return -EOPNOTSUPP;
+       }
        if (!prog && !nn->xdp_prog)
                return 0;
        if (prog && nn->xdp_prog) {
 
 {
        struct qede_reload_args args;
 
+       if (prog && prog->xdp_adjust_head) {
+               DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
+               return -EOPNOTSUPP;
+       }
+
        /* If we're called, there was already a bpf reference increment */
        args.func = &qede_xdp_reload_func;
        args.u.new_prog = prog;
 
        u16                     jited:1,        /* Is our filter JIT'ed? */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
-                               dst_needed:1;   /* Do we need dst entry? */
+                               dst_needed:1,   /* Do we need dst entry? */
+                               xdp_adjust_head:1; /* Adjusting pkt head? */
        kmemcheck_bitfield_end(meta);
        enum bpf_prog_type      type;           /* Type of BPF program */
        u32                     len;            /* Number of filter blocks */
 struct xdp_buff {
        void *data;
        void *data_end;
+       /* Earliest valid value for data: bpf_xdp_adjust_head() rejects
+        * any move that would place data below this pointer.
+        */
+       void *data_hard_start;
 };
 
 /* compute the linear packet data range [data, data_end) which
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
-bool bpf_helper_changes_skb_data(void *func);
+bool bpf_helper_changes_pkt_data(void *func);
 
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);
 
  *     @len: length of header to be pushed in front
  *     @flags: Flags (unused for now)
  *     Return: 0 on success or negative error
+ *
+ * int bpf_xdp_adjust_head(xdp_md, delta)
+ *     Adjust the xdp_md.data by delta
+ *     @xdp_md: pointer to xdp_md
+ *     @delta: A positive/negative integer to be added to xdp_md.data
+ *     Return: 0 on success or negative on error
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
        FN(csum_update),                \
        FN(set_hash_invalid),           \
        FN(get_numa_node_id),           \
-       FN(skb_change_head),
+       FN(skb_change_head),            \
+       FN(xdp_adjust_head),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
        __u32 protocol;
 };
 
+#define XDP_PACKET_HEADROOM 256
+
 /* User return codes for XDP prog type.
  * A valid XDP program must return one of these defined values. All other
  * return codes are reserved for future use. Unknown return codes will result
 
        return prog;
 }
 
+/* Weak default: assume no helper changes packet data.  Overridden by the
+ * non-weak definition that enumerates the data-changing helpers.
+ */
-bool __weak bpf_helper_changes_skb_data(void *func)
+bool __weak bpf_helper_changes_pkt_data(void *func)
 {
        return false;
 }
 
                                prog->dst_needed = 1;
                        if (insn->imm == BPF_FUNC_get_prandom_u32)
                                bpf_user_rnd_init_once();
+                       if (insn->imm == BPF_FUNC_xdp_adjust_head)
+                               prog->xdp_adjust_head = 1;
                        if (insn->imm == BPF_FUNC_tail_call) {
                                /* mark bpf_tail_call as different opcode
                                 * to avoid conditional branch in
 
                return -EINVAL;
        }
 
-       changes_data = bpf_helper_changes_skb_data(fn->func);
+       changes_data = bpf_helper_changes_pkt_data(fn->func);
 
        memset(&meta, 0, sizeof(meta));
        meta.pkt_access = fn->pkt_access;
 
        .arg3_type      = ARG_ANYTHING,
 };
 
-bool bpf_helper_changes_skb_data(void *func)
+/* Move xdp->data by offset bytes (negative = grow headroom, positive =
+ * shrink it), keeping data within [data_hard_start, data_end - ETH_HLEN].
+ */
+BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
+{
+       /* Candidate new start of packet data. */
+       void *data = xdp->data + offset;
+
+       /* Refuse moves that underrun the reserved headroom or leave less
+        * than an Ethernet header of packet data.
+        */
+       if (unlikely(data < xdp->data_hard_start ||
+                    data > xdp->data_end - ETH_HLEN))
+               return -EINVAL;
+
+       xdp->data = data;
+
+       return 0;
+}
+
+static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
+       .func           = bpf_xdp_adjust_head,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+};
+
+bool bpf_helper_changes_pkt_data(void *func)
 {
        if (func == bpf_skb_vlan_push ||
            func == bpf_skb_vlan_pop ||
            func == bpf_skb_change_tail ||
            func == bpf_skb_pull_data ||
            func == bpf_l3_csum_replace ||
-           func == bpf_l4_csum_replace)
+           func == bpf_l4_csum_replace ||
+           func == bpf_xdp_adjust_head)
                return true;
 
        return false;
                return &bpf_xdp_event_output_proto;
        case BPF_FUNC_get_smp_processor_id:
                return &bpf_get_smp_processor_id_proto;
+       case BPF_FUNC_xdp_adjust_head:
+               return &bpf_xdp_adjust_head_proto;
        default:
                return sk_filter_func_proto(func_id);
        }