Mode modifier is one of::
 
-  BPF_IMM  0x00  /* used for 32-bit mov in classic BPF and 64-bit in eBPF */
-  BPF_ABS  0x20
-  BPF_IND  0x40
-  BPF_MEM  0x60
-  BPF_LEN  0x80  /* classic BPF only, reserved in eBPF */
-  BPF_MSH  0xa0  /* classic BPF only, reserved in eBPF */
-  BPF_XADD 0xc0  /* eBPF only, exclusive add */
+  BPF_IMM     0x00  /* used for 32-bit mov in classic BPF and 64-bit in eBPF */
+  BPF_ABS     0x20
+  BPF_IND     0x40
+  BPF_MEM     0x60
+  BPF_LEN     0x80  /* classic BPF only, reserved in eBPF */
+  BPF_MSH     0xa0  /* classic BPF only, reserved in eBPF */
+  BPF_ATOMIC  0xc0  /* eBPF only, atomic operations */
 
 eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and
 (BPF_IND | <size> | BPF_LD) which are used to access packet data.
     BPF_MEM | <size> | BPF_STX:  *(size *) (dst_reg + off) = src_reg
     BPF_MEM | <size> | BPF_ST:   *(size *) (dst_reg + off) = imm32
     BPF_MEM | <size> | BPF_LDX:  dst_reg = *(size *) (src_reg + off)
-    BPF_XADD | BPF_W  | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
-    BPF_XADD | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
 
-Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. Note that 1 and
-2 byte atomic increments are not supported.
+Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW.
+
+It also includes atomic operations, which use the immediate field for extra
+encoding::
+
+   .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_W  | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
+   .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
+
+Note that 1 and 2 byte atomic operations are not supported.
+
+You may encounter BPF_XADD - this is a legacy name for BPF_ATOMIC, referring to
+the exclusive-add operation encoded when the immediate field is zero.
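+
+As an illustrative sketch (the register numbers and the zero offset below are
+arbitrary examples, not required by the ABI), a 64-bit atomic add can be
+constructed with the BPF_ATOMIC_OP() helper introduced alongside this
+encoding::
+
+   /* lock *(u64 *)(r1 + 0) += r2 */
+   struct bpf_insn add = BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2, 0);
+
+   /* Equivalent raw encoding: the mode selects BPF_ATOMIC, the immediate
+    * selects the operation (BPF_ADD here).
+    */
+   struct bpf_insn raw = {
+           .code    = BPF_STX | BPF_DW | BPF_ATOMIC,
+           .dst_reg = BPF_REG_1,
+           .src_reg = BPF_REG_2,
+           .off     = 0,
+           .imm     = BPF_ADD,
+   };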
 
 eBPF has one 16-byte instruction: BPF_LD | BPF_DW | BPF_IMM which consists
 of two consecutive ``struct bpf_insn`` 8-byte blocks and interpreted as single
 
                }
                emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
                break;
-       /* STX XADD: lock *(u32 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_W:
-       /* STX XADD: lock *(u64 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_DW:
+       /* Atomic ops */
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
                goto notyet;
        /* STX: *(size *)(dst + off) = src */
        case BPF_STX | BPF_MEM | BPF_W:
 
                }
                break;
 
-       /* STX XADD: lock *(u32 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_W:
-       /* STX XADD: lock *(u64 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_DW:
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
+               if (insn->imm != BPF_ADD) {
+                       pr_err_once("unknown atomic op code %02x\n", insn->imm);
+                       return -EINVAL;
+               }
+
+               /* STX XADD: lock *(u32 *)(dst + off) += src
+                * and
+                * STX XADD: lock *(u64 *)(dst + off) += src
+                */
+
                if (!off) {
                        reg = dst;
                } else {
 
        case BPF_STX | BPF_H | BPF_MEM:
        case BPF_STX | BPF_W | BPF_MEM:
        case BPF_STX | BPF_DW | BPF_MEM:
-       case BPF_STX | BPF_W | BPF_XADD:
-       case BPF_STX | BPF_DW | BPF_XADD:
+       case BPF_STX | BPF_W | BPF_ATOMIC:
+       case BPF_STX | BPF_DW | BPF_ATOMIC:
                if (insn->dst_reg == BPF_REG_10) {
                        ctx->flags |= EBPF_SEEN_FP;
                        dst = MIPS_R_SP;
                src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
                if (src < 0)
                        return src;
-               if (BPF_MODE(insn->code) == BPF_XADD) {
+               if (BPF_MODE(insn->code) == BPF_ATOMIC) {
+                       if (insn->imm != BPF_ADD) {
+                               pr_err("ATOMIC OP %02x NOT HANDLED\n", insn->imm);
+                               return -EINVAL;
+                       }
+
                        /*
                         * If mem_off does not fit within the 9 bit ll/sc
                         * instruction immediate field, use a temp reg.
 
                        break;
 
                /*
-                * BPF_STX XADD (atomic_add)
+                * BPF_STX ATOMIC (atomic ops)
                 */
-               /* *(u32 *)(dst + off) += src */
-               case BPF_STX | BPF_XADD | BPF_W:
+               case BPF_STX | BPF_ATOMIC | BPF_W:
+                       if (insn->imm != BPF_ADD) {
+                               pr_err_ratelimited(
+                                       "eBPF filter atomic op code %02x (@%d) unsupported\n",
+                                       code, i);
+                               return -ENOTSUPP;
+                       }
+
+                       /* *(u32 *)(dst + off) += src */
+
                        /* Get EA into TMP_REG_1 */
                        EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
                        tmp_idx = ctx->idx * 4;
                        /* we're done if this succeeded */
                        PPC_BCC_SHORT(COND_NE, tmp_idx);
                        break;
-               /* *(u64 *)(dst + off) += src */
-               case BPF_STX | BPF_XADD | BPF_DW:
+               case BPF_STX | BPF_ATOMIC | BPF_DW:
+                       if (insn->imm != BPF_ADD) {
+                               pr_err_ratelimited(
+                                       "eBPF filter atomic op code %02x (@%d) unsupported\n",
+                                       code, i);
+                               return -ENOTSUPP;
+                       }
+                       /* *(u64 *)(dst + off) += src */
+
                        EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
                        tmp_idx = ctx->idx * 4;
                        EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
 
        const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
        const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
 
-       if (mode == BPF_XADD && size != BPF_W)
+       if (mode == BPF_ATOMIC && size != BPF_W)
                return -1;
 
        emit_imm(RV_REG_T0, off, ctx);
                case BPF_MEM:
                        emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
                        break;
-               case BPF_XADD:
+               case BPF_ATOMIC: /* Only BPF_ADD supported */
                        emit(rv_amoadd_w(RV_REG_ZERO, lo(rs), RV_REG_T0, 0, 0),
                             ctx);
                        break;
        case BPF_STX | BPF_MEM | BPF_H:
        case BPF_STX | BPF_MEM | BPF_W:
        case BPF_STX | BPF_MEM | BPF_DW:
-       case BPF_STX | BPF_XADD | BPF_W:
                if (BPF_CLASS(code) == BPF_ST) {
                        emit_imm32(tmp2, imm, ctx);
                        src = tmp2;
                        return -1;
                break;
 
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+               if (insn->imm != BPF_ADD) {
+                       pr_info_once(
+                               "bpf-jit: not supported: atomic operation %02x ***\n",
+                               insn->imm);
+                       return -EFAULT;
+               }
+
+               if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
+                                  BPF_MODE(code)))
+                       return -1;
+               break;
+
        /* No hardware support for 8-byte atomics in RV32. */
-       case BPF_STX | BPF_XADD | BPF_DW:
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
                /* Fallthrough. */
 
 notsupported:
 
                emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
                emit_sd(RV_REG_T1, 0, rs, ctx);
                break;
-       /* STX XADD: lock *(u32 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_W:
-       /* STX XADD: lock *(u64 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_DW:
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
+               if (insn->imm != BPF_ADD) {
+                       pr_err("bpf-jit: not supported: atomic operation %02x ***\n",
+                              insn->imm);
+                       return -EINVAL;
+               }
+
+               /* atomic_add: lock *(u32 *)(dst + off) += src
+                * atomic_add: lock *(u64 *)(dst + off) += src
+                */
+
                if (off) {
                        if (is_12b_int(off)) {
                                emit_addi(RV_REG_T1, rd, off, ctx);
 
                jit->seen |= SEEN_MEM;
                break;
        /*
-        * BPF_STX XADD (atomic_add)
+        * BPF_ATOMIC
         */
-       case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
-               /* laal %w0,%src,off(%dst) */
-               EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
-                             dst_reg, off);
-               jit->seen |= SEEN_MEM;
-               break;
-       case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
-               /* laalg %w0,%src,off(%dst) */
-               EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
-                             dst_reg, off);
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+               if (insn->imm != BPF_ADD) {
+                       pr_err("Unknown atomic operation %02x\n", insn->imm);
+                       return -1;
+               }
+
+               /* *(u32/u64 *)(dst + off) += src
+                *
+                * BPF_W:  laal  %w0,%src,off(%dst)
+                * BPF_DW: laalg %w0,%src,off(%dst)
+                */
+               EMIT6_DISP_LH(0xeb000000,
+                             BPF_SIZE(insn->code) == BPF_W ? 0x00fa : 0x00ea,
+                             REG_W0, src_reg, dst_reg, off);
                jit->seen |= SEEN_MEM;
                break;
        /*
 
                break;
        }
 
-       /* STX XADD: lock *(u32 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_W: {
+       case BPF_STX | BPF_ATOMIC | BPF_W: {
                const u8 tmp = bpf2sparc[TMP_REG_1];
                const u8 tmp2 = bpf2sparc[TMP_REG_2];
                const u8 tmp3 = bpf2sparc[TMP_REG_3];
 
+               if (insn->imm != BPF_ADD) {
+                       pr_err_once("unknown atomic op %02x\n", insn->imm);
+                       return -EINVAL;
+               }
+
+               /* lock *(u32 *)(dst + off) += src */
+
                if (insn->dst_reg == BPF_REG_FP)
                        ctx->saw_frame_pointer = true;
 
                break;
        }
        /* STX XADD: lock *(u64 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_DW: {
+       case BPF_STX | BPF_ATOMIC | BPF_DW: {
                const u8 tmp = bpf2sparc[TMP_REG_1];
                const u8 tmp2 = bpf2sparc[TMP_REG_2];
                const u8 tmp3 = bpf2sparc[TMP_REG_3];
 
+               if (insn->imm != BPF_ADD) {
+                       pr_err_once("unknown atomic op %02x\n", insn->imm);
+                       return -EINVAL;
+               }
+
                if (insn->dst_reg == BPF_REG_FP)
                        ctx->saw_frame_pointer = true;
 
 
        *pprog = prog;
 }
 
+static int emit_atomic(u8 **pprog, u8 atomic_op,
+                      u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
+{
+       u8 *prog = *pprog;
+       int cnt = 0;
+
+       EMIT1(0xF0); /* lock prefix */
+
+       maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
+
+       /* emit opcode */
+       switch (atomic_op) {
+       case BPF_ADD:
+               /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
+               EMIT1(simple_alu_opcodes[atomic_op]);
+               break;
+       default:
+               pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
+               return -EFAULT;
+       }
+
+       emit_insn_suffix(&prog, dst_reg, src_reg, off);
+
+       *pprog = prog;
+       return 0;
+}
+
 static bool ex_handler_bpf(const struct exception_table_entry *x,
                           struct pt_regs *regs, int trapnr,
                           unsigned long error_code, unsigned long fault_addr)
        int i, cnt = 0, excnt = 0;
        int proglen = 0;
        u8 *prog = temp;
+       int err;
 
        detect_reg_usage(insn, insn_cnt, callee_regs_used,
                         &tail_call_seen);
                        }
                        break;
 
-                       /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
-               case BPF_STX | BPF_XADD | BPF_W:
-                       /* Emit 'lock add dword ptr [rax + off], eax' */
-                       if (is_ereg(dst_reg) || is_ereg(src_reg))
-                               EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
-                       else
-                               EMIT2(0xF0, 0x01);
-                       goto xadd;
-               case BPF_STX | BPF_XADD | BPF_DW:
-                       EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
-xadd:
-                       emit_modrm_dstoff(&prog, dst_reg, src_reg, insn->off);
+               case BPF_STX | BPF_ATOMIC | BPF_W:
+               case BPF_STX | BPF_ATOMIC | BPF_DW:
+                       err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
+                                         insn->off, BPF_SIZE(insn->code));
+                       if (err)
+                               return err;
                        break;
 
                        /* call */
 
                                return -EFAULT;
                        }
                        break;
-               /* STX XADD: lock *(u32 *)(dst + off) += src */
-               case BPF_STX | BPF_XADD | BPF_W:
-               /* STX XADD: lock *(u64 *)(dst + off) += src */
-               case BPF_STX | BPF_XADD | BPF_DW:
+               case BPF_STX | BPF_ATOMIC | BPF_W:
+               case BPF_STX | BPF_ATOMIC | BPF_DW:
                        goto notyet;
                case BPF_JMP | BPF_EXIT:
                        if (seen_exit) {
 
        return 0;
 }
 
-static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
+       if (meta->insn.imm != BPF_ADD)
+               return -EOPNOTSUPP;
+
        return mem_xadd(nfp_prog, meta, false);
 }
 
-static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
+       if (meta->insn.imm != BPF_ADD)
+               return -EOPNOTSUPP;
+
        return mem_xadd(nfp_prog, meta, true);
 }
 
        [BPF_STX | BPF_MEM | BPF_H] =   mem_stx2,
        [BPF_STX | BPF_MEM | BPF_W] =   mem_stx4,
        [BPF_STX | BPF_MEM | BPF_DW] =  mem_stx8,
-       [BPF_STX | BPF_XADD | BPF_W] =  mem_xadd4,
-       [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8,
+       [BPF_STX | BPF_ATOMIC | BPF_W] =        mem_atomic4,
+       [BPF_STX | BPF_ATOMIC | BPF_DW] =       mem_atomic8,
        [BPF_ST | BPF_MEM | BPF_B] =    mem_st1,
        [BPF_ST | BPF_MEM | BPF_H] =    mem_st2,
        [BPF_ST | BPF_MEM | BPF_W] =    mem_st4,
 
        return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
 }
 
-static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_atomic(const struct nfp_insn_meta *meta)
 {
-       return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
+       return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_ATOMIC);
 }
 
 static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
 
                        pr_vlog(env, "map writes not supported\n");
                        return -EOPNOTSUPP;
                }
-               if (is_mbpf_xadd(meta)) {
+               if (is_mbpf_atomic(meta)) {
                        err = nfp_bpf_map_mark_used(env, meta, reg,
                                                    NFP_MAP_USE_ATOMIC_CNT);
                        if (err)
 }
 
 static int
-nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
-                  struct bpf_verifier_env *env)
+nfp_bpf_check_atomic(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+                    struct bpf_verifier_env *env)
 {
        const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
        const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;
 
+       if (meta->insn.imm != BPF_ADD) {
+               pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm);
+               return -EOPNOTSUPP;
+       }
+
        if (dreg->type != PTR_TO_MAP_VALUE) {
                pr_vlog(env, "atomic add not to a map value pointer: %d\n",
                        dreg->type);
        if (is_mbpf_store(meta))
                return nfp_bpf_check_store(nfp_prog, meta, env);
 
-       if (is_mbpf_xadd(meta))
-               return nfp_bpf_check_xadd(nfp_prog, meta, env);
+       if (is_mbpf_atomic(meta))
+               return nfp_bpf_check_atomic(nfp_prog, meta, env);
 
        if (is_mbpf_alu(meta))
                return nfp_bpf_check_alu(nfp_prog, meta, env);
 
                .off   = OFF,                                   \
                .imm   = 0 })
 
-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
 
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                      \
+/*
+ * Atomic operations:
+ *
+ *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
+ */
+
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)                 \
        ((struct bpf_insn) {                                    \
-               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
-               .imm   = 0 })
+               .imm   = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
 
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 
 
 /* ld/ldx fields */
 #define BPF_DW         0x18    /* double word (64-bit) */
-#define BPF_XADD       0xc0    /* exclusive add */
+#define BPF_ATOMIC     0xc0    /* atomic memory ops - op type in immediate */
+#define BPF_XADD       0xc0    /* exclusive add - legacy name */
 
 /* alu/jmp fields */
 #define BPF_MOV                0xb0    /* mov reg to reg */
  *             running simultaneously.
  *
  *             A user should care about the synchronization by himself.
- *             For example, by using the **BPF_STX_XADD** instruction to alter
+ *             For example, by using the **BPF_ATOMIC** instructions to alter
  *             the shared data.
  *     Return
  *             A pointer to the local storage area.
 
        INSN_3(STX, MEM,  H),                   \
        INSN_3(STX, MEM,  W),                   \
        INSN_3(STX, MEM,  DW),                  \
-       INSN_3(STX, XADD, W),                   \
-       INSN_3(STX, XADD, DW),                  \
+       INSN_3(STX, ATOMIC, W),                 \
+       INSN_3(STX, ATOMIC, DW),                \
        /*   Immediate based. */                \
        INSN_3(ST, MEM, B),                     \
        INSN_3(ST, MEM, H),                     \
        LDX_PROBE(DW, 8)
 #undef LDX_PROBE
 
-       STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
-               atomic_add((u32) SRC, (atomic_t *)(unsigned long)
-                          (DST + insn->off));
+       STX_ATOMIC_W:
+               switch (IMM) {
+               case BPF_ADD:
+                       /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
+                       atomic_add((u32) SRC, (atomic_t *)(unsigned long)
+                                  (DST + insn->off));
+                       break;
+               default:
+                       goto default_label;
+               }
                CONT;
-       STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
-               atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
-                            (DST + insn->off));
+       STX_ATOMIC_DW:
+               switch (IMM) {
+               case BPF_ADD:
+                       /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
+                       atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
+                                    (DST + insn->off));
+                       break;
+               default:
+                       goto default_label;
+               }
                CONT;
 
        default_label:
                 *
                 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
                 */
-               pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
+               pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
+                       insn->code, insn->imm);
                BUG_ON(1);
                return 0;
 }
 
                                bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
                                insn->dst_reg,
                                insn->off, insn->src_reg);
-               else if (BPF_MODE(insn->code) == BPF_XADD)
+               else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
+                        insn->imm == BPF_ADD) {
                        verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) += r%d\n",
                                insn->code,
                                bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
                                insn->dst_reg, insn->off,
                                insn->src_reg);
-               else
+               } else {
                        verbose(cbs->private_data, "BUG_%02x\n", insn->code);
+               }
        } else if (class == BPF_ST) {
                if (BPF_MODE(insn->code) != BPF_MEM) {
                        verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
 
        return err;
 }
 
-static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
+static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
 {
        int err;
 
-       if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
-           insn->imm != 0) {
-               verbose(env, "BPF_XADD uses reserved fields\n");
+       if (insn->imm != BPF_ADD) {
+               verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
+               return -EINVAL;
+       }
+
+       if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
+               verbose(env, "invalid atomic operand size\n");
                return -EINVAL;
        }
 
            is_pkt_reg(env, insn->dst_reg) ||
            is_flow_key_reg(env, insn->dst_reg) ||
            is_sk_reg(env, insn->dst_reg)) {
-               verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+               verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
                        insn->dst_reg,
                        reg_type_str[reg_state(env, insn->dst_reg)->type]);
                return -EACCES;
        }
 
-       /* check whether atomic_add can read the memory */
+       /* check whether we can read the memory */
        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_READ, -1, true);
        if (err)
                return err;
 
-       /* check whether atomic_add can write into the same memory */
+       /* check whether we can write into the same memory */
        return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                                BPF_SIZE(insn->code), BPF_WRITE, -1, true);
 }
                } else if (class == BPF_STX) {
                        enum bpf_reg_type *prev_dst_type, dst_reg_type;
 
-                       if (BPF_MODE(insn->code) == BPF_XADD) {
-                               err = check_xadd(env, env->insn_idx, insn);
+                       if (BPF_MODE(insn->code) == BPF_ATOMIC) {
+                               err = check_atomic(env, env->insn_idx, insn);
                                if (err)
                                        return err;
                                env->insn_idx++;
 
                if (BPF_CLASS(insn->code) == BPF_STX &&
                    ((BPF_MODE(insn->code) != BPF_MEM &&
-                     BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
+                     BPF_MODE(insn->code) != BPF_ATOMIC) || insn->imm != 0)) {
                        verbose(env, "BPF_STX uses reserved fields\n");
                        return -EINVAL;
                }
 
                { { 0, 0xffffffff } },
                .stack_depth = 40,
        },
-       /* BPF_STX | BPF_XADD | BPF_W/DW */
+       /* BPF_STX | BPF_ATOMIC | BPF_W/DW */
        {
                "STX_XADD_W: Test: 0x12 + 0x10 = 0x22",
                .u.insns_int = {
                        BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
                        BPF_ST_MEM(BPF_W, R10, -40, 0x10),
-                       BPF_STX_XADD(BPF_W, R10, R0, -40),
+                       BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
                        BPF_LDX_MEM(BPF_W, R0, R10, -40),
                        BPF_EXIT_INSN(),
                },
                        BPF_ALU64_REG(BPF_MOV, R1, R10),
                        BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
                        BPF_ST_MEM(BPF_W, R10, -40, 0x10),
-                       BPF_STX_XADD(BPF_W, R10, R0, -40),
+                       BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
                        BPF_ALU64_REG(BPF_MOV, R0, R10),
                        BPF_ALU64_REG(BPF_SUB, R0, R1),
                        BPF_EXIT_INSN(),
                .u.insns_int = {
                        BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
                        BPF_ST_MEM(BPF_W, R10, -40, 0x10),
-                       BPF_STX_XADD(BPF_W, R10, R0, -40),
+                       BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
                        BPF_EXIT_INSN(),
                },
                INTERNAL,
                .u.insns_int = {
                        BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
                        BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
-                       BPF_STX_XADD(BPF_DW, R10, R0, -40),
+                       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
                        BPF_LDX_MEM(BPF_DW, R0, R10, -40),
                        BPF_EXIT_INSN(),
                },
                        BPF_ALU64_REG(BPF_MOV, R1, R10),
                        BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
                        BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
-                       BPF_STX_XADD(BPF_DW, R10, R0, -40),
+                       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
                        BPF_ALU64_REG(BPF_MOV, R0, R10),
                        BPF_ALU64_REG(BPF_SUB, R0, R1),
                        BPF_EXIT_INSN(),
                .u.insns_int = {
                        BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
                        BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
-                       BPF_STX_XADD(BPF_DW, R10, R0, -40),
+                       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
                        BPF_EXIT_INSN(),
                },
                INTERNAL,
 
 
 #define BPF_STX_XADD(SIZE, DST, SRC, OFF)                      \
        ((struct bpf_insn) {                                    \
-               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
-               .imm   = 0 })
+               .imm   = BPF_ADD })
 
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 
                 */
                BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
                BPF_MOV64_IMM(BPF_REG_1, 1),
-               BPF_STX_XADD(BPF_DW, BPF_REG_9, BPF_REG_1,
-                               offsetof(struct stats, packets)),
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1,
+                             offsetof(struct stats, packets)),
                BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
                                offsetof(struct __sk_buff, len)),
-               BPF_STX_XADD(BPF_DW, BPF_REG_9, BPF_REG_1,
-                               offsetof(struct stats, bytes)),
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1,
+                             offsetof(struct stats, bytes)),
                BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
                                offsetof(struct __sk_buff, len)),
                BPF_EXIT_INSN(),
 
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
                BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
-               BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
                BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
                BPF_EXIT_INSN(),
        };
 
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
                BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
-               BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
 
                /* Count bytes */
                BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
                BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */
-               BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
 
                BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
                BPF_EXIT_INSN(),
 
                .off   = OFF,                                   \
                .imm   = 0 })
 
-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+/*
+ * Atomic operations:
+ *
+ *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
+ */
 
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                      \
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)                 \
        ((struct bpf_insn) {                                    \
-               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
-               .imm   = 0 })
+               .imm   = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
 
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 
 
 /* ld/ldx fields */
 #define BPF_DW         0x18    /* double word (64-bit) */
-#define BPF_XADD       0xc0    /* exclusive add */
+#define BPF_ATOMIC     0xc0    /* atomic memory ops - op type in immediate */
+#define BPF_XADD       0xc0    /* exclusive add - legacy name */
 
 /* alu/jmp fields */
 #define BPF_MOV                0xb0    /* mov reg to reg */
  *             running simultaneously.
  *
  *             A user should care about the synchronization by himself.
- *             For example, by using the **BPF_STX_XADD** instruction to alter
+ *             For example, by using the **BPF_ATOMIC** instructions to alter
  *             the shared data.
  *     Return
  *             A pointer to the local storage area.
 
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
                BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
-               BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
 
                BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
                BPF_MOV64_IMM(BPF_REG_2, 0),
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
                BPF_MOV64_IMM(BPF_REG_1, val),
-               BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
+               BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
 
                BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
                BPF_MOV64_IMM(BPF_REG_2, 0),
 
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                             BPF_FUNC_get_local_storage),
                BPF_MOV64_IMM(BPF_REG_1, 1),
-               BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
                BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
                BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
                BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
 
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
 {
-       "context stores via XADD",
+       "context stores via BPF_ATOMIC",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-       BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
-                    BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, mark)),
        BPF_EXIT_INSN(),
        },
-       .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+       .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
 
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
-       BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_4, BPF_REG_5, 0),
        BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
        BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
        BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
        BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
-       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_4, -8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
        BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
 
        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
                    offsetof(struct __sk_buff, cb[0])),
        BPF_LD_MAP_FD(BPF_REG_2, 0),
-       BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2,
                      offsetof(struct __sk_buff, cb[0])),
        BPF_EXIT_INSN(),
        },
        .errstr_unpriv = "R2 leaks addr into mem",
        .result_unpriv = REJECT,
        .result = REJECT,
-       .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+       .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
 },
 {
        "leak pointer into ctx 2",
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
                    offsetof(struct __sk_buff, cb[0])),
-       BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_10,
                      offsetof(struct __sk_buff, cb[0])),
        BPF_EXIT_INSN(),
        },
        .errstr_unpriv = "R10 leaks addr into mem",
        .result_unpriv = REJECT,
        .result = REJECT,
-       .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+       .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
 },
 {
        "leak pointer into ctx 3",
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
        BPF_MOV64_IMM(BPF_REG_3, 0),
        BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
-       BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_6, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
 
        BPF_MOV64_IMM(BPF_REG_5, 42),
        BPF_MOV64_IMM(BPF_REG_6, 24),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
-       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
        BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
        BPF_MOV64_IMM(BPF_REG_5, 42),
        BPF_MOV64_IMM(BPF_REG_6, 24),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
-       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
        BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
 
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
        BPF_MOV64_IMM(BPF_REG_0, 1),
-       BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0),
+       BPF_RAW_INSN(BPF_STX | BPF_ATOMIC | BPF_DW,
+                    BPF_REG_10, BPF_REG_0, -8, BPF_ADD),
        BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
        BPF_EXIT_INSN(),
 
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
-       BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_2, BPF_REG_3, 0),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
        BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
        BPF_EXIT_INSN(),
 
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 1),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -7),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
        BPF_EXIT_INSN(),
        },
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_IMM(BPF_REG_1, 1),
-       BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 3),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
        BPF_EXIT_INSN(),
        },
        BPF_MOV64_IMM(BPF_REG_0, 1),
        BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
        BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
-       BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
-       BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 1),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 2),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
-       .errstr = "BPF_XADD stores into R2 pkt is not allowed",
+       .errstr = "BPF_ATOMIC stores into R2 pkt is not allowed",
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
        BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
        BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
-       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
-       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
        BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
        BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),