/* DYNPTR points to memory local to the bpf program. */
        DYNPTR_TYPE_LOCAL       = BIT(8 + BPF_BASE_TYPE_BITS),
 
+       /* DYNPTR points to a ringbuf record. */
+       DYNPTR_TYPE_RINGBUF     = BIT(9 + BPF_BASE_TYPE_BITS),
+
        __BPF_TYPE_FLAG_MAX,
        __BPF_TYPE_LAST_FLAG    = __BPF_TYPE_FLAG_MAX - 1,
 };
 
-#define DYNPTR_TYPE_FLAG_MASK  DYNPTR_TYPE_LOCAL
+#define DYNPTR_TYPE_FLAG_MASK  (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF)
 
 /* Max number of base types. */
 #define BPF_BASE_TYPE_LIMIT    (1UL << BPF_BASE_TYPE_BITS)
 extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
 extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
 extern const struct bpf_func_proto bpf_ringbuf_query_proto;
+extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
 extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
 extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
 extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
        BPF_DYNPTR_TYPE_INVALID,
        /* Points to memory that is local to the bpf program */
        BPF_DYNPTR_TYPE_LOCAL,
+       /* Underlying data is a ringbuf record */
+       BPF_DYNPTR_TYPE_RINGBUF,
 };
 
+void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
+                    enum bpf_dynptr_type type, u32 offset, u32 size);
+void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
+int bpf_dynptr_check_size(u32 size);
+
 #endif /* _LINUX_BPF_H */
 
         * for the purpose of tracking that it's freed.
         * For PTR_TO_SOCKET this is used to share which pointers retain the
         * same reference to the socket, to determine proper reference freeing.
+        * For stack slots that are dynptrs, this is used to track references to
+        * the dynptr to determine proper reference freeing.
         */
        u32 id;
        /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
 
  *     Return
  *             0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
  *             -EINVAL if flags is not 0.
+ *
+ * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ *     Description
+ *             Reserve *size* bytes of payload in a ring buffer *ringbuf*
+ *             through the dynptr interface. *flags* must be 0.
+ *
+ *             Please note that a corresponding bpf_ringbuf_submit_dynptr or
+ *             bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
+ *             reservation fails. This is enforced by the verifier.
+ *     Return
+ *             0 on success, or a negative error in case of failure.
+ *
+ * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ *     Description
+ *             Submit a reserved ring buffer sample, pointed to by *ptr*,
+ *             through the dynptr interface. This is a no-op if the dynptr is
+ *             invalid/null.
+ *
+ *             For more information on *flags*, please see
+ *             'bpf_ringbuf_submit'.
+ *     Return
+ *             Nothing. Always succeeds.
+ *
+ * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ *     Description
+ *             Discard a reserved ring buffer sample through the dynptr
+ *             interface. This is a no-op if the dynptr is invalid/null.
+ *
+ *             For more information on *flags*, please see
+ *             'bpf_ringbuf_discard'.
+ *     Return
+ *             Nothing. Always succeeds.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
        FN(map_lookup_percpu_elem),     \
        FN(skc_to_mptcp_sock),          \
        FN(dynptr_from_mem),            \
+       FN(ringbuf_reserve_dynptr),     \
+       FN(ringbuf_submit_dynptr),      \
+       FN(ringbuf_discard_dynptr),     \
        /* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
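From the program side, the new helpers pair up exactly like the non-dynptr reserve/submit/discard trio. A minimal usage sketch follows; the map name, event layout, attach point, and the bpf_dynptr_data() helper (added elsewhere in this series, not in this patch) are assumptions for illustration:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_RINGBUF);
        __uint(max_entries, 4096);
} rb SEC(".maps");

struct event {
        __u32 pid;
};

SEC("tracepoint/syscalls/sys_enter_openat")
int trace_open(void *ctx)
{
        struct bpf_dynptr ptr;
        struct event *e;

        /* flags must be 0; on failure the dynptr is set to null, but a
         * submit or discard is still required (verifier-enforced). */
        if (bpf_ringbuf_reserve_dynptr(&rb, sizeof(*e), 0, &ptr)) {
                bpf_ringbuf_discard_dynptr(&ptr, 0);
                return 0;
        }

        /* bpf_dynptr_data() is assumed from the same dynptr series. */
        e = bpf_dynptr_data(&ptr, 0, sizeof(*e));
        if (!e) {
                bpf_ringbuf_discard_dynptr(&ptr, 0);
                return 0;
        }

        e->pid = bpf_get_current_pid_tgid() >> 32;
        bpf_ringbuf_submit_dynptr(&ptr, 0);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";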
 
        ptr->size |= type << DYNPTR_TYPE_SHIFT;
 }
 
-static int bpf_dynptr_check_size(u32 size)
+int bpf_dynptr_check_size(u32 size)
 {
        return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
 }
 
-static void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
-                           enum bpf_dynptr_type type, u32 offset, u32 size)
+void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
+                    enum bpf_dynptr_type type, u32 offset, u32 size)
 {
        ptr->data = data;
        ptr->offset = offset;
        ptr->size = size;
        bpf_dynptr_set_type(ptr, type);
 }
 
-static void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
+void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
 {
        memset(ptr, 0, sizeof(*ptr));
 }
                return &bpf_ringbuf_discard_proto;
        case BPF_FUNC_ringbuf_query:
                return &bpf_ringbuf_query_proto;
+       case BPF_FUNC_ringbuf_reserve_dynptr:
+               return &bpf_ringbuf_reserve_dynptr_proto;
+       case BPF_FUNC_ringbuf_submit_dynptr:
+               return &bpf_ringbuf_submit_dynptr_proto;
+       case BPF_FUNC_ringbuf_discard_dynptr:
+               return &bpf_ringbuf_discard_dynptr_proto;
        case BPF_FUNC_for_each_map_elem:
                return &bpf_for_each_map_elem_proto;
        case BPF_FUNC_loop:
 
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
 };
+
+BPF_CALL_4(bpf_ringbuf_reserve_dynptr, struct bpf_map *, map, u32, size, u64, flags,
+          struct bpf_dynptr_kern *, ptr)
+{
+       struct bpf_ringbuf_map *rb_map;
+       void *sample;
+       int err;
+
+       if (unlikely(flags)) {
+               bpf_dynptr_set_null(ptr);
+               return -EINVAL;
+       }
+
+       err = bpf_dynptr_check_size(size);
+       if (err) {
+               bpf_dynptr_set_null(ptr);
+               return err;
+       }
+
+       rb_map = container_of(map, struct bpf_ringbuf_map, map);
+
+       sample = __bpf_ringbuf_reserve(rb_map->rb, size);
+       if (!sample) {
+               bpf_dynptr_set_null(ptr);
+               return -EINVAL;
+       }
+
+       bpf_dynptr_init(ptr, sample, BPF_DYNPTR_TYPE_RINGBUF, 0, size);
+
+       return 0;
+}
+
+const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = {
+       .func           = bpf_ringbuf_reserve_dynptr,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_CONST_MAP_PTR,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT,
+};
+
+BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
+{
+       if (!ptr->data)
+               return 0;
+
+       bpf_ringbuf_commit(ptr->data, flags, false /* discard */);
+
+       bpf_dynptr_set_null(ptr);
+
+       return 0;
+}
+
+const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto = {
+       .func           = bpf_ringbuf_submit_dynptr,
+       .ret_type       = RET_VOID,
+       .arg1_type      = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
+       .arg2_type      = ARG_ANYTHING,
+};
+
+BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
+{
+       if (!ptr->data)
+               return 0;
+
+       bpf_ringbuf_commit(ptr->data, flags, true /* discard */);
+
+       bpf_dynptr_set_null(ptr);
+
+       return 0;
+}
+
+const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto = {
+       .func           = bpf_ringbuf_discard_dynptr,
+       .ret_type       = RET_VOID,
+       .arg1_type      = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
+       .arg2_type      = ARG_ANYTHING,
+};
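On the consumer side nothing changes: dynptr reservations go through the same __bpf_ringbuf_reserve()/bpf_ringbuf_commit() pair as bpf_ringbuf_reserve(), so records look identical to userspace. A minimal libbpf consumer sketch, assuming map_fd has already been obtained from a loaded object:

#include <stdio.h>
#include <bpf/libbpf.h>

static int handle_event(void *ctx, void *data, size_t size)
{
        printf("got %zu-byte record\n", size);
        return 0;
}

int consume(int map_fd)
{
        struct ring_buffer *rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);

        if (!rb)
                return -1;

        /* Block until records arrive; a negative return signals an error. */
        while (ring_buffer__poll(rb, -1 /* timeout_ms */) >= 0)
                ;

        ring_buffer__free(rb);
        return 0;
}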
 
                                          POISON_POINTER_DELTA))
 #define BPF_MAP_PTR(X)         ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
 
+static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
+static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
+
 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
 {
        return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
        switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
        case DYNPTR_TYPE_LOCAL:
                return BPF_DYNPTR_TYPE_LOCAL;
+       case DYNPTR_TYPE_RINGBUF:
+               return BPF_DYNPTR_TYPE_RINGBUF;
        default:
                return BPF_DYNPTR_TYPE_INVALID;
        }
 }
 
+static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
+{
+       return type == BPF_DYNPTR_TYPE_RINGBUF;
+}
+
 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
                                   enum bpf_arg_type arg_type, int insn_idx)
 {
        struct bpf_func_state *state = func(env, reg);
        enum bpf_dynptr_type type;
-       int spi, i;
+       int spi, i, id;
 
        spi = get_spi(reg->off);
 
        state->stack[spi].spilled_ptr.dynptr.type = type;
        state->stack[spi - 1].spilled_ptr.dynptr.type = type;
 
+       if (dynptr_type_refcounted(type)) {
+               /* The id is used to track proper releasing */
+               id = acquire_reference_state(env, insn_idx);
+               if (id < 0)
+                       return id;
+
+               state->stack[spi].spilled_ptr.id = id;
+               state->stack[spi - 1].spilled_ptr.id = id;
+       }
+
        return 0;
 }
 
                state->stack[spi - 1].slot_type[i] = STACK_INVALID;
        }
 
+       /* Invalidate any slices associated with this dynptr */
+       if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
+               release_reference(env, state->stack[spi].spilled_ptr.id);
+               state->stack[spi].spilled_ptr.id = 0;
+               state->stack[spi - 1].spilled_ptr.id = 0;
+       }
+
        state->stack[spi].spilled_ptr.dynptr.first_slot = false;
        state->stack[spi].spilled_ptr.dynptr.type = 0;
        state->stack[spi - 1].spilled_ptr.dynptr.type = 0;
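Since reserve acquires a reference that only submit/discard release, the existing reference-leak check at program exit now covers ringbuf dynptrs too. A sketch of a program this should reject, reusing the hypothetical rb map and an illustrative attach point:

SEC("fentry/do_nanosleep")
int leaky(void *ctx)
{
        struct bpf_dynptr ptr;

        bpf_ringbuf_reserve_dynptr(&rb, 64, 0, &ptr);
        if (bpf_get_prandom_u32() & 1)
                return 0; /* leak: no submit/discard on this path */

        bpf_ringbuf_submit_dynptr(&ptr, 0);
        return 0;
}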
 
 skip_type_check:
        if (arg_type_is_release(arg_type)) {
-               if (!reg->ref_obj_id && !register_is_null(reg)) {
+               if (arg_type_is_dynptr(arg_type)) {
+                       struct bpf_func_state *state = func(env, reg);
+                       int spi = get_spi(reg->off);
+
+                       if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
+                           !state->stack[spi].spilled_ptr.id) {
+                               verbose(env, "arg %d is an unacquired reference\n", regno);
+                               return -EINVAL;
+                       }
+               } else if (!reg->ref_obj_id && !register_is_null(reg)) {
                        verbose(env, "R%d must be referenced when passed to release function\n",
                                regno);
                        return -EINVAL;
                        case DYNPTR_TYPE_LOCAL:
                                err_extra = "local ";
                                break;
+                       case DYNPTR_TYPE_RINGBUF:
+                               err_extra = "ringbuf ";
+                               break;
                        default:
                                break;
                        }
+
                        verbose(env, "Expected an initialized %sdynptr as arg #%d\n",
                                err_extra, arg + 1);
                        return -EINVAL;
        case BPF_MAP_TYPE_RINGBUF:
                if (func_id != BPF_FUNC_ringbuf_output &&
                    func_id != BPF_FUNC_ringbuf_reserve &&
-                   func_id != BPF_FUNC_ringbuf_query)
+                   func_id != BPF_FUNC_ringbuf_query &&
+                   func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
+                   func_id != BPF_FUNC_ringbuf_submit_dynptr &&
+                   func_id != BPF_FUNC_ringbuf_discard_dynptr)
                        goto error;
                break;
        case BPF_MAP_TYPE_STACK_TRACE:
        case BPF_FUNC_ringbuf_output:
        case BPF_FUNC_ringbuf_reserve:
        case BPF_FUNC_ringbuf_query:
+       case BPF_FUNC_ringbuf_reserve_dynptr:
+       case BPF_FUNC_ringbuf_submit_dynptr:
+       case BPF_FUNC_ringbuf_discard_dynptr:
                if (map->map_type != BPF_MAP_TYPE_RINGBUF)
                        goto error;
                break;
 
  *     Return
  *             0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
  *             -EINVAL if flags is not 0.
+ *
+ * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ *     Description
+ *             Reserve *size* bytes of payload in a ring buffer *ringbuf*
+ *             through the dynptr interface. *flags* must be 0.
+ *
+ *             Please note that a corresponding bpf_ringbuf_submit_dynptr or
+ *             bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
+ *             reservation fails. This is enforced by the verifier.
+ *     Return
+ *             0 on success, or a negative error in case of failure.
+ *
+ * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ *     Description
+ *             Submit a reserved ring buffer sample, pointed to by *ptr*,
+ *             through the dynptr interface. This is a no-op if the dynptr is
+ *             invalid/null.
+ *
+ *             For more information on *flags*, please see
+ *             'bpf_ringbuf_submit'.
+ *     Return
+ *             Nothing. Always succeeds.
+ *
+ * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ *     Description
+ *             Discard a reserved ring buffer sample through the dynptr
+ *             interface. This is a no-op if the dynptr is invalid/null.
+ *
+ *             For more information on *flags*, please see
+ *             'bpf_ringbuf_discard'.
+ *     Return
+ *             Nothing. Always succeeds.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
        FN(map_lookup_percpu_elem),     \
        FN(skc_to_mptcp_sock),          \
        FN(dynptr_from_mem),            \
+       FN(ringbuf_reserve_dynptr),     \
+       FN(ringbuf_submit_dynptr),      \
+       FN(ringbuf_discard_dynptr),     \
        /* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper