return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}
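+/* An instruction "scratches" the registers and stack slots it touches.
+ * The masks below let print_verifier_state() log only what changed:
+ * registers R0-R10 fit in a u32 bitmask, stack slots in a u64 bitmask
+ * indexed by slot number (spi).
+ */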
+static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
+{
+ env->scratched_regs |= 1U << regno;
+}
+
+static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
+{
+ env->scratched_stack_slots |= 1ULL << spi;
+}
+
+static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
+{
+ return (env->scratched_regs >> regno) & 1;
+}
+
+static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 spi)
+{
+ return (env->scratched_stack_slots >> spi) & 1;
+}
+
+static bool verifier_state_scratched(const struct bpf_verifier_env *env)
+{
+ return env->scratched_regs || env->scratched_stack_slots;
+}
+
+static void mark_verifier_state_clean(struct bpf_verifier_env *env)
+{
+ env->scratched_regs = 0U;
+ env->scratched_stack_slots = 0ULL;
+}
+
+/* Used for printing the entire verifier state. */
+static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
+{
+ env->scratched_regs = ~0U;
+ env->scratched_stack_slots = ~0ULL;
+}
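+/* Lifecycle sketch (call sites appear later in this patch): accesses mark
+ * individual registers/slots scratched; print_verifier_state(..., false)
+ * prints only the scratched entries and then clears the masks;
+ * mark_verifier_state_scratched() forces the next print to show everything.
+ */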
+
/* The reg state of a pointer or a bounded scalar was saved when
* it was spilled to the stack.
*/
}
static void print_verifier_state(struct bpf_verifier_env *env,
- const struct bpf_func_state *state)
+ const struct bpf_func_state *state,
+ bool print_all)
{
const struct bpf_reg_state *reg;
enum bpf_reg_type t;
t = reg->type;
if (t == NOT_INIT)
continue;
+ if (!print_all && !reg_scratched(env, i))
+ continue;
verbose(env, " R%d", i);
print_liveness(env, reg->live);
verbose(env, "=%s", reg_type_str[t]);
types_buf[BPF_REG_SIZE] = 0;
if (!valid)
continue;
+ if (!print_all && !stack_slot_scratched(env, i))
+ continue;
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
print_liveness(env, state->stack[i].spilled_ptr.live);
if (is_spilled_reg(&state->stack[i])) {
if (state->in_async_callback_fn)
verbose(env, " async_cb");
verbose(env, "\n");
+ mark_verifier_state_clean(env);
}
/* copy array src of length n * size bytes to dst. dst is reallocated if it's too
state->frameno = frameno;
state->subprogno = subprogno;
init_reg_state(env, state);
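+ /* a fresh frame: dump the whole state on its first print */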
+ mark_verifier_state_scratched(env);
}
/* Similar to push_stack(), but for async callbacks */
return -EINVAL;
}
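+ /* record the access so the register shows up in the next state print */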
+ mark_reg_scratched(env, regno);
+
reg = ®s[regno];
rw64 = is_reg64(env, insn, regno, reg, t);
if (t == SRC_OP) {
reg->precise = true;
}
if (env->log.level & BPF_LOG_LEVEL) {
- print_verifier_state(env, func);
+ print_verifier_state(env, func, false);
verbose(env, "parent %s regs=%x stack=%llx marks\n",
new_marks ? "didn't have" : "already had",
reg_mask, stack_mask);
env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
}
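+ /* the spill below touches this slot; include it in the next print */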
+ mark_stack_slot_scratched(env, spi);
if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
!register_is_null(reg) && env->bpf_capable) {
if (dst_reg != BPF_REG_FP) {
slot = -i - 1;
spi = slot / BPF_REG_SIZE;
stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+ mark_stack_slot_scratched(env, spi);
if (!env->allow_ptr_leaks
&& *stype != NOT_INIT
* to make sure our theoretical access will be safe.
*/
if (env->log.level & BPF_LOG_LEVEL)
- print_verifier_state(env, state);
+ print_verifier_state(env, state, false);
/* The minimum value is only important with signed
* comparisons where we can't assume the floor of a
if (env->log.level & BPF_LOG_LEVEL) {
verbose(env, "caller:\n");
- print_verifier_state(env, caller);
+ print_verifier_state(env, caller, true);
verbose(env, "callee:\n");
- print_verifier_state(env, callee);
+ print_verifier_state(env, callee, true);
}
return 0;
}
*insn_idx = callee->callsite + 1;
if (env->log.level & BPF_LOG_LEVEL) {
verbose(env, "returning from callee:\n");
- print_verifier_state(env, callee);
+ print_verifier_state(env, callee, true);
verbose(env, "to caller at %d:\n", *insn_idx);
- print_verifier_state(env, caller);
+ print_verifier_state(env, caller, true);
}
/* clear everything in the callee */
free_func_state(callee);
/* Got here implies adding two SCALAR_VALUEs */
if (WARN_ON_ONCE(ptr_reg)) {
- print_verifier_state(env, state);
+ print_verifier_state(env, state, true);
verbose(env, "verifier internal error: unexpected ptr_reg\n");
return -EINVAL;
}
if (WARN_ON(!src_reg)) {
- print_verifier_state(env, state);
+ print_verifier_state(env, state, true);
verbose(env, "verifier internal error: no src_reg\n");
return -EINVAL;
}
return -EACCES;
}
if (env->log.level & BPF_LOG_LEVEL)
- print_verifier_state(env, this_branch->frame[this_branch->curframe]);
+ print_verifier_state(env, this_branch->frame[this_branch->curframe], false);
return 0;
}
if (env->log.level & BPF_LOG_LEVEL2 ||
(env->log.level & BPF_LOG_LEVEL && do_print_state)) {
- if (env->log.level & BPF_LOG_LEVEL2)
- verbose(env, "%d:", env->insn_idx);
- else
+ if (env->log.level & BPF_LOG_LEVEL2) {
+ if (verifier_state_scratched(env))
+ verbose(env, "%d:", env->insn_idx);
+ } else {
verbose(env, "\nfrom %d to %d%s:",
env->prev_insn_idx, env->insn_idx,
env->cur_state->speculative ?
" (speculative execution)" : "");
- print_verifier_state(env, state->frame[state->curframe]);
+ }
+ print_verifier_state(env, state->frame[state->curframe],
+ false);
do_print_state = false;
}
if (err)
return err;
process_bpf_exit:
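+ /* a path ended; make the next state print after pop_stack() a full dump */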
+ mark_verifier_state_scratched(env);
update_branch_counts(env, env->cur_state);
err = pop_stack(env, &prev_insn_idx,
&env->insn_idx, pop_log);
}
}
+ mark_verifier_state_clean(env);
+
if (IS_ERR(btf_vmlinux)) {
/* Either gcc or pahole or kernel are broken. */
verbose(env, "in-kernel BTF is malformed\n");
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {1, "R1=ctx(id=0,off=0,imm=0)"},
- {1, "R10=fp0"},
+ {0, "R1=ctx(id=0,off=0,imm=0)"},
+ {0, "R10=fp0"},
{1, "R3_w=inv2"},
{2, "R3_w=inv4"},
{3, "R3_w=inv8"},
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {1, "R1=ctx(id=0,off=0,imm=0)"},
- {1, "R10=fp0"},
+ {0, "R1=ctx(id=0,off=0,imm=0)"},
+ {0, "R10=fp0"},
{1, "R3_w=inv1"},
{2, "R3_w=inv2"},
{3, "R3_w=inv4"},
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {1, "R1=ctx(id=0,off=0,imm=0)"},
- {1, "R10=fp0"},
+ {0, "R1=ctx(id=0,off=0,imm=0)"},
+ {0, "R10=fp0"},
{1, "R3_w=inv4"},
{2, "R3_w=inv8"},
{3, "R3_w=inv10"},
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {1, "R1=ctx(id=0,off=0,imm=0)"},
- {1, "R10=fp0"},
+ {0, "R1=ctx(id=0,off=0,imm=0)"},
+ {0, "R10=fp0"},
{1, "R3_w=inv7"},
{2, "R3_w=inv7"},
{3, "R3_w=inv14"},
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {7, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
+ {6, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
{8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
{9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
{11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
- {18, "R3=pkt_end(id=0,off=0,imm=0)"},
+ {13, "R3_w=pkt_end(id=0,off=0,imm=0)"},
{18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
{19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
{20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
+ {3, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
{5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
{6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
- {10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
+ {9, "R2=pkt(id=0,off=0,r=18,imm=0)"},
{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
+ {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6.
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
+ {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Adding 14 makes R6 be (4n+2) */
{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
/* Checked s>=0 */
{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
/* packet pointer + nonnegative (4n+2) */
- {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {12, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
{13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
* We checked the bounds, but it might have been able