bpf: use check_sub_overflow() to check for subtraction overflows
Similar to the previous patch that drops signed_add*_overflows() and uses
the (compiler) builtin-based check_add_overflow(), do the same for
signed_sub*_overflows() and replace them with the generic
check_sub_overflow() to make future refactoring easier and have the
checks implemented more efficiently.
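In the s64 case the conversion boils down to the following sketch
(reconstructed from the interleaved source lines in the disassembly
below; check_sub_overflow() is the generic helper from
<linux/overflow.h>, which stores the difference through its third
argument and returns true on signed overflow):

  /* Before: open-coded helper, subtraction done in u64 */
  static bool signed_sub_overflows(s64 a, s64 b)
  {
          /* Do the sub in u64, where overflow is well-defined */
          s64 res = (s64)((u64)a - (u64)b);

          if (b < 0)
                  return res < a;
          return res > a;
  }

  if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
      signed_sub_overflows(dst_reg->smax_value, smin_val)) {
          /* Overflow possible, we know nothing */
          dst_reg->smin_value = S64_MIN;
          dst_reg->smax_value = S64_MAX;
  } else {
          dst_reg->smin_value -= smax_val;
          dst_reg->smax_value -= smin_val;
  }

  /* After: dst_smin/dst_smax point at dst_reg->smin_value/smax_value;
   * check_sub_overflow() has already written the result, so only the
   * overflow case needs handling.
   */
  if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) ||
      check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) {
          /* Overflow possible, we know nothing */
          *dst_smin = S64_MIN;
          *dst_smax = S64_MAX;
  }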
The unsigned overflow checks for subtraction do not use helpers and are
simple enough already, so they are left untouched.
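For reference, the unsigned side only needs a plain comparison before
subtracting, roughly along these lines (illustrative sketch; variable
names mirror the signed case above):

  if (dst_reg->umin_value < umax_val) {
          /* Overflow possible, we know nothing */
          dst_reg->umin_value = 0;
          dst_reg->umax_value = U64_MAX;
  } else {
          /* Cannot underflow given consistent bounds */
          dst_reg->umin_value -= umax_val;
          dst_reg->umax_value -= umin_val;
  }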
After the change GCC 13.3.0 generates cleaner assembly on x86_64:
if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) ||
139bf: mov 0x28(%r12),%rax
139c4: mov %edx,0x54(%r12)
139c9: sub %r11,%rax
139cc: mov %rax,0x28(%r12)
139d1: jo 14627 <adjust_reg_min_max_vals+0x1237>
check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) {
139d7: mov 0x30(%r12),%rax
139dc: sub %r9,%rax
139df: mov %rax,0x30(%r12)
if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) ||
139e4: jo 14627 <adjust_reg_min_max_vals+0x1237>
...
*dst_smin = S64_MIN;
14627: movabs $0x8000000000000000,%rax
14631: mov %rax,0x28(%r12)
*dst_smax = S64_MAX;
14636: sub $0x1,%rax
1463a: mov %rax,0x30(%r12)
Before the change it gives:
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
13a50: mov 0x28(%r12),%rdi
13a55: mov %edx,0x54(%r12)
dst_reg->smax_value = S64_MAX;
13a5a: movabs $0x7fffffffffffffff,%rdx
13a64: mov %eax,0x50(%r12)
dst_reg->smin_value = S64_MIN;
13a69: movabs $0x8000000000000000,%rax
s64 res = (s64)((u64)a - (u64)b);
13a73: mov %rdi,%rsi
13a76: sub %rcx,%rsi
if (b < 0)
13a79: test %rcx,%rcx
13a7c: js 145ea <adjust_reg_min_max_vals+0x119a>
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
13a82: cmp %rsi,%rdi
13a85: jl 13ac7 <adjust_reg_min_max_vals+0x677>
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
13a87: mov 0x30(%r12),%r8
s64 res = (s64)((u64)a - (u64)b);
13a8c: mov %r8,%rax
13a8f: sub %r9,%rax
return res > a;
13a92: cmp %rax,%r8
13a95: setl %sil
if (b < 0)
13a99: test %r9,%r9
13a9c: js 147d1 <adjust_reg_min_max_vals+0x1381>
dst_reg->smax_value = S64_MAX;
13aa2: movabs $0x7fffffffffffffff,%rdx
dst_reg->smin_value = S64_MIN;
13aac: movabs $0x8000000000000000,%rax
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
13ab6: test %sil,%sil
13ab9: jne 13ac7 <adjust_reg_min_max_vals+0x677>
dst_reg->smin_value -= smax_val;
13abb: mov %rdi,%rax
dst_reg->smax_value -= smin_val;
13abe: mov %r8,%rdx
dst_reg->smin_value -= smax_val;
13ac1: sub %rcx,%rax
dst_reg->smax_value -= smin_val;
13ac4: sub %r9,%rdx
13ac7: mov %rax,0x28(%r12)
...
13ad1: mov %rdx,0x30(%r12)
...
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
145ea: cmp %rsi,%rdi
145ed: jg 13ac7 <adjust_reg_min_max_vals+0x677>
145f3: jmp 13a87 <adjust_reg_min_max_vals+0x637>
Suggested-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/r/20240712080127.136608-4-shung-hsi.yu@suse.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>