As the arm64 JIT now supports the timed may_goto instruction, make sure
all relevant tests run on this architecture. Some tests could be enabled
as-is; others required modifications to work properly on arm64.
$ ./test_progs -a "stream*","*may_goto*",verifier_bpf_fastcall
#404 stream_errors:OK
[...]
#406/2 stream_success/stream_cond_break:OK
[...]
#494/23 verifier_bpf_fastcall/may_goto_interaction_x86_64:SKIP
#494/24 verifier_bpf_fastcall/may_goto_interaction_arm64:OK
[...]
#539/1 verifier_may_goto_1/may_goto 0:OK
#539/2 verifier_may_goto_1/batch 2 of may_goto 0:OK
#539/3 verifier_may_goto_1/may_goto batch with offsets 2/1/0:OK
#539/4 verifier_may_goto_1/may_goto batch with offsets 2/0:OK
#539 verifier_may_goto_1:OK
#540/1 verifier_may_goto_2/C code with may_goto 0:OK
#540 verifier_may_goto_2:OK
Summary: 7/16 PASSED, 25 SKIPPED, 0 FAILED
Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Xu Kuohai <xukuohai@huawei.com>
Link: https://lore.kernel.org/r/20250827113245.52629-3-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
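
For context, a minimal sketch (not part of this patch; names are
illustrative) of how a selftest BPF program typically emits may_goto:
the cond_break macro from the selftests' bpf_experimental.h expands to
a BPF_JMP | BPF_JCOND instruction, which the verifier then rewrites
into the timed expansion checked in the verifier_bpf_fastcall hunk
below.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

char _license[] SEC("license") = "GPL";

SEC("raw_tp")
int timed_loop(void *ctx)
{
	int i;

	/* cond_break emits a may_goto; once the iteration/time budget
	 * kept in the expansion's stack slots runs out, the branch is
	 * taken and the loop exits early.
	 */
	for (i = 0; i < 1000000; i++)
		cond_break;

	return 0;
}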
ASSERT_OK(ret, "ret");
ASSERT_OK(opts.retval, "retval");
-#if !defined(__x86_64__) && !defined(__s390x__)
+#if !defined(__x86_64__) && !defined(__s390x__) && !defined(__aarch64__)
ASSERT_TRUE(1, "Timed may_goto unsupported, skip.");
if (i == 0) {
ret = bpf_prog_stream_read(prog_fd, 2, buf, sizeof(buf), &ropts);
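
For reference, a hedged userspace sketch of how the stream tests drain
a program's stderr stream; bpf_prog_stream_read() and stream id 2
(stderr) come from the fragment above, while the helper name and the
NULL opts are illustrative assumptions:

#include <bpf/bpf.h>

/* Hypothetical helper: read whatever the program wrote to its stderr
 * stream. Returns bytes read, 0 when the stream is empty, <0 on error.
 */
static int drain_stderr(int prog_fd, char *buf, __u32 len)
{
	/* Stream id 2 is the stderr stream, matching the test above;
	 * opts are optional, so this sketch passes NULL.
	 */
	return bpf_prog_stream_read(prog_fd, 2, buf, len, NULL);
}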
SEC("raw_tp")
__arch_arm64
-__log_level(4) __msg("stack depth 16")
-/* may_goto counter at -16 */
-__xlated("0: *(u64 *)(r10 -16) =")
-__xlated("1: r1 = 1")
-__xlated("2: call bpf_get_smp_processor_id")
+__log_level(4) __msg("stack depth 24")
+/* may_goto counter at -24 */
+__xlated("0: *(u64 *)(r10 -24) =")
+/* may_goto timestamp at -16 */
+__xlated("1: *(u64 *)(r10 -16) =")
+__xlated("2: r1 = 1")
+__xlated("3: call bpf_get_smp_processor_id")
/* may_goto expansion starts */
-__xlated("3: r11 = *(u64 *)(r10 -16)")
-__xlated("4: if r11 == 0x0 goto pc+3")
-__xlated("5: r11 -= 1")
-__xlated("6: *(u64 *)(r10 -16) = r11")
+__xlated("4: r11 = *(u64 *)(r10 -24)")
+__xlated("5: if r11 == 0x0 goto pc+6")
+__xlated("6: r11 -= 1")
+__xlated("7: if r11 != 0x0 goto pc+2")
+__xlated("8: r11 = -24")
+__xlated("9: call unknown")
+__xlated("10: *(u64 *)(r10 -24) = r11")
/* may_goto expansion ends */
-__xlated("7: *(u64 *)(r10 -8) = r1")
-__xlated("8: exit")
+__xlated("11: *(u64 *)(r10 -8) = r1")
+__xlated("12: exit")
__success
__naked void may_goto_interaction_arm64(void)
{
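
To make the expansion above easier to follow, here is a hedged C model
of what instructions 4-10 do; timed_refill() is a stand-in name for the
arch trampoline called at insn 9 (which consults the timestamp at
r10-16), not the kernel's actual symbol:

#include <stdint.h>

extern uint64_t timed_refill(uint64_t *frame);	/* hypothetical name */

/* frame[0] is the counter at r10-24, frame[1] the timestamp at r10-16.
 * Returns 1 when the may_goto branch is taken and the loop must exit.
 */
static int timed_may_goto(uint64_t *frame)
{
	uint64_t cnt = frame[0];	/* insn 4: load the budget */

	if (cnt == 0)			/* insn 5: budget exhausted */
		return 1;		/* take the branch (to "exit") */
	cnt--;				/* insn 6 */
	if (cnt == 0)			/* insns 7-9: call the trampoline,
					 * which refills the counter while
					 * the time budget lasts and leaves
					 * it at zero afterwards */
		cnt = timed_refill(frame);
	frame[0] = cnt;			/* insn 10: store it back */
	return 0;			/* fall through, keep looping */
}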
__description("may_goto 0")
__arch_x86_64
__arch_s390x
+__arch_arm64
__xlated("0: r0 = 1")
__xlated("1: exit")
__success
__description("batch 2 of may_goto 0")
__arch_x86_64
__arch_s390x
+__arch_arm64
__xlated("0: r0 = 1")
__xlated("1: exit")
__success
__description("may_goto batch with offsets 2/1/0")
__arch_x86_64
__arch_s390x
+__arch_arm64
__xlated("0: r0 = 1")
__xlated("1: exit")
__success
}
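
All three tests above expect only "0: r0 = 1" and "1: exit" because a
may_goto with offset 0 branches to the next instruction whether taken
or not, so the verifier removes it outright. A sketch of such a test
body, mirroring the raw-instruction encoding used by may_goto_batch_2()
below (the function name here is illustrative):

SEC("raw_tp")
__naked void may_goto_nop(void)
{
	asm volatile (
	".8byte %[may_goto0];"
	"r0 = 1;"
	"exit;"
	:
	: __imm_insn(may_goto0, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
	: __clobber_all);
}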
SEC("raw_tp")
-__description("may_goto batch with offsets 2/0 - x86_64 and s390x")
+__description("may_goto batch with offsets 2/0")
__arch_x86_64
__arch_s390x
+__arch_arm64
__xlated("0: *(u64 *)(r10 -16) = 65535")
__xlated("1: *(u64 *)(r10 -8) = 0")
__xlated("2: r11 = *(u64 *)(r10 -16)")
__xlated("10: r0 = 2")
__xlated("11: exit")
__success
-__naked void may_goto_batch_2_x86_64_s390x(void)
-{
- asm volatile (
- ".8byte %[may_goto1];"
- ".8byte %[may_goto3];"
- "r0 = 1;"
- "r0 = 2;"
- "exit;"
- :
- : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)),
- __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
- : __clobber_all);
-}
-
-SEC("raw_tp")
-__description("may_goto batch with offsets 2/0 - arm64")
-__arch_arm64
-__xlated("0: *(u64 *)(r10 -8) = 8388608")
-__xlated("1: r11 = *(u64 *)(r10 -8)")
-__xlated("2: if r11 == 0x0 goto pc+3")
-__xlated("3: r11 -= 1")
-__xlated("4: *(u64 *)(r10 -8) = r11")
-__xlated("5: r0 = 1")
-__xlated("6: r0 = 2")
-__xlated("7: exit")
-__success
-__naked void may_goto_batch_2_arm64(void)
+__naked void may_goto_batch_2(void)
{
asm volatile (
".8byte %[may_goto1];"