author     Ilya Leoshkevich <iii@linux.ibm.com>            2025-10-16 23:51:27 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2025-10-19 16:31:00 +0200
commit     1527222f35499e6616c88014d9ee9e9e07985ef9 (patch)
tree       825710cdbf7375e05acb4b7b9b5efdebcd4921b5 /arch/s390
parent     2c768a9d1c1795492525f749e815f553950a33c7 (diff)
s390/bpf: Write back tail call counter for BPF_PSEUDO_CALL
commit c861a6b147137d10b5ff88a2c492ba376cd1b8b0 upstream.

The tailcall_bpf2bpf_hierarchy_1 test hangs on s390. Its call graph is
as follows:

    entry()
      subprog_tail()
        bpf_tail_call_static(0) -> entry + tail_call_start
      subprog_tail()
        bpf_tail_call_static(0) -> entry + tail_call_start

entry() copies its tail call counter to subprog_tail()'s frame, which
then increments it. However, the incremented result is discarded,
leading to an astronomically large number of tail calls.

Fix by writing the incremented counter back to entry()'s frame.

Fixes: dd691e847d28 ("s390/bpf: Implement bpf_jit_supports_subprog_tailcalls()")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20250813121016.163375-3-iii@linux.ibm.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
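For reference, the call graph above corresponds to a program shaped
roughly like the following minimal sketch (modeled on the description
in this message, not copied from the actual selftest source; the map
name jmp_table and the tc section are illustrative assumptions, and
slot 0 of the map is assumed to point back at entry()):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
            __uint(max_entries, 1);
            __uint(key_size, sizeof(__u32));
            __uint(value_size, sizeof(__u32));
    } jmp_table SEC(".maps");

    static __noinline int subprog_tail(struct __sk_buff *skb)
    {
            /* Each tail call increments the shared counter; without
             * the write-back, the increment done in this frame is
             * lost when control returns to entry(). */
            bpf_tail_call_static(skb, &jmp_table, 0);
            return 0;
    }

    SEC("tc")
    int entry(struct __sk_buff *skb)
    {
            /* Two BPF_PSEUDO_CALLs into subprog_tail(). */
            subprog_tail(skb);
            subprog_tail(skb);
            return 0;
    }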
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 23
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index cfcf2ee96094..15c6ab660a5b 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1439,13 +1439,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
jit->seen |= SEEN_FUNC;
/*
* Copy the tail call counter to where the callee expects it.
- *
- * Note 1: The callee can increment the tail call counter, but
- * we do not load it back, since the x86 JIT does not do this
- * either.
- *
- * Note 2: We assume that the verifier does not let us call the
- * main program, which clears the tail call counter on entry.
*/
/* mvc tail_call_cnt(4,%r15),frame_off+tail_call_cnt(%r15) */
_EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
@@ -1472,6 +1465,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
call_r1(jit);
/* lgr %b0,%r2: load return value into %b0 */
EMIT4(0xb9040000, BPF_REG_0, REG_2);
+
+ /*
+ * Copy the potentially updated tail call counter back.
+ */
+
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ /*
+ * mvc frame_off+tail_call_cnt(%r15),
+ * tail_call_cnt(4,%r15)
+ */
+ _EMIT6(0xd203f000 | (jit->frame_off +
+ offsetof(struct prog_frame,
+ tail_call_cnt)),
+ 0xf000 | offsetof(struct prog_frame,
+ tail_call_cnt));
+
break;
}
case BPF_JMP | BPF_TAIL_CALL: {
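
The counter flow that the two mvc instructions implement can be
modeled in plain C (a toy illustration of the copy-down/copy-back
semantics, not JIT output; struct prog_frame here is a hypothetical
stand-in for the JIT's stack layout):

    #include <stdio.h>

    struct prog_frame { unsigned int tail_call_cnt; };

    static void subprog_tail(struct prog_frame *caller)
    {
            /* mvc tail_call_cnt(4,%r15),frame_off+tail_call_cnt(%r15):
             * copy the caller's counter to where the callee expects it. */
            struct prog_frame callee = { .tail_call_cnt = caller->tail_call_cnt };

            callee.tail_call_cnt++;   /* the tail call bumps the counter */

            /* mvc frame_off+tail_call_cnt(%r15),tail_call_cnt(4,%r15):
             * the fix copies the incremented value back on return. */
            caller->tail_call_cnt = callee.tail_call_cnt;
    }

    int main(void)
    {
            struct prog_frame entry = { .tail_call_cnt = 0 };

            subprog_tail(&entry);
            subprog_tail(&entry);
            /* Prints 2 with the write-back; without the final copy the
             * counter stays 0, so the tail call limit is never reached
             * and the test hangs. */
            printf("%u\n", entry.tail_call_cnt);
            return 0;
    }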