/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2025 Puranjay Mohan <puranjay@kernel.org> */

#include <linux/linkage.h>

SYM_FUNC_START(arch_bpf_timed_may_goto)
	/* Allocate stack space and emit frame record */
	stp	x29, x30, [sp, #-64]!
	mov	x29, sp

	/* Save BPF registers R0 - R5 (x7, x0-x4) */
	stp	x7, x0, [sp, #16]
	stp	x1, x2, [sp, #32]
	stp	x3, x4, [sp, #48]

	/*
	 * Stack depth was passed in BPF_REG_AX (x9), add it to the BPF_FP
	 * (x25) to get the pointer to count and timestamp and pass it as the
	 * first argument in x0.
	 *
	 * Before generating the call to arch_bpf_timed_may_goto, the verifier
	 * generates a load instruction using FP, i.e. REG_AX = *(u64 *)(FP -
	 * stack_off_cnt), so BPF_REG_FP (x25) is always set up by the arm64
	 * jit in this case.
	 */
	add	x0, x9, x25
	bl	bpf_check_timed_may_goto
	/* BPF_REG_AX (x9) will be stored into count, so move return value to it. */
	mov	x9, x0

	/* Restore BPF registers R0 - R5 (x7, x0-x4) */
	ldp	x7, x0, [sp, #16]
	ldp	x1, x2, [sp, #32]
	ldp	x3, x4, [sp, #48]

	/* Restore FP and LR */
	ldp	x29, x30, [sp], #64

	ret
SYM_FUNC_END(arch_bpf_timed_may_goto)
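/*
 * Illustrative sketch, not part of the original source: from this
 * trampoline's point of view, bpf_check_timed_may_goto() is only assumed to
 * take the pointer built in x0 above (BPF_FP plus the offset passed in
 * BPF_REG_AX), referencing the count slot with the timestamp in the
 * adjacent slot, and to return the refreshed count (or 0 on timeout), which
 * is then moved back into BPF_REG_AX (x9). The struct and field names below
 * are hypothetical, used only to illustrate that contract; the real
 * definitions live on the C side of the kernel:
 *
 *	struct timed_may_goto_slots {		// hypothetical name
 *		u64 count;	// loaded/stored by the verifier-generated
 *				// code around may_goto
 *		u64 timestamp;	// set on the first slow-path call, used to
 *				// detect when the loop has run too long
 *	};
 *
 *	u64 bpf_check_timed_may_goto(struct timed_may_goto_slots *p);
 */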