// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */

/*
 * Tests for verifier-generated struct_ops prologues and epilogues.
 *
 * Each struct_ops program below is annotated with __xlated() lines that
 * must match the verifier's rewritten ("xlated") instruction stream,
 * proving that bpf_testmod's generated prologue (ctx->a += 1000) and/or
 * epilogue (ctx->a += 10000; retval *= 2) were inserted around the
 * program body. The SEC("syscall") programs then invoke the struct_ops
 * programs via testmod kfuncs and pin the accumulated return values
 * with __retval().
 */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"

char _license[] SEC("license") = "GPL";

/*
 * Plain (non-SEC) function referencing bpf_kfunc_st_ops_inc10 from C;
 * presumably this keeps the kfunc reachable in BTF so the naked-asm
 * "call %[bpf_kfunc_st_ops_inc10]" relocations below resolve --
 * NOTE(review): confirm against how bpf_misc.h / the test loader uses
 * such roots.
 */
void __kfunc_btf_root(void)
{
	bpf_kfunc_st_ops_inc10(NULL);
}

/*
 * Adds 1 to args->a and returns the new value (the "SUBPROG_A [1]" term
 * in the __retval comments below). __noinline + __used keep it emitted
 * as a distinct subprog so the naked asm can "call subprog;".
 */
static __noinline __used int subprog(struct st_ops_args *args)
{
	args->a += 1;
	return args->a;
}

/*
 * Prologue-only op: the first four xlated insns are the verifier-generated
 * prologue that loads ctx->a and adds 1000 before the program body runs.
 */
__success
/* prologue */
__xlated("0: r6 = *(u64 *)(r1 +0)")
__xlated("1: r7 = *(u64 *)(r6 +0)")
__xlated("2: r7 += 1000")
__xlated("3: *(u64 *)(r6 +0) = r7")
/* main prog */
__xlated("4: r1 = *(u64 *)(r1 +0)")
__xlated("5: r6 = r1")
__xlated("6: call kernel-function")
__xlated("7: r1 = r6")
__xlated("8: call pc+1")
__xlated("9: exit")
SEC("struct_ops/test_prologue")
__naked int test_prologue(void)
{
	asm volatile (
	"r1 = *(u64 *)(r1 +0);"
	"r6 = r1;"
	"call %[bpf_kfunc_st_ops_inc10];"
	"r1 = r6;"
	"call subprog;"
	"exit;"
	:
	: __imm(bpf_kfunc_st_ops_inc10)
	: __clobber_all);
}

/*
 * Epilogue-only op: the verifier spills the __u64 *ctx pointer to the
 * stack at entry, then replaces "exit" with an epilogue that reloads ctx,
 * adds 10000 to ctx->a, and returns a * 2. The "call pc+" target offset
 * is left unpinned since it varies with layout.
 */
__success
/* save __u64 *ctx to stack */
__xlated("0: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("1: r1 = *(u64 *)(r1 +0)")
__xlated("2: r6 = r1")
__xlated("3: call kernel-function")
__xlated("4: r1 = r6")
__xlated("5: call pc+")
/* epilogue */
__xlated("6: r1 = *(u64 *)(r10 -8)")
__xlated("7: r1 = *(u64 *)(r1 +0)")
__xlated("8: r6 = *(u64 *)(r1 +0)")
__xlated("9: r6 += 10000")
__xlated("10: *(u64 *)(r1 +0) = r6")
__xlated("11: r0 = r6")
__xlated("12: r0 *= 2")
__xlated("13: exit")
SEC("struct_ops/test_epilogue")
__naked int test_epilogue(void)
{
	asm volatile (
	"r1 = *(u64 *)(r1 +0);"
	"r6 = r1;"
	"call %[bpf_kfunc_st_ops_inc10];"
	"r1 = r6;"
	"call subprog;"
	"exit;"
	:
	: __imm(bpf_kfunc_st_ops_inc10)
	: __clobber_all);
}

/*
 * Combined op: both the prologue (ctx->a += 1000) and the ctx spill are
 * generated up front, and the epilogue (ctx->a += 10000; r0 = a * 2)
 * replaces the exit -- i.e. the two transformations compose.
 */
__success
/* prologue */
__xlated("0: r6 = *(u64 *)(r1 +0)")
__xlated("1: r7 = *(u64 *)(r6 +0)")
__xlated("2: r7 += 1000")
__xlated("3: *(u64 *)(r6 +0) = r7")
/* save __u64 *ctx to stack */
__xlated("4: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("5: r1 = *(u64 *)(r1 +0)")
__xlated("6: r6 = r1")
__xlated("7: call kernel-function")
__xlated("8: r1 = r6")
__xlated("9: call pc+")
/* epilogue */
__xlated("10: r1 = *(u64 *)(r10 -8)")
__xlated("11: r1 = *(u64 *)(r1 +0)")
__xlated("12: r6 = *(u64 *)(r1 +0)")
__xlated("13: r6 += 10000")
__xlated("14: *(u64 *)(r1 +0) = r6")
__xlated("15: r0 = r6")
__xlated("16: r0 *= 2")
__xlated("17: exit")
SEC("struct_ops/test_pro_epilogue")
__naked int test_pro_epilogue(void)
{
	asm volatile (
	"r1 = *(u64 *)(r1 +0);"
	"r6 = r1;"
	"call %[bpf_kfunc_st_ops_inc10];"
	"r1 = r6;"
	"call subprog;"
	"exit;"
	:
	: __imm(bpf_kfunc_st_ops_inc10)
	: __clobber_all);
}

/* Runs test_prologue via the testmod kfunc and checks the sum. */
SEC("syscall")
__retval(1011) /* PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] */
int syscall_prologue(void *ctx)
{
	struct st_ops_args args = {};

	return bpf_kfunc_st_ops_test_prologue(&args);
}

/* Runs test_epilogue; the epilogue doubles the accumulated value. */
SEC("syscall")
__retval(20022) /* (KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
int syscall_epilogue(void *ctx)
{
	struct st_ops_args args = {};

	return bpf_kfunc_st_ops_test_epilogue(&args);
}

/* Runs test_pro_epilogue; prologue and epilogue contributions combine. */
SEC("syscall")
__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
int syscall_pro_epilogue(void *ctx)
{
	struct st_ops_args args = {};

	return bpf_kfunc_st_ops_test_pro_epilogue(&args);
}

/* struct_ops map wiring the three programs into bpf_testmod's st_ops. */
SEC(".struct_ops.link")
struct bpf_testmod_st_ops pro_epilogue = {
	.test_prologue = (void *)test_prologue,
	.test_epilogue = (void *)test_epilogue,
	.test_pro_epilogue = (void *)test_pro_epilogue,
};