xref: /linux/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c (revision 4f13d0dabc87fb585b96d90cc4b29f67a2995405)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/nsproxy.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <net/sock.h>
#include <linux/namei.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

#define CONNECT_TIMEOUT_SEC 1

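/* These function-pointer typedefs exist only to be pulled into the module's
 * BTF via the BTF_TYPE_EMIT() calls in
 * bpf_testmod_test_btf_type_tag_user_1() below.
 */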
typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;
static DEFINE_MUTEX(sock_lock);
static struct socket *sock;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

struct bpf_testmod_struct_arg_5 {
	char a;
	short b;
	int c;
	long d;
};

union bpf_testmod_union_arg_1 {
	char a;
	short b;
	struct bpf_testmod_struct_arg_1 arg;
};

union bpf_testmod_union_arg_2 {
	int a;
	long b;
	struct bpf_testmod_struct_arg_2 arg;
};

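/* Everything between __bpf_hook_start() and __bpf_hook_end() is exposed to
 * BPF as an attach target or kfunc; the markers also silence the compiler's
 * missing-prototype warnings for this region.
 */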
__bpf_hook_start();

noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
			      short g, struct bpf_testmod_struct_arg_5 h, long i)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
		f + g + h.a + h.b + h.c + h.d + i;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_union_arg_1(union bpf_testmod_union_arg_1 a, int b, int c)
{
	bpf_testmod_test_struct_arg_result = a.arg.a + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_union_arg_2(int a, union bpf_testmod_union_arg_2 b)
{
	bpf_testmod_test_struct_arg_result = a + b.arg.a + b.arg.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

__weak noinline void bpf_testmod_looooooooooooooooooooooooooooooong_name(void)
{
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

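/* Open-coded iterator kfuncs: _new() seeds the iterator, _next() yields
 * &it->value cnt times before returning NULL, and _destroy() resets the
 * state. A BPF program would drive it roughly like this (sketch only,
 * assuming the usual open-coded iterator loop):
 *
 *	struct bpf_iter_testmod_seq it;
 *	s64 *v, sum = 0;
 *
 *	bpf_iter_testmod_seq_new(&it, 100, 3);
 *	while ((v = bpf_iter_testmod_seq_next(&it)))
 *		sum += *v;	// 3 iterations, each sees 100
 *	bpf_iter_testmod_seq_destroy(&it);
 */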
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	it->cnt = cnt;

	if (cnt < 0)
		return -EINVAL;

	it->value = value;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq *it__iter)
{
	if (it__iter->cnt < 0)
		return 0;

	return val + it__iter->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}

__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
				       struct bpf_dynptr *ptr__nullable)
{
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
{
	return NULL;
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
{
	return NULL;
}

__bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
{
}

__bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc struct task_struct *bpf_kfunc_ret_rcu_test(void)
{
	return NULL;
}

__bpf_kfunc int *bpf_kfunc_ret_rcu_test_nostruct(int rdonly_buf_size)
{
	return NULL;
}

static struct prog_test_member trusted_ptr;

__bpf_kfunc struct prog_test_member *bpf_kfunc_get_default_trusted_ptr_test(void)
{
	return &trusted_ptr;
}

__bpf_kfunc void bpf_kfunc_put_default_trusted_ptr_test(struct prog_test_member *trusted_ptr)
{
	/*
	 * This BPF kfunc doesn't actually have any put/KF_ACQUIRE
	 * semantics. We're simply wanting to simulate a BPF kfunc that takes a
	 * struct prog_test_member pointer as an argument.
	 */
}

__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
	struct bpf_testmod_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		*err = -ENOMEM;
		return NULL;
	}
	refcount_set(&ctx->usage, 1);

	return ctx;
}

static void testmod_free_cb(struct rcu_head *head)
{
	struct bpf_testmod_ctx *ctx;

	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
	kfree(ctx);
}

__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
{
	if (!ctx)
		return;
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, testmod_free_cb);
}

__bpf_kfunc void bpf_testmod_ctx_release_dtor(void *ctx)
{
	bpf_testmod_ctx_release(ctx);
}
CFI_NOSEAL(bpf_testmod_ctx_release_dtor);

static struct bpf_testmod_ops3 *st_ops3;

static int bpf_testmod_test_3(void)
{
	return 0;
}

static int bpf_testmod_test_4(void)
{
	return 0;
}

static struct bpf_testmod_ops3 __bpf_testmod_ops3 = {
	.test_1 = bpf_testmod_test_3,
	.test_2 = bpf_testmod_test_4,
};

static void bpf_testmod_test_struct_ops3(void)
{
	if (st_ops3)
		st_ops3->test_1();
}

__bpf_kfunc void bpf_testmod_ops3_call_test_1(void)
{
	st_ops3->test_1();
}

__bpf_kfunc void bpf_testmod_ops3_call_test_2(void)
{
	st_ops3->test_2();
}

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* the primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;   /* vsyscall page address */
#endif
	default: return NULL;
	}
}

noinline int bpf_testmod_fentry_test1(int a)
{
	trace_bpf_testmod_fentry_test1_tp(a);

	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	trace_bpf_testmod_fentry_test2_tp(a, b);

	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

noinline void bpf_testmod_stacktrace_test(void)
{
	/* used for stacktrace test as attach function */
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_3(void)
{
	bpf_testmod_stacktrace_test();
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_2(void)
{
	bpf_testmod_stacktrace_test_3();
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_1(void)
{
	bpf_testmod_stacktrace_test_2();
	asm volatile ("");
}

int bpf_testmod_fentry_ok;

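/* sysfs .read handler: exercises the struct/union argument helpers, the
 * fentry and stacktrace attach targets, and several tracepoints in one pass.
 * It always returns -EIO, so reading the file fails even when every hook
 * fired; tests observe side effects such as bpf_testmod_fentry_ok instead.
 */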
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      const struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
	union bpf_testmod_union_arg_1 union_arg1 = { .arg = {1} };
	union bpf_testmod_union_arg_2 union_arg2 = { .arg = {2, 3} };
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);
	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
					    21, 22, struct_arg5, 27);

	(void)bpf_testmod_test_union_arg_1(union_arg1, 4, 5);
	(void)bpf_testmod_test_union_arg_2(6, union_arg2);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	(void)trace_bpf_testmod_test_raw_tp_null_tp(NULL);

	bpf_testmod_test_struct_ops3();

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	trace_bpf_testmod_test_nullable_bare_tp(NULL);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare_tp(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
			21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
			21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_stacktrace_test_1();

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		      const struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare_tp(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__bpf_hook_end();

static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};

/* bpf_testmod_uprobe sysfs attribute is so far enabled for x86_64 only,
 * please see test_uretprobe_regs_change test
 */
#ifdef __x86_64__

static int
uprobe_handler(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data)
{
	regs->cx = 0x87654321feebdaed;
	return 0;
}

static int
uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
		   struct pt_regs *regs, __u64 *data)
{
	regs->ax  = 0x12345678deadbeef;
	regs->r11 = (u64) -1;
	return 0;
}

struct testmod_uprobe {
	struct path path;
	struct uprobe *uprobe;
	struct uprobe_consumer consumer;
};

static DEFINE_MUTEX(testmod_uprobe_mutex);

static struct testmod_uprobe uprobe = {
	.consumer.handler = uprobe_handler,
	.consumer.ret_handler = uprobe_ret_handler,
};

static int testmod_register_uprobe(loff_t offset)
{
	int err = -EBUSY;

	if (uprobe.uprobe)
		return -EBUSY;

	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe)
		goto out;

	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
	if (err)
		goto out;

	uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry),
					offset, 0, &uprobe.consumer);
	if (IS_ERR(uprobe.uprobe)) {
		err = PTR_ERR(uprobe.uprobe);
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}
out:
	mutex_unlock(&testmod_uprobe_mutex);
	return err;
}

static void testmod_unregister_uprobe(void)
{
	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe) {
		uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
		uprobe_unregister_sync();
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}

	mutex_unlock(&testmod_uprobe_mutex);
}

static ssize_t
bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
			 const struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t len)
{
	unsigned long offset = 0;
	int err = 0;

	if (kstrtoul(buf, 0, &offset))
		return -EINVAL;

	if (offset)
		err = testmod_register_uprobe(offset);
	else
		testmod_unregister_uprobe();

	return err ?: strlen(buf);
}

static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
	.write = bpf_testmod_uprobe_write,
};

static int register_bpf_testmod_uprobe(void)
{
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

static void unregister_bpf_testmod_uprobe(void)
{
	testmod_unregister_uprobe();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

#else
static int register_bpf_testmod_uprobe(void)
{
	return 0;
}

static void unregister_bpf_testmod_uprobe(void) { }
#endif

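/* BTF kfunc ID sets: a kfunc must appear in one of these sets, together with
 * its flags (KF_ACQUIRE, KF_RELEASE, KF_RET_NULL, ...), before BPF programs
 * may call it. The sets are registered during module init, which is outside
 * this excerpt.
 */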
BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test)
BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test, KF_RET_NULL | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test_nostruct, KF_RET_NULL | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1)
BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2)
BTF_ID_FLAGS(func, bpf_kfunc_get_default_trusted_ptr_test)
BTF_ID_FLAGS(func, bpf_kfunc_put_default_trusted_ptr_test)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)

BTF_ID_LIST(bpf_testmod_dtor_ids)
BTF_ID(struct, bpf_testmod_ctx)
BTF_ID(func, bpf_testmod_ctx_release_dtor)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next two functions can't really be used for testing except to ensure
 * that the verifier rejects the call. Acquire functions must return struct
 * pointers, so these ones fail.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
{
}

struct bpf_kfunc_rcu_tasks_trace_data {
	struct rcu_head rcu;
	int *done;
};

static void bpf_kfunc_rcu_tasks_trace_cb(struct rcu_head *rhp)
{
	struct bpf_kfunc_rcu_tasks_trace_data *data;

	data = container_of(rhp, struct bpf_kfunc_rcu_tasks_trace_data, rcu);
	WRITE_ONCE(*data->done, 1);
	kfree(data);
}

__bpf_kfunc int bpf_kfunc_call_test_call_rcu_tasks_trace(int *done)
{
	struct bpf_kfunc_rcu_tasks_trace_data *data;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	data->done = done;
	call_rcu_tasks_trace(&data->rcu, bpf_kfunc_rcu_tasks_trace_cb);
	return 0;
}

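/* Socket kfuncs: all of them operate on the single module-global "sock",
 * serialized by sock_lock. bpf_kfunc_init_sock() creates it,
 * bpf_kfunc_close_sock() releases it, and the connect/bind/listen/sendmsg/
 * getsockname/getpeername wrappers below return -EPERM while no socket is
 * initialized.
 */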
__bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
{
	int proto;
	int err;

	mutex_lock(&sock_lock);

	if (sock) {
		pr_err("%s called without releasing old sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	switch (args->af) {
	case AF_INET:
	case AF_INET6:
		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
		break;
	case AF_UNIX:
		proto = PF_UNIX;
		break;
	default:
		pr_err("invalid address family %d\n", args->af);
		err = -EINVAL;
		goto out;
	}

	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
			       proto, &sock);

	if (!err)
		/* Set timeout for call to kernel_connect() to prevent it from hanging,
		 * and consider the connection attempt failed if it returns
		 * -EINPROGRESS.
		 */
		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc void bpf_kfunc_close_sock(void)
{
	mutex_lock(&sock_lock);

	if (sock) {
		sock_release(sock);
		sock = NULL;
	}

	mutex_unlock(&sock_lock);
}

__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_connect(sock, (struct sockaddr_unsized *)&args->addr,
			     args->addrlen, 0);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_bind(sock, (struct sockaddr_unsized *)&args->addr, args->addrlen);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_listen(sock, 128);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = sock_sendmsg(sock, &msg);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

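/* st_ops (like st_ops3 earlier) points at whichever struct_ops instance is
 * currently registered; the bpf_kfunc_st_ops_test_*() wrappers below proxy
 * into it under st_ops_mutex and return -1 when nothing is registered.
 */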
static DEFINE_MUTEX(st_ops_mutex);
static struct bpf_testmod_st_ops *st_ops;

__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_prologue)
		ret = st_ops->test_prologue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_epilogue)
		ret = st_ops->test_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_pro_epilogue)
		ret = st_ops->test_pro_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
{
	args->a += 10;
	return args->a;
}

__bpf_kfunc int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id);
__bpf_kfunc int bpf_kfunc_multi_st_ops_test_1_assoc(struct st_ops_args *args, struct bpf_prog_aux *aux);

__bpf_kfunc int bpf_kfunc_implicit_arg(int a, struct bpf_prog_aux *aux);
__bpf_kfunc int bpf_kfunc_implicit_arg_legacy(int a, int b, struct bpf_prog_aux *aux);
__bpf_kfunc int bpf_kfunc_implicit_arg_legacy_impl(int a, int b, struct bpf_prog_aux *aux);

/* hook targets */
noinline void bpf_testmod_test_hardirq_fn(void) { barrier(); }
noinline void bpf_testmod_test_softirq_fn(void) { barrier(); }

/* Tasklet for SoftIRQ context */
static void ctx_check_tasklet_fn(struct tasklet_struct *t)
{
	bpf_testmod_test_softirq_fn();
}

DECLARE_TASKLET(ctx_check_tasklet, ctx_check_tasklet_fn);

/* IRQ Work for HardIRQ context */
static void ctx_check_irq_fn(struct irq_work *work)
{
	bpf_testmod_test_hardirq_fn();
	tasklet_schedule(&ctx_check_tasklet);
}

static struct irq_work ctx_check_irq = IRQ_WORK_INIT_HARD(ctx_check_irq_fn);

/* The kfunc trigger */
__bpf_kfunc void bpf_kfunc_trigger_ctx_check(void)
{
	irq_work_queue(&ctx_check_irq);
}

BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_call_rcu_tasks_trace)
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10)
BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1)
BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1_assoc, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg_legacy, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg_legacy_impl)
BTF_ID_FLAGS(func, bpf_kfunc_trigger_ctx_check)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)

static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}

static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_testmod_ops_init_member(const struct btf_type *t,
				       const struct btf_member *member,
				       void *kdata, const void *udata)
{
	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
		/* For data fields, this function has to copy it and return
		 * 1 to indicate that the data has been handled by the
		 * struct_ops type, or the verifier will reject the map if
		 * the value of the data field is not zero.
		 */
		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
		return 1;
	}
	return 0;
}

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.get_func_proto	 = bpf_base_func_proto,
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops *ops = kdata;

	if (ops->test_1)
		ops->test_1();
	/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
	 * initialized, so we need to check for NULL.
	 */
	if (ops->test_2)
		ops->test_2(4, ops->data);

	return 0;
}

static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}

static int bpf_testmod_test_1(void)
{
	return 0;
}

static void bpf_testmod_test_2(int a, int b)
{
}

static int bpf_testmod_tramp(int value)
{
	return 0;
}

static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}

static int bpf_testmod_ops__test_refcounted(int dummy,
					    struct task_struct *task__ref)
{
	return 0;
}

static struct task_struct *
bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref,
				      struct cgroup *cgrp)
{
	return NULL;
}

static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
	.test_refcounted = bpf_testmod_ops__test_refcounted,
	.test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr,
};

struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};

static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops2 *ops = kdata;

	ops->test_1();
	return 0;
}

static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};

struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};

static int st_ops3_reg(void *kdata, struct bpf_link *link)
{
	int err = 0;

	mutex_lock(&st_ops_mutex);
	if (st_ops3) {
		pr_err("st_ops has already been registered\n");
		err = -EEXIST;
		goto unlock;
	}
	st_ops3 = kdata;

unlock:
	mutex_unlock(&st_ops_mutex);
	return err;
}

static void st_ops3_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops3 = NULL;
	mutex_unlock(&st_ops_mutex);
}

static void test_1_recursion_detected(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;

	stats = this_cpu_ptr(prog->stats);
	printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu\n",
	       u64_stats_read(&stats->misses));
}

static int st_ops3_check_member(const struct btf_type *t,
				const struct btf_member *member,
				const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct bpf_testmod_ops3, test_1):
		prog->aux->priv_stack_requested = true;
		prog->aux->recursion_detected = test_1_recursion_detected;
		fallthrough;
	default:
		break;
	}
	return 0;
}

struct bpf_struct_ops bpf_testmod_ops3 = {
	.verifier_ops = &bpf_testmod_verifier_ops3,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = st_ops3_reg,
	.unreg = st_ops3_unreg,
	.check_member = st_ops3_check_member,
	.cfi_stubs = &__bpf_testmod_ops3,
	.name = "bpf_testmod_ops3",
	.owner = THIS_MODULE,
};

static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_cgroup_from_id_id;
static int bpf_cgroup_release_id;
st_ops_gen_prologue_with_kfunc(struct bpf_insn * insn_buf,bool direct_write,const struct bpf_prog * prog)1491 static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct_write,
1492 					  const struct bpf_prog *prog)
1493 {
1494 	struct bpf_insn *insn = insn_buf;
1495 
1496 	/* r8 = r1; // r8 will be "u64 *ctx".
1497 	 * r1 = 0;
1498 	 * r0 = bpf_cgroup_from_id(r1);
1499 	 * if r0 != 0 goto pc+5;
1500 	 * r6 = r8[0]; // r6 will be "struct st_ops *args".
1501 	 * r7 = r6->a;
1502 	 * r7 += 1000;
1503 	 * r6->a = r7;
1504 	 * goto pc+2;
1505 	 * r1 = r0;
1506 	 * bpf_cgroup_release(r1);
1507 	 * r1 = r8;
1508 	 */
1509 	*insn++ = BPF_MOV64_REG(BPF_REG_8, BPF_REG_1);
1510 	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
1511 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
1512 	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 5);
1513 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_8, 0);
1514 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
1515 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
1516 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
1517 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
1518 	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
1519 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
1520 	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8);
1521 	*insn++ = prog->insnsi[0];
1522 
1523 	return insn - insn_buf;
1524 }
1525 
st_ops_gen_epilogue_with_kfunc(struct bpf_insn * insn_buf,const struct bpf_prog * prog,s16 ctx_stack_off)1526 static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
1527 					  s16 ctx_stack_off)
1528 {
1529 	struct bpf_insn *insn = insn_buf;
1530 
1531 	/* r1 = 0;
1532 	 * r6 = 0;
1533 	 * r0 = bpf_cgroup_from_id(r1);
1534 	 * if r0 != 0 goto pc+6;
1535 	 * r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
1536 	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
1537 	 * r6 = r1->a;
1538 	 * r6 += 10000;
1539 	 * r1->a = r6;
1540 	 * goto pc+2
1541 	 * r1 = r0;
1542 	 * bpf_cgroup_release(r1);
1543 	 * r0 = r6;
1544 	 * r0 *= 2;
1545 	 * BPF_EXIT;
1546 	 */
1547 	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
1548 	*insn++ = BPF_MOV64_IMM(BPF_REG_6, 0);
1549 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
1550 	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 6);
1551 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
1552 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
1553 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
1554 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
1555 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
1556 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
1557 	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
1558 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
1559 	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
1560 	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
1561 	*insn++ = BPF_EXIT_INSN();
1562 
1563 	return insn - insn_buf;
1564 }
1565 
1566 #define KFUNC_PRO_EPI_PREFIX "test_kfunc_"
st_ops_gen_prologue(struct bpf_insn * insn_buf,bool direct_write,const struct bpf_prog * prog)1567 static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
1568 			       const struct bpf_prog *prog)
1569 {
1570 	struct bpf_insn *insn = insn_buf;
1571 
1572 	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
1573 	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
1574 		return 0;
1575 
1576 	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
1577 		return st_ops_gen_prologue_with_kfunc(insn_buf, direct_write, prog);
1578 
1579 	/* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx".
1580 	 * r7 = r6->a;
1581 	 * r7 += 1000;
1582 	 * r6->a = r7;
1583 	 */
1584 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
1585 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
1586 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
1587 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
1588 	*insn++ = prog->insnsi[0];
1589 
1590 	return insn - insn_buf;
1591 }
1592 
st_ops_gen_epilogue(struct bpf_insn * insn_buf,const struct bpf_prog * prog,s16 ctx_stack_off)1593 static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
1594 			       s16 ctx_stack_off)
1595 {
1596 	struct bpf_insn *insn = insn_buf;
1597 
1598 	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
1599 	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
1600 		return 0;
1601 
1602 	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
1603 		return st_ops_gen_epilogue_with_kfunc(insn_buf, prog, ctx_stack_off);
1604 
1605 	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
1606 	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
1607 	 * r6 = r1->a;
1608 	 * r6 += 10000;
1609 	 * r1->a = r6;
1610 	 * r0 = r6;
1611 	 * r0 *= 2;
1612 	 * BPF_EXIT;
1613 	 */
1614 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
1615 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
1616 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
1617 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
1618 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
1619 	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
1620 	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
1621 	*insn++ = BPF_EXIT_INSN();
1622 
1623 	return insn - insn_buf;
1624 }
1625 
st_ops_btf_struct_access(struct bpf_verifier_log * log,const struct bpf_reg_state * reg,int off,int size)1626 static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
1627 				    const struct bpf_reg_state *reg,
1628 				    int off, int size)
1629 {
1630 	if (off < 0 || off + size > sizeof(struct st_ops_args))
1631 		return -EACCES;
1632 	return 0;
1633 }
1634 
1635 static const struct bpf_verifier_ops st_ops_verifier_ops = {
1636 	.is_valid_access = bpf_testmod_ops_is_valid_access,
1637 	.btf_struct_access = st_ops_btf_struct_access,
1638 	.gen_prologue = st_ops_gen_prologue,
1639 	.gen_epilogue = st_ops_gen_epilogue,
1640 	.get_func_proto = bpf_base_func_proto,
1641 };
1642 
1643 static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
1644 	.test_prologue = bpf_test_mod_st_ops__test_prologue,
1645 	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
1646 	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
1647 };
1648 
st_ops_reg(void * kdata,struct bpf_link * link)1649 static int st_ops_reg(void *kdata, struct bpf_link *link)
1650 {
1651 	int err = 0;
1652 
1653 	mutex_lock(&st_ops_mutex);
1654 	if (st_ops) {
1655 		pr_err("st_ops has already been registered\n");
1656 		err = -EEXIST;
1657 		goto unlock;
1658 	}
1659 	st_ops = kdata;
1660 
1661 unlock:
1662 	mutex_unlock(&st_ops_mutex);
1663 	return err;
1664 }
1665 
1666 static void st_ops_unreg(void *kdata, struct bpf_link *link)
1667 {
1668 	mutex_lock(&st_ops_mutex);
1669 	st_ops = NULL;
1670 	mutex_unlock(&st_ops_mutex);
1671 }
1672 
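/* Resolve the BTF ids of the cgroup kfuncs at init time; they are
 * presumably emitted as calls by the *_with_kfunc prologue/epilogue
 * generators above.
 */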
1673 static int st_ops_init(struct btf *btf)
1674 {
1675 	struct btf *kfunc_btf;
1676 
1677 	bpf_cgroup_from_id_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf);
1678 	bpf_cgroup_release_id = bpf_find_btf_id("bpf_cgroup_release", BTF_KIND_FUNC, &kfunc_btf);
1679 	if (bpf_cgroup_from_id_id < 0 || bpf_cgroup_release_id < 0)
1680 		return -EINVAL;
1681 
1682 	return 0;
1683 }
1684 
1685 static int st_ops_init_member(const struct btf_type *t,
1686 			      const struct btf_member *member,
1687 			      void *kdata, const void *udata)
1688 {
1689 	return 0;
1690 }
1691 
1692 static struct bpf_struct_ops testmod_st_ops = {
1693 	.verifier_ops = &st_ops_verifier_ops,
1694 	.init = st_ops_init,
1695 	.init_member = st_ops_init_member,
1696 	.reg = st_ops_reg,
1697 	.unreg = st_ops_unreg,
1698 	.cfi_stubs = &st_ops_cfi_stubs,
1699 	.name = "bpf_testmod_st_ops",
1700 	.owner = THIS_MODULE,
1701 };
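/* A minimal BPF-side sketch (selftest names assumed, not from this
 * file) of how a program could implement this struct_ops:
 *
 *	SEC("struct_ops/test_prologue")
 *	int BPF_PROG(test_prologue, struct st_ops_args *args)
 *	{
 *		return args->a;
 *	}
 *
 *	SEC(".struct_ops.link")
 *	struct bpf_testmod_st_ops st_ops = {
 *		.test_prologue = (void *)test_prologue,
 *	};
 */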
1702 
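/* Unlike bpf_testmod_st_ops above, multiple bpf_testmod_multi_st_ops
 * maps may be registered at once. Registered instances live on
 * multi_st_ops_list, keyed by their struct_ops map id.
 */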
1703 struct hlist_head multi_st_ops_list;
1704 static DEFINE_SPINLOCK(multi_st_ops_lock);
1705 
1706 static int multi_st_ops_init(struct btf *btf)
1707 {
1708 	spin_lock_init(&multi_st_ops_lock);
1709 	INIT_HLIST_HEAD(&multi_st_ops_list);
1710 
1711 	return 0;
1712 }
1713 
1714 static int multi_st_ops_init_member(const struct btf_type *t,
1715 				    const struct btf_member *member,
1716 				    void *kdata, const void *udata)
1717 {
1718 	return 0;
1719 }
1720 
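/* Caller must hold multi_st_ops_lock. */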
1721 static struct bpf_testmod_multi_st_ops *multi_st_ops_find_nolock(u32 id)
1722 {
1723 	struct bpf_testmod_multi_st_ops *st_ops;
1724 
1725 	hlist_for_each_entry(st_ops, &multi_st_ops_list, node) {
1726 		if (st_ops->id == id)
1727 			return st_ops;
1728 	}
1729 
1730 	return NULL;
1731 }
1732 
1733 /* Call test_1() of the struct_ops map identified by the id */
1734 int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id)
1735 {
1736 	struct bpf_testmod_multi_st_ops *st_ops;
1737 	unsigned long flags;
1738 	int ret = -1;
1739 
1740 	spin_lock_irqsave(&multi_st_ops_lock, flags);
1741 	st_ops = multi_st_ops_find_nolock(id);
1742 	if (st_ops)
1743 		ret = st_ops->test_1(args);
1744 	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1745 
1746 	return ret;
1747 }
1748 
1749 /* Call test_1() of the associated struct_ops map */
1750 int bpf_kfunc_multi_st_ops_test_1_assoc(struct st_ops_args *args, struct bpf_prog_aux *aux)
1751 {
1752 	struct bpf_testmod_multi_st_ops *st_ops;
1753 	int ret = -1;
1754 
1755 	st_ops = (struct bpf_testmod_multi_st_ops *)bpf_prog_get_assoc_struct_ops(aux);
1756 	if (st_ops)
1757 		ret = st_ops->test_1(args);
1758 
1759 	return ret;
1760 }
1761 
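/* For the kfuncs below, the trailing bpf_prog_aux argument is implicit:
 * the verifier supplies the calling program's aux, and BPF callers pass
 * only the explicit arguments.
 */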
1762 int bpf_kfunc_implicit_arg(int a, struct bpf_prog_aux *aux)
1763 {
1764 	if (aux && a > 0)
1765 		return a;
1766 	return -EINVAL;
1767 }
1768 
1769 int bpf_kfunc_implicit_arg_legacy(int a, int b, struct bpf_prog_aux *aux)
1770 {
1771 	if (aux)
1772 		return a + b;
1773 	return -EINVAL;
1774 }
1775 
1776 int bpf_kfunc_implicit_arg_legacy_impl(int a, int b, struct bpf_prog_aux *aux)
1777 {
1778 	return bpf_kfunc_implicit_arg_legacy(a, b, aux);
1779 }
1780 
1781 static int multi_st_ops_reg(void *kdata, struct bpf_link *link)
1782 {
1783 	struct bpf_testmod_multi_st_ops *st_ops =
1784 		(struct bpf_testmod_multi_st_ops *)kdata;
1785 	unsigned long flags;
1786 	int err = 0;
1787 	u32 id;
1788 
1789 	if (!st_ops->test_1)
1790 		return -EINVAL;
1791 
1792 	id = bpf_struct_ops_id(kdata);
1793 
1794 	spin_lock_irqsave(&multi_st_ops_lock, flags);
1795 	if (multi_st_ops_find_nolock(id)) {
1796 		pr_err("multi_st_ops(id:%d) has already been registered\n", id);
1797 		err = -EEXIST;
1798 		goto unlock;
1799 	}
1800 
1801 	st_ops->id = id;
1802 	hlist_add_head(&st_ops->node, &multi_st_ops_list);
1803 unlock:
1804 	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1805 
1806 	return err;
1807 }
1808 
1809 static void multi_st_ops_unreg(void *kdata, struct bpf_link *link)
1810 {
1811 	struct bpf_testmod_multi_st_ops *st_ops;
1812 	unsigned long flags;
1813 	u32 id;
1814 
1815 	id = bpf_struct_ops_id(kdata);
1816 
1817 	spin_lock_irqsave(&multi_st_ops_lock, flags);
1818 	st_ops = multi_st_ops_find_nolock(id);
1819 	if (st_ops)
1820 		hlist_del(&st_ops->node);
1821 	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1822 }
1823 
1824 static int bpf_testmod_multi_st_ops__test_1(struct st_ops_args *args)
1825 {
1826 	return 0;
1827 }
1828 
1829 static struct bpf_testmod_multi_st_ops multi_st_ops_cfi_stubs = {
1830 	.test_1 = bpf_testmod_multi_st_ops__test_1,
1831 };
1832 
1833 struct bpf_struct_ops testmod_multi_st_ops = {
1834 	.verifier_ops = &bpf_testmod_verifier_ops,
1835 	.init = multi_st_ops_init,
1836 	.init_member = multi_st_ops_init_member,
1837 	.reg = multi_st_ops_reg,
1838 	.unreg = multi_st_ops_unreg,
1839 	.cfi_stubs = &multi_st_ops_cfi_stubs,
1840 	.name = "bpf_testmod_multi_st_ops",
1841 	.owner = THIS_MODULE,
1842 };
1843 
1844 extern int bpf_fentry_test1(int a);
1845 
1846 static int bpf_testmod_init(void)
1847 {
1848 	const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
1849 		{
1850 			.btf_id		= bpf_testmod_dtor_ids[0],
1851 			.kfunc_btf_id	= bpf_testmod_dtor_ids[1]
1852 		},
1853 	};
1854 	void **tramp;
1855 	int ret;
1856 
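	/* Register the kfunc sets, struct_ops, and dtor kfuncs up front;
	 * the "ret = ret ?: ..." chain stops at the first failure.
	 */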
1857 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
1858 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
1859 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
1860 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
1861 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
1862 	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
1863 	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
1864 	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3);
1865 	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
1866 	ret = ret ?: register_bpf_struct_ops(&testmod_multi_st_ops, bpf_testmod_multi_st_ops);
1867 	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
1868 						 ARRAY_SIZE(bpf_testmod_dtors),
1869 						 THIS_MODULE);
1870 	if (ret < 0)
1871 		return ret;
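	/* Exercise a call from module code into the kernel's
	 * bpf_fentry_test1(), used by tests that attach to it.
	 */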
1872 	if (bpf_fentry_test1(0) < 0)
1873 		return -EINVAL;
1874 	sock = NULL;
1875 	mutex_init(&sock_lock);
1876 	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
1877 	if (ret < 0)
1878 		return ret;
1879 	ret = register_bpf_testmod_uprobe();
1880 	if (ret < 0)
1881 		return ret;
1882 
1883 	/* Ensure nothing is between tramp_1..tramp_40 */
1884 	BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) !=
1885 		     offsetofend(struct bpf_testmod_ops, tramp_40));
1886 	tramp = (void **)&__bpf_testmod_ops.tramp_1;
1887 	while (tramp <= (void **)&__bpf_testmod_ops.tramp_40)
1888 		*tramp++ = bpf_testmod_tramp;
1889 
1890 	return 0;
1891 }
1892 
1893 static void bpf_testmod_exit(void)
1894 {
1895 	/* Wait for all references to be dropped: bpf_kfunc_call_test_release(),
1896 	 * which currently resides in the kernel, can be called after
1897 	 * bpf_testmod is unloaded. Once the release function is moved into
1898 	 * the module, this wait can be removed.
1899 	 */
1900 	while (refcount_read(&prog_test_struct.cnt) > 1)
1901 		msleep(20);
1902 
1903 	/* Clean up irqwork and tasklet */
1904 	irq_work_sync(&ctx_check_irq);
1905 	tasklet_kill(&ctx_check_tasklet);
1906 
1907 	bpf_kfunc_close_sock();
1908 	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
1909 	unregister_bpf_testmod_uprobe();
1910 }
1911 
1912 module_init(bpf_testmod_init);
1913 module_exit(bpf_testmod_exit);
1914 
1915 MODULE_AUTHOR("Andrii Nakryiko");
1916 MODULE_DESCRIPTION("BPF selftests module");
1917 MODULE_LICENSE("Dual BSD/GPL");
1918