xref: /linux/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c (revision 6f7e6393d1ce636bb7ec77a7fe7b77458fddf701)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <linux/bpf.h>
4 #include <linux/btf.h>
5 #include <linux/btf_ids.h>
6 #include <linux/delay.h>
7 #include <linux/error-injection.h>
8 #include <linux/init.h>
9 #include <linux/module.h>
10 #include <linux/percpu-defs.h>
11 #include <linux/sysfs.h>
12 #include <linux/tracepoint.h>
13 #include <linux/net.h>
14 #include <linux/socket.h>
15 #include <linux/nsproxy.h>
16 #include <linux/inet.h>
17 #include <linux/in.h>
18 #include <linux/in6.h>
19 #include <linux/un.h>
20 #include <linux/filter.h>
21 #include <net/sock.h>
22 #include <linux/namei.h>
23 #include "bpf_testmod.h"
24 #include "bpf_testmod_kfunc.h"
25 
26 #define CREATE_TRACE_POINTS
27 #include "bpf_testmod-events.h"
28 
/* Timeout (seconds) applied to the test socket's sk_sndtimeo before
 * kernel_connect(), so a connect attempt cannot hang the selftest.
 */
#define CONNECT_TIMEOUT_SEC 1

/* Function-pointer typedefs emitted into BTF (see BTF_TYPE_EMIT below) to
 * exercise nested typedef resolution in the verifier/BTF tooling.
 */
typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

/* Per-CPU ksym; BPF programs observe writes done by bpf_testmod_test_mod_kfunc(). */
DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
/* Shared accumulator read back by the struct/union-argument selftests. */
long bpf_testmod_test_struct_arg_result;
/* sock_lock protects the single module-global kernel socket below. */
static DEFINE_MUTEX(sock_lock);
static struct socket *sock;

/* Argument types of assorted sizes/alignments used to exercise how struct
 * and union values are passed to traced functions.
 */
struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

/* Has a flexible array member; only ever passed by pointer. */
struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

struct bpf_testmod_struct_arg_5 {
	char a;
	short b;
	int c;
	long d;
};

union bpf_testmod_union_arg_1 {
	char a;
	short b;
	struct bpf_testmod_struct_arg_1 arg;
};

union bpf_testmod_union_arg_2 {
	int a;
	long b;
	struct bpf_testmod_struct_arg_2 arg;
};

/* Everything between here and __bpf_hook_end() is attachable by BPF. */
__bpf_hook_start();
78 
79 noinline int
80 bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
81 	bpf_testmod_test_struct_arg_result = a.a + a.b  + b + c;
82 	return bpf_testmod_test_struct_arg_result;
83 }
84 
/* As above, but with the by-value struct in the middle argument slot. */
noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}
90 
91 noinline int
92 bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
93 	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
94 	return bpf_testmod_test_struct_arg_result;
95 }
96 
/* Mixed scalar + two by-value structs spanning register/stack boundaries. */
noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

/* No-argument variant; just marks the accumulator. */
noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

/* Pointer to a struct ending in a flexible array member. */
noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

/* Six-arg mix ending in a by-value struct (spills onto the stack). */
noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

/* Like _7 but with a scalar following the by-value struct. */
noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

/* Nine-slot variant with a four-field struct near the end. */
noinline int
bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
			      short g, struct bpf_testmod_struct_arg_5 h, long i)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
		f + g + h.a + h.b + h.c + h.d + i;
	return bpf_testmod_test_struct_arg_result;
}

/* By-value union in the first argument slot. */
noinline int
bpf_testmod_test_union_arg_1(union bpf_testmod_union_arg_1 a, int b, int c)
{
	bpf_testmod_test_struct_arg_result = a.arg.a + b + c;
	return bpf_testmod_test_struct_arg_result;
}

/* By-value union in the last argument slot. */
noinline int
bpf_testmod_test_union_arg_2(int a, union bpf_testmod_union_arg_2 b)
{
	bpf_testmod_test_struct_arg_result = a + b.arg.a + b.arg.b;
	return bpf_testmod_test_struct_arg_result;
}

/* Pointer-to-struct argument (as opposed to by-value above). */
noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}
162 
/* Weak no-op with an intentionally long symbol name, used as an attach
 * target for name-length handling tests.
 */
__weak noinline void bpf_testmod_looooooooooooooooooooooooooooooong_name(void)
{
}

/* Write @i into the per-CPU ksym so BPF programs can verify the store. */
__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}
172 
/* Open-coded iterator constructor: yields @value, @cnt times.
 * Returns 0 on success, -EINVAL for a negative @cnt. NOTE(review): it->cnt is
 * written before the validation check — presumably so a later _destroy()
 * always sees an initialized count even on failure; confirm against the
 * open-coded iterator contract.
 */
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	it->cnt = cnt;

	if (cnt < 0)
		return -EINVAL;

	it->value = value;

	return 0;
}
184 
185 __bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it)
186 {
187 	if (it->cnt <= 0)
188 		return NULL;
189 
190 	it->cnt--;
191 
192 	return &it->value;
193 }
194 
/* Combine @val with the iterator's value; the __iter suffix marks the
 * argument as an iterator pointer for the verifier.
 */
__bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq* it__iter)
{
	if (it__iter->cnt < 0)
		return 0;

	return val + it__iter->value;
}

/* Iterator destructor: mark the iterator exhausted. */
__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}

/* No-op kfunc available to all program types. */
__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

/* Exercises dynptr args; the __nullable suffix permits a NULL second arg. */
__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
				       struct bpf_dynptr *ptr__nullable)
{
}

/* KF_ACQUIRE kfunc whose return type sits at a nonzero offset in the arg. */
__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
{
	return NULL;
}

/* KF_ACQUIRE kfunc whose return type sits at offset zero in the arg. */
__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
{
	return NULL;
}

/* KF_RELEASE counterpart for the nested-acquire tests above. */
__bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
{
}

/* The following stubs only exist so the verifier's trusted/RCU pointer
 * argument rules can be exercised; none have runtime behavior.
 */
__bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
{
}

__bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
{
}

/* KF_RET_NULL | KF_RCU_PROTECTED returns; always NULL in the test module. */
__bpf_kfunc struct task_struct *bpf_kfunc_ret_rcu_test(void)
{
	return NULL;
}

__bpf_kfunc int *bpf_kfunc_ret_rcu_test_nostruct(int rdonly_buf_size)
{
	return NULL;
}

/* Backing object handed out by the default-trusted-pointer test pair. */
static struct prog_test_member trusted_ptr;

__bpf_kfunc struct prog_test_member *bpf_kfunc_get_default_trusted_ptr_test(void)
{
	return &trusted_ptr;
}

__bpf_kfunc void bpf_kfunc_put_default_trusted_ptr_test(struct prog_test_member *trusted_ptr)
{
	/*
	 * This BPF kfunc doesn't actually have any put/KF_ACQUIRE
	 * semantics. We're simply wanting to simulate a BPF kfunc that takes a
	 * struct prog_test_member pointer as an argument.
	 */
}
272 
/* Allocate a refcounted test context (usage = 1). On allocation failure,
 * stores -ENOMEM in *err and returns NULL. GFP_ATOMIC because BPF programs
 * may call this from non-sleepable context.
 */
__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
	struct bpf_testmod_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		*err = -ENOMEM;
		return NULL;
	}
	refcount_set(&ctx->usage, 1);

	return ctx;
}

/* RCU callback freeing the context after a grace period. */
static void testmod_free_cb(struct rcu_head *head)
{
	struct bpf_testmod_ctx *ctx;

	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
	kfree(ctx);
}

/* Drop one reference; frees via RCU when the last reference goes away.
 * NULL is tolerated.
 */
__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
{
	if (!ctx)
		return;
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, testmod_free_cb);
}

/* void* destructor wrapper registered via bpf_testmod_dtor_ids below. */
__bpf_kfunc void bpf_testmod_ctx_release_dtor(void *ctx)
{
	bpf_testmod_ctx_release(ctx);
}
CFI_NOSEAL(bpf_testmod_ctx_release_dtor);
309 
/* Currently-registered struct_ops map for bpf_testmod_ops3, if any. */
static struct bpf_testmod_ops3 *st_ops3;

/* cfi_stubs for bpf_testmod_ops3. */
static int bpf_testmod_test_3(void)
{
	return 0;
}

static int bpf_testmod_test_4(void)
{
	return 0;
}

static struct bpf_testmod_ops3 __bpf_testmod_ops3 = {
	.test_1 = bpf_testmod_test_3,
	.test_2 = bpf_testmod_test_4,
};

/* Invoke the registered ops' test_1, tolerating no registration. */
static void bpf_testmod_test_struct_ops3(void)
{
	if (st_ops3)
		st_ops3->test_1();
}

/* NOTE(review): unlike bpf_testmod_test_struct_ops3() above, these two
 * kfuncs dereference st_ops3 without a NULL check — they appear to assume a
 * map is registered before they are called; confirm with the selftests.
 */
__bpf_kfunc void bpf_testmod_ops3_call_test_1(void)
{
	st_ops3->test_1();
}

__bpf_kfunc void bpf_testmod_ops3_call_test_2(void)
{
	st_ops3->test_2();
}
342 
/* Types carrying __user/__percpu BTF type tags for tag-propagation tests. */
struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

/* These functions exist as BTF attach targets; the direct dereference of a
 * tagged pointer is what the verifier-side tests examine. The BTF_TYPE_EMIT
 * calls force the typedef chain above into the module's BTF.
 */
noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}
377 
378 noinline int bpf_testmod_loop_test(int n)
379 {
380 	/* Make sum volatile, so smart compilers, such as clang, will not
381 	 * optimize the code by removing the loop.
382 	 */
383 	volatile int sum = 0;
384 	int i;
385 
386 	/* the primary goal of this test is to test LBR. Create a lot of
387 	 * branches in the function, so we can catch it easily.
388 	 */
389 	for (i = 0; i < n; i++)
390 		sum += i;
391 	return sum;
392 }
393 
/* Return a different category of (mostly bogus) pointer per @arg so probes
 * can exercise fault handling; NULL for any @arg outside 1..8 terminates the
 * caller's probing loop.
 */
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;   /* vsyscall page address */
#endif
	default: return NULL;
	}
}
412 
/* fentry/fexit attach targets with increasing argument counts; test1/test2
 * also fire tracepoints so raw_tp attachment can be tested on the same call.
 */
noinline int bpf_testmod_fentry_test1(int a)
{
	trace_bpf_testmod_fentry_test1_tp(a);

	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	trace_bpf_testmod_fentry_test2_tp(a, b);

	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

/* Call chain _1 -> _2 -> _3 -> leaf, giving stacktrace tests a known stack.
 * The empty asm keeps each frame from being optimized away or tail-called.
 */
noinline void bpf_testmod_stacktrace_test(void)
{
	/* used for stacktrace test as attach function */
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_3(void)
{
	bpf_testmod_stacktrace_test();
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_2(void)
{
	bpf_testmod_stacktrace_test_3();
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_1(void)
{
	bpf_testmod_stacktrace_test_2();
	asm volatile ("");
}

/* Set to 1 once all fentry_test* calls returned the expected values. */
int bpf_testmod_fentry_ok;
470 int bpf_testmod_fentry_ok;
471 
472 noinline ssize_t
473 bpf_testmod_test_read(struct file *file, struct kobject *kobj,
474 		      const struct bin_attribute *bin_attr,
475 		      char *buf, loff_t off, size_t len)
476 {
477 	struct bpf_testmod_test_read_ctx ctx = {
478 		.buf = buf,
479 		.off = off,
480 		.len = len,
481 	};
482 	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
483 	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
484 	struct bpf_testmod_struct_arg_3 *struct_arg3;
485 	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
486 	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
487 	union bpf_testmod_union_arg_1 union_arg1 = { .arg = {1} };
488 	union bpf_testmod_union_arg_2 union_arg2 = { .arg = {2, 3} };
489 	int i = 1;
490 
491 	while (bpf_testmod_return_ptr(i))
492 		i++;
493 
494 	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
495 	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
496 	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
497 	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
498 	(void)bpf_testmod_test_struct_arg_5();
499 	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
500 					    (void *)20, struct_arg4);
501 	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
502 					    (void *)20, struct_arg4, 23);
503 	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
504 					    21, 22, struct_arg5, 27);
505 
506 	(void)bpf_testmod_test_union_arg_1(union_arg1, 4, 5);
507 	(void)bpf_testmod_test_union_arg_2(6, union_arg2);
508 
509 	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
510 
511 	(void)trace_bpf_testmod_test_raw_tp_null_tp(NULL);
512 
513 	bpf_testmod_test_struct_ops3();
514 
515 	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
516 				sizeof(int)), GFP_KERNEL);
517 	if (struct_arg3 != NULL) {
518 		struct_arg3->b[0] = 1;
519 		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
520 		kfree(struct_arg3);
521 	}
522 
523 	/* This is always true. Use the check to make sure the compiler
524 	 * doesn't remove bpf_testmod_loop_test.
525 	 */
526 	if (bpf_testmod_loop_test(101) > 100)
527 		trace_bpf_testmod_test_read(current, &ctx);
528 
529 	trace_bpf_testmod_test_nullable_bare_tp(NULL);
530 
531 	/* Magic number to enable writable tp */
532 	if (len == 64) {
533 		struct bpf_testmod_test_writable_ctx writable = {
534 			.val = 1024,
535 		};
536 		trace_bpf_testmod_test_writable_bare_tp(&writable);
537 		if (writable.early_ret)
538 			return snprintf(buf, len, "%d\n", writable.val);
539 	}
540 
541 	if (bpf_testmod_fentry_test1(1) != 2 ||
542 	    bpf_testmod_fentry_test2(2, 3) != 5 ||
543 	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
544 	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
545 			21, 22) != 133 ||
546 	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
547 			21, 22, 23, 24, 25, 26) != 231)
548 		goto out;
549 
550 	bpf_testmod_stacktrace_test_1();
551 
552 	bpf_testmod_fentry_ok = 1;
553 out:
554 	return -EIO; /* always fail */
555 }
556 EXPORT_SYMBOL(bpf_testmod_test_read);
557 ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
558 
/* sysfs write handler: fires the write tracepoint with the write context,
 * then fails unconditionally (error injection can override).
 */
noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		      const struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare_tp(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);
576 
/* Shares its name with an in-kernel function to test shadowed-symbol attach. */
noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

/* End of the BPF-attachable region started by __bpf_hook_start(). */
__bpf_hook_end();

/* /sys/kernel/bpf_testmod — read/write entry points for the tests above. */
static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};
590 
/* bpf_testmod_uprobe sysfs attribute is so far enabled for x86_64 only,
 * please see test_uretprobe_regs_change test
 */
#ifdef __x86_64__

/* Entry consumer: clobber %rcx with a known pattern for the regs test. */
static int
uprobe_handler(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data)
{
	regs->cx = 0x87654321feebdaed;
	return 0;
}

/* Return consumer: clobber %rax and %r11 with known patterns. */
static int
uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
		   struct pt_regs *regs, __u64 *data)

{
	regs->ax  = 0x12345678deadbeef;
	regs->r11 = (u64) -1;
	return 0;
}

/* Single module-wide uprobe registration (path + handle + consumer). */
struct testmod_uprobe {
	struct path path;
	struct uprobe *uprobe;
	struct uprobe_consumer consumer;
};

static DEFINE_MUTEX(testmod_uprobe_mutex);

static struct testmod_uprobe uprobe = {
	.consumer.handler = uprobe_handler,
	.consumer.ret_handler = uprobe_ret_handler,
};
625 
/* Register the single test uprobe at @offset in the writer's own executable
 * (/proc/self/exe). -EBUSY if one is already registered; the unlocked check
 * is a fast path, re-checked under the mutex.
 */
static int testmod_register_uprobe(loff_t offset)
{
	int err = -EBUSY;

	if (uprobe.uprobe)
		return -EBUSY;

	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe)
		goto out;

	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
	if (err)
		goto out;

	uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry),
					offset, 0, &uprobe.consumer);
	if (IS_ERR(uprobe.uprobe)) {
		/* Drop the path reference and reset state on failure. */
		err = PTR_ERR(uprobe.uprobe);
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}
out:
	mutex_unlock(&testmod_uprobe_mutex);
	return err;
}
653 
/* Tear down the test uprobe, if registered, and drop the path reference.
 * Safe to call when nothing is registered.
 */
static void testmod_unregister_uprobe(void)
{
	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe) {
		uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
		uprobe_unregister_sync();
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}

	mutex_unlock(&testmod_uprobe_mutex);
}
667 
/* sysfs write handler: a nonzero decimal/hex offset registers the uprobe at
 * that offset, zero unregisters it. Returns the consumed length or an error.
 */
static ssize_t
bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
			 const struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t len)
{
	unsigned long offset = 0;
	int err = 0;

	if (kstrtoul(buf, 0, &offset))
		return -EINVAL;

	if (offset)
		err = testmod_register_uprobe(offset);
	else
		testmod_unregister_uprobe();

	return err ?: strlen(buf);
}

/* /sys/kernel/bpf_testmod_uprobe — write-only control file. */
static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
	.write = bpf_testmod_uprobe_write,
};
691 
/* Expose the uprobe control file under /sys/kernel. */
static int register_bpf_testmod_uprobe(void)
{
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

/* Drop any live uprobe first, then remove the sysfs file. */
static void unregister_bpf_testmod_uprobe(void)
{
	testmod_unregister_uprobe();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

#else
/* Non-x86_64: the uprobe sysfs attribute is not provided; stubs succeed. */
static int register_bpf_testmod_uprobe(void)
{
	return 0;
}

static void unregister_bpf_testmod_uprobe(void) { }
#endif
711 
712 BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
713 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
714 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
715 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
716 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
717 BTF_ID_FLAGS(func, bpf_kfunc_common_test)
718 BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
719 BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
720 BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
721 BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE)
722 BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test)
723 BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test)
724 BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test)
725 BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
726 BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test, KF_RET_NULL | KF_RCU_PROTECTED)
727 BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test_nostruct, KF_RET_NULL | KF_RCU_PROTECTED)
728 BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
729 BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
730 BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1)
731 BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2)
732 BTF_ID_FLAGS(func, bpf_kfunc_get_default_trusted_ptr_test);
733 BTF_ID_FLAGS(func, bpf_kfunc_put_default_trusted_ptr_test);
734 BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)
735 
/* (type, destructor) pair registered so the kernel can release
 * bpf_testmod_ctx objects owned by maps/programs.
 */
BTF_ID_LIST(bpf_testmod_dtor_ids)
BTF_ID(struct, bpf_testmod_ctx)
BTF_ID(func, bpf_testmod_ctx_release_dtor)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};
744 
745 __bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
746 {
747 	return a + b + c + d;
748 }
749 
/* Two-scalar sum; @sk is unused. */
__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

/* Identity on the socket pointer (PTR_TO_BTF_ID pass-through test). */
__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}
767 
/* Single refcounted test object; .next points at itself so walks never leave
 * the object.
 */
static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

/* KF_ACQUIRE: take a reference on the static test object and return it. */
__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

/* Must be rejected by the verifier before it can run; warn if reached. */
__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

/* Must be rejected by the verifier before it can run; warn if reached. */
__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

/* Must be rejected by the verifier before it can run; warn if reached. */
__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}
798 
799 static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
800 {
801 	if (size > 2 * sizeof(int))
802 		return NULL;
803 
804 	return (int *)p;
805 }
806 
/* Read-write view into the test object, bounded by rdwr_buf_size. */
__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

/* Read-only view into the test object, bounded by rdonly_buf_size. */
__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* the next 2 ones can't be really used for testing expect to ensure
 * that the verifier rejects the call.
 * Acquire functions must return struct pointers, so these ones are
 * failing.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}
833 
/* The stubs below have no runtime behavior; they exist purely so the
 * verifier's argument-type rules (ctx, pass/fail struct shapes, mem+len
 * pairs, referenced pointers, destructive and sleepable flags) can be
 * exercised from the selftests.
 */
__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

/* The __sz suffix marks mem__sz as the size of @mem for the verifier. */
__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
{
}
887 
888 __bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
889 {
890 	int proto;
891 	int err;
892 
893 	mutex_lock(&sock_lock);
894 
895 	if (sock) {
896 		pr_err("%s called without releasing old sock", __func__);
897 		err = -EPERM;
898 		goto out;
899 	}
900 
901 	switch (args->af) {
902 	case AF_INET:
903 	case AF_INET6:
904 		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
905 		break;
906 	case AF_UNIX:
907 		proto = PF_UNIX;
908 		break;
909 	default:
910 		pr_err("invalid address family %d\n", args->af);
911 		err = -EINVAL;
912 		goto out;
913 	}
914 
915 	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
916 			       proto, &sock);
917 
918 	if (!err)
919 		/* Set timeout for call to kernel_connect() to prevent it from hanging,
920 		 * and consider the connection attempt failed if it returns
921 		 * -EINPROGRESS.
922 		 */
923 		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
924 out:
925 	mutex_unlock(&sock_lock);
926 
927 	return err;
928 }
929 
/* Release the module-global socket if one exists; idempotent. */
__bpf_kfunc void bpf_kfunc_close_sock(void)
{
	mutex_lock(&sock_lock);

	if (sock) {
		sock_release(sock);
		sock = NULL;
	}

	mutex_unlock(&sock_lock);
}
941 
/* connect() the module-global socket to the address in @args.
 * -EINVAL if addrlen exceeds the embedded storage, -EPERM if no socket.
 */
__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_connect(sock, (struct sockaddr_unsized *)&args->addr,
			     args->addrlen, 0);
out:
	mutex_unlock(&sock_lock);

	return err;
}

/* bind() the module-global socket; same error conventions as connect. */
__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_bind(sock, (struct sockaddr_unsized *)&args->addr, args->addrlen);
out:
	mutex_unlock(&sock_lock);

	return err;
}

/* listen() on the module-global socket with a fixed backlog of 128. */
__bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_listen(sock, 128);
out:
	mutex_unlock(&sock_lock);

	return err;
}
1005 
/* Send @args->msg via kernel_sendmsg() on the module-global socket; writes
 * back msg_namelen so callers can observe it. Validates both the address and
 * message lengths against their embedded storage.
 */
__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

/* Same as above but through sock_sendmsg(), so the iov_iter is set up by
 * hand with iov_iter_kvec().
 */
__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = sock_sendmsg(sock, &msg);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}
1070 
/* Fetch the local address of the module-global socket into @args.
 * kernel_getsockname() returns the address length on success, which is
 * folded into args->addrlen with the return value normalized to 0.
 */
__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

/* Peer-address counterpart of getsockname above; same conventions. */
__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}
1118 
/* Registered bpf_testmod_st_ops map, guarded by st_ops_mutex. */
static DEFINE_MUTEX(st_ops_mutex);
static struct bpf_testmod_st_ops *st_ops;

/* The three forwarders below invoke the corresponding registered struct_ops
 * callback under the mutex, returning -1 if none is registered.
 */
__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_prologue)
		ret = st_ops->test_prologue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_epilogue)
		ret = st_ops->test_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_pro_epilogue)
		ret = st_ops->test_pro_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

/* Add 10 to args->a in place and return the new value. */
__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
{
	args->a += 10;
	return args->a;
}
1163 
/* Forward declarations for kfuncs defined elsewhere in the module; the
 * struct bpf_prog_aux * trailing args are implicit verifier-supplied args.
 */
__bpf_kfunc int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id);
__bpf_kfunc int bpf_kfunc_multi_st_ops_test_1_assoc(struct st_ops_args *args, struct bpf_prog_aux *aux);

__bpf_kfunc int bpf_kfunc_implicit_arg(int a, struct bpf_prog_aux *aux);
__bpf_kfunc int bpf_kfunc_implicit_arg_legacy(int a, int b, struct bpf_prog_aux *aux);
__bpf_kfunc int bpf_kfunc_implicit_arg_legacy_impl(int a, int b, struct bpf_prog_aux *aux);

/* hook targets */
noinline void bpf_testmod_test_hardirq_fn(void) { barrier(); }
noinline void bpf_testmod_test_softirq_fn(void) { barrier(); }

/* Tasklet for SoftIRQ context */
static void ctx_check_tasklet_fn(struct tasklet_struct *t)
{
	bpf_testmod_test_softirq_fn();
}

DECLARE_TASKLET(ctx_check_tasklet, ctx_check_tasklet_fn);

/* IRQ Work for HardIRQ context */
static void ctx_check_irq_fn(struct irq_work *work)
{
	bpf_testmod_test_hardirq_fn();
	tasklet_schedule(&ctx_check_tasklet);
}

static struct irq_work ctx_check_irq = IRQ_WORK_INIT_HARD(ctx_check_irq_fn);

/* The kfunc trigger: queue the irq_work, which runs the hardirq hook and
 * then schedules the tasklet to run the softirq hook.
 */
__bpf_kfunc void bpf_kfunc_trigger_ctx_check(void)
{
	irq_work_queue(&ctx_check_irq);
}
1196 }
1197 
/* kfuncs exported by this module; registered for several program types in
 * bpf_testmod_init(). Flags (KF_*) describe verifier-visible semantics.
 */
BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
/* Socket kfuncs: sleepable since they perform blocking socket operations. */
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10)
BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1)
/* KF_IMPLICIT_ARGS: trailing bpf_prog_aux argument is supplied by the verifier. */
BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1_assoc, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg_legacy, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg_legacy_impl)
BTF_ID_FLAGS(func, bpf_kfunc_trigger_ctx_check)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
1245 
/* struct_ops .init callback: no BTF fixups needed for this test type. */
static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}

/* Delegate ctx access checking to the common tracing BTF helper. */
static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}
1258 
1259 static int bpf_testmod_ops_init_member(const struct btf_type *t,
1260 				       const struct btf_member *member,
1261 				       void *kdata, const void *udata)
1262 {
1263 	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
1264 		/* For data fields, this function has to copy it and return
1265 		 * 1 to indicate that the data has been handled by the
1266 		 * struct_ops type, or the verifier will reject the map if
1267 		 * the value of the data field is not zero.
1268 		 */
1269 		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
1270 		return 1;
1271 	}
1272 	return 0;
1273 }
1274 
/* kfunc ID set registered for several program types in bpf_testmod_init(). */
static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};

/* Verifier callbacks shared by most struct_ops types in this module. */
static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.get_func_proto	 = bpf_base_func_proto,
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

/* Variant without .get_func_proto; used by bpf_testmod_ops3 below. */
static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};
1288 
/* struct_ops .reg: invoke the attached callbacks once at attach time so the
 * test programs behind test_1/test_2 get exercised.
 */
static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops *ops = kdata;

	if (ops->test_1)
		ops->test_1();
	/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
	 * initialized, so we need to check for NULL.
	 */
	if (ops->test_2)
		ops->test_2(4, ops->data);

	return 0;
}
1303 
/* struct_ops .unreg: nothing to tear down for this test type. */
static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}

/* Default/stub implementations backing the CFI stubs below. */
static int bpf_testmod_test_1(void)
{
	return 0;
}

static void bpf_testmod_test_2(int a, int b)
{
}

/* Shared filler for the tramp_1..tramp_40 slots; installed in
 * bpf_testmod_init().
 */
static int bpf_testmod_tramp(int value)
{
	return 0;
}

/* CFI stub; the __nullable suffix marks the task argument as possibly NULL. */
static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}

/* CFI stub; the __ref suffix marks the task argument as referenced. */
static int bpf_testmod_ops__test_refcounted(int dummy,
					    struct task_struct *task__ref)
{
	return 0;
}

/* CFI stub returning a (possibly NULL) task pointer. */
static struct task_struct *
bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref,
				      struct cgroup *cgrp)
{
	return NULL;
}

/* CFI stubs for bpf_testmod_ops; tramp_* slots are filled at module init. */
static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
	.test_refcounted = bpf_testmod_ops__test_refcounted,
	.test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr,
};

struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};
1359 
1360 static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
1361 {
1362 	struct bpf_testmod_ops2 *ops = kdata;
1363 
1364 	ops->test_1();
1365 	return 0;
1366 }
1367 
/* CFI stubs for bpf_testmod_ops2: only test_1 exists in this type. */
static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};

struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};
1382 
1383 static int st_ops3_reg(void *kdata, struct bpf_link *link)
1384 {
1385 	int err = 0;
1386 
1387 	mutex_lock(&st_ops_mutex);
1388 	if (st_ops3) {
1389 		pr_err("st_ops has already been registered\n");
1390 		err = -EEXIST;
1391 		goto unlock;
1392 	}
1393 	st_ops3 = kdata;
1394 
1395 unlock:
1396 	mutex_unlock(&st_ops_mutex);
1397 	return err;
1398 }
1399 
/* struct_ops .unreg for bpf_testmod_ops3: drop the registered instance. */
static void st_ops3_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops3 = NULL;
	mutex_unlock(&st_ops_mutex);
}
1406 
1407 static void test_1_recursion_detected(struct bpf_prog *prog)
1408 {
1409 	struct bpf_prog_stats *stats;
1410 
1411 	stats = this_cpu_ptr(prog->stats);
1412 	printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu",
1413 	       u64_stats_read(&stats->misses));
1414 }
1415 
/* Request a private stack and install a recursion callback for programs
 * attached to the test_1 slot of bpf_testmod_ops3; other slots are accepted
 * unchanged.
 */
static int st_ops3_check_member(const struct btf_type *t,
				const struct btf_member *member,
				const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct bpf_testmod_ops3, test_1):
		prog->aux->priv_stack_requested = true;
		prog->aux->recursion_detected = test_1_recursion_detected;
		fallthrough;
	default:
		break;
	}
	return 0;
}

struct bpf_struct_ops bpf_testmod_ops3 = {
	.verifier_ops = &bpf_testmod_verifier_ops3,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = st_ops3_reg,
	.unreg = st_ops3_unreg,
	.check_member = st_ops3_check_member,
	.cfi_stubs = &__bpf_testmod_ops3,
	.name = "bpf_testmod_ops3",
	.owner = THIS_MODULE,
};
1444 
/* CFI stubs for bpf_testmod_st_ops; the interesting behavior is injected by
 * the gen_prologue/gen_epilogue callbacks below.
 */
static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
{
	return 0;
}

/* BTF IDs of the cgroup kfuncs emitted by the generated prologue/epilogue;
 * resolved in st_ops_init().
 */
static int bpf_cgroup_from_id_id;
static int bpf_cgroup_release_id;
1462 
/* Prologue variant that also calls kfuncs: per the pseudo-code below, it adds
 * 1000 to args->a only when bpf_cgroup_from_id(0) returns NULL, releasing the
 * cgroup otherwise. Returns the number of instructions emitted.
 */
static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct_write,
					  const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	/* r8 = r1; // r8 will be "u64 *ctx".
	 * r1 = 0;
	 * r0 = bpf_cgroup_from_id(r1);
	 * if r0 != 0 goto pc+5;
	 * r6 = r8[0]; // r6 will be "struct st_ops *args".
	 * r7 = r6->a;
	 * r7 += 1000;
	 * r6->a = r7;
	 * goto pc+2;
	 * r1 = r0;
	 * bpf_cgroup_release(r1);
	 * r1 = r8;
	 */
	*insn++ = BPF_MOV64_REG(BPF_REG_8, BPF_REG_1);
	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 5);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_8, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8);
	/* Keep the program's original first instruction at the end. */
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

/* Epilogue variant that also calls kfuncs: per the pseudo-code below, when
 * bpf_cgroup_from_id(0) returns NULL it adds 10000 to args->a; either way the
 * return value becomes r6 * 2. Returns the number of instructions emitted.
 */
static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
					  s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	/* r1 = 0;
	 * r6 = 0;
	 * r0 = bpf_cgroup_from_id(r1);
	 * if r0 != 0 goto pc+6;
	 * r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
	 * r6 = r1->a;
	 * r6 += 10000;
	 * r1->a = r6;
	 * goto pc+2
	 * r1 = r0;
	 * bpf_cgroup_release(r1);
	 * r0 = r6;
	 * r0 *= 2;
	 * BPF_EXIT;
	 */
	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
	*insn++ = BPF_MOV64_IMM(BPF_REG_6, 0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 6);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}
1537 
/* Programs whose name starts with this prefix get the kfunc-calling
 * prologue/epilogue variants above instead of the plain sequences below.
 */
#define KFUNC_PRO_EPI_PREFIX "test_kfunc_"
/* Inject "args->a += 1000" ahead of test_prologue/test_pro_epilogue programs;
 * all other attach points get no prologue (returns 0 instructions).
 */
static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
		return st_ops_gen_prologue_with_kfunc(insn_buf, direct_write, prog);

	/* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx".
	 * r7 = r6->a;
	 * r7 += 1000;
	 * r6->a = r7;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

/* Inject "args->a += 10000; return args->a * 2" after test_epilogue/
 * test_pro_epilogue programs; other attach points get no epilogue.
 */
static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
			       s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
		return st_ops_gen_epilogue_with_kfunc(insn_buf, prog, ctx_stack_off);

	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
	 * r6 = r1->a;
	 * r6 += 10000;
	 * r1->a = r6;
	 * r0 = r6;
	 * r0 *= 2;
	 * BPF_EXIT;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}
1597 
1598 static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
1599 				    const struct bpf_reg_state *reg,
1600 				    int off, int size)
1601 {
1602 	if (off < 0 || off + size > sizeof(struct st_ops_args))
1603 		return -EACCES;
1604 	return 0;
1605 }
1606 
/* Verifier callbacks for bpf_testmod_st_ops, including the prologue/epilogue
 * generators exercised by the pro/epilogue selftests.
 */
static const struct bpf_verifier_ops st_ops_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
	.btf_struct_access = st_ops_btf_struct_access,
	.gen_prologue = st_ops_gen_prologue,
	.gen_epilogue = st_ops_gen_epilogue,
	.get_func_proto = bpf_base_func_proto,
};

static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
	.test_prologue = bpf_test_mod_st_ops__test_prologue,
	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
};
1620 
1621 static int st_ops_reg(void *kdata, struct bpf_link *link)
1622 {
1623 	int err = 0;
1624 
1625 	mutex_lock(&st_ops_mutex);
1626 	if (st_ops) {
1627 		pr_err("st_ops has already been registered\n");
1628 		err = -EEXIST;
1629 		goto unlock;
1630 	}
1631 	st_ops = kdata;
1632 
1633 unlock:
1634 	mutex_unlock(&st_ops_mutex);
1635 	return err;
1636 }
1637 
/* struct_ops .unreg: drop the single registered instance. */
static void st_ops_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops = NULL;
	mutex_unlock(&st_ops_mutex);
}

/* Resolve the BTF IDs of the cgroup kfuncs emitted by the
 * gen_prologue/gen_epilogue callbacks.
 * NOTE(review): kfunc_btf is not released here — confirm whether
 * bpf_find_btf_id() hands back a reference that needs a btf_put().
 */
static int st_ops_init(struct btf *btf)
{
	struct btf *kfunc_btf;

	bpf_cgroup_from_id_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf);
	bpf_cgroup_release_id = bpf_find_btf_id("bpf_cgroup_release", BTF_KIND_FUNC, &kfunc_btf);
	if (bpf_cgroup_from_id_id < 0 || bpf_cgroup_release_id < 0)
		return -EINVAL;

	return 0;
}

/* No member needs special copying for this struct_ops type. */
static int st_ops_init_member(const struct btf_type *t,
			      const struct btf_member *member,
			      void *kdata, const void *udata)
{
	return 0;
}

static struct bpf_struct_ops testmod_st_ops = {
	.verifier_ops = &st_ops_verifier_ops,
	.init = st_ops_init,
	.init_member = st_ops_init_member,
	.reg = st_ops_reg,
	.unreg = st_ops_unreg,
	.cfi_stubs = &st_ops_cfi_stubs,
	.name = "bpf_testmod_st_ops",
	.owner = THIS_MODULE,
};
1674 
/* Registry of attached bpf_testmod_multi_st_ops instances, keyed by
 * struct_ops map id and protected by multi_st_ops_lock.
 */
struct hlist_head multi_st_ops_list;
static DEFINE_SPINLOCK(multi_st_ops_lock);

static int multi_st_ops_init(struct btf *btf)
{
	/* The static definitions above are already zero/lock-initialized;
	 * re-initializing here is presumably harmless — confirm .init cannot
	 * run while entries are registered.
	 */
	spin_lock_init(&multi_st_ops_lock);
	INIT_HLIST_HEAD(&multi_st_ops_list);

	return 0;
}

/* No member needs special copying for this struct_ops type. */
static int multi_st_ops_init_member(const struct btf_type *t,
				    const struct btf_member *member,
				    void *kdata, const void *udata)
{
	return 0;
}
1692 
1693 static struct bpf_testmod_multi_st_ops *multi_st_ops_find_nolock(u32 id)
1694 {
1695 	struct bpf_testmod_multi_st_ops *st_ops;
1696 
1697 	hlist_for_each_entry(st_ops, &multi_st_ops_list, node) {
1698 		if (st_ops->id == id)
1699 			return st_ops;
1700 	}
1701 
1702 	return NULL;
1703 }
1704 
1705 /* Call test_1() of the struct_ops map identified by the id */
1706 int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id)
1707 {
1708 	struct bpf_testmod_multi_st_ops *st_ops;
1709 	unsigned long flags;
1710 	int ret = -1;
1711 
1712 	spin_lock_irqsave(&multi_st_ops_lock, flags);
1713 	st_ops = multi_st_ops_find_nolock(id);
1714 	if (st_ops)
1715 		ret = st_ops->test_1(args);
1716 	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1717 
1718 	return ret;
1719 }
1720 
1721 /* Call test_1() of the associated struct_ops map */
1722 int bpf_kfunc_multi_st_ops_test_1_assoc(struct st_ops_args *args, struct bpf_prog_aux *aux)
1723 {
1724 	struct bpf_testmod_multi_st_ops *st_ops;
1725 	int ret = -1;
1726 
1727 	st_ops = (struct bpf_testmod_multi_st_ops *)bpf_prog_get_assoc_struct_ops(aux);
1728 	if (st_ops)
1729 		ret = st_ops->test_1(args);
1730 
1731 	return ret;
1732 }
1733 
/* KF_IMPLICIT_ARGS kfunc: aux is supplied by the verifier. Returns a when
 * positive and aux is set, -EINVAL otherwise.
 */
int bpf_kfunc_implicit_arg(int a, struct bpf_prog_aux *aux)
{
	if (!aux || a <= 0)
		return -EINVAL;

	return a;
}
1740 
/* Legacy-shape KF_IMPLICIT_ARGS kfunc: returns a + b if aux is set,
 * -EINVAL otherwise.
 */
int bpf_kfunc_implicit_arg_legacy(int a, int b, struct bpf_prog_aux *aux)
{
	return aux ? a + b : -EINVAL;
}
1747 
/* Same logic as bpf_kfunc_implicit_arg_legacy() but registered without
 * KF_IMPLICIT_ARGS (see the kfunc id table above).
 */
int bpf_kfunc_implicit_arg_legacy_impl(int a, int b, struct bpf_prog_aux *aux)
{
	return bpf_kfunc_implicit_arg_legacy(a, b, aux);
}

/* struct_ops .reg: add the instance to the registry keyed by its struct_ops
 * map id; rejects duplicates and instances without test_1.
 */
static int multi_st_ops_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_multi_st_ops *st_ops =
		(struct bpf_testmod_multi_st_ops *)kdata;
	unsigned long flags;
	int err = 0;
	u32 id;

	/* test_1 is mandatory: lookup callers invoke it without a NULL check. */
	if (!st_ops->test_1)
		return -EINVAL;

	id = bpf_struct_ops_id(kdata);

	spin_lock_irqsave(&multi_st_ops_lock, flags);
	if (multi_st_ops_find_nolock(id)) {
		pr_err("multi_st_ops(id:%d) has already been registered\n", id);
		err = -EEXIST;
		goto unlock;
	}

	st_ops->id = id;
	hlist_add_head(&st_ops->node, &multi_st_ops_list);
unlock:
	spin_unlock_irqrestore(&multi_st_ops_lock, flags);

	return err;
}

/* struct_ops .unreg: remove the instance from the registry, if present. */
static void multi_st_ops_unreg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_multi_st_ops *st_ops;
	unsigned long flags;
	u32 id;

	id = bpf_struct_ops_id(kdata);

	spin_lock_irqsave(&multi_st_ops_lock, flags);
	st_ops = multi_st_ops_find_nolock(id);
	if (st_ops)
		hlist_del(&st_ops->node);
	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
}
1795 
/* CFI stub for the single callback of bpf_testmod_multi_st_ops. */
static int bpf_testmod_multi_st_ops__test_1(struct st_ops_args *args)
{
	return 0;
}

static struct bpf_testmod_multi_st_ops multi_st_ops_cfi_stubs = {
	.test_1 = bpf_testmod_multi_st_ops__test_1,
};

struct bpf_struct_ops testmod_multi_st_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = multi_st_ops_init,
	.init_member = multi_st_ops_init_member,
	.reg = multi_st_ops_reg,
	.unreg = multi_st_ops_unreg,
	.cfi_stubs = &multi_st_ops_cfi_stubs,
	.name = "bpf_testmod_multi_st_ops",
	.owner = THIS_MODULE,
};
1815 
/* Defined in the kernel; used below as a sanity check at module init. */
extern int bpf_fentry_test1(int a);

static int bpf_testmod_init(void)
{
	const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
		{
			.btf_id		= bpf_testmod_dtor_ids[0],
			.kfunc_btf_id	= bpf_testmod_dtor_ids[1]
		},
	};
	void **tramp;
	int ret;

	/* Register kfunc sets, struct_ops types and dtors; the ?: chain
	 * short-circuits at the first failure.
	 * NOTE(review): earlier registrations are not unwound on failure —
	 * acceptable for a test module, verify before reusing this pattern.
	 */
	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3);
	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
	ret = ret ?: register_bpf_struct_ops(&testmod_multi_st_ops, bpf_testmod_multi_st_ops);
	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
						 ARRAY_SIZE(bpf_testmod_dtors),
						 THIS_MODULE);
	if (ret < 0)
		return ret;
	/* Quick smoke test that calling back into the kernel works. */
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	sock = NULL;
	mutex_init(&sock_lock);
	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	if (ret < 0)
		return ret;
	ret = register_bpf_testmod_uprobe();
	if (ret < 0)
		return ret;

	/* Ensure nothing is between tramp_1..tramp_40 */
	BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) !=
		     offsetofend(struct bpf_testmod_ops, tramp_40));
	/* Point every tramp_* slot at the shared bpf_testmod_tramp stub. */
	tramp = (void **)&__bpf_testmod_ops.tramp_1;
	while (tramp <= (void **)&__bpf_testmod_ops.tramp_40)
		*tramp++ = bpf_testmod_tramp;

	return 0;
}
1864 
static void bpf_testmod_exit(void)
{
	/* Need to wait for all references to be dropped because
	 * bpf_kfunc_call_test_release() which currently resides in kernel can
	 * be called after bpf_testmod is unloaded. Once release function is
	 * moved into the module this wait can be removed.
	 */
	while (refcount_read(&prog_test_struct.cnt) > 1)
		msleep(20);

	/* Clean up irqwork and tasklet */
	irq_work_sync(&ctx_check_irq);
	tasklet_kill(&ctx_check_tasklet);

	bpf_kfunc_close_sock();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	unregister_bpf_testmod_uprobe();
}

/* Standard module entry/exit and metadata. */
module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");
1890