xref: /linux/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c (revision c574fb2ed7c96f87fc0e5295e910e646a7ee4dfa)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <linux/bpf.h>
4 #include <linux/btf.h>
5 #include <linux/btf_ids.h>
6 #include <linux/delay.h>
7 #include <linux/error-injection.h>
8 #include <linux/init.h>
9 #include <linux/module.h>
10 #include <linux/percpu-defs.h>
11 #include <linux/sysfs.h>
12 #include <linux/tracepoint.h>
13 #include <linux/net.h>
14 #include <linux/socket.h>
15 #include <linux/nsproxy.h>
16 #include <linux/inet.h>
17 #include <linux/in.h>
18 #include <linux/in6.h>
19 #include <linux/un.h>
20 #include <linux/filter.h>
21 #include <net/sock.h>
22 #include <linux/namei.h>
23 #include "bpf_testmod.h"
24 #include "bpf_testmod_kfunc.h"
25 
26 #define CREATE_TRACE_POINTS
27 #include "bpf_testmod-events.h"
28 
29 #define CONNECT_TIMEOUT_SEC 1
30 
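/* These function-pointer typedefs are never called directly; they are pulled
 * into the module's BTF via BTF_TYPE_EMIT() in
 * bpf_testmod_test_btf_type_tag_user_1() so BTF-oriented selftests can look
 * them up by name.
 */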
31 typedef int (*func_proto_typedef)(long);
32 typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
33 typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);
34 
35 DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
36 long bpf_testmod_test_struct_arg_result;
37 static DEFINE_MUTEX(sock_lock);
38 static struct socket *sock;
39 
40 struct bpf_testmod_struct_arg_1 {
41 	int a;
42 };
43 struct bpf_testmod_struct_arg_2 {
44 	long a;
45 	long b;
46 };
47 
48 struct bpf_testmod_struct_arg_3 {
49 	int a;
50 	int b[];
51 };
52 
53 struct bpf_testmod_struct_arg_4 {
54 	u64 a;
55 	int b;
56 };
57 
58 struct bpf_testmod_struct_arg_5 {
59 	char a;
60 	short b;
61 	int c;
62 	long d;
63 };
64 
65 __bpf_hook_start();
66 
67 noinline int
68 bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
69 	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
70 	return bpf_testmod_test_struct_arg_result;
71 }
72 
73 noinline int
74 bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
75 	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
76 	return bpf_testmod_test_struct_arg_result;
77 }
78 
79 noinline int
80 bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
81 	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
82 	return bpf_testmod_test_struct_arg_result;
83 }
84 
85 noinline int
86 bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
87 			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
88 	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
89 	return bpf_testmod_test_struct_arg_result;
90 }
91 
92 noinline int
93 bpf_testmod_test_struct_arg_5(void) {
94 	bpf_testmod_test_struct_arg_result = 1;
95 	return bpf_testmod_test_struct_arg_result;
96 }
97 
98 noinline int
99 bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
100 	bpf_testmod_test_struct_arg_result = a->b[0];
101 	return bpf_testmod_test_struct_arg_result;
102 }
103 
104 noinline int
105 bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
106 			      struct bpf_testmod_struct_arg_4 f)
107 {
108 	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
109 		(long)e + f.a + f.b;
110 	return bpf_testmod_test_struct_arg_result;
111 }
112 
113 noinline int
114 bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
115 			      struct bpf_testmod_struct_arg_4 f, int g)
116 {
117 	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
118 		(long)e + f.a + f.b + g;
119 	return bpf_testmod_test_struct_arg_result;
120 }
121 
122 noinline int
123 bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
124 			      short g, struct bpf_testmod_struct_arg_5 h, long i)
125 {
126 	bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
127 		f + g + h.a + h.b + h.c + h.d + i;
128 	return bpf_testmod_test_struct_arg_result;
129 }
130 
131 noinline int
132 bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
133 	bpf_testmod_test_struct_arg_result = a->a;
134 	return bpf_testmod_test_struct_arg_result;
135 }
136 
137 __weak noinline void bpf_testmod_looooooooooooooooooooooooooooooong_name(void)
138 {
139 }
140 
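/* Store @i in the per-cpu variable bpf_testmod_ksym_percpu defined above, so
 * a BPF program that references that variable as a per-cpu ksym can observe
 * the update.
 */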
141 __bpf_kfunc void
142 bpf_testmod_test_mod_kfunc(int i)
143 {
144 	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
145 }
146 
147 __bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
148 {
149 	it->cnt = cnt;
150 
151 	if (cnt < 0)
152 		return -EINVAL;
153 
154 	it->value = value;
155 
156 	return 0;
157 }
158 
159 __bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
160 {
161 	if (it->cnt <= 0)
162 		return NULL;
163 
164 	it->cnt--;
165 
166 	return &it->value;
167 }
168 
169 __bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq *it__iter)
170 {
171 	if (it__iter->cnt < 0)
172 		return 0;
173 
174 	return val + it__iter->value;
175 }
176 
177 __bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
178 {
179 	it->cnt = 0;
180 }
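
/* The bpf_iter_testmod_seq_* kfuncs above implement an open-coded iterator
 * that yields @value @cnt times. A minimal sketch of how a BPF program might
 * drive it (illustrative only, not part of this module):
 *
 *	struct bpf_iter_testmod_seq it;
 *	s64 *v, sum = 0;
 *
 *	if (!bpf_iter_testmod_seq_new(&it, 100, 3))
 *		while ((v = bpf_iter_testmod_seq_next(&it)))
 *			sum += *v;
 *	bpf_iter_testmod_seq_destroy(&it);
 */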
181 
182 __bpf_kfunc void bpf_kfunc_common_test(void)
183 {
184 }
185 
186 __bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
187 				       struct bpf_dynptr *ptr__nullable)
188 {
189 }
190 
191 __bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
192 {
193 	return NULL;
194 }
195 
196 __bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
197 {
198 	return NULL;
199 }
200 
201 __bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
202 {
203 }
204 
205 __bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
206 {
207 }
208 
209 __bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
210 {
211 }
212 
213 __bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
214 {
215 }
216 
217 __bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
218 {
219 }
220 
221 __bpf_kfunc struct bpf_testmod_ctx *
222 bpf_testmod_ctx_create(int *err)
223 {
224 	struct bpf_testmod_ctx *ctx;
225 
226 	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
227 	if (!ctx) {
228 		*err = -ENOMEM;
229 		return NULL;
230 	}
231 	refcount_set(&ctx->usage, 1);
232 
233 	return ctx;
234 }
235 
236 static void testmod_free_cb(struct rcu_head *head)
237 {
238 	struct bpf_testmod_ctx *ctx;
239 
240 	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
241 	kfree(ctx);
242 }
243 
244 __bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
245 {
246 	if (!ctx)
247 		return;
248 	if (refcount_dec_and_test(&ctx->usage))
249 		call_rcu(&ctx->rcu, testmod_free_cb);
250 }
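
/* bpf_testmod_ctx_create() is registered below as KF_ACQUIRE | KF_RET_NULL and
 * bpf_testmod_ctx_release() as KF_RELEASE, so a BPF program must release every
 * context it acquires. A minimal sketch (illustrative only):
 *
 *	int err;
 *	struct bpf_testmod_ctx *ctx = bpf_testmod_ctx_create(&err);
 *
 *	if (!ctx)
 *		return err;
 *	bpf_testmod_ctx_release(ctx);
 */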
251 
252 static struct bpf_testmod_ops3 *st_ops3;
253 
254 static int bpf_testmod_test_3(void)
255 {
256 	return 0;
257 }
258 
259 static int bpf_testmod_test_4(void)
260 {
261 	return 0;
262 }
263 
264 static struct bpf_testmod_ops3 __bpf_testmod_ops3 = {
265 	.test_1 = bpf_testmod_test_3,
266 	.test_2 = bpf_testmod_test_4,
267 };
268 
269 static void bpf_testmod_test_struct_ops3(void)
270 {
271 	if (st_ops3)
272 		st_ops3->test_1();
273 }
274 
275 __bpf_kfunc void bpf_testmod_ops3_call_test_1(void)
276 {
277 	st_ops3->test_1();
278 }
279 
280 __bpf_kfunc void bpf_testmod_ops3_call_test_2(void)
281 {
282 	st_ops3->test_2();
283 }
284 
285 struct bpf_testmod_btf_type_tag_1 {
286 	int a;
287 };
288 
289 struct bpf_testmod_btf_type_tag_2 {
290 	struct bpf_testmod_btf_type_tag_1 __user *p;
291 };
292 
293 struct bpf_testmod_btf_type_tag_3 {
294 	struct bpf_testmod_btf_type_tag_1 __percpu *p;
295 };
296 
297 noinline int
298 bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
299 	BTF_TYPE_EMIT(func_proto_typedef);
300 	BTF_TYPE_EMIT(func_proto_typedef_nested1);
301 	BTF_TYPE_EMIT(func_proto_typedef_nested2);
302 	return arg->a;
303 }
304 
305 noinline int
306 bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
307 	return arg->p->a;
308 }
309 
310 noinline int
311 bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
312 	return arg->a;
313 }
314 
315 noinline int
316 bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
317 	return arg->p->a;
318 }
319 
320 noinline int bpf_testmod_loop_test(int n)
321 {
322 	/* Make sum volatile, so smart compilers, such as clang, will not
323 	 * optimize the code by removing the loop.
324 	 */
325 	volatile int sum = 0;
326 	int i;
327 
328 	/* The primary goal of this function is to test LBR. Create a lot of
329 	 * branches in the function so we can catch them easily.
330 	 */
331 	for (i = 0; i < n; i++)
332 		sum += i;
333 	return sum;
334 }
335 
336 __weak noinline struct file *bpf_testmod_return_ptr(int arg)
337 {
338 	static struct file f = {};
339 
340 	switch (arg) {
341 	case 1: return (void *)EINVAL;		/* user addr */
342 	case 2: return (void *)0xcafe4a11;	/* user addr */
343 	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
344 	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
345 	case 5: return (void *)~(1ull << 30);	/* trigger extable */
346 	case 6: return &f;			/* valid addr */
347 	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
348 #ifdef CONFIG_X86_64
349 	case 8: return (void *)VSYSCALL_ADDR;   /* vsyscall page address */
350 #endif
351 	default: return NULL;
352 	}
353 }
354 
355 noinline int bpf_testmod_fentry_test1(int a)
356 {
357 	return a + 1;
358 }
359 
360 noinline int bpf_testmod_fentry_test2(int a, u64 b)
361 {
362 	return a + b;
363 }
364 
365 noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
366 {
367 	return a + b + c;
368 }
369 
370 noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
371 				      void *e, char f, int g)
372 {
373 	return a + (long)b + c + d + (long)e + f + g;
374 }
375 
376 noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
377 				       void *e, char f, int g,
378 				       unsigned int h, long i, __u64 j,
379 				       unsigned long k)
380 {
381 	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
382 }
383 
384 int bpf_testmod_fentry_ok;
385 
386 noinline ssize_t
387 bpf_testmod_test_read(struct file *file, struct kobject *kobj,
388 		      const struct bin_attribute *bin_attr,
389 		      char *buf, loff_t off, size_t len)
390 {
391 	struct bpf_testmod_test_read_ctx ctx = {
392 		.buf = buf,
393 		.off = off,
394 		.len = len,
395 	};
396 	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
397 	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
398 	struct bpf_testmod_struct_arg_3 *struct_arg3;
399 	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
400 	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
401 	int i = 1;
402 
403 	while (bpf_testmod_return_ptr(i))
404 		i++;
405 
406 	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
407 	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
408 	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
409 	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
410 	(void)bpf_testmod_test_struct_arg_5();
411 	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
412 					    (void *)20, struct_arg4);
413 	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
414 					    (void *)20, struct_arg4, 23);
415 	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
416 					    21, 22, struct_arg5, 27);
417 
418 	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
419 
420 	(void)trace_bpf_testmod_test_raw_tp_null_tp(NULL);
421 
422 	bpf_testmod_test_struct_ops3();
423 
424 	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
425 				sizeof(int)), GFP_KERNEL);
426 	if (struct_arg3 != NULL) {
427 		struct_arg3->b[0] = 1;
428 		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
429 		kfree(struct_arg3);
430 	}
431 
432 	/* This is always true. Use the check to make sure the compiler
433 	 * doesn't remove bpf_testmod_loop_test.
434 	 */
435 	if (bpf_testmod_loop_test(101) > 100)
436 		trace_bpf_testmod_test_read(current, &ctx);
437 
438 	trace_bpf_testmod_test_nullable_bare_tp(NULL);
439 
440 	/* Magic number to enable writable tp */
441 	if (len == 64) {
442 		struct bpf_testmod_test_writable_ctx writable = {
443 			.val = 1024,
444 		};
445 		trace_bpf_testmod_test_writable_bare_tp(&writable);
446 		if (writable.early_ret)
447 			return snprintf(buf, len, "%d\n", writable.val);
448 	}
449 
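	/* Sanity-check the fentry test targets against their expected sums
	 * (e.g. bpf_testmod_fentry_test7(): 16 + 17 + 18 + 19 + 20 + 21 + 22 == 133)
	 * before reporting readiness through bpf_testmod_fentry_ok.
	 */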
450 	if (bpf_testmod_fentry_test1(1) != 2 ||
451 	    bpf_testmod_fentry_test2(2, 3) != 5 ||
452 	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
453 	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
454 			21, 22) != 133 ||
455 	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
456 			21, 22, 23, 24, 25, 26) != 231)
457 		goto out;
458 
459 	bpf_testmod_fentry_ok = 1;
460 out:
461 	return -EIO; /* always fail */
462 }
463 EXPORT_SYMBOL(bpf_testmod_test_read);
464 ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
465 
466 noinline ssize_t
467 bpf_testmod_test_write(struct file *file, struct kobject *kobj,
468 		      const struct bin_attribute *bin_attr,
469 		      char *buf, loff_t off, size_t len)
470 {
471 	struct bpf_testmod_test_write_ctx ctx = {
472 		.buf = buf,
473 		.off = off,
474 		.len = len,
475 	};
476 
477 	trace_bpf_testmod_test_write_bare_tp(current, &ctx);
478 
479 	return -EIO; /* always fail */
480 }
481 EXPORT_SYMBOL(bpf_testmod_test_write);
482 ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);
483 
484 noinline int bpf_fentry_shadow_test(int a)
485 {
486 	return a + 2;
487 }
488 EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
489 
490 __bpf_hook_end();
491 
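/* bpf_testmod_init() creates this file under kernel_kobj, so the hooks above
 * can be triggered from user space roughly like this (illustrative):
 *
 *	cat /sys/kernel/bpf_testmod         # runs bpf_testmod_test_read()
 *	echo 1 > /sys/kernel/bpf_testmod    # runs bpf_testmod_test_write()
 *
 * Both handlers intentionally return -EIO; the tracepoints and fentry targets
 * they exercise are what the selftest BPF programs attach to.
 */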
492 static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
493 	.attr = { .name = "bpf_testmod", .mode = 0666, },
494 	.read = bpf_testmod_test_read,
495 	.write = bpf_testmod_test_write,
496 };
497 
498 /* The bpf_testmod_uprobe sysfs attribute is currently enabled for x86_64
499  * only; see the test_uretprobe_regs_change test.
500  */
501 #ifdef __x86_64__
502 
503 static int
504 uprobe_handler(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data)
505 {
506 	regs->cx = 0x87654321feebdaed;
507 	return 0;
508 }
509 
510 static int
511 uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
512 		   struct pt_regs *regs, __u64 *data)
513 
514 {
515 	regs->ax  = 0x12345678deadbeef;
516 	regs->r11 = (u64) -1;
517 	return 0;
518 }
519 
520 struct testmod_uprobe {
521 	struct path path;
522 	struct uprobe *uprobe;
523 	struct uprobe_consumer consumer;
524 };
525 
526 static DEFINE_MUTEX(testmod_uprobe_mutex);
527 
528 static struct testmod_uprobe uprobe = {
529 	.consumer.handler = uprobe_handler,
530 	.consumer.ret_handler = uprobe_ret_handler,
531 };
532 
533 static int testmod_register_uprobe(loff_t offset)
534 {
535 	int err = -EBUSY;
536 
537 	if (uprobe.uprobe)
538 		return -EBUSY;
539 
540 	mutex_lock(&testmod_uprobe_mutex);
541 
542 	if (uprobe.uprobe)
543 		goto out;
544 
545 	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
546 	if (err)
547 		goto out;
548 
549 	uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry),
550 					offset, 0, &uprobe.consumer);
551 	if (IS_ERR(uprobe.uprobe)) {
552 		err = PTR_ERR(uprobe.uprobe);
553 		path_put(&uprobe.path);
554 		uprobe.uprobe = NULL;
555 	}
556 out:
557 	mutex_unlock(&testmod_uprobe_mutex);
558 	return err;
559 }
560 
561 static void testmod_unregister_uprobe(void)
562 {
563 	mutex_lock(&testmod_uprobe_mutex);
564 
565 	if (uprobe.uprobe) {
566 		uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
567 		uprobe_unregister_sync();
568 		path_put(&uprobe.path);
569 		uprobe.uprobe = NULL;
570 	}
571 
572 	mutex_unlock(&testmod_uprobe_mutex);
573 }
574 
575 static ssize_t
576 bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
577 			 const struct bin_attribute *bin_attr,
578 			 char *buf, loff_t off, size_t len)
579 {
580 	unsigned long offset = 0;
581 	int err = 0;
582 
583 	if (kstrtoul(buf, 0, &offset))
584 		return -EINVAL;
585 
586 	if (offset)
587 		err = testmod_register_uprobe(offset);
588 	else
589 		testmod_unregister_uprobe();
590 
591 	return err ?: strlen(buf);
592 }
593 
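/* Writing a non-zero offset registers a uprobe on /proc/self/exe at that
 * offset; writing 0 removes it again. Roughly, from a test (illustrative):
 *
 *	echo $OFFSET > /sys/kernel/bpf_testmod_uprobe
 *	echo 0 > /sys/kernel/bpf_testmod_uprobe
 */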
594 static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
595 	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
596 	.write = bpf_testmod_uprobe_write,
597 };
598 
599 static int register_bpf_testmod_uprobe(void)
600 {
601 	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
602 }
603 
604 static void unregister_bpf_testmod_uprobe(void)
605 {
606 	testmod_unregister_uprobe();
607 	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
608 }
609 
610 #else
611 static int register_bpf_testmod_uprobe(void)
612 {
613 	return 0;
614 }
615 
616 static void unregister_bpf_testmod_uprobe(void) { }
617 #endif
618 
619 BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
620 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
621 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
622 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
623 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
624 BTF_ID_FLAGS(func, bpf_kfunc_common_test)
625 BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
626 BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
627 BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
628 BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE)
629 BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS)
630 BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS)
631 BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS)
632 BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
633 BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
634 BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
635 BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1)
636 BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2)
637 BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)
638 
639 BTF_ID_LIST(bpf_testmod_dtor_ids)
640 BTF_ID(struct, bpf_testmod_ctx)
641 BTF_ID(func, bpf_testmod_ctx_release)
642 
643 static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
644 	.owner = THIS_MODULE,
645 	.set   = &bpf_testmod_common_kfunc_ids,
646 };
647 
648 __bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
649 {
650 	return a + b + c + d;
651 }
652 
653 __bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
654 {
655 	return a + b;
656 }
657 
658 __bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
659 {
660 	return sk;
661 }
662 
663 __bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
664 {
665 	/* Provoke the compiler to assume that the caller has sign-extended a,
666 	 * b and c on platforms where this is required (e.g. s390x).
667 	 */
668 	return (long)a + (long)b + (long)c + d;
669 }
670 
671 static struct prog_test_ref_kfunc prog_test_struct = {
672 	.a = 42,
673 	.b = 108,
674 	.next = &prog_test_struct,
675 	.cnt = REFCOUNT_INIT(1),
676 };
677 
678 __bpf_kfunc struct prog_test_ref_kfunc *
679 bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
680 {
681 	refcount_inc(&prog_test_struct.cnt);
682 	return &prog_test_struct;
683 }
684 
685 __bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
686 {
687 	WARN_ON_ONCE(1);
688 }
689 
690 __bpf_kfunc struct prog_test_member *
691 bpf_kfunc_call_memb_acquire(void)
692 {
693 	WARN_ON_ONCE(1);
694 	return NULL;
695 }
696 
697 __bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
698 {
699 	WARN_ON_ONCE(1);
700 }
701 
702 static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
703 {
704 	if (size > 2 * sizeof(int))
705 		return NULL;
706 
707 	return (int *)p;
708 }
709 
710 __bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
711 						  const int rdwr_buf_size)
712 {
713 	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
714 }
715 
716 __bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
717 						    const int rdonly_buf_size)
718 {
719 	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
720 }
721 
722 /* The next 2 ones can't really be used for testing except to ensure
723  * that the verifier rejects the call.
724  * Acquire functions must return struct pointers, so these ones are
725  * failing.
726  */
727 __bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
728 						    const int rdonly_buf_size)
729 {
730 	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
731 }
732 
733 __bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
734 {
735 }
736 
737 __bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
738 {
739 }
740 
741 __bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
742 {
743 }
744 
745 __bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
746 {
747 }
748 
749 __bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
750 {
751 }
752 
753 __bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
754 {
755 }
756 
757 __bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
758 {
759 }
760 
761 __bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
762 {
763 }
764 
765 __bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
766 {
767 }
768 
769 __bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
770 {
771 }
772 
773 __bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
774 {
775 	/* p != NULL, but p->cnt could be 0 */
776 }
777 
778 __bpf_kfunc void bpf_kfunc_call_test_destructive(void)
779 {
780 }
781 
782 __bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
783 {
784 	return arg;
785 }
786 
787 __bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
788 {
789 }
790 
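/* The kfuncs below operate on a single module-global kernel socket guarded by
 * sock_lock, letting sleepable BPF programs exercise socket creation, connect,
 * bind, listen, sendmsg and getsockname/getpeername paths.
 */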
791 __bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
792 {
793 	int proto;
794 	int err;
795 
796 	mutex_lock(&sock_lock);
797 
798 	if (sock) {
799 		pr_err("%s called without releasing old sock\n", __func__);
800 		err = -EPERM;
801 		goto out;
802 	}
803 
804 	switch (args->af) {
805 	case AF_INET:
806 	case AF_INET6:
807 		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
808 		break;
809 	case AF_UNIX:
810 		proto = PF_UNIX;
811 		break;
812 	default:
813 		pr_err("invalid address family %d\n", args->af);
814 		err = -EINVAL;
815 		goto out;
816 	}
817 
818 	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
819 			       proto, &sock);
820 
821 	if (!err)
822 		/* Set timeout for call to kernel_connect() to prevent it from hanging,
823 		 * and consider the connection attempt failed if it returns
824 		 * -EINPROGRESS.
825 		 */
826 		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
827 out:
828 	mutex_unlock(&sock_lock);
829 
830 	return err;
831 }
832 
833 __bpf_kfunc void bpf_kfunc_close_sock(void)
834 {
835 	mutex_lock(&sock_lock);
836 
837 	if (sock) {
838 		sock_release(sock);
839 		sock = NULL;
840 	}
841 
842 	mutex_unlock(&sock_lock);
843 }
844 
845 __bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
846 {
847 	int err;
848 
849 	if (args->addrlen > sizeof(args->addr))
850 		return -EINVAL;
851 
852 	mutex_lock(&sock_lock);
853 
854 	if (!sock) {
855 		pr_err("%s called without initializing sock\n", __func__);
856 		err = -EPERM;
857 		goto out;
858 	}
859 
860 	err = kernel_connect(sock, (struct sockaddr *)&args->addr,
861 			     args->addrlen, 0);
862 out:
863 	mutex_unlock(&sock_lock);
864 
865 	return err;
866 }
867 
868 __bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
869 {
870 	int err;
871 
872 	if (args->addrlen > sizeof(args->addr))
873 		return -EINVAL;
874 
875 	mutex_lock(&sock_lock);
876 
877 	if (!sock) {
878 		pr_err("%s called without initializing sock\n", __func__);
879 		err = -EPERM;
880 		goto out;
881 	}
882 
883 	err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
884 out:
885 	mutex_unlock(&sock_lock);
886 
887 	return err;
888 }
889 
890 __bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
891 {
892 	int err;
893 
894 	mutex_lock(&sock_lock);
895 
896 	if (!sock) {
897 		pr_err("%s called without initializing sock\n", __func__);
898 		err = -EPERM;
899 		goto out;
900 	}
901 
902 	err = kernel_listen(sock, 128);
903 out:
904 	mutex_unlock(&sock_lock);
905 
906 	return err;
907 }
908 
909 __bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
910 {
911 	struct msghdr msg = {
912 		.msg_name	= &args->addr.addr,
913 		.msg_namelen	= args->addr.addrlen,
914 	};
915 	struct kvec iov;
916 	int err;
917 
918 	if (args->addr.addrlen > sizeof(args->addr.addr) ||
919 	    args->msglen > sizeof(args->msg))
920 		return -EINVAL;
921 
922 	iov.iov_base = args->msg;
923 	iov.iov_len  = args->msglen;
924 
925 	mutex_lock(&sock_lock);
926 
927 	if (!sock) {
928 		pr_err("%s called without initializing sock\n", __func__);
929 		err = -EPERM;
930 		goto out;
931 	}
932 
933 	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
934 	args->addr.addrlen = msg.msg_namelen;
935 out:
936 	mutex_unlock(&sock_lock);
937 
938 	return err;
939 }
940 
941 __bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
942 {
943 	struct msghdr msg = {
944 		.msg_name	= &args->addr.addr,
945 		.msg_namelen	= args->addr.addrlen,
946 	};
947 	struct kvec iov;
948 	int err;
949 
950 	if (args->addr.addrlen > sizeof(args->addr.addr) ||
951 	    args->msglen > sizeof(args->msg))
952 		return -EINVAL;
953 
954 	iov.iov_base = args->msg;
955 	iov.iov_len  = args->msglen;
956 
957 	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
958 	mutex_lock(&sock_lock);
959 
960 	if (!sock) {
961 		pr_err("%s called without initializing sock\n", __func__);
962 		err = -EPERM;
963 		goto out;
964 	}
965 
966 	err = sock_sendmsg(sock, &msg);
967 	args->addr.addrlen = msg.msg_namelen;
968 out:
969 	mutex_unlock(&sock_lock);
970 
971 	return err;
972 }
973 
974 __bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
975 {
976 	int err;
977 
978 	mutex_lock(&sock_lock);
979 
980 	if (!sock) {
981 		pr_err("%s called without initializing sock\n", __func__);
982 		err = -EPERM;
983 		goto out;
984 	}
985 
986 	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
987 	if (err < 0)
988 		goto out;
989 
990 	args->addrlen = err;
991 	err = 0;
992 out:
993 	mutex_unlock(&sock_lock);
994 
995 	return err;
996 }
997 
998 __bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
999 {
1000 	int err;
1001 
1002 	mutex_lock(&sock_lock);
1003 
1004 	if (!sock) {
1005 		pr_err("%s called without initializing sock\n", __func__);
1006 		err = -EPERM;
1007 		goto out;
1008 	}
1009 
1010 	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
1011 	if (err < 0)
1012 		goto out;
1013 
1014 	args->addrlen = err;
1015 	err = 0;
1016 out:
1017 	mutex_unlock(&sock_lock);
1018 
1019 	return err;
1020 }
1021 
1022 static DEFINE_MUTEX(st_ops_mutex);
1023 static struct bpf_testmod_st_ops *st_ops;
1024 
1025 __bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
1026 {
1027 	int ret = -1;
1028 
1029 	mutex_lock(&st_ops_mutex);
1030 	if (st_ops && st_ops->test_prologue)
1031 		ret = st_ops->test_prologue(args);
1032 	mutex_unlock(&st_ops_mutex);
1033 
1034 	return ret;
1035 }
1036 
1037 __bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
1038 {
1039 	int ret = -1;
1040 
1041 	mutex_lock(&st_ops_mutex);
1042 	if (st_ops && st_ops->test_epilogue)
1043 		ret = st_ops->test_epilogue(args);
1044 	mutex_unlock(&st_ops_mutex);
1045 
1046 	return ret;
1047 }
1048 
1049 __bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
1050 {
1051 	int ret = -1;
1052 
1053 	mutex_lock(&st_ops_mutex);
1054 	if (st_ops && st_ops->test_pro_epilogue)
1055 		ret = st_ops->test_pro_epilogue(args);
1056 	mutex_unlock(&st_ops_mutex);
1057 
1058 	return ret;
1059 }
1060 
1061 __bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
1062 {
1063 	args->a += 10;
1064 	return args->a;
1065 }
1066 
1067 BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
1068 BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
1069 BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
1070 BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
1071 BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
1072 BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
1073 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
1074 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
1075 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
1076 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
1077 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
1078 BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
1079 BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
1080 BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
1081 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
1082 BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
1083 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
1084 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
1085 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
1086 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
1087 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
1088 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
1089 BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
1090 BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
1091 BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
1092 BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
1093 BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
1094 BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
1095 BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
1096 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
1097 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
1098 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
1099 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
1100 BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
1101 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
1102 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
1103 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
1104 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
1105 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
1106 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
1107 BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
1108 
1109 static int bpf_testmod_ops_init(struct btf *btf)
1110 {
1111 	return 0;
1112 }
1113 
1114 static bool bpf_testmod_ops_is_valid_access(int off, int size,
1115 					    enum bpf_access_type type,
1116 					    const struct bpf_prog *prog,
1117 					    struct bpf_insn_access_aux *info)
1118 {
1119 	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
1120 }
1121 
1122 static int bpf_testmod_ops_init_member(const struct btf_type *t,
1123 				       const struct btf_member *member,
1124 				       void *kdata, const void *udata)
1125 {
1126 	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
1127 		/* For the data field, this function has to copy it and return
1128 		 * 1 to indicate that the data has been handled by the
1129 		 * struct_ops type, or the verifier will reject the map if
1130 		 * the value of the data field is not zero.
1131 		 */
1132 		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
1133 		return 1;
1134 	}
1135 	return 0;
1136 }
1137 
1138 static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
1139 	.owner = THIS_MODULE,
1140 	.set   = &bpf_testmod_check_kfunc_ids,
1141 };
1142 
1143 static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
1144 	.get_func_proto	 = bpf_base_func_proto,
1145 	.is_valid_access = bpf_testmod_ops_is_valid_access,
1146 };
1147 
1148 static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = {
1149 	.is_valid_access = bpf_testmod_ops_is_valid_access,
1150 };
1151 
1152 static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
1153 {
1154 	struct bpf_testmod_ops *ops = kdata;
1155 
1156 	if (ops->test_1)
1157 		ops->test_1();
1158 	/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
1159 	 * initialized, so we need to check for NULL.
1160 	 */
1161 	if (ops->test_2)
1162 		ops->test_2(4, ops->data);
1163 
1164 	return 0;
1165 }
1166 
1167 static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
1168 {
1169 }
1170 
1171 static int bpf_testmod_test_1(void)
1172 {
1173 	return 0;
1174 }
1175 
1176 static void bpf_testmod_test_2(int a, int b)
1177 {
1178 }
1179 
1180 static int bpf_testmod_tramp(int value)
1181 {
1182 	return 0;
1183 }
1184 
1185 static int bpf_testmod_ops__test_maybe_null(int dummy,
1186 					    struct task_struct *task__nullable)
1187 {
1188 	return 0;
1189 }
1190 
1191 static int bpf_testmod_ops__test_refcounted(int dummy,
1192 					    struct task_struct *task__ref)
1193 {
1194 	return 0;
1195 }
1196 
1197 static struct task_struct *
1198 bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref,
1199 				      struct cgroup *cgrp)
1200 {
1201 	return NULL;
1202 }
1203 
1204 static struct bpf_testmod_ops __bpf_testmod_ops = {
1205 	.test_1 = bpf_testmod_test_1,
1206 	.test_2 = bpf_testmod_test_2,
1207 	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
1208 	.test_refcounted = bpf_testmod_ops__test_refcounted,
1209 	.test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr,
1210 };
1211 
1212 struct bpf_struct_ops bpf_bpf_testmod_ops = {
1213 	.verifier_ops = &bpf_testmod_verifier_ops,
1214 	.init = bpf_testmod_ops_init,
1215 	.init_member = bpf_testmod_ops_init_member,
1216 	.reg = bpf_dummy_reg,
1217 	.unreg = bpf_dummy_unreg,
1218 	.cfi_stubs = &__bpf_testmod_ops,
1219 	.name = "bpf_testmod_ops",
1220 	.owner = THIS_MODULE,
1221 };
1222 
1223 static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
1224 {
1225 	struct bpf_testmod_ops2 *ops = kdata;
1226 
1227 	ops->test_1();
1228 	return 0;
1229 }
1230 
1231 static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
1232 	.test_1 = bpf_testmod_test_1,
1233 };
1234 
1235 struct bpf_struct_ops bpf_testmod_ops2 = {
1236 	.verifier_ops = &bpf_testmod_verifier_ops,
1237 	.init = bpf_testmod_ops_init,
1238 	.init_member = bpf_testmod_ops_init_member,
1239 	.reg = bpf_dummy_reg2,
1240 	.unreg = bpf_dummy_unreg,
1241 	.cfi_stubs = &__bpf_testmod_ops2,
1242 	.name = "bpf_testmod_ops2",
1243 	.owner = THIS_MODULE,
1244 };
1245 
1246 static int st_ops3_reg(void *kdata, struct bpf_link *link)
1247 {
1248 	int err = 0;
1249 
1250 	mutex_lock(&st_ops_mutex);
1251 	if (st_ops3) {
1252 		pr_err("st_ops3 has already been registered\n");
1253 		err = -EEXIST;
1254 		goto unlock;
1255 	}
1256 	st_ops3 = kdata;
1257 
1258 unlock:
1259 	mutex_unlock(&st_ops_mutex);
1260 	return err;
1261 }
1262 
1263 static void st_ops3_unreg(void *kdata, struct bpf_link *link)
1264 {
1265 	mutex_lock(&st_ops_mutex);
1266 	st_ops3 = NULL;
1267 	mutex_unlock(&st_ops_mutex);
1268 }
1269 
1270 static void test_1_recursion_detected(struct bpf_prog *prog)
1271 {
1272 	struct bpf_prog_stats *stats;
1273 
1274 	stats = this_cpu_ptr(prog->stats);
1275 	printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu\n",
1276 	       u64_stats_read(&stats->misses));
1277 }
1278 
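/* Programs attached to bpf_testmod_ops3::test_1 request a private stack and
 * get the recursion-detection callback above installed; other members keep
 * the defaults.
 */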
1279 static int st_ops3_check_member(const struct btf_type *t,
1280 				const struct btf_member *member,
1281 				const struct bpf_prog *prog)
1282 {
1283 	u32 moff = __btf_member_bit_offset(t, member) / 8;
1284 
1285 	switch (moff) {
1286 	case offsetof(struct bpf_testmod_ops3, test_1):
1287 		prog->aux->priv_stack_requested = true;
1288 		prog->aux->recursion_detected = test_1_recursion_detected;
1289 		fallthrough;
1290 	default:
1291 		break;
1292 	}
1293 	return 0;
1294 }
1295 
1296 struct bpf_struct_ops bpf_testmod_ops3 = {
1297 	.verifier_ops = &bpf_testmod_verifier_ops3,
1298 	.init = bpf_testmod_ops_init,
1299 	.init_member = bpf_testmod_ops_init_member,
1300 	.reg = st_ops3_reg,
1301 	.unreg = st_ops3_unreg,
1302 	.check_member = st_ops3_check_member,
1303 	.cfi_stubs = &__bpf_testmod_ops3,
1304 	.name = "bpf_testmod_ops3",
1305 	.owner = THIS_MODULE,
1306 };
1307 
1308 static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
1309 {
1310 	return 0;
1311 }
1312 
1313 static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
1314 {
1315 	return 0;
1316 }
1317 
1318 static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
1319 {
1320 	return 0;
1321 }
1322 
1323 static int bpf_cgroup_from_id_id;
1324 static int bpf_cgroup_release_id;
1325 
1326 static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct_write,
1327 					  const struct bpf_prog *prog)
1328 {
1329 	struct bpf_insn *insn = insn_buf;
1330 
1331 	/* r8 = r1; // r8 will be "u64 *ctx".
1332 	 * r1 = 0;
1333 	 * r0 = bpf_cgroup_from_id(r1);
1334 	 * if r0 != 0 goto pc+5;
1335 	 * r6 = r8[0]; // r6 will be "struct st_ops_args *args".
1336 	 * r7 = r6->a;
1337 	 * r7 += 1000;
1338 	 * r6->a = r7;
1339 	 * goto pc+2;
1340 	 * r1 = r0;
1341 	 * bpf_cgroup_release(r1);
1342 	 * r1 = r8;
1343 	 */
1344 	*insn++ = BPF_MOV64_REG(BPF_REG_8, BPF_REG_1);
1345 	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
1346 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
1347 	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 5);
1348 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_8, 0);
1349 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
1350 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
1351 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
1352 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
1353 	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
1354 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
1355 	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8);
1356 	*insn++ = prog->insnsi[0];
1357 
1358 	return insn - insn_buf;
1359 }
1360 
1361 static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
1362 					  s16 ctx_stack_off)
1363 {
1364 	struct bpf_insn *insn = insn_buf;
1365 
1366 	/* r1 = 0;
1367 	 * r6 = 0;
1368 	 * r0 = bpf_cgroup_from_id(r1);
1369 	 * if r0 != 0 goto pc+6;
1370 	 * r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
1371 	 * r1 = r1[0]; // r1 will be "struct st_ops_args *args"
1372 	 * r6 = r1->a;
1373 	 * r6 += 10000;
1374 	 * r1->a = r6;
1375 	 * goto pc+2
1376 	 * r1 = r0;
1377 	 * bpf_cgroup_release(r1);
1378 	 * r0 = r6;
1379 	 * r0 *= 2;
1380 	 * BPF_EXIT;
1381 	 */
1382 	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
1383 	*insn++ = BPF_MOV64_IMM(BPF_REG_6, 0);
1384 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
1385 	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 6);
1386 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
1387 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
1388 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
1389 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
1390 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
1391 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
1392 	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
1393 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
1394 	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
1395 	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
1396 	*insn++ = BPF_EXIT_INSN();
1397 
1398 	return insn - insn_buf;
1399 }
1400 
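/* struct_ops programs whose name starts with this prefix get the kfunc-calling
 * prologue/epilogue generators above; all other programs get the plain
 * variants below.
 */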
1401 #define KFUNC_PRO_EPI_PREFIX "test_kfunc_"
1402 static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
1403 			       const struct bpf_prog *prog)
1404 {
1405 	struct bpf_insn *insn = insn_buf;
1406 
1407 	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
1408 	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
1409 		return 0;
1410 
1411 	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
1412 		return st_ops_gen_prologue_with_kfunc(insn_buf, direct_write, prog);
1413 
1414 	/* r6 = r1[0]; // r6 will be "struct st_ops_args *args". r1 is "u64 *ctx".
1415 	 * r7 = r6->a;
1416 	 * r7 += 1000;
1417 	 * r6->a = r7;
1418 	 */
1419 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
1420 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
1421 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
1422 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
1423 	*insn++ = prog->insnsi[0];
1424 
1425 	return insn - insn_buf;
1426 }
1427 
1428 static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
1429 			       s16 ctx_stack_off)
1430 {
1431 	struct bpf_insn *insn = insn_buf;
1432 
1433 	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
1434 	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
1435 		return 0;
1436 
1437 	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
1438 		return st_ops_gen_epilogue_with_kfunc(insn_buf, prog, ctx_stack_off);
1439 
1440 	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
1441 	 * r1 = r1[0]; // r1 will be "struct st_ops_args *args"
1442 	 * r6 = r1->a;
1443 	 * r6 += 10000;
1444 	 * r1->a = r6;
1445 	 * r0 = r6;
1446 	 * r0 *= 2;
1447 	 * BPF_EXIT;
1448 	 */
1449 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
1450 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
1451 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
1452 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
1453 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
1454 	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
1455 	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
1456 	*insn++ = BPF_EXIT_INSN();
1457 
1458 	return insn - insn_buf;
1459 }
1460 
1461 static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
1462 				    const struct bpf_reg_state *reg,
1463 				    int off, int size)
1464 {
1465 	if (off < 0 || off + size > sizeof(struct st_ops_args))
1466 		return -EACCES;
1467 	return 0;
1468 }
1469 
1470 static const struct bpf_verifier_ops st_ops_verifier_ops = {
1471 	.is_valid_access = bpf_testmod_ops_is_valid_access,
1472 	.btf_struct_access = st_ops_btf_struct_access,
1473 	.gen_prologue = st_ops_gen_prologue,
1474 	.gen_epilogue = st_ops_gen_epilogue,
1475 	.get_func_proto = bpf_base_func_proto,
1476 };
1477 
1478 static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
1479 	.test_prologue = bpf_test_mod_st_ops__test_prologue,
1480 	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
1481 	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
1482 };
1483 
1484 static int st_ops_reg(void *kdata, struct bpf_link *link)
1485 {
1486 	int err = 0;
1487 
1488 	mutex_lock(&st_ops_mutex);
1489 	if (st_ops) {
1490 		pr_err("st_ops has already been registered\n");
1491 		err = -EEXIST;
1492 		goto unlock;
1493 	}
1494 	st_ops = kdata;
1495 
1496 unlock:
1497 	mutex_unlock(&st_ops_mutex);
1498 	return err;
1499 }
1500 
1501 static void st_ops_unreg(void *kdata, struct bpf_link *link)
1502 {
1503 	mutex_lock(&st_ops_mutex);
1504 	st_ops = NULL;
1505 	mutex_unlock(&st_ops_mutex);
1506 }
1507 
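/* Resolve the BTF ids of bpf_cgroup_from_id() and bpf_cgroup_release() so the
 * *_with_kfunc prologue/epilogue generators above can emit BPF_CALL_KFUNC
 * instructions that call them.
 */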
1508 static int st_ops_init(struct btf *btf)
1509 {
1510 	struct btf *kfunc_btf;
1511 
1512 	bpf_cgroup_from_id_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf);
1513 	bpf_cgroup_release_id = bpf_find_btf_id("bpf_cgroup_release", BTF_KIND_FUNC, &kfunc_btf);
1514 	if (bpf_cgroup_from_id_id < 0 || bpf_cgroup_release_id < 0)
1515 		return -EINVAL;
1516 
1517 	return 0;
1518 }
1519 
1520 static int st_ops_init_member(const struct btf_type *t,
1521 			      const struct btf_member *member,
1522 			      void *kdata, const void *udata)
1523 {
1524 	return 0;
1525 }
1526 
1527 static struct bpf_struct_ops testmod_st_ops = {
1528 	.verifier_ops = &st_ops_verifier_ops,
1529 	.init = st_ops_init,
1530 	.init_member = st_ops_init_member,
1531 	.reg = st_ops_reg,
1532 	.unreg = st_ops_unreg,
1533 	.cfi_stubs = &st_ops_cfi_stubs,
1534 	.name = "bpf_testmod_st_ops",
1535 	.owner = THIS_MODULE,
1536 };
1537 
1538 extern int bpf_fentry_test1(int a);
1539 
1540 static int bpf_testmod_init(void)
1541 {
1542 	const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
1543 		{
1544 			.btf_id		= bpf_testmod_dtor_ids[0],
1545 			.kfunc_btf_id	= bpf_testmod_dtor_ids[1]
1546 		},
1547 	};
1548 	void **tramp;
1549 	int ret;
1550 
1551 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
1552 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
1553 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
1554 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
1555 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
1556 	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
1557 	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
1558 	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3);
1559 	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
1560 	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
1561 						 ARRAY_SIZE(bpf_testmod_dtors),
1562 						 THIS_MODULE);
1563 	if (ret < 0)
1564 		return ret;
1565 	if (bpf_fentry_test1(0) < 0)
1566 		return -EINVAL;
1567 	sock = NULL;
1568 	mutex_init(&sock_lock);
1569 	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
1570 	if (ret < 0)
1571 		return ret;
1572 	ret = register_bpf_testmod_uprobe();
1573 	if (ret < 0)
1574 		return ret;
1575 
1576 	/* Ensure nothing is between tramp_1..tramp_40 */
1577 	BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) !=
1578 		     offsetofend(struct bpf_testmod_ops, tramp_40));
1579 	tramp = (void **)&__bpf_testmod_ops.tramp_1;
1580 	while (tramp <= (void **)&__bpf_testmod_ops.tramp_40)
1581 		*tramp++ = bpf_testmod_tramp;
1582 
1583 	return 0;
1584 }
1585 
1586 static void bpf_testmod_exit(void)
1587 {
1588 	/* Need to wait for all references to be dropped because
1589 	 * bpf_kfunc_call_test_release(), which currently resides in the kernel,
1590 	 * can be called after bpf_testmod is unloaded. Once the release function
1591 	 * is moved into the module, this wait can be removed.
1592 	 */
1593 	while (refcount_read(&prog_test_struct.cnt) > 1)
1594 		msleep(20);
1595 
1596 	bpf_kfunc_close_sock();
1597 	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
1598 	unregister_bpf_testmod_uprobe();
1599 }
1600 
1601 module_init(bpf_testmod_init);
1602 module_exit(bpf_testmod_exit);
1603 
1604 MODULE_AUTHOR("Andrii Nakryiko");
1605 MODULE_DESCRIPTION("BPF selftests module");
1606 MODULE_LICENSE("Dual BSD/GPL");
1607