xref: /linux/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c (revision ae28ed4578e6d5a481e39c5a9827f27048661fdd)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <linux/bpf.h>
4 #include <linux/btf.h>
5 #include <linux/btf_ids.h>
6 #include <linux/delay.h>
7 #include <linux/error-injection.h>
8 #include <linux/init.h>
9 #include <linux/module.h>
10 #include <linux/percpu-defs.h>
11 #include <linux/sysfs.h>
12 #include <linux/tracepoint.h>
13 #include <linux/net.h>
14 #include <linux/socket.h>
15 #include <linux/nsproxy.h>
16 #include <linux/inet.h>
17 #include <linux/in.h>
18 #include <linux/in6.h>
19 #include <linux/un.h>
20 #include <linux/filter.h>
21 #include <net/sock.h>
22 #include <linux/namei.h>
23 #include "bpf_testmod.h"
24 #include "bpf_testmod_kfunc.h"
25 
26 #define CREATE_TRACE_POINTS
27 #include "bpf_testmod-events.h"
28 
29 #define CONNECT_TIMEOUT_SEC 1
30 
31 typedef int (*func_proto_typedef)(long);
32 typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
33 typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);
34 
35 DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
36 long bpf_testmod_test_struct_arg_result;
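/* Kernel socket created by bpf_kfunc_init_sock() and shared by the socket
 * kfuncs below; all access to it is serialized by sock_lock.
 */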
37 static DEFINE_MUTEX(sock_lock);
38 static struct socket *sock;
39 
40 struct bpf_testmod_struct_arg_1 {
41 	int a;
42 };
43 struct bpf_testmod_struct_arg_2 {
44 	long a;
45 	long b;
46 };
47 
48 struct bpf_testmod_struct_arg_3 {
49 	int a;
50 	int b[];
51 };
52 
53 struct bpf_testmod_struct_arg_4 {
54 	u64 a;
55 	int b;
56 };
57 
58 struct bpf_testmod_struct_arg_5 {
59 	char a;
60 	short b;
61 	int c;
62 	long d;
63 };
64 
65 union bpf_testmod_union_arg_1 {
66 	char a;
67 	short b;
68 	struct bpf_testmod_struct_arg_1 arg;
69 };
70 
71 union bpf_testmod_union_arg_2 {
72 	int a;
73 	long b;
74 	struct bpf_testmod_struct_arg_2 arg;
75 };
76 
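/* The functions between __bpf_hook_start() and __bpf_hook_end() are kept
 * global (noinline, no prototypes) so that their BTF is emitted for
 * bpf_testmod.ko and the BPF selftests can attach tracing programs to them.
 */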
77 __bpf_hook_start();
78 
79 noinline int
80 bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
81 	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
82 	return bpf_testmod_test_struct_arg_result;
83 }
84 
85 noinline int
86 bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
87 	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
88 	return bpf_testmod_test_struct_arg_result;
89 }
90 
91 noinline int
92 bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
93 	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
94 	return bpf_testmod_test_struct_arg_result;
95 }
96 
97 noinline int
98 bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
99 			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
100 	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
101 	return bpf_testmod_test_struct_arg_result;
102 }
103 
104 noinline int
105 bpf_testmod_test_struct_arg_5(void) {
106 	bpf_testmod_test_struct_arg_result = 1;
107 	return bpf_testmod_test_struct_arg_result;
108 }
109 
110 noinline int
111 bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
112 	bpf_testmod_test_struct_arg_result = a->b[0];
113 	return bpf_testmod_test_struct_arg_result;
114 }
115 
116 noinline int
117 bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
118 			      struct bpf_testmod_struct_arg_4 f)
119 {
120 	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
121 		(long)e + f.a + f.b;
122 	return bpf_testmod_test_struct_arg_result;
123 }
124 
125 noinline int
126 bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
127 			      struct bpf_testmod_struct_arg_4 f, int g)
128 {
129 	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
130 		(long)e + f.a + f.b + g;
131 	return bpf_testmod_test_struct_arg_result;
132 }
133 
134 noinline int
135 bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
136 			      short g, struct bpf_testmod_struct_arg_5 h, long i)
137 {
138 	bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
139 		f + g + h.a + h.b + h.c + h.d + i;
140 	return bpf_testmod_test_struct_arg_result;
141 }
142 
143 noinline int
144 bpf_testmod_test_union_arg_1(union bpf_testmod_union_arg_1 a, int b, int c)
145 {
146 	bpf_testmod_test_struct_arg_result = a.arg.a + b + c;
147 	return bpf_testmod_test_struct_arg_result;
148 }
149 
150 noinline int
151 bpf_testmod_test_union_arg_2(int a, union bpf_testmod_union_arg_2 b)
152 {
153 	bpf_testmod_test_struct_arg_result = a + b.arg.a + b.arg.b;
154 	return bpf_testmod_test_struct_arg_result;
155 }
156 
157 noinline int
158 bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
159 	bpf_testmod_test_struct_arg_result = a->a;
160 	return bpf_testmod_test_struct_arg_result;
161 }
162 
163 __weak noinline void bpf_testmod_looooooooooooooooooooooooooooooong_name(void)
164 {
165 }
166 
167 __bpf_kfunc void
168 bpf_testmod_test_mod_kfunc(int i)
169 {
170 	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
171 }
172 
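/* Open-coded iterator kfuncs: registered below with KF_ITER_NEW,
 * KF_ITER_NEXT | KF_RET_NULL and KF_ITER_DESTROY, the iterator simply
 * yields the same value cnt times.
 */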
173 __bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
174 {
175 	it->cnt = cnt;
176 
177 	if (cnt < 0)
178 		return -EINVAL;
179 
180 	it->value = value;
181 
182 	return 0;
183 }
184 
185 __bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it)
186 {
187 	if (it->cnt <= 0)
188 		return NULL;
189 
190 	it->cnt--;
191 
192 	return &it->value;
193 }
194 
195 __bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq* it__iter)
196 {
197 	if (it__iter->cnt < 0)
198 		return 0;
199 
200 	return val + it__iter->value;
201 }
202 
203 __bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
204 {
205 	it->cnt = 0;
206 }
207 
208 __bpf_kfunc void bpf_kfunc_common_test(void)
209 {
210 }
211 
212 __bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
213 				       struct bpf_dynptr *ptr__nullable)
214 {
215 }
216 
217 __bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
218 {
219 	return NULL;
220 }
221 
222 __bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
223 {
224 	return NULL;
225 }
226 
227 __bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
228 {
229 }
230 
231 __bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
232 {
233 }
234 
235 __bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
236 {
237 }
238 
239 __bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
240 {
241 }
242 
243 __bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
244 {
245 }
246 
247 __bpf_kfunc struct task_struct *bpf_kfunc_ret_rcu_test(void)
248 {
249 	return NULL;
250 }
251 
252 __bpf_kfunc int *bpf_kfunc_ret_rcu_test_nostruct(int rdonly_buf_size)
253 {
254 	return NULL;
255 }
256 
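/* bpf_testmod_ctx is a refcounted object: the create kfunc is KF_ACQUIRE,
 * the release kfunc is KF_RELEASE, and the last reference is freed after an
 * RCU grace period via testmod_free_cb().
 */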
257 __bpf_kfunc struct bpf_testmod_ctx *
258 bpf_testmod_ctx_create(int *err)
259 {
260 	struct bpf_testmod_ctx *ctx;
261 
262 	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
263 	if (!ctx) {
264 		*err = -ENOMEM;
265 		return NULL;
266 	}
267 	refcount_set(&ctx->usage, 1);
268 
269 	return ctx;
270 }
271 
272 static void testmod_free_cb(struct rcu_head *head)
273 {
274 	struct bpf_testmod_ctx *ctx;
275 
276 	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
277 	kfree(ctx);
278 }
279 
280 __bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
281 {
282 	if (!ctx)
283 		return;
284 	if (refcount_dec_and_test(&ctx->usage))
285 		call_rcu(&ctx->rcu, testmod_free_cb);
286 }
287 
288 static struct bpf_testmod_ops3 *st_ops3;
289 
290 static int bpf_testmod_test_3(void)
291 {
292 	return 0;
293 }
294 
295 static int bpf_testmod_test_4(void)
296 {
297 	return 0;
298 }
299 
300 static struct bpf_testmod_ops3 __bpf_testmod_ops3 = {
301 	.test_1 = bpf_testmod_test_3,
302 	.test_2 = bpf_testmod_test_4,
303 };
304 
305 static void bpf_testmod_test_struct_ops3(void)
306 {
307 	if (st_ops3)
308 		st_ops3->test_1();
309 }
310 
311 __bpf_kfunc void bpf_testmod_ops3_call_test_1(void)
312 {
313 	st_ops3->test_1();
314 }
315 
316 __bpf_kfunc void bpf_testmod_ops3_call_test_2(void)
317 {
318 	st_ops3->test_2();
319 }
320 
321 struct bpf_testmod_btf_type_tag_1 {
322 	int a;
323 };
324 
325 struct bpf_testmod_btf_type_tag_2 {
326 	struct bpf_testmod_btf_type_tag_1 __user *p;
327 };
328 
329 struct bpf_testmod_btf_type_tag_3 {
330 	struct bpf_testmod_btf_type_tag_1 __percpu *p;
331 };
332 
333 noinline int
334 bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
335 	BTF_TYPE_EMIT(func_proto_typedef);
336 	BTF_TYPE_EMIT(func_proto_typedef_nested1);
337 	BTF_TYPE_EMIT(func_proto_typedef_nested2);
338 	return arg->a;
339 }
340 
341 noinline int
342 bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
343 	return arg->p->a;
344 }
345 
346 noinline int
347 bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
348 	return arg->a;
349 }
350 
351 noinline int
352 bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
353 	return arg->p->a;
354 }
355 
356 noinline int bpf_testmod_loop_test(int n)
357 {
358 	/* Make sum volatile, so smart compilers, such as clang, will not
359 	 * optimize the code by removing the loop.
360 	 */
361 	volatile int sum = 0;
362 	int i;
363 
364 	/* The primary goal of this test is to test LBR. Create a lot of
365 	 * branches in the function so they are easy to catch.
366 	 */
367 	for (i = 0; i < n; i++)
368 		sum += i;
369 	return sum;
370 }
371 
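/* Returns a different pointer class for each arg value, from valid kernel
 * addresses to user, non-canonical and otherwise invalid ones, so tracing
 * programs attached here can exercise fault/extable handling when
 * dereferencing the return value.
 */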
372 __weak noinline struct file *bpf_testmod_return_ptr(int arg)
373 {
374 	static struct file f = {};
375 
376 	switch (arg) {
377 	case 1: return (void *)EINVAL;		/* user addr */
378 	case 2: return (void *)0xcafe4a11;	/* user addr */
379 	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
380 	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
381 	case 5: return (void *)~(1ull << 30);	/* trigger extable */
382 	case 6: return &f;			/* valid addr */
383 	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
384 #ifdef CONFIG_X86_64
385 	case 8: return (void *)VSYSCALL_ADDR;   /* vsyscall page address */
386 #endif
387 	default: return NULL;
388 	}
389 }
390 
391 noinline int bpf_testmod_fentry_test1(int a)
392 {
393 	return a + 1;
394 }
395 
396 noinline int bpf_testmod_fentry_test2(int a, u64 b)
397 {
398 	return a + b;
399 }
400 
401 noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
402 {
403 	return a + b + c;
404 }
405 
406 noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
407 				      void *e, char f, int g)
408 {
409 	return a + (long)b + c + d + (long)e + f + g;
410 }
411 
412 noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
413 				       void *e, char f, int g,
414 				       unsigned int h, long i, __u64 j,
415 				       unsigned long k)
416 {
417 	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
418 }
419 
420 int bpf_testmod_fentry_ok;
421 
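/* .read handler of the bpf_testmod sysfs file. Reading it drives most of the
 * attach targets above (struct/union args, tracepoints, fentry tests) and
 * always returns -EIO so the read itself fails.
 */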
422 noinline ssize_t
423 bpf_testmod_test_read(struct file *file, struct kobject *kobj,
424 		      const struct bin_attribute *bin_attr,
425 		      char *buf, loff_t off, size_t len)
426 {
427 	struct bpf_testmod_test_read_ctx ctx = {
428 		.buf = buf,
429 		.off = off,
430 		.len = len,
431 	};
432 	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
433 	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
434 	struct bpf_testmod_struct_arg_3 *struct_arg3;
435 	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
436 	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
437 	union bpf_testmod_union_arg_1 union_arg1 = { .arg = {1} };
438 	union bpf_testmod_union_arg_2 union_arg2 = { .arg = {2, 3} };
439 	int i = 1;
440 
441 	while (bpf_testmod_return_ptr(i))
442 		i++;
443 
444 	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
445 	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
446 	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
447 	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
448 	(void)bpf_testmod_test_struct_arg_5();
449 	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
450 					    (void *)20, struct_arg4);
451 	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
452 					    (void *)20, struct_arg4, 23);
453 	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
454 					    21, 22, struct_arg5, 27);
455 
456 	(void)bpf_testmod_test_union_arg_1(union_arg1, 4, 5);
457 	(void)bpf_testmod_test_union_arg_2(6, union_arg2);
458 
459 	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
460 
461 	(void)trace_bpf_testmod_test_raw_tp_null_tp(NULL);
462 
463 	bpf_testmod_test_struct_ops3();
464 
465 	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
466 				sizeof(int)), GFP_KERNEL);
467 	if (struct_arg3 != NULL) {
468 		struct_arg3->b[0] = 1;
469 		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
470 		kfree(struct_arg3);
471 	}
472 
473 	/* This is always true. Use the check to make sure the compiler
474 	 * doesn't remove bpf_testmod_loop_test.
475 	 */
476 	if (bpf_testmod_loop_test(101) > 100)
477 		trace_bpf_testmod_test_read(current, &ctx);
478 
479 	trace_bpf_testmod_test_nullable_bare_tp(NULL);
480 
481 	/* Magic number to enable writable tp */
482 	if (len == 64) {
483 		struct bpf_testmod_test_writable_ctx writable = {
484 			.val = 1024,
485 		};
486 		trace_bpf_testmod_test_writable_bare_tp(&writable);
487 		if (writable.early_ret)
488 			return snprintf(buf, len, "%d\n", writable.val);
489 	}
490 
491 	if (bpf_testmod_fentry_test1(1) != 2 ||
492 	    bpf_testmod_fentry_test2(2, 3) != 5 ||
493 	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
494 	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
495 			21, 22) != 133 ||
496 	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
497 			21, 22, 23, 24, 25, 26) != 231)
498 		goto out;
499 
500 	bpf_testmod_fentry_ok = 1;
501 out:
502 	return -EIO; /* always fail */
503 }
504 EXPORT_SYMBOL(bpf_testmod_test_read);
505 ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
506 
507 noinline ssize_t
508 bpf_testmod_test_write(struct file *file, struct kobject *kobj,
509 		      const struct bin_attribute *bin_attr,
510 		      char *buf, loff_t off, size_t len)
511 {
512 	struct bpf_testmod_test_write_ctx ctx = {
513 		.buf = buf,
514 		.off = off,
515 		.len = len,
516 	};
517 
518 	trace_bpf_testmod_test_write_bare_tp(current, &ctx);
519 
520 	return -EIO; /* always fail */
521 }
522 EXPORT_SYMBOL(bpf_testmod_test_write);
523 ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);
524 
525 noinline int bpf_fentry_shadow_test(int a)
526 {
527 	return a + 2;
528 }
529 EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
530 
531 __bpf_hook_end();
532 
533 static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
534 	.attr = { .name = "bpf_testmod", .mode = 0666, },
535 	.read = bpf_testmod_test_read,
536 	.write = bpf_testmod_test_write,
537 };
538 
539 /* The bpf_testmod_uprobe sysfs attribute is enabled for x86_64 only so far;
540  * please see the test_uretprobe_regs_change test.
541  */
542 #ifdef __x86_64__
543 
544 static int
545 uprobe_handler(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data)
546 {
547 	regs->cx = 0x87654321feebdaed;
548 	return 0;
549 }
550 
551 static int
552 uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
553 		   struct pt_regs *regs, __u64 *data)
554 
555 {
556 	regs->ax  = 0x12345678deadbeef;
557 	regs->r11 = (u64) -1;
558 	return 0;
559 }
560 
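/* State for the uprobe installed through the bpf_testmod_uprobe sysfs file;
 * the consumer handlers above deliberately rewrite registers for the
 * test_uretprobe_regs_change selftest.
 */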
561 struct testmod_uprobe {
562 	struct path path;
563 	struct uprobe *uprobe;
564 	struct uprobe_consumer consumer;
565 };
566 
567 static DEFINE_MUTEX(testmod_uprobe_mutex);
568 
569 static struct testmod_uprobe uprobe = {
570 	.consumer.handler = uprobe_handler,
571 	.consumer.ret_handler = uprobe_ret_handler,
572 };
573 
574 static int testmod_register_uprobe(loff_t offset)
575 {
576 	int err = -EBUSY;
577 
578 	if (uprobe.uprobe)
579 		return -EBUSY;
580 
581 	mutex_lock(&testmod_uprobe_mutex);
582 
583 	if (uprobe.uprobe)
584 		goto out;
585 
586 	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
587 	if (err)
588 		goto out;
589 
590 	uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry),
591 					offset, 0, &uprobe.consumer);
592 	if (IS_ERR(uprobe.uprobe)) {
593 		err = PTR_ERR(uprobe.uprobe);
594 		path_put(&uprobe.path);
595 		uprobe.uprobe = NULL;
596 	}
597 out:
598 	mutex_unlock(&testmod_uprobe_mutex);
599 	return err;
600 }
601 
602 static void testmod_unregister_uprobe(void)
603 {
604 	mutex_lock(&testmod_uprobe_mutex);
605 
606 	if (uprobe.uprobe) {
607 		uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
608 		uprobe_unregister_sync();
609 		path_put(&uprobe.path);
610 		uprobe.uprobe = NULL;
611 	}
612 
613 	mutex_unlock(&testmod_uprobe_mutex);
614 }
615 
616 static ssize_t
617 bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
618 			 const struct bin_attribute *bin_attr,
619 			 char *buf, loff_t off, size_t len)
620 {
621 	unsigned long offset = 0;
622 	int err = 0;
623 
624 	if (kstrtoul(buf, 0, &offset))
625 		return -EINVAL;
626 
627 	if (offset)
628 		err = testmod_register_uprobe(offset);
629 	else
630 		testmod_unregister_uprobe();
631 
632 	return err ?: strlen(buf);
633 }
634 
635 static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
636 	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
637 	.write = bpf_testmod_uprobe_write,
638 };
639 
640 static int register_bpf_testmod_uprobe(void)
641 {
642 	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
643 }
644 
645 static void unregister_bpf_testmod_uprobe(void)
646 {
647 	testmod_unregister_uprobe();
648 	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
649 }
650 
651 #else
652 static int register_bpf_testmod_uprobe(void)
653 {
654 	return 0;
655 }
656 
657 static void unregister_bpf_testmod_uprobe(void) { }
658 #endif
659 
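/* kfuncs in this set are registered for BPF_PROG_TYPE_UNSPEC in
 * bpf_testmod_init(), i.e. they are available to all program types.
 */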
660 BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
661 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
662 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
663 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
664 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
665 BTF_ID_FLAGS(func, bpf_kfunc_common_test)
666 BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
667 BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
668 BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
669 BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE)
670 BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS)
671 BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS)
672 BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS)
673 BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
674 BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test, KF_RET_NULL | KF_RCU_PROTECTED)
675 BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test_nostruct, KF_RET_NULL | KF_RCU_PROTECTED)
676 BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
677 BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
678 BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1)
679 BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2)
680 BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)
681 
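/* BTF id pair (struct, destructor kfunc) used to register
 * bpf_testmod_ctx_release() as the dtor for bpf_testmod_ctx kptrs in
 * bpf_testmod_init().
 */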
682 BTF_ID_LIST(bpf_testmod_dtor_ids)
683 BTF_ID(struct, bpf_testmod_ctx)
684 BTF_ID(func, bpf_testmod_ctx_release)
685 
686 static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
687 	.owner = THIS_MODULE,
688 	.set   = &bpf_testmod_common_kfunc_ids,
689 };
690 
691 __bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
692 {
693 	return a + b + c + d;
694 }
695 
696 __bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
697 {
698 	return a + b;
699 }
700 
701 __bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
702 {
703 	return sk;
704 }
705 
706 __bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
707 {
708 	/* Provoke the compiler to assume that the caller has sign-extended a,
709 	 * b and c on platforms where this is required (e.g. s390x).
710 	 */
711 	return (long)a + (long)b + (long)c + d;
712 }
713 
714 static struct prog_test_ref_kfunc prog_test_struct = {
715 	.a = 42,
716 	.b = 108,
717 	.next = &prog_test_struct,
718 	.cnt = REFCOUNT_INIT(1),
719 };
720 
721 __bpf_kfunc struct prog_test_ref_kfunc *
722 bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
723 {
724 	refcount_inc(&prog_test_struct.cnt);
725 	return &prog_test_struct;
726 }
727 
728 __bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
729 {
730 	WARN_ON_ONCE(1);
731 }
732 
733 __bpf_kfunc struct prog_test_member *
734 bpf_kfunc_call_memb_acquire(void)
735 {
736 	WARN_ON_ONCE(1);
737 	return NULL;
738 }
739 
740 __bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
741 {
742 	WARN_ON_ONCE(1);
743 }
744 
745 static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
746 {
747 	if (size > 2 * sizeof(int))
748 		return NULL;
749 
750 	return (int *)p;
751 }
752 
753 __bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
754 						  const int rdwr_buf_size)
755 {
756 	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
757 }
758 
759 __bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
760 						    const int rdonly_buf_size)
761 {
762 	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
763 }
764 
765 /* The next two can't really be used for testing, except to ensure
766  * that the verifier rejects the call.
767  * Acquire functions must return struct pointers, so these ones
768  * fail.
769  */
770 __bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
771 						    const int rdonly_buf_size)
772 {
773 	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
774 }
775 
776 __bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
777 {
778 }
779 
780 __bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
781 {
782 }
783 
784 __bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
785 {
786 }
787 
788 __bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
789 {
790 }
791 
792 __bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
793 {
794 }
795 
796 __bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
797 {
798 }
799 
800 __bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
801 {
802 }
803 
804 __bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
805 {
806 }
807 
808 __bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
809 {
810 }
811 
812 __bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
813 {
814 }
815 
816 __bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
817 {
818 	/* p != NULL, but p->cnt could be 0 */
819 }
820 
821 __bpf_kfunc void bpf_kfunc_call_test_destructive(void)
822 {
823 }
824 
825 __bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
826 {
827 	return arg;
828 }
829 
830 __bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
831 {
832 }
833 
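/* The sleepable socket kfuncs below operate on the module-global "sock":
 * bpf_kfunc_init_sock() creates it, bpf_kfunc_close_sock() releases it, and
 * every kfunc takes sock_lock and returns -EPERM if the socket is not in the
 * expected state.
 */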
834 __bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
835 {
836 	int proto;
837 	int err;
838 
839 	mutex_lock(&sock_lock);
840 
841 	if (sock) {
842 		pr_err("%s called without releasing old sock\n", __func__);
843 		err = -EPERM;
844 		goto out;
845 	}
846 
847 	switch (args->af) {
848 	case AF_INET:
849 	case AF_INET6:
850 		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
851 		break;
852 	case AF_UNIX:
853 		proto = PF_UNIX;
854 		break;
855 	default:
856 		pr_err("invalid address family %d\n", args->af);
857 		err = -EINVAL;
858 		goto out;
859 	}
860 
861 	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
862 			       proto, &sock);
863 
864 	if (!err)
865 		/* Set a timeout for the kernel_connect() call to prevent it from
866 		 * hanging, and consider the connection attempt failed if it
867 		 * returns -EINPROGRESS.
868 		 */
869 		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
870 out:
871 	mutex_unlock(&sock_lock);
872 
873 	return err;
874 }
875 
876 __bpf_kfunc void bpf_kfunc_close_sock(void)
877 {
878 	mutex_lock(&sock_lock);
879 
880 	if (sock) {
881 		sock_release(sock);
882 		sock = NULL;
883 	}
884 
885 	mutex_unlock(&sock_lock);
886 }
887 
888 __bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
889 {
890 	int err;
891 
892 	if (args->addrlen > sizeof(args->addr))
893 		return -EINVAL;
894 
895 	mutex_lock(&sock_lock);
896 
897 	if (!sock) {
898 		pr_err("%s called without initializing sock\n", __func__);
899 		err = -EPERM;
900 		goto out;
901 	}
902 
903 	err = kernel_connect(sock, (struct sockaddr *)&args->addr,
904 			     args->addrlen, 0);
905 out:
906 	mutex_unlock(&sock_lock);
907 
908 	return err;
909 }
910 
911 __bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
912 {
913 	int err;
914 
915 	if (args->addrlen > sizeof(args->addr))
916 		return -EINVAL;
917 
918 	mutex_lock(&sock_lock);
919 
920 	if (!sock) {
921 		pr_err("%s called without initializing sock\n", __func__);
922 		err = -EPERM;
923 		goto out;
924 	}
925 
926 	err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
927 out:
928 	mutex_unlock(&sock_lock);
929 
930 	return err;
931 }
932 
933 __bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
934 {
935 	int err;
936 
937 	mutex_lock(&sock_lock);
938 
939 	if (!sock) {
940 		pr_err("%s called without initializing sock\n", __func__);
941 		err = -EPERM;
942 		goto out;
943 	}
944 
945 	err = kernel_listen(sock, 128);
946 out:
947 	mutex_unlock(&sock_lock);
948 
949 	return err;
950 }
951 
952 __bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
953 {
954 	struct msghdr msg = {
955 		.msg_name	= &args->addr.addr,
956 		.msg_namelen	= args->addr.addrlen,
957 	};
958 	struct kvec iov;
959 	int err;
960 
961 	if (args->addr.addrlen > sizeof(args->addr.addr) ||
962 	    args->msglen > sizeof(args->msg))
963 		return -EINVAL;
964 
965 	iov.iov_base = args->msg;
966 	iov.iov_len  = args->msglen;
967 
968 	mutex_lock(&sock_lock);
969 
970 	if (!sock) {
971 		pr_err("%s called without initializing sock\n", __func__);
972 		err = -EPERM;
973 		goto out;
974 	}
975 
976 	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
977 	args->addr.addrlen = msg.msg_namelen;
978 out:
979 	mutex_unlock(&sock_lock);
980 
981 	return err;
982 }
983 
984 __bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
985 {
986 	struct msghdr msg = {
987 		.msg_name	= &args->addr.addr,
988 		.msg_namelen	= args->addr.addrlen,
989 	};
990 	struct kvec iov;
991 	int err;
992 
993 	if (args->addr.addrlen > sizeof(args->addr.addr) ||
994 	    args->msglen > sizeof(args->msg))
995 		return -EINVAL;
996 
997 	iov.iov_base = args->msg;
998 	iov.iov_len  = args->msglen;
999 
1000 	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
1001 	mutex_lock(&sock_lock);
1002 
1003 	if (!sock) {
1004 		pr_err("%s called without initializing sock\n", __func__);
1005 		err = -EPERM;
1006 		goto out;
1007 	}
1008 
1009 	err = sock_sendmsg(sock, &msg);
1010 	args->addr.addrlen = msg.msg_namelen;
1011 out:
1012 	mutex_unlock(&sock_lock);
1013 
1014 	return err;
1015 }
1016 
1017 __bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
1018 {
1019 	int err;
1020 
1021 	mutex_lock(&sock_lock);
1022 
1023 	if (!sock) {
1024 		pr_err("%s called without initializing sock\n", __func__);
1025 		err = -EPERM;
1026 		goto out;
1027 	}
1028 
1029 	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
1030 	if (err < 0)
1031 		goto out;
1032 
1033 	args->addrlen = err;
1034 	err = 0;
1035 out:
1036 	mutex_unlock(&sock_lock);
1037 
1038 	return err;
1039 }
1040 
1041 __bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
1042 {
1043 	int err;
1044 
1045 	mutex_lock(&sock_lock);
1046 
1047 	if (!sock) {
1048 		pr_err("%s called without initializing sock\n", __func__);
1049 		err = -EPERM;
1050 		goto out;
1051 	}
1052 
1053 	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
1054 	if (err < 0)
1055 		goto out;
1056 
1057 	args->addrlen = err;
1058 	err = 0;
1059 out:
1060 	mutex_unlock(&sock_lock);
1061 
1062 	return err;
1063 }
1064 
1065 static DEFINE_MUTEX(st_ops_mutex);
1066 static struct bpf_testmod_st_ops *st_ops;
1067 
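/* These kfuncs forward to whichever bpf_testmod_st_ops implementation is
 * currently registered (see st_ops_reg()/st_ops_unreg()) and return -1 if
 * none is.
 */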
1068 __bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
1069 {
1070 	int ret = -1;
1071 
1072 	mutex_lock(&st_ops_mutex);
1073 	if (st_ops && st_ops->test_prologue)
1074 		ret = st_ops->test_prologue(args);
1075 	mutex_unlock(&st_ops_mutex);
1076 
1077 	return ret;
1078 }
1079 
1080 __bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
1081 {
1082 	int ret = -1;
1083 
1084 	mutex_lock(&st_ops_mutex);
1085 	if (st_ops && st_ops->test_epilogue)
1086 		ret = st_ops->test_epilogue(args);
1087 	mutex_unlock(&st_ops_mutex);
1088 
1089 	return ret;
1090 }
1091 
1092 __bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
1093 {
1094 	int ret = -1;
1095 
1096 	mutex_lock(&st_ops_mutex);
1097 	if (st_ops && st_ops->test_pro_epilogue)
1098 		ret = st_ops->test_pro_epilogue(args);
1099 	mutex_unlock(&st_ops_mutex);
1100 
1101 	return ret;
1102 }
1103 
1104 __bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
1105 {
1106 	args->a += 10;
1107 	return args->a;
1108 }
1109 
1110 __bpf_kfunc int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id);
1111 
1112 BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
1113 BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
1114 BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
1115 BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
1116 BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
1117 BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
1118 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
1119 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
1120 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
1121 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
1122 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
1123 BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
1124 BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
1125 BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
1126 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
1127 BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
1128 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
1129 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
1130 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
1131 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
1132 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
1133 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
1134 BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
1135 BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
1136 BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
1137 BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
1138 BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
1139 BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
1140 BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
1141 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
1142 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
1143 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
1144 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
1145 BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
1146 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
1147 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
1148 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
1149 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
1150 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
1151 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
1152 BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1, KF_TRUSTED_ARGS)
1153 BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
1154 
1155 static int bpf_testmod_ops_init(struct btf *btf)
1156 {
1157 	return 0;
1158 }
1159 
1160 static bool bpf_testmod_ops_is_valid_access(int off, int size,
1161 					    enum bpf_access_type type,
1162 					    const struct bpf_prog *prog,
1163 					    struct bpf_insn_access_aux *info)
1164 {
1165 	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
1166 }
1167 
1168 static int bpf_testmod_ops_init_member(const struct btf_type *t,
1169 				       const struct btf_member *member,
1170 				       void *kdata, const void *udata)
1171 {
1172 	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
1173 		/* For the data field, this function has to copy the value and
1174 		 * return 1 to indicate that the data has been handled by the
1175 		 * struct_ops type, or the verifier will reject the map if
1176 		 * the value of the data field is not zero.
1177 		 */
1178 		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
1179 		return 1;
1180 	}
1181 	return 0;
1182 }
1183 
1184 static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
1185 	.owner = THIS_MODULE,
1186 	.set   = &bpf_testmod_check_kfunc_ids,
1187 };
1188 
1189 static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
1190 	.get_func_proto	 = bpf_base_func_proto,
1191 	.is_valid_access = bpf_testmod_ops_is_valid_access,
1192 };
1193 
1194 static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = {
1195 	.is_valid_access = bpf_testmod_ops_is_valid_access,
1196 };
1197 
1198 static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
1199 {
1200 	struct bpf_testmod_ops *ops = kdata;
1201 
1202 	if (ops->test_1)
1203 		ops->test_1();
1204 	/* Some test cases (e.g. struct_ops_maybe_null) may not have test_2
1205 	 * initialized, so we need to check for NULL.
1206 	 */
1207 	if (ops->test_2)
1208 		ops->test_2(4, ops->data);
1209 
1210 	return 0;
1211 }
1212 
1213 static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
1214 {
1215 }
1216 
1217 static int bpf_testmod_test_1(void)
1218 {
1219 	return 0;
1220 }
1221 
1222 static void bpf_testmod_test_2(int a, int b)
1223 {
1224 }
1225 
1226 static int bpf_testmod_tramp(int value)
1227 {
1228 	return 0;
1229 }
1230 
1231 static int bpf_testmod_ops__test_maybe_null(int dummy,
1232 					    struct task_struct *task__nullable)
1233 {
1234 	return 0;
1235 }
1236 
1237 static int bpf_testmod_ops__test_refcounted(int dummy,
1238 					    struct task_struct *task__ref)
1239 {
1240 	return 0;
1241 }
1242 
1243 static struct task_struct *
1244 bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref,
1245 				      struct cgroup *cgrp)
1246 {
1247 	return NULL;
1248 }
1249 
1250 static struct bpf_testmod_ops __bpf_testmod_ops = {
1251 	.test_1 = bpf_testmod_test_1,
1252 	.test_2 = bpf_testmod_test_2,
1253 	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
1254 	.test_refcounted = bpf_testmod_ops__test_refcounted,
1255 	.test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr,
1256 };
1257 
1258 struct bpf_struct_ops bpf_bpf_testmod_ops = {
1259 	.verifier_ops = &bpf_testmod_verifier_ops,
1260 	.init = bpf_testmod_ops_init,
1261 	.init_member = bpf_testmod_ops_init_member,
1262 	.reg = bpf_dummy_reg,
1263 	.unreg = bpf_dummy_unreg,
1264 	.cfi_stubs = &__bpf_testmod_ops,
1265 	.name = "bpf_testmod_ops",
1266 	.owner = THIS_MODULE,
1267 };
1268 
1269 static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
1270 {
1271 	struct bpf_testmod_ops2 *ops = kdata;
1272 
1273 	ops->test_1();
1274 	return 0;
1275 }
1276 
1277 static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
1278 	.test_1 = bpf_testmod_test_1,
1279 };
1280 
1281 struct bpf_struct_ops bpf_testmod_ops2 = {
1282 	.verifier_ops = &bpf_testmod_verifier_ops,
1283 	.init = bpf_testmod_ops_init,
1284 	.init_member = bpf_testmod_ops_init_member,
1285 	.reg = bpf_dummy_reg2,
1286 	.unreg = bpf_dummy_unreg,
1287 	.cfi_stubs = &__bpf_testmod_ops2,
1288 	.name = "bpf_testmod_ops2",
1289 	.owner = THIS_MODULE,
1290 };
1291 
1292 static int st_ops3_reg(void *kdata, struct bpf_link *link)
1293 {
1294 	int err = 0;
1295 
1296 	mutex_lock(&st_ops_mutex);
1297 	if (st_ops3) {
1298 		pr_err("st_ops has already been registered\n");
1299 		err = -EEXIST;
1300 		goto unlock;
1301 	}
1302 	st_ops3 = kdata;
1303 
1304 unlock:
1305 	mutex_unlock(&st_ops_mutex);
1306 	return err;
1307 }
1308 
1309 static void st_ops3_unreg(void *kdata, struct bpf_link *link)
1310 {
1311 	mutex_lock(&st_ops_mutex);
1312 	st_ops3 = NULL;
1313 	mutex_unlock(&st_ops_mutex);
1314 }
1315 
1316 static void test_1_recursion_detected(struct bpf_prog *prog)
1317 {
1318 	struct bpf_prog_stats *stats;
1319 
1320 	stats = this_cpu_ptr(prog->stats);
1321 	printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu\n",
1322 	       u64_stats_read(&stats->misses));
1323 }
1324 
1325 static int st_ops3_check_member(const struct btf_type *t,
1326 				const struct btf_member *member,
1327 				const struct bpf_prog *prog)
1328 {
1329 	u32 moff = __btf_member_bit_offset(t, member) / 8;
1330 
1331 	switch (moff) {
1332 	case offsetof(struct bpf_testmod_ops3, test_1):
1333 		prog->aux->priv_stack_requested = true;
1334 		prog->aux->recursion_detected = test_1_recursion_detected;
1335 		fallthrough;
1336 	default:
1337 		break;
1338 	}
1339 	return 0;
1340 }
1341 
1342 struct bpf_struct_ops bpf_testmod_ops3 = {
1343 	.verifier_ops = &bpf_testmod_verifier_ops3,
1344 	.init = bpf_testmod_ops_init,
1345 	.init_member = bpf_testmod_ops_init_member,
1346 	.reg = st_ops3_reg,
1347 	.unreg = st_ops3_unreg,
1348 	.check_member = st_ops3_check_member,
1349 	.cfi_stubs = &__bpf_testmod_ops3,
1350 	.name = "bpf_testmod_ops3",
1351 	.owner = THIS_MODULE,
1352 };
1353 
1354 static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
1355 {
1356 	return 0;
1357 }
1358 
1359 static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
1360 {
1361 	return 0;
1362 }
1363 
1364 static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
1365 {
1366 	return 0;
1367 }
1368 
1369 static int bpf_cgroup_from_id_id;
1370 static int bpf_cgroup_release_id;
1371 
1372 static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct_write,
1373 					  const struct bpf_prog *prog)
1374 {
1375 	struct bpf_insn *insn = insn_buf;
1376 
1377 	/* r8 = r1; // r8 will be "u64 *ctx".
1378 	 * r1 = 0;
1379 	 * r0 = bpf_cgroup_from_id(r1);
1380 	 * if r0 != 0 goto pc+5;
1381 	 * r6 = r8[0]; // r6 will be "struct st_ops *args".
1382 	 * r7 = r6->a;
1383 	 * r7 += 1000;
1384 	 * r6->a = r7;
1385 	 * goto pc+2;
1386 	 * r1 = r0;
1387 	 * bpf_cgroup_release(r1);
1388 	 * r1 = r8;
1389 	 */
1390 	*insn++ = BPF_MOV64_REG(BPF_REG_8, BPF_REG_1);
1391 	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
1392 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
1393 	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 5);
1394 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_8, 0);
1395 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
1396 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
1397 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
1398 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
1399 	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
1400 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
1401 	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8);
1402 	*insn++ = prog->insnsi[0];
1403 
1404 	return insn - insn_buf;
1405 }
1406 
1407 static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
1408 					  s16 ctx_stack_off)
1409 {
1410 	struct bpf_insn *insn = insn_buf;
1411 
1412 	/* r1 = 0;
1413 	 * r6 = 0;
1414 	 * r0 = bpf_cgroup_from_id(r1);
1415 	 * if r0 != 0 goto pc+6;
1416 	 * r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
1417 	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
1418 	 * r6 = r1->a;
1419 	 * r6 += 10000;
1420 	 * r1->a = r6;
1421 	 * goto pc+2;
1422 	 * r1 = r0;
1423 	 * bpf_cgroup_release(r1);
1424 	 * r0 = r6;
1425 	 * r0 *= 2;
1426 	 * BPF_EXIT;
1427 	 */
1428 	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
1429 	*insn++ = BPF_MOV64_IMM(BPF_REG_6, 0);
1430 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
1431 	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 6);
1432 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
1433 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
1434 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
1435 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
1436 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
1437 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
1438 	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
1439 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
1440 	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
1441 	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
1442 	*insn++ = BPF_EXIT_INSN();
1443 
1444 	return insn - insn_buf;
1445 }
1446 
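/* Programs whose name starts with this prefix get the *_with_kfunc
 * prologue/epilogue variants above, which additionally call the
 * bpf_cgroup_from_id()/bpf_cgroup_release() kfuncs.
 */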
1447 #define KFUNC_PRO_EPI_PREFIX "test_kfunc_"
1448 static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
1449 			       const struct bpf_prog *prog)
1450 {
1451 	struct bpf_insn *insn = insn_buf;
1452 
1453 	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
1454 	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
1455 		return 0;
1456 
1457 	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
1458 		return st_ops_gen_prologue_with_kfunc(insn_buf, direct_write, prog);
1459 
1460 	/* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx".
1461 	 * r7 = r6->a;
1462 	 * r7 += 1000;
1463 	 * r6->a = r7;
1464 	 */
1465 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
1466 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
1467 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
1468 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
1469 	*insn++ = prog->insnsi[0];
1470 
1471 	return insn - insn_buf;
1472 }
1473 
1474 static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
1475 			       s16 ctx_stack_off)
1476 {
1477 	struct bpf_insn *insn = insn_buf;
1478 
1479 	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
1480 	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
1481 		return 0;
1482 
1483 	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
1484 		return st_ops_gen_epilogue_with_kfunc(insn_buf, prog, ctx_stack_off);
1485 
1486 	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
1487 	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
1488 	 * r6 = r1->a;
1489 	 * r6 += 10000;
1490 	 * r1->a = r6;
1491 	 * r0 = r6;
1492 	 * r0 *= 2;
1493 	 * BPF_EXIT;
1494 	 */
1495 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
1496 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
1497 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
1498 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
1499 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
1500 	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
1501 	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
1502 	*insn++ = BPF_EXIT_INSN();
1503 
1504 	return insn - insn_buf;
1505 }
1506 
1507 static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
1508 				    const struct bpf_reg_state *reg,
1509 				    int off, int size)
1510 {
1511 	if (off < 0 || off + size > sizeof(struct st_ops_args))
1512 		return -EACCES;
1513 	return 0;
1514 }
1515 
1516 static const struct bpf_verifier_ops st_ops_verifier_ops = {
1517 	.is_valid_access = bpf_testmod_ops_is_valid_access,
1518 	.btf_struct_access = st_ops_btf_struct_access,
1519 	.gen_prologue = st_ops_gen_prologue,
1520 	.gen_epilogue = st_ops_gen_epilogue,
1521 	.get_func_proto = bpf_base_func_proto,
1522 };
1523 
1524 static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
1525 	.test_prologue = bpf_test_mod_st_ops__test_prologue,
1526 	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
1527 	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
1528 };
1529 
1530 static int st_ops_reg(void *kdata, struct bpf_link *link)
1531 {
1532 	int err = 0;
1533 
1534 	mutex_lock(&st_ops_mutex);
1535 	if (st_ops) {
1536 		pr_err("st_ops has already been registered\n");
1537 		err = -EEXIST;
1538 		goto unlock;
1539 	}
1540 	st_ops = kdata;
1541 
1542 unlock:
1543 	mutex_unlock(&st_ops_mutex);
1544 	return err;
1545 }
1546 
1547 static void st_ops_unreg(void *kdata, struct bpf_link *link)
1548 {
1549 	mutex_lock(&st_ops_mutex);
1550 	st_ops = NULL;
1551 	mutex_unlock(&st_ops_mutex);
1552 }
1553 
1554 static int st_ops_init(struct btf *btf)
1555 {
1556 	struct btf *kfunc_btf;
1557 
1558 	bpf_cgroup_from_id_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf);
1559 	bpf_cgroup_release_id = bpf_find_btf_id("bpf_cgroup_release", BTF_KIND_FUNC, &kfunc_btf);
1560 	if (bpf_cgroup_from_id_id < 0 || bpf_cgroup_release_id < 0)
1561 		return -EINVAL;
1562 
1563 	return 0;
1564 }
1565 
1566 static int st_ops_init_member(const struct btf_type *t,
1567 			      const struct btf_member *member,
1568 			      void *kdata, const void *udata)
1569 {
1570 	return 0;
1571 }
1572 
1573 static struct bpf_struct_ops testmod_st_ops = {
1574 	.verifier_ops = &st_ops_verifier_ops,
1575 	.init = st_ops_init,
1576 	.init_member = st_ops_init_member,
1577 	.reg = st_ops_reg,
1578 	.unreg = st_ops_unreg,
1579 	.cfi_stubs = &st_ops_cfi_stubs,
1580 	.name = "bpf_testmod_st_ops",
1581 	.owner = THIS_MODULE,
1582 };
1583 
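/* Unlike bpf_testmod_st_ops above, bpf_testmod_multi_st_ops allows several
 * instances to be registered at once; each entry is keyed by the id returned
 * by bpf_struct_ops_id() and looked up by bpf_kfunc_multi_st_ops_test_1().
 */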
1584 struct hlist_head multi_st_ops_list;
1585 static DEFINE_SPINLOCK(multi_st_ops_lock);
1586 
1587 static int multi_st_ops_init(struct btf *btf)
1588 {
1589 	spin_lock_init(&multi_st_ops_lock);
1590 	INIT_HLIST_HEAD(&multi_st_ops_list);
1591 
1592 	return 0;
1593 }
1594 
1595 static int multi_st_ops_init_member(const struct btf_type *t,
1596 				    const struct btf_member *member,
1597 				    void *kdata, const void *udata)
1598 {
1599 	return 0;
1600 }
1601 
1602 static struct bpf_testmod_multi_st_ops *multi_st_ops_find_nolock(u32 id)
1603 {
1604 	struct bpf_testmod_multi_st_ops *st_ops;
1605 
1606 	hlist_for_each_entry(st_ops, &multi_st_ops_list, node) {
1607 		if (st_ops->id == id)
1608 			return st_ops;
1609 	}
1610 
1611 	return NULL;
1612 }
1613 
1614 int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id)
1615 {
1616 	struct bpf_testmod_multi_st_ops *st_ops;
1617 	unsigned long flags;
1618 	int ret = -1;
1619 
1620 	spin_lock_irqsave(&multi_st_ops_lock, flags);
1621 	st_ops = multi_st_ops_find_nolock(id);
1622 	if (st_ops)
1623 		ret = st_ops->test_1(args);
1624 	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1625 
1626 	return ret;
1627 }
1628 
1629 static int multi_st_ops_reg(void *kdata, struct bpf_link *link)
1630 {
1631 	struct bpf_testmod_multi_st_ops *st_ops =
1632 		(struct bpf_testmod_multi_st_ops *)kdata;
1633 	unsigned long flags;
1634 	int err = 0;
1635 	u32 id;
1636 
1637 	if (!st_ops->test_1)
1638 		return -EINVAL;
1639 
1640 	id = bpf_struct_ops_id(kdata);
1641 
1642 	spin_lock_irqsave(&multi_st_ops_lock, flags);
1643 	if (multi_st_ops_find_nolock(id)) {
1644 		pr_err("multi_st_ops(id:%d) has already been registered\n", id);
1645 		err = -EEXIST;
1646 		goto unlock;
1647 	}
1648 
1649 	st_ops->id = id;
1650 	hlist_add_head(&st_ops->node, &multi_st_ops_list);
1651 unlock:
1652 	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1653 
1654 	return err;
1655 }
1656 
1657 static void multi_st_ops_unreg(void *kdata, struct bpf_link *link)
1658 {
1659 	struct bpf_testmod_multi_st_ops *st_ops;
1660 	unsigned long flags;
1661 	u32 id;
1662 
1663 	id = bpf_struct_ops_id(kdata);
1664 
1665 	spin_lock_irqsave(&multi_st_ops_lock, flags);
1666 	st_ops = multi_st_ops_find_nolock(id);
1667 	if (st_ops)
1668 		hlist_del(&st_ops->node);
1669 	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1670 }
1671 
1672 static int bpf_testmod_multi_st_ops__test_1(struct st_ops_args *args)
1673 {
1674 	return 0;
1675 }
1676 
1677 static struct bpf_testmod_multi_st_ops multi_st_ops_cfi_stubs = {
1678 	.test_1 = bpf_testmod_multi_st_ops__test_1,
1679 };
1680 
1681 struct bpf_struct_ops testmod_multi_st_ops = {
1682 	.verifier_ops = &bpf_testmod_verifier_ops,
1683 	.init = multi_st_ops_init,
1684 	.init_member = multi_st_ops_init_member,
1685 	.reg = multi_st_ops_reg,
1686 	.unreg = multi_st_ops_unreg,
1687 	.cfi_stubs = &multi_st_ops_cfi_stubs,
1688 	.name = "bpf_testmod_multi_st_ops",
1689 	.owner = THIS_MODULE,
1690 };
1691 
1692 extern int bpf_fentry_test1(int a);
1693 
1694 static int bpf_testmod_init(void)
1695 {
1696 	const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
1697 		{
1698 			.btf_id		= bpf_testmod_dtor_ids[0],
1699 			.kfunc_btf_id	= bpf_testmod_dtor_ids[1]
1700 		},
1701 	};
1702 	void **tramp;
1703 	int ret;
1704 
1705 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
1706 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
1707 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
1708 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
1709 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
1710 	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
1711 	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
1712 	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3);
1713 	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
1714 	ret = ret ?: register_bpf_struct_ops(&testmod_multi_st_ops, bpf_testmod_multi_st_ops);
1715 	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
1716 						 ARRAY_SIZE(bpf_testmod_dtors),
1717 						 THIS_MODULE);
1718 	if (ret < 0)
1719 		return ret;
1720 	if (bpf_fentry_test1(0) < 0)
1721 		return -EINVAL;
1722 	sock = NULL;
1723 	mutex_init(&sock_lock);
1724 	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
1725 	if (ret < 0)
1726 		return ret;
1727 	ret = register_bpf_testmod_uprobe();
1728 	if (ret < 0)
1729 		return ret;
1730 
1731 	/* Ensure nothing is between tramp_1..tramp_40 */
1732 	BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) !=
1733 		     offsetofend(struct bpf_testmod_ops, tramp_40));
1734 	tramp = (void **)&__bpf_testmod_ops.tramp_1;
1735 	while (tramp <= (void **)&__bpf_testmod_ops.tramp_40)
1736 		*tramp++ = bpf_testmod_tramp;
1737 
1738 	return 0;
1739 }
1740 
1741 static void bpf_testmod_exit(void)
1742 {
1743 	/* Need to wait for all references to be dropped because
1744 	 * bpf_kfunc_call_test_release(), which currently resides in the kernel,
1745 	 * can be called after bpf_testmod is unloaded. Once the release function
1746 	 * is moved into the module, this wait can be removed.
1747 	 */
1748 	while (refcount_read(&prog_test_struct.cnt) > 1)
1749 		msleep(20);
1750 
1751 	bpf_kfunc_close_sock();
1752 	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
1753 	unregister_bpf_testmod_uprobe();
1754 }
1755 
1756 module_init(bpf_testmod_init);
1757 module_exit(bpf_testmod_exit);
1758 
1759 MODULE_AUTHOR("Andrii Nakryiko");
1760 MODULE_DESCRIPTION("BPF selftests module");
1761 MODULE_LICENSE("Dual BSD/GPL");
1762