xref: /linux/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c (revision 40863f4d6ef2c34bb00dd1070dfaf9d5f27a497e)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <linux/bpf.h>
4 #include <linux/btf.h>
5 #include <linux/btf_ids.h>
6 #include <linux/delay.h>
7 #include <linux/error-injection.h>
8 #include <linux/init.h>
9 #include <linux/module.h>
10 #include <linux/percpu-defs.h>
11 #include <linux/sysfs.h>
12 #include <linux/tracepoint.h>
13 #include <linux/net.h>
14 #include <linux/socket.h>
15 #include <linux/nsproxy.h>
16 #include <linux/inet.h>
17 #include <linux/in.h>
18 #include <linux/in6.h>
19 #include <linux/un.h>
20 #include <linux/filter.h>
21 #include <net/sock.h>
22 #include <linux/namei.h>
23 #include "bpf_testmod.h"
24 #include "bpf_testmod_kfunc.h"
25 
26 #define CREATE_TRACE_POINTS
27 #include "bpf_testmod-events.h"
28 
29 #define CONNECT_TIMEOUT_SEC 1
30 
31 typedef int (*func_proto_typedef)(long);
32 typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
33 typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);
34 
35 DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
36 long bpf_testmod_test_struct_arg_result;
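/*
 * sock_lock serializes access to the module-global kernel socket below,
 * which is created by bpf_kfunc_init_sock() and used by the socket kfuncs
 * (connect/bind/listen/sendmsg/getsockname/getpeername) further down.
 */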
37 static DEFINE_MUTEX(sock_lock);
38 static struct socket *sock;
39 
40 struct bpf_testmod_struct_arg_1 {
41 	int a;
42 };
43 struct bpf_testmod_struct_arg_2 {
44 	long a;
45 	long b;
46 };
47 
48 struct bpf_testmod_struct_arg_3 {
49 	int a;
50 	int b[];
51 };
52 
53 struct bpf_testmod_struct_arg_4 {
54 	u64 a;
55 	int b;
56 };
57 
58 struct bpf_testmod_struct_arg_5 {
59 	char a;
60 	short b;
61 	int c;
62 	long d;
63 };
64 
65 __bpf_hook_start();
66 
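/*
 * The bpf_testmod_test_struct_arg_*() functions below take struct arguments
 * by value in different positions and sizes so fentry/fexit programs can be
 * attached to them to check struct-by-value argument handling. They all fold
 * their inputs into bpf_testmod_test_struct_arg_result.
 *
 * A minimal BPF-side sketch (illustrative only, not part of this module):
 *
 *	SEC("fexit/bpf_testmod_test_struct_arg_2")
 *	int BPF_PROG(on_struct_arg_2, int a, struct bpf_testmod_struct_arg_2 b,
 *		     int c, int ret)
 *	{
 *		return 0;
 *	}
 */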
67 noinline int
68 bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
69 	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
70 	return bpf_testmod_test_struct_arg_result;
71 }
72 
73 noinline int
74 bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
75 	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
76 	return bpf_testmod_test_struct_arg_result;
77 }
78 
79 noinline int
80 bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
81 	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
82 	return bpf_testmod_test_struct_arg_result;
83 }
84 
85 noinline int
86 bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
87 			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
88 	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
89 	return bpf_testmod_test_struct_arg_result;
90 }
91 
92 noinline int
93 bpf_testmod_test_struct_arg_5(void) {
94 	bpf_testmod_test_struct_arg_result = 1;
95 	return bpf_testmod_test_struct_arg_result;
96 }
97 
98 noinline int
99 bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
100 	bpf_testmod_test_struct_arg_result = a->b[0];
101 	return bpf_testmod_test_struct_arg_result;
102 }
103 
104 noinline int
105 bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
106 			      struct bpf_testmod_struct_arg_4 f)
107 {
108 	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
109 		(long)e + f.a + f.b;
110 	return bpf_testmod_test_struct_arg_result;
111 }
112 
113 noinline int
114 bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
115 			      struct bpf_testmod_struct_arg_4 f, int g)
116 {
117 	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
118 		(long)e + f.a + f.b + g;
119 	return bpf_testmod_test_struct_arg_result;
120 }
121 
122 noinline int
123 bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
124 			      short g, struct bpf_testmod_struct_arg_5 h, long i)
125 {
126 	bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
127 		f + g + h.a + h.b + h.c + h.d + i;
128 	return bpf_testmod_test_struct_arg_result;
129 }
130 
131 noinline int
132 bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
133 	bpf_testmod_test_struct_arg_result = a->a;
134 	return bpf_testmod_test_struct_arg_result;
135 }
136 
137 __weak noinline void bpf_testmod_looooooooooooooooooooooooooooooong_name(void)
138 {
139 }
140 
141 __bpf_kfunc void
142 bpf_testmod_test_mod_kfunc(int i)
143 {
144 	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
145 }
146 
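/*
 * bpf_iter_testmod_seq_{new,next,destroy}() implement a simple open-coded
 * iterator that yields `value` up to `cnt` times; bpf_iter_testmod_seq_value()
 * additionally shows a kfunc taking the iterator state as an __iter argument.
 * A minimal sketch of the BPF-side usage (illustrative only):
 *
 *	struct bpf_iter_testmod_seq it;
 *	s64 sum = 0, *v;
 *
 *	bpf_iter_testmod_seq_new(&it, 100, 10);
 *	while ((v = bpf_iter_testmod_seq_next(&it)))
 *		sum += *v;
 *	bpf_iter_testmod_seq_destroy(&it);
 */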
147 __bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
148 {
149 	it->cnt = cnt;
150 
151 	if (cnt < 0)
152 		return -EINVAL;
153 
154 	it->value = value;
155 
156 	return 0;
157 }
158 
159 __bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
160 {
161 	if (it->cnt <= 0)
162 		return NULL;
163 
164 	it->cnt--;
165 
166 	return &it->value;
167 }
168 
169 __bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq *it__iter)
170 {
171 	if (it__iter->cnt < 0)
172 		return 0;
173 
174 	return val + it__iter->value;
175 }
176 
177 __bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
178 {
179 	it->cnt = 0;
180 }
181 
182 __bpf_kfunc void bpf_kfunc_common_test(void)
183 {
184 }
185 
186 __bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
187 				       struct bpf_dynptr *ptr__nullable)
188 {
189 }
190 
191 __bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
192 {
193 	return NULL;
194 }
195 
196 __bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
197 {
198 	return NULL;
199 }
200 
201 __bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
202 {
203 }
204 
205 __bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
206 {
207 }
208 
209 __bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
210 {
211 }
212 
213 __bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
214 {
215 }
216 
217 __bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
218 {
219 }
220 
221 __bpf_kfunc struct task_struct *bpf_kfunc_ret_rcu_test(void)
222 {
223 	return NULL;
224 }
225 
226 __bpf_kfunc int *bpf_kfunc_ret_rcu_test_nostruct(int rdonly_buf_size)
227 {
228 	return NULL;
229 }
230 
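/*
 * bpf_testmod_ctx_create() / bpf_testmod_ctx_release() form an acquire/release
 * kfunc pair: create returns a refcounted object (or NULL on allocation
 * failure, setting *err), and release drops the reference, freeing the object
 * via call_rcu() once the count reaches zero.
 */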
231 __bpf_kfunc struct bpf_testmod_ctx *
232 bpf_testmod_ctx_create(int *err)
233 {
234 	struct bpf_testmod_ctx *ctx;
235 
236 	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
237 	if (!ctx) {
238 		*err = -ENOMEM;
239 		return NULL;
240 	}
241 	refcount_set(&ctx->usage, 1);
242 
243 	return ctx;
244 }
245 
246 static void testmod_free_cb(struct rcu_head *head)
247 {
248 	struct bpf_testmod_ctx *ctx;
249 
250 	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
251 	kfree(ctx);
252 }
253 
254 __bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
255 {
256 	if (!ctx)
257 		return;
258 	if (refcount_dec_and_test(&ctx->usage))
259 		call_rcu(&ctx->rcu, testmod_free_cb);
260 }
261 
262 static struct bpf_testmod_ops3 *st_ops3;
263 
264 static int bpf_testmod_test_3(void)
265 {
266 	return 0;
267 }
268 
269 static int bpf_testmod_test_4(void)
270 {
271 	return 0;
272 }
273 
274 static struct bpf_testmod_ops3 __bpf_testmod_ops3 = {
275 	.test_1 = bpf_testmod_test_3,
276 	.test_2 = bpf_testmod_test_4,
277 };
278 
279 static void bpf_testmod_test_struct_ops3(void)
280 {
281 	if (st_ops3)
282 		st_ops3->test_1();
283 }
284 
285 __bpf_kfunc void bpf_testmod_ops3_call_test_1(void)
286 {
287 	st_ops3->test_1();
288 }
289 
290 __bpf_kfunc void bpf_testmod_ops3_call_test_2(void)
291 {
292 	st_ops3->test_2();
293 }
294 
295 struct bpf_testmod_btf_type_tag_1 {
296 	int a;
297 };
298 
299 struct bpf_testmod_btf_type_tag_2 {
300 	struct bpf_testmod_btf_type_tag_1 __user *p;
301 };
302 
303 struct bpf_testmod_btf_type_tag_3 {
304 	struct bpf_testmod_btf_type_tag_1 __percpu *p;
305 };
306 
307 noinline int
308 bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
309 	BTF_TYPE_EMIT(func_proto_typedef);
310 	BTF_TYPE_EMIT(func_proto_typedef_nested1);
311 	BTF_TYPE_EMIT(func_proto_typedef_nested2);
312 	return arg->a;
313 }
314 
315 noinline int
316 bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
317 	return arg->p->a;
318 }
319 
320 noinline int
321 bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
322 	return arg->a;
323 }
324 
325 noinline int
326 bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
327 	return arg->p->a;
328 }
329 
330 noinline int bpf_testmod_loop_test(int n)
331 {
332 	/* Make sum volatile, so smart compilers, such as clang, will not
333 	 * optimize the code by removing the loop.
334 	 */
335 	volatile int sum = 0;
336 	int i;
337 
338 	/* The primary goal of this test is to exercise LBR. Create a lot of
339 	 * branches in the function so we can catch them easily.
340 	 */
341 	for (i = 0; i < n; i++)
342 		sum += i;
343 	return sum;
344 }
345 
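/*
 * bpf_testmod_return_ptr() returns a range of invalid, non-canonical and
 * otherwise special pointers so that tracing programs dereferencing its
 * return value exercise the PROBE_MEM/exception-table handling paths.
 */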
346 __weak noinline struct file *bpf_testmod_return_ptr(int arg)
347 {
348 	static struct file f = {};
349 
350 	switch (arg) {
351 	case 1: return (void *)EINVAL;		/* user addr */
352 	case 2: return (void *)0xcafe4a11;	/* user addr */
353 	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
354 	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
355 	case 5: return (void *)~(1ull << 30);	/* trigger extable */
356 	case 6: return &f;			/* valid addr */
357 	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
358 #ifdef CONFIG_X86_64
359 	case 8: return (void *)VSYSCALL_ADDR;   /* vsyscall page address */
360 #endif
361 	default: return NULL;
362 	}
363 }
364 
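/*
 * The bpf_testmod_fentry_test*() functions mirror the kernel's
 * bpf_fentry_test*() helpers with varying argument counts and types.
 * bpf_testmod_test_read() calls them with known arguments and sets
 * bpf_testmod_fentry_ok only if every result matches, so fentry/fexit
 * selftests can verify argument passing into a module.
 */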
365 noinline int bpf_testmod_fentry_test1(int a)
366 {
367 	return a + 1;
368 }
369 
370 noinline int bpf_testmod_fentry_test2(int a, u64 b)
371 {
372 	return a + b;
373 }
374 
375 noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
376 {
377 	return a + b + c;
378 }
379 
380 noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
381 				      void *e, char f, int g)
382 {
383 	return a + (long)b + c + d + (long)e + f + g;
384 }
385 
386 noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
387 				       void *e, char f, int g,
388 				       unsigned int h, long i, __u64 j,
389 				       unsigned long k)
390 {
391 	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
392 }
393 
394 int bpf_testmod_fentry_ok;
395 
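/*
 * .read handler for the /sys/kernel/bpf_testmod file. Reading the file calls
 * the test hooks and tracepoints above so that attached BPF programs get
 * exercised, then always fails with -EIO. A minimal sketch of triggering it
 * from user space (illustrative only):
 *
 *	int fd = open("/sys/kernel/bpf_testmod", O_RDONLY);
 *	char buf[32];
 *
 *	read(fd, buf, sizeof(buf));	// expected to fail with EIO
 *	close(fd);
 */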
396 noinline ssize_t
397 bpf_testmod_test_read(struct file *file, struct kobject *kobj,
398 		      const struct bin_attribute *bin_attr,
399 		      char *buf, loff_t off, size_t len)
400 {
401 	struct bpf_testmod_test_read_ctx ctx = {
402 		.buf = buf,
403 		.off = off,
404 		.len = len,
405 	};
406 	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
407 	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
408 	struct bpf_testmod_struct_arg_3 *struct_arg3;
409 	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
410 	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
411 	int i = 1;
412 
413 	while (bpf_testmod_return_ptr(i))
414 		i++;
415 
416 	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
417 	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
418 	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
419 	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
420 	(void)bpf_testmod_test_struct_arg_5();
421 	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
422 					    (void *)20, struct_arg4);
423 	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
424 					    (void *)20, struct_arg4, 23);
425 	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
426 					    21, 22, struct_arg5, 27);
427 
428 	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
429 
430 	(void)trace_bpf_testmod_test_raw_tp_null_tp(NULL);
431 
432 	bpf_testmod_test_struct_ops3();
433 
434 	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
435 				sizeof(int)), GFP_KERNEL);
436 	if (struct_arg3 != NULL) {
437 		struct_arg3->b[0] = 1;
438 		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
439 		kfree(struct_arg3);
440 	}
441 
442 	/* This is always true. Use the check to make sure the compiler
443 	 * doesn't remove bpf_testmod_loop_test.
444 	 */
445 	if (bpf_testmod_loop_test(101) > 100)
446 		trace_bpf_testmod_test_read(current, &ctx);
447 
448 	trace_bpf_testmod_test_nullable_bare_tp(NULL);
449 
450 	/* Magic number to enable writable tp */
451 	if (len == 64) {
452 		struct bpf_testmod_test_writable_ctx writable = {
453 			.val = 1024,
454 		};
455 		trace_bpf_testmod_test_writable_bare_tp(&writable);
456 		if (writable.early_ret)
457 			return snprintf(buf, len, "%d\n", writable.val);
458 	}
459 
460 	if (bpf_testmod_fentry_test1(1) != 2 ||
461 	    bpf_testmod_fentry_test2(2, 3) != 5 ||
462 	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
463 	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
464 			21, 22) != 133 ||
465 	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
466 			21, 22, 23, 24, 25, 26) != 231)
467 		goto out;
468 
469 	bpf_testmod_fentry_ok = 1;
470 out:
471 	return -EIO; /* always fail */
472 }
473 EXPORT_SYMBOL(bpf_testmod_test_read);
474 ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
475 
476 noinline ssize_t
477 bpf_testmod_test_write(struct file *file, struct kobject *kobj,
478 		      const struct bin_attribute *bin_attr,
479 		      char *buf, loff_t off, size_t len)
480 {
481 	struct bpf_testmod_test_write_ctx ctx = {
482 		.buf = buf,
483 		.off = off,
484 		.len = len,
485 	};
486 
487 	trace_bpf_testmod_test_write_bare_tp(current, &ctx);
488 
489 	return -EIO; /* always fail */
490 }
491 EXPORT_SYMBOL(bpf_testmod_test_write);
492 ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);
493 
494 noinline int bpf_fentry_shadow_test(int a)
495 {
496 	return a + 2;
497 }
498 EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
499 
500 __bpf_hook_end();
501 
502 static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
503 	.attr = { .name = "bpf_testmod", .mode = 0666, },
504 	.read = bpf_testmod_test_read,
505 	.write = bpf_testmod_test_write,
506 };
507 
508 /* The bpf_testmod_uprobe sysfs attribute is currently enabled for x86_64
509  * only; see the test_uretprobe_regs_change test.
510  */
511 #ifdef __x86_64__
512 
513 static int
514 uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
515 		   struct pt_regs *regs, __u64 *data)
516 
517 {
518 	regs->ax  = 0x12345678deadbeef;
519 	regs->cx  = 0x87654321feebdaed;
520 	regs->r11 = (u64) -1;
521 	return true;
522 }
523 
524 struct testmod_uprobe {
525 	struct path path;
526 	struct uprobe *uprobe;
527 	struct uprobe_consumer consumer;
528 };
529 
530 static DEFINE_MUTEX(testmod_uprobe_mutex);
531 
532 static struct testmod_uprobe uprobe = {
533 	.consumer.ret_handler = uprobe_ret_handler,
534 };
535 
536 static int testmod_register_uprobe(loff_t offset)
537 {
538 	int err = -EBUSY;
539 
540 	if (uprobe.uprobe)
541 		return -EBUSY;
542 
543 	mutex_lock(&testmod_uprobe_mutex);
544 
545 	if (uprobe.uprobe)
546 		goto out;
547 
548 	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
549 	if (err)
550 		goto out;
551 
552 	uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry),
553 					offset, 0, &uprobe.consumer);
554 	if (IS_ERR(uprobe.uprobe)) {
555 		err = PTR_ERR(uprobe.uprobe);
556 		path_put(&uprobe.path);
557 		uprobe.uprobe = NULL;
558 	}
559 out:
560 	mutex_unlock(&testmod_uprobe_mutex);
561 	return err;
562 }
563 
564 static void testmod_unregister_uprobe(void)
565 {
566 	mutex_lock(&testmod_uprobe_mutex);
567 
568 	if (uprobe.uprobe) {
569 		uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
570 		uprobe_unregister_sync();
571 		path_put(&uprobe.path);
572 		uprobe.uprobe = NULL;
573 	}
574 
575 	mutex_unlock(&testmod_uprobe_mutex);
576 }
577 
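/*
 * .write handler for the /sys/kernel/bpf_testmod_uprobe file. Writing a
 * non-zero offset registers a return-probe consumer on /proc/self/exe at
 * that offset using the ret_handler above; writing 0 unregisters it.
 * A minimal sketch of the expected user-space usage (illustrative only):
 *
 *	int fd = open("/sys/kernel/bpf_testmod_uprobe", O_WRONLY);
 *
 *	dprintf(fd, "%lu", uprobe_offset);	// register at uprobe_offset
 *	...
 *	dprintf(fd, "0");			// unregister
 *	close(fd);
 */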
578 static ssize_t
579 bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
580 			 const struct bin_attribute *bin_attr,
581 			 char *buf, loff_t off, size_t len)
582 {
583 	unsigned long offset = 0;
584 	int err = 0;
585 
586 	if (kstrtoul(buf, 0, &offset))
587 		return -EINVAL;
588 
589 	if (offset)
590 		err = testmod_register_uprobe(offset);
591 	else
592 		testmod_unregister_uprobe();
593 
594 	return err ?: strlen(buf);
595 }
596 
597 static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
598 	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
599 	.write = bpf_testmod_uprobe_write,
600 };
601 
602 static int register_bpf_testmod_uprobe(void)
603 {
604 	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
605 }
606 
607 static void unregister_bpf_testmod_uprobe(void)
608 {
609 	testmod_unregister_uprobe();
610 	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
611 }
612 
613 #else
614 static int register_bpf_testmod_uprobe(void)
615 {
616 	return 0;
617 }
618 
619 static void unregister_bpf_testmod_uprobe(void) { }
620 #endif
621 
622 BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
623 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
624 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
625 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
626 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
627 BTF_ID_FLAGS(func, bpf_kfunc_common_test)
628 BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
629 BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
630 BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
631 BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE)
632 BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS)
633 BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS)
634 BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS)
635 BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
636 BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test, KF_RET_NULL | KF_RCU_PROTECTED)
637 BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test_nostruct, KF_RET_NULL | KF_RCU_PROTECTED)
638 BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
639 BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
640 BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1)
641 BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2)
642 BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)
643 
644 BTF_ID_LIST(bpf_testmod_dtor_ids)
645 BTF_ID(struct, bpf_testmod_ctx)
646 BTF_ID(func, bpf_testmod_ctx_release)
647 
648 static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
649 	.owner = THIS_MODULE,
650 	.set   = &bpf_testmod_common_kfunc_ids,
651 };
652 
653 __bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
654 {
655 	return a + b + c + d;
656 }
657 
658 __bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
659 {
660 	return a + b;
661 }
662 
663 __bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
664 {
665 	return sk;
666 }
667 
668 __bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
669 {
670 	/* Provoke the compiler to assume that the caller has sign-extended a,
671 	 * b and c on platforms where this is required (e.g. s390x).
672 	 */
673 	return (long)a + (long)b + (long)c + d;
674 }
675 
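/*
 * prog_test_struct is a self-referential, refcounted object handed out by
 * bpf_kfunc_call_test_acquire(). bpf_testmod_exit() waits until its refcount
 * drops back to 1 before the module is allowed to unload.
 */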
676 static struct prog_test_ref_kfunc prog_test_struct = {
677 	.a = 42,
678 	.b = 108,
679 	.next = &prog_test_struct,
680 	.cnt = REFCOUNT_INIT(1),
681 };
682 
683 __bpf_kfunc struct prog_test_ref_kfunc *
684 bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
685 {
686 	refcount_inc(&prog_test_struct.cnt);
687 	return &prog_test_struct;
688 }
689 
690 __bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
691 {
692 	WARN_ON_ONCE(1);
693 }
694 
695 __bpf_kfunc struct prog_test_member *
696 bpf_kfunc_call_memb_acquire(void)
697 {
698 	WARN_ON_ONCE(1);
699 	return NULL;
700 }
701 
702 __bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
703 {
704 	WARN_ON_ONCE(1);
705 }
706 
707 static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
708 {
709 	if (size > 2 * sizeof(int))
710 		return NULL;
711 
712 	return (int *)p;
713 }
714 
715 __bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
716 						  const int rdwr_buf_size)
717 {
718 	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
719 }
720 
721 __bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
722 						    const int rdonly_buf_size)
723 {
724 	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
725 }
726 
727 /* The next two can't really be used for testing, except to ensure that
728  * the verifier rejects the call.
729  * Acquire functions must return struct pointers, so these are expected
730  * to fail.
731  */
732 __bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
733 						    const int rdonly_buf_size)
734 {
735 	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
736 }
737 
738 __bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
739 {
740 }
741 
742 __bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
743 {
744 }
745 
746 __bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
747 {
748 }
749 
750 __bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
751 {
752 }
753 
754 __bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
755 {
756 }
757 
758 __bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
759 {
760 }
761 
762 __bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
763 {
764 }
765 
766 __bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
767 {
768 }
769 
770 __bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
771 {
772 }
773 
774 __bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
775 {
776 }
777 
778 __bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
779 {
780 	/* p != NULL, but p->cnt could be 0 */
781 }
782 
783 __bpf_kfunc void bpf_kfunc_call_test_destructive(void)
784 {
785 }
786 
787 __bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
788 {
789 	return arg;
790 }
791 
792 __bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
793 {
794 }
795 
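/*
 * The socket kfuncs below share a pattern: each takes sock_lock and operates
 * on the module-global socket created by bpf_kfunc_init_sock(). All of them
 * are registered as KF_SLEEPABLE. A typical call order from a sleepable BPF
 * program (illustrative only):
 *
 *	bpf_kfunc_init_sock(&init_args);
 *	bpf_kfunc_call_kernel_bind(&addr_args);
 *	bpf_kfunc_call_kernel_listen();
 *	bpf_kfunc_call_kernel_getsockname(&addr_args);
 *	bpf_kfunc_close_sock();
 */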
796 __bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
797 {
798 	int proto;
799 	int err;
800 
801 	mutex_lock(&sock_lock);
802 
803 	if (sock) {
804 		pr_err("%s called without releasing old sock", __func__);
805 		err = -EPERM;
806 		goto out;
807 	}
808 
809 	switch (args->af) {
810 	case AF_INET:
811 	case AF_INET6:
812 		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
813 		break;
814 	case AF_UNIX:
815 		proto = PF_UNIX;
816 		break;
817 	default:
818 		pr_err("invalid address family %d\n", args->af);
819 		err = -EINVAL;
820 		goto out;
821 	}
822 
823 	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
824 			       proto, &sock);
825 
826 	if (!err)
827 		/* Set timeout for call to kernel_connect() to prevent it from hanging,
828 		 * and consider the connection attempt failed if it returns
829 		 * -EINPROGRESS.
830 		 */
831 		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
832 out:
833 	mutex_unlock(&sock_lock);
834 
835 	return err;
836 }
837 
838 __bpf_kfunc void bpf_kfunc_close_sock(void)
839 {
840 	mutex_lock(&sock_lock);
841 
842 	if (sock) {
843 		sock_release(sock);
844 		sock = NULL;
845 	}
846 
847 	mutex_unlock(&sock_lock);
848 }
849 
850 __bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
851 {
852 	int err;
853 
854 	if (args->addrlen > sizeof(args->addr))
855 		return -EINVAL;
856 
857 	mutex_lock(&sock_lock);
858 
859 	if (!sock) {
860 		pr_err("%s called without initializing sock", __func__);
861 		err = -EPERM;
862 		goto out;
863 	}
864 
865 	err = kernel_connect(sock, (struct sockaddr *)&args->addr,
866 			     args->addrlen, 0);
867 out:
868 	mutex_unlock(&sock_lock);
869 
870 	return err;
871 }
872 
873 __bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
874 {
875 	int err;
876 
877 	if (args->addrlen > sizeof(args->addr))
878 		return -EINVAL;
879 
880 	mutex_lock(&sock_lock);
881 
882 	if (!sock) {
883 		pr_err("%s called without initializing sock", __func__);
884 		err = -EPERM;
885 		goto out;
886 	}
887 
888 	err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
889 out:
890 	mutex_unlock(&sock_lock);
891 
892 	return err;
893 }
894 
895 __bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
896 {
897 	int err;
898 
899 	mutex_lock(&sock_lock);
900 
901 	if (!sock) {
902 		pr_err("%s called without initializing sock", __func__);
903 		err = -EPERM;
904 		goto out;
905 	}
906 
907 	err = kernel_listen(sock, 128);
908 out:
909 	mutex_unlock(&sock_lock);
910 
911 	return err;
912 }
913 
914 __bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
915 {
916 	struct msghdr msg = {
917 		.msg_name	= &args->addr.addr,
918 		.msg_namelen	= args->addr.addrlen,
919 	};
920 	struct kvec iov;
921 	int err;
922 
923 	if (args->addr.addrlen > sizeof(args->addr.addr) ||
924 	    args->msglen > sizeof(args->msg))
925 		return -EINVAL;
926 
927 	iov.iov_base = args->msg;
928 	iov.iov_len  = args->msglen;
929 
930 	mutex_lock(&sock_lock);
931 
932 	if (!sock) {
933 		pr_err("%s called without initializing sock", __func__);
934 		err = -EPERM;
935 		goto out;
936 	}
937 
938 	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
939 	args->addr.addrlen = msg.msg_namelen;
940 out:
941 	mutex_unlock(&sock_lock);
942 
943 	return err;
944 }
945 
946 __bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
947 {
948 	struct msghdr msg = {
949 		.msg_name	= &args->addr.addr,
950 		.msg_namelen	= args->addr.addrlen,
951 	};
952 	struct kvec iov;
953 	int err;
954 
955 	if (args->addr.addrlen > sizeof(args->addr.addr) ||
956 	    args->msglen > sizeof(args->msg))
957 		return -EINVAL;
958 
959 	iov.iov_base = args->msg;
960 	iov.iov_len  = args->msglen;
961 
962 	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
963 	mutex_lock(&sock_lock);
964 
965 	if (!sock) {
966 		pr_err("%s called without initializing sock", __func__);
967 		err = -EPERM;
968 		goto out;
969 	}
970 
971 	err = sock_sendmsg(sock, &msg);
972 	args->addr.addrlen = msg.msg_namelen;
973 out:
974 	mutex_unlock(&sock_lock);
975 
976 	return err;
977 }
978 
979 __bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
980 {
981 	int err;
982 
983 	mutex_lock(&sock_lock);
984 
985 	if (!sock) {
986 		pr_err("%s called without initializing sock", __func__);
987 		err = -EPERM;
988 		goto out;
989 	}
990 
991 	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
992 	if (err < 0)
993 		goto out;
994 
995 	args->addrlen = err;
996 	err = 0;
997 out:
998 	mutex_unlock(&sock_lock);
999 
1000 	return err;
1001 }
1002 
1003 __bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
1004 {
1005 	int err;
1006 
1007 	mutex_lock(&sock_lock);
1008 
1009 	if (!sock) {
1010 		pr_err("%s called without initializing sock", __func__);
1011 		err = -EPERM;
1012 		goto out;
1013 	}
1014 
1015 	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
1016 	if (err < 0)
1017 		goto out;
1018 
1019 	args->addrlen = err;
1020 	err = 0;
1021 out:
1022 	mutex_unlock(&sock_lock);
1023 
1024 	return err;
1025 }
1026 
1027 static DEFINE_MUTEX(st_ops_mutex);
1028 static struct bpf_testmod_st_ops *st_ops;
1029 
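/*
 * The bpf_kfunc_st_ops_test_*() kfuncs dispatch into whichever
 * bpf_testmod_st_ops implementation is currently registered (guarded by
 * st_ops_mutex), returning -1 if none is registered. This lets a test
 * program invoke the struct_ops callbacks, including the prologue/epilogue
 * code generated for them further below.
 */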
1030 __bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
1031 {
1032 	int ret = -1;
1033 
1034 	mutex_lock(&st_ops_mutex);
1035 	if (st_ops && st_ops->test_prologue)
1036 		ret = st_ops->test_prologue(args);
1037 	mutex_unlock(&st_ops_mutex);
1038 
1039 	return ret;
1040 }
1041 
1042 __bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
1043 {
1044 	int ret = -1;
1045 
1046 	mutex_lock(&st_ops_mutex);
1047 	if (st_ops && st_ops->test_epilogue)
1048 		ret = st_ops->test_epilogue(args);
1049 	mutex_unlock(&st_ops_mutex);
1050 
1051 	return ret;
1052 }
1053 
1054 __bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
1055 {
1056 	int ret = -1;
1057 
1058 	mutex_lock(&st_ops_mutex);
1059 	if (st_ops && st_ops->test_pro_epilogue)
1060 		ret = st_ops->test_pro_epilogue(args);
1061 	mutex_unlock(&st_ops_mutex);
1062 
1063 	return ret;
1064 }
1065 
1066 __bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
1067 {
1068 	args->a += 10;
1069 	return args->a;
1070 }
1071 
1072 __bpf_kfunc int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id);
1073 
1074 BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
1075 BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
1076 BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
1077 BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
1078 BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
1079 BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
1080 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
1081 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
1082 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
1083 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
1084 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
1085 BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
1086 BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
1087 BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
1088 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
1089 BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
1090 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
1091 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
1092 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
1093 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
1094 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
1095 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
1096 BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
1097 BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
1098 BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
1099 BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
1100 BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
1101 BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
1102 BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
1103 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
1104 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
1105 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
1106 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
1107 BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
1108 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
1109 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
1110 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
1111 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
1112 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
1113 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
1114 BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1, KF_TRUSTED_ARGS)
1115 BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
1116 
1117 static int bpf_testmod_ops_init(struct btf *btf)
1118 {
1119 	return 0;
1120 }
1121 
1122 static bool bpf_testmod_ops_is_valid_access(int off, int size,
1123 					    enum bpf_access_type type,
1124 					    const struct bpf_prog *prog,
1125 					    struct bpf_insn_access_aux *info)
1126 {
1127 	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
1128 }
1129 
1130 static int bpf_testmod_ops_init_member(const struct btf_type *t,
1131 				       const struct btf_member *member,
1132 				       void *kdata, const void *udata)
1133 {
1134 	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
1135 		/* For data fields, this function has to copy the value and
1136 		 * return 1 to indicate that the data has been handled by the
1137 		 * struct_ops type; otherwise the verifier will reject the map
1138 		 * if the value of the data field is not zero.
1139 		 */
1140 		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
1141 		return 1;
1142 	}
1143 	return 0;
1144 }
1145 
1146 static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
1147 	.owner = THIS_MODULE,
1148 	.set   = &bpf_testmod_check_kfunc_ids,
1149 };
1150 
1151 static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
1152 	.get_func_proto	 = bpf_base_func_proto,
1153 	.is_valid_access = bpf_testmod_ops_is_valid_access,
1154 };
1155 
1156 static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = {
1157 	.is_valid_access = bpf_testmod_ops_is_valid_access,
1158 };
1159 
1160 static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
1161 {
1162 	struct bpf_testmod_ops *ops = kdata;
1163 
1164 	if (ops->test_1)
1165 		ops->test_1();
1166 	/* Some test cases (e.g. struct_ops_maybe_null) may not have test_2
1167 	 * initialized, so we need to check for NULL.
1168 	 */
1169 	if (ops->test_2)
1170 		ops->test_2(4, ops->data);
1171 
1172 	return 0;
1173 }
1174 
1175 static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
1176 {
1177 }
1178 
1179 static int bpf_testmod_test_1(void)
1180 {
1181 	return 0;
1182 }
1183 
1184 static void bpf_testmod_test_2(int a, int b)
1185 {
1186 }
1187 
1188 static int bpf_testmod_tramp(int value)
1189 {
1190 	return 0;
1191 }
1192 
1193 static int bpf_testmod_ops__test_maybe_null(int dummy,
1194 					    struct task_struct *task__nullable)
1195 {
1196 	return 0;
1197 }
1198 
1199 static int bpf_testmod_ops__test_refcounted(int dummy,
1200 					    struct task_struct *task__ref)
1201 {
1202 	return 0;
1203 }
1204 
1205 static struct task_struct *
1206 bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref,
1207 				      struct cgroup *cgrp)
1208 {
1209 	return NULL;
1210 }
1211 
1212 static struct bpf_testmod_ops __bpf_testmod_ops = {
1213 	.test_1 = bpf_testmod_test_1,
1214 	.test_2 = bpf_testmod_test_2,
1215 	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
1216 	.test_refcounted = bpf_testmod_ops__test_refcounted,
1217 	.test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr,
1218 };
1219 
1220 struct bpf_struct_ops bpf_bpf_testmod_ops = {
1221 	.verifier_ops = &bpf_testmod_verifier_ops,
1222 	.init = bpf_testmod_ops_init,
1223 	.init_member = bpf_testmod_ops_init_member,
1224 	.reg = bpf_dummy_reg,
1225 	.unreg = bpf_dummy_unreg,
1226 	.cfi_stubs = &__bpf_testmod_ops,
1227 	.name = "bpf_testmod_ops",
1228 	.owner = THIS_MODULE,
1229 };
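/*
 * A minimal sketch of how a BPF object implements this struct_ops from the
 * BPF side (illustrative only, not part of this module):
 *
 *	SEC("struct_ops/test_1")
 *	int BPF_PROG(test_1)
 *	{
 *		return 0;
 *	}
 *
 *	SEC(".struct_ops.link")
 *	struct bpf_testmod_ops testmod_1 = {
 *		.test_1 = (void *)test_1,
 *	};
 */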
1230 
1231 static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
1232 {
1233 	struct bpf_testmod_ops2 *ops = kdata;
1234 
1235 	ops->test_1();
1236 	return 0;
1237 }
1238 
1239 static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
1240 	.test_1 = bpf_testmod_test_1,
1241 };
1242 
1243 struct bpf_struct_ops bpf_testmod_ops2 = {
1244 	.verifier_ops = &bpf_testmod_verifier_ops,
1245 	.init = bpf_testmod_ops_init,
1246 	.init_member = bpf_testmod_ops_init_member,
1247 	.reg = bpf_dummy_reg2,
1248 	.unreg = bpf_dummy_unreg,
1249 	.cfi_stubs = &__bpf_testmod_ops2,
1250 	.name = "bpf_testmod_ops2",
1251 	.owner = THIS_MODULE,
1252 };
1253 
1254 static int st_ops3_reg(void *kdata, struct bpf_link *link)
1255 {
1256 	int err = 0;
1257 
1258 	mutex_lock(&st_ops_mutex);
1259 	if (st_ops3) {
1260 		pr_err("st_ops has already been registered\n");
1261 		err = -EEXIST;
1262 		goto unlock;
1263 	}
1264 	st_ops3 = kdata;
1265 
1266 unlock:
1267 	mutex_unlock(&st_ops_mutex);
1268 	return err;
1269 }
1270 
1271 static void st_ops3_unreg(void *kdata, struct bpf_link *link)
1272 {
1273 	mutex_lock(&st_ops_mutex);
1274 	st_ops3 = NULL;
1275 	mutex_unlock(&st_ops_mutex);
1276 }
1277 
1278 static void test_1_recursion_detected(struct bpf_prog *prog)
1279 {
1280 	struct bpf_prog_stats *stats;
1281 
1282 	stats = this_cpu_ptr(prog->stats);
1283 	printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu\n",
1284 	       u64_stats_read(&stats->misses));
1285 }
1286 
1287 static int st_ops3_check_member(const struct btf_type *t,
1288 				const struct btf_member *member,
1289 				const struct bpf_prog *prog)
1290 {
1291 	u32 moff = __btf_member_bit_offset(t, member) / 8;
1292 
1293 	switch (moff) {
1294 	case offsetof(struct bpf_testmod_ops3, test_1):
1295 		prog->aux->priv_stack_requested = true;
1296 		prog->aux->recursion_detected = test_1_recursion_detected;
1297 		fallthrough;
1298 	default:
1299 		break;
1300 	}
1301 	return 0;
1302 }
1303 
1304 struct bpf_struct_ops bpf_testmod_ops3 = {
1305 	.verifier_ops = &bpf_testmod_verifier_ops3,
1306 	.init = bpf_testmod_ops_init,
1307 	.init_member = bpf_testmod_ops_init_member,
1308 	.reg = st_ops3_reg,
1309 	.unreg = st_ops3_unreg,
1310 	.check_member = st_ops3_check_member,
1311 	.cfi_stubs = &__bpf_testmod_ops3,
1312 	.name = "bpf_testmod_ops3",
1313 	.owner = THIS_MODULE,
1314 };
1315 
1316 static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
1317 {
1318 	return 0;
1319 }
1320 
1321 static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
1322 {
1323 	return 0;
1324 }
1325 
1326 static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
1327 {
1328 	return 0;
1329 }
1330 
1331 static int bpf_cgroup_from_id_id;
1332 static int bpf_cgroup_release_id;
1333 
1334 static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct_write,
1335 					  const struct bpf_prog *prog)
1336 {
1337 	struct bpf_insn *insn = insn_buf;
1338 
1339 	/* r8 = r1; // r8 will be "u64 *ctx".
1340 	 * r1 = 0;
1341 	 * r0 = bpf_cgroup_from_id(r1);
1342 	 * if r0 != 0 goto pc+5;
1343 	 * r6 = r8[0]; // r6 will be "struct st_ops *args".
1344 	 * r7 = r6->a;
1345 	 * r7 += 1000;
1346 	 * r6->a = r7;
1347 	 * goto pc+2;
1348 	 * r1 = r0;
1349 	 * bpf_cgroup_release(r1);
1350 	 * r1 = r8;
1351 	 */
1352 	*insn++ = BPF_MOV64_REG(BPF_REG_8, BPF_REG_1);
1353 	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
1354 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
1355 	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 5);
1356 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_8, 0);
1357 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
1358 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
1359 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
1360 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
1361 	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
1362 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
1363 	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8);
1364 	*insn++ = prog->insnsi[0];
1365 
1366 	return insn - insn_buf;
1367 }
1368 
1369 static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
1370 					  s16 ctx_stack_off)
1371 {
1372 	struct bpf_insn *insn = insn_buf;
1373 
1374 	/* r1 = 0;
1375 	 * r6 = 0;
1376 	 * r0 = bpf_cgroup_from_id(r1);
1377 	 * if r0 != 0 goto pc+6;
1378 	 * r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
1379 	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
1380 	 * r6 = r1->a;
1381 	 * r6 += 10000;
1382 	 * r1->a = r6;
1383 	 * goto pc+2
1384 	 * r1 = r0;
1385 	 * bpf_cgroup_release(r1);
1386 	 * r0 = r6;
1387 	 * r0 *= 2;
1388 	 * BPF_EXIT;
1389 	 */
1390 	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
1391 	*insn++ = BPF_MOV64_IMM(BPF_REG_6, 0);
1392 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
1393 	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 6);
1394 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
1395 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
1396 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
1397 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
1398 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
1399 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
1400 	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
1401 	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
1402 	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
1403 	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
1404 	*insn++ = BPF_EXIT_INSN();
1405 
1406 	return insn - insn_buf;
1407 }
1408 
1409 #define KFUNC_PRO_EPI_PREFIX "test_kfunc_"
1410 static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
1411 			       const struct bpf_prog *prog)
1412 {
1413 	struct bpf_insn *insn = insn_buf;
1414 
1415 	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
1416 	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
1417 		return 0;
1418 
1419 	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
1420 		return st_ops_gen_prologue_with_kfunc(insn_buf, direct_write, prog);
1421 
1422 	/* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx".
1423 	 * r7 = r6->a;
1424 	 * r7 += 1000;
1425 	 * r6->a = r7;
1426 	 */
1427 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
1428 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
1429 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
1430 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
1431 	*insn++ = prog->insnsi[0];
1432 
1433 	return insn - insn_buf;
1434 }
1435 
1436 static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
1437 			       s16 ctx_stack_off)
1438 {
1439 	struct bpf_insn *insn = insn_buf;
1440 
1441 	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
1442 	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
1443 		return 0;
1444 
1445 	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
1446 		return st_ops_gen_epilogue_with_kfunc(insn_buf, prog, ctx_stack_off);
1447 
1448 	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
1449 	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
1450 	 * r6 = r1->a;
1451 	 * r6 += 10000;
1452 	 * r1->a = r6;
1453 	 * r0 = r6;
1454 	 * r0 *= 2;
1455 	 * BPF_EXIT;
1456 	 */
1457 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
1458 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
1459 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
1460 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
1461 	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
1462 	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
1463 	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
1464 	*insn++ = BPF_EXIT_INSN();
1465 
1466 	return insn - insn_buf;
1467 }
1468 
1469 static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
1470 				    const struct bpf_reg_state *reg,
1471 				    int off, int size)
1472 {
1473 	if (off < 0 || off + size > sizeof(struct st_ops_args))
1474 		return -EACCES;
1475 	return 0;
1476 }
1477 
1478 static const struct bpf_verifier_ops st_ops_verifier_ops = {
1479 	.is_valid_access = bpf_testmod_ops_is_valid_access,
1480 	.btf_struct_access = st_ops_btf_struct_access,
1481 	.gen_prologue = st_ops_gen_prologue,
1482 	.gen_epilogue = st_ops_gen_epilogue,
1483 	.get_func_proto = bpf_base_func_proto,
1484 };
1485 
1486 static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
1487 	.test_prologue = bpf_test_mod_st_ops__test_prologue,
1488 	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
1489 	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
1490 };
1491 
1492 static int st_ops_reg(void *kdata, struct bpf_link *link)
1493 {
1494 	int err = 0;
1495 
1496 	mutex_lock(&st_ops_mutex);
1497 	if (st_ops) {
1498 		pr_err("st_ops has already been registered\n");
1499 		err = -EEXIST;
1500 		goto unlock;
1501 	}
1502 	st_ops = kdata;
1503 
1504 unlock:
1505 	mutex_unlock(&st_ops_mutex);
1506 	return err;
1507 }
1508 
1509 static void st_ops_unreg(void *kdata, struct bpf_link *link)
1510 {
1511 	mutex_lock(&st_ops_mutex);
1512 	st_ops = NULL;
1513 	mutex_unlock(&st_ops_mutex);
1514 }
1515 
1516 static int st_ops_init(struct btf *btf)
1517 {
1518 	struct btf *kfunc_btf;
1519 
1520 	bpf_cgroup_from_id_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf);
1521 	bpf_cgroup_release_id = bpf_find_btf_id("bpf_cgroup_release", BTF_KIND_FUNC, &kfunc_btf);
1522 	if (bpf_cgroup_from_id_id < 0 || bpf_cgroup_release_id < 0)
1523 		return -EINVAL;
1524 
1525 	return 0;
1526 }
1527 
1528 static int st_ops_init_member(const struct btf_type *t,
1529 			      const struct btf_member *member,
1530 			      void *kdata, const void *udata)
1531 {
1532 	return 0;
1533 }
1534 
1535 static struct bpf_struct_ops testmod_st_ops = {
1536 	.verifier_ops = &st_ops_verifier_ops,
1537 	.init = st_ops_init,
1538 	.init_member = st_ops_init_member,
1539 	.reg = st_ops_reg,
1540 	.unreg = st_ops_unreg,
1541 	.cfi_stubs = &st_ops_cfi_stubs,
1542 	.name = "bpf_testmod_st_ops",
1543 	.owner = THIS_MODULE,
1544 };
1545 
1546 struct hlist_head multi_st_ops_list;
1547 static DEFINE_SPINLOCK(multi_st_ops_lock);
1548 
1549 static int multi_st_ops_init(struct btf *btf)
1550 {
1551 	spin_lock_init(&multi_st_ops_lock);
1552 	INIT_HLIST_HEAD(&multi_st_ops_list);
1553 
1554 	return 0;
1555 }
1556 
1557 static int multi_st_ops_init_member(const struct btf_type *t,
1558 				    const struct btf_member *member,
1559 				    void *kdata, const void *udata)
1560 {
1561 	return 0;
1562 }
1563 
1564 static struct bpf_testmod_multi_st_ops *multi_st_ops_find_nolock(u32 id)
1565 {
1566 	struct bpf_testmod_multi_st_ops *st_ops;
1567 
1568 	hlist_for_each_entry(st_ops, &multi_st_ops_list, node) {
1569 		if (st_ops->id == id)
1570 			return st_ops;
1571 	}
1572 
1573 	return NULL;
1574 }
1575 
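/*
 * Unlike the single-instance st_ops above, several bpf_testmod_multi_st_ops
 * maps can be registered at the same time. Each instance is keyed by the id
 * assigned in multi_st_ops_reg() and kept on multi_st_ops_list; this kfunc
 * looks the instance up by id and calls its test_1() callback.
 */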
1576 int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id)
1577 {
1578 	struct bpf_testmod_multi_st_ops *st_ops;
1579 	unsigned long flags;
1580 	int ret = -1;
1581 
1582 	spin_lock_irqsave(&multi_st_ops_lock, flags);
1583 	st_ops = multi_st_ops_find_nolock(id);
1584 	if (st_ops)
1585 		ret = st_ops->test_1(args);
1586 	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1587 
1588 	return ret;
1589 }
1590 
1591 static int multi_st_ops_reg(void *kdata, struct bpf_link *link)
1592 {
1593 	struct bpf_testmod_multi_st_ops *st_ops =
1594 		(struct bpf_testmod_multi_st_ops *)kdata;
1595 	unsigned long flags;
1596 	int err = 0;
1597 	u32 id;
1598 
1599 	if (!st_ops->test_1)
1600 		return -EINVAL;
1601 
1602 	id = bpf_struct_ops_id(kdata);
1603 
1604 	spin_lock_irqsave(&multi_st_ops_lock, flags);
1605 	if (multi_st_ops_find_nolock(id)) {
1606 		pr_err("multi_st_ops(id:%d) has already been registered\n", id);
1607 		err = -EEXIST;
1608 		goto unlock;
1609 	}
1610 
1611 	st_ops->id = id;
1612 	hlist_add_head(&st_ops->node, &multi_st_ops_list);
1613 unlock:
1614 	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1615 
1616 	return err;
1617 }
1618 
1619 static void multi_st_ops_unreg(void *kdata, struct bpf_link *link)
1620 {
1621 	struct bpf_testmod_multi_st_ops *st_ops;
1622 	unsigned long flags;
1623 	u32 id;
1624 
1625 	id = bpf_struct_ops_id(kdata);
1626 
1627 	spin_lock_irqsave(&multi_st_ops_lock, flags);
1628 	st_ops = multi_st_ops_find_nolock(id);
1629 	if (st_ops)
1630 		hlist_del(&st_ops->node);
1631 	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1632 }
1633 
1634 static int bpf_testmod_multi_st_ops__test_1(struct st_ops_args *args)
1635 {
1636 	return 0;
1637 }
1638 
1639 static struct bpf_testmod_multi_st_ops multi_st_ops_cfi_stubs = {
1640 	.test_1 = bpf_testmod_multi_st_ops__test_1,
1641 };
1642 
1643 struct bpf_struct_ops testmod_multi_st_ops = {
1644 	.verifier_ops = &bpf_testmod_verifier_ops,
1645 	.init = multi_st_ops_init,
1646 	.init_member = multi_st_ops_init_member,
1647 	.reg = multi_st_ops_reg,
1648 	.unreg = multi_st_ops_unreg,
1649 	.cfi_stubs = &multi_st_ops_cfi_stubs,
1650 	.name = "bpf_testmod_multi_st_ops",
1651 	.owner = THIS_MODULE,
1652 };
1653 
1654 extern int bpf_fentry_test1(int a);
1655 
1656 static int bpf_testmod_init(void)
1657 {
1658 	const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
1659 		{
1660 			.btf_id		= bpf_testmod_dtor_ids[0],
1661 			.kfunc_btf_id	= bpf_testmod_dtor_ids[1]
1662 		},
1663 	};
1664 	void **tramp;
1665 	int ret;
1666 
1667 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
1668 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
1669 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
1670 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
1671 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
1672 	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
1673 	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
1674 	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3);
1675 	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
1676 	ret = ret ?: register_bpf_struct_ops(&testmod_multi_st_ops, bpf_testmod_multi_st_ops);
1677 	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
1678 						 ARRAY_SIZE(bpf_testmod_dtors),
1679 						 THIS_MODULE);
1680 	if (ret < 0)
1681 		return ret;
1682 	if (bpf_fentry_test1(0) < 0)
1683 		return -EINVAL;
1684 	sock = NULL;
1685 	mutex_init(&sock_lock);
1686 	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
1687 	if (ret < 0)
1688 		return ret;
1689 	ret = register_bpf_testmod_uprobe();
1690 	if (ret < 0)
1691 		return ret;
1692 
1693 	/* Ensure nothing is between tramp_1..tramp_40 */
1694 	BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) !=
1695 		     offsetofend(struct bpf_testmod_ops, tramp_40));
1696 	tramp = (void **)&__bpf_testmod_ops.tramp_1;
1697 	while (tramp <= (void **)&__bpf_testmod_ops.tramp_40)
1698 		*tramp++ = bpf_testmod_tramp;
1699 
1700 	return 0;
1701 }
1702 
1703 static void bpf_testmod_exit(void)
1704 {
1705 	/* Need to wait for all references to be dropped because
1706 	 * bpf_kfunc_call_test_release(), which currently resides in the kernel,
1707 	 * can be called after bpf_testmod is unloaded. Once the release function
1708 	 * is moved into the module, this wait can be removed.
1709 	 */
1710 	while (refcount_read(&prog_test_struct.cnt) > 1)
1711 		msleep(20);
1712 
1713 	bpf_kfunc_close_sock();
1714 	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
1715 	unregister_bpf_testmod_uprobe();
1716 }
1717 
1718 module_init(bpf_testmod_init);
1719 module_exit(bpf_testmod_exit);
1720 
1721 MODULE_AUTHOR("Andrii Nakryiko");
1722 MODULE_DESCRIPTION("BPF selftests module");
1723 MODULE_LICENSE("Dual BSD/GPL");
1724