// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/nsproxy.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
#include <linux/filter.h>
#include <net/sock.h>
#include <linux/namei.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

#define CONNECT_TIMEOUT_SEC 1

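/* These typedefs are never called directly; they are pulled into the
 * module's BTF via BTF_TYPE_EMIT() in bpf_testmod_test_btf_type_tag_user_1()
 * below so selftests can look up nested function-prototype typedefs.
 */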
typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;
static DEFINE_MUTEX(sock_lock);
static struct socket *sock;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

struct bpf_testmod_struct_arg_5 {
	char a;
	short b;
	int c;
	long d;
};

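/* Everything between __bpf_hook_start() and __bpf_hook_end() is an
 * intentionally global function that BPF selftest programs attach to;
 * the markers silence the missing-prototypes warnings these definitions
 * would otherwise trigger.
 */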
__bpf_hook_start();

noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
			      short g, struct bpf_testmod_struct_arg_5 h, long i)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
		f + g + h.a + h.b + h.c + h.d + i;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

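/* Open-coded iterator kfuncs: bpf_iter_testmod_seq yields 'value' up to 'cnt'
 * times. The new/next/destroy triple is registered below with KF_ITER_NEW,
 * KF_ITER_NEXT | KF_RET_NULL and KF_ITER_DESTROY respectively.
 */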
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	it->cnt = cnt;

	if (cnt < 0)
		return -EINVAL;

	it->value = value;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq* it__iter)
{
	if (it__iter->cnt < 0)
		return 0;

	return val + it__iter->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}

__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
				       struct bpf_dynptr *ptr__nullable)
{
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
{
	return NULL;
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
{
	return NULL;
}

__bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
{
}

__bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
{
}

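/* Refcounted test object: handed out by an acquire kfunc (KF_ACQUIRE |
 * KF_RET_NULL) and dropped by a release kfunc (KF_RELEASE); the final put
 * frees it after an RCU grace period via call_rcu().
 */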
__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
	struct bpf_testmod_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		*err = -ENOMEM;
		return NULL;
	}
	refcount_set(&ctx->usage, 1);

	return ctx;
}

static void testmod_free_cb(struct rcu_head *head)
{
	struct bpf_testmod_ctx *ctx;

	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
	kfree(ctx);
}

__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
{
	if (!ctx)
		return;
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, testmod_free_cb);
}

static struct bpf_testmod_ops3 *st_ops3;

static int bpf_testmod_test_3(void)
{
	return 0;
}

static int bpf_testmod_test_4(void)
{
	return 0;
}

static struct bpf_testmod_ops3 __bpf_testmod_ops3 = {
	.test_1 = bpf_testmod_test_3,
	.test_2 = bpf_testmod_test_4,
};

static void bpf_testmod_test_struct_ops3(void)
{
	if (st_ops3)
		st_ops3->test_1();
}

__bpf_kfunc void bpf_testmod_ops3_call_test_1(void)
{
	st_ops3->test_1();
}

__bpf_kfunc void bpf_testmod_ops3_call_test_2(void)
{
	st_ops3->test_2();
}

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* The primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch them easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

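/* Returns a mix of valid and invalid kernel pointers (user addresses,
 * non-canonical addresses, a tagged pointer, an extable-triggering address)
 * so tracing test programs can exercise dereferencing bad pointers, including
 * the kernel exception-table recovery path.
 */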
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;	/* vsyscall page address */
#endif
	default: return NULL;
	}
}

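/* Simple arithmetic functions used as fentry/fexit attach targets;
 * bpf_testmod_test_read() below calls them with known arguments and sets
 * bpf_testmod_fentry_ok once all return values check out.
 */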
noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

int bpf_testmod_fentry_ok;

noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);
	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
					    21, 22, struct_arg5, 27);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	(void)trace_bpf_testmod_test_raw_tp_null(NULL);

	bpf_testmod_test_struct_ops3();

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	trace_bpf_testmod_test_nullable_bare(NULL);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
			21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
			21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__bpf_hook_end();

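/* Exposed as /sys/kernel/bpf_testmod (kernel_kobj). Reads and writes always
 * fail with -EIO but fire the tracepoints above; e.g. a read(2) with a
 * 64-byte buffer takes the writable-tracepoint path in bpf_testmod_test_read().
 */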
static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};

/* The bpf_testmod_uprobe sysfs attribute is enabled for x86_64 only so far;
 * see the test_uretprobe_regs_change test.
 */
#ifdef __x86_64__

static int
uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
		   struct pt_regs *regs, __u64 *data)
{
	regs->ax  = 0x12345678deadbeef;
	regs->cx  = 0x87654321feebdaed;
	regs->r11 = (u64) -1;
	return true;
}

struct testmod_uprobe {
	struct path path;
	struct uprobe *uprobe;
	struct uprobe_consumer consumer;
};

static DEFINE_MUTEX(testmod_uprobe_mutex);

static struct testmod_uprobe uprobe = {
	.consumer.ret_handler = uprobe_ret_handler,
};

static int testmod_register_uprobe(loff_t offset)
{
	int err = -EBUSY;

	if (uprobe.uprobe)
		return -EBUSY;

	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe)
		goto out;

	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
	if (err)
		goto out;

	uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry),
					offset, 0, &uprobe.consumer);
	if (IS_ERR(uprobe.uprobe)) {
		err = PTR_ERR(uprobe.uprobe);
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}
out:
	mutex_unlock(&testmod_uprobe_mutex);
	return err;
}

static void testmod_unregister_uprobe(void)
{
	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe) {
		uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
		uprobe_unregister_sync();
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}

	mutex_unlock(&testmod_uprobe_mutex);
}

static ssize_t
bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
			 struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t len)
{
	unsigned long offset = 0;
	int err = 0;

	if (kstrtoul(buf, 0, &offset))
		return -EINVAL;

	if (offset)
		err = testmod_register_uprobe(offset);
	else
		testmod_unregister_uprobe();

	return err ?: strlen(buf);
}

static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
	.write = bpf_testmod_uprobe_write,
};

static int register_bpf_testmod_uprobe(void)
{
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

static void unregister_bpf_testmod_uprobe(void)
{
	testmod_unregister_uprobe();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

#else
static int register_bpf_testmod_uprobe(void)
{
	return 0;
}

static void unregister_bpf_testmod_uprobe(void) { }
#endif

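/* Common kfuncs, registered below for all program types (BPF_PROG_TYPE_UNSPEC).
 * The KF_* flags tell the verifier how each kfunc may be used: iterator
 * lifecycle, acquire/release pairing, trusted or RCU-protected arguments, etc.
 */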
BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1)
BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)

BTF_ID_LIST(bpf_testmod_dtor_ids)
BTF_ID(struct, bpf_testmod_ctx)
BTF_ID(func, bpf_testmod_ctx_release)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

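/* Self-referential refcounted test object. bpf_testmod_exit() waits for its
 * refcount to drop back to 1 before unloading, since the release kfunc still
 * resides in the kernel proper.
 */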
static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next two functions can't really be used for testing except to ensure
 * that the verifier rejects the call. Acquire functions must return struct
 * pointers, so these ones fail verification.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
{
}

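/* Socket kfuncs: a single kernel socket, guarded by sock_lock, is created by
 * bpf_kfunc_init_sock() and driven through connect/bind/listen/sendmsg/
 * getsockname/getpeername wrappers. All of them are registered KF_SLEEPABLE.
 */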
__bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
{
	int proto;
	int err;

	mutex_lock(&sock_lock);

	if (sock) {
		pr_err("%s called without releasing old sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	switch (args->af) {
	case AF_INET:
	case AF_INET6:
		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
		break;
	case AF_UNIX:
		proto = PF_UNIX;
		break;
	default:
		pr_err("invalid address family %d\n", args->af);
		err = -EINVAL;
		goto out;
	}

	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
			       proto, &sock);

	if (!err)
		/* Set timeout for call to kernel_connect() to prevent it from hanging,
		 * and consider the connection attempt failed if it returns
		 * -EINPROGRESS.
		 */
		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc void bpf_kfunc_close_sock(void)
{
	mutex_lock(&sock_lock);

	if (sock) {
		sock_release(sock);
		sock = NULL;
	}

	mutex_unlock(&sock_lock);
}

__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_connect(sock, (struct sockaddr *)&args->addr,
			     args->addrlen, 0);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_listen(sock, 128);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = sock_sendmsg(sock, &msg);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

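/* Kfunc wrappers around the currently registered bpf_testmod_st_ops map;
 * the prologue/epilogue selftests use them to invoke the struct_ops programs.
 */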
static DEFINE_MUTEX(st_ops_mutex);
static struct bpf_testmod_st_ops *st_ops;

__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_prologue)
		ret = st_ops->test_prologue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_epilogue)
		ret = st_ops->test_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_pro_epilogue)
		ret = st_ops->test_pro_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
{
	args->a += 10;
	return args->a;
}

BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)

static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}

static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_testmod_ops_init_member(const struct btf_type *t,
				       const struct btf_member *member,
				       void *kdata, const void *udata)
{
	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
		/* For the data field, this function has to copy the value and
		 * return 1 to indicate that it has been handled by the
		 * struct_ops type; otherwise the verifier will reject the map
		 * if the field's value is not zero.
		 */
		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
		return 1;
	}
	return 0;
}

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops *ops = kdata;

	if (ops->test_1)
		ops->test_1();
	/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
	 * initialized, so we need to check for NULL.
	 */
	if (ops->test_2)
		ops->test_2(4, ops->data);

	return 0;
}

static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}

static int bpf_testmod_test_1(void)
{
	return 0;
}

static void bpf_testmod_test_2(int a, int b)
{
}

static int bpf_testmod_tramp(int value)
{
	return 0;
}

static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}

static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
};

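/* The cfi_stubs table above supplies a native function for every member so
 * that indirect calls into attached struct_ops BPF programs stay compatible
 * with kernel control-flow integrity (kCFI) checking.
 */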
struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};

static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops2 *ops = kdata;

	ops->test_1();
	return 0;
}

static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};

struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};

static int st_ops3_reg(void *kdata, struct bpf_link *link)
{
	int err = 0;

	mutex_lock(&st_ops_mutex);
	if (st_ops3) {
		pr_err("st_ops has already been registered\n");
		err = -EEXIST;
		goto unlock;
	}
	st_ops3 = kdata;

unlock:
	mutex_unlock(&st_ops_mutex);
	return err;
}

static void st_ops3_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops3 = NULL;
	mutex_unlock(&st_ops_mutex);
}

static void test_1_recursion_detected(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;

	stats = this_cpu_ptr(prog->stats);
	printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu\n",
	       u64_stats_read(&stats->misses));
}

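/* check_member() runs once per struct_ops member at map load time. For
 * test_1 it requests a private stack for the program and installs the
 * recursion_detected callback above; all other members are accepted as-is.
 */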
static int st_ops3_check_member(const struct btf_type *t,
				const struct btf_member *member,
				const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct bpf_testmod_ops3, test_1):
		prog->aux->priv_stack_requested = true;
		prog->aux->recursion_detected = test_1_recursion_detected;
		fallthrough;
	default:
		break;
	}
	return 0;
}

struct bpf_struct_ops bpf_testmod_ops3 = {
	.verifier_ops = &bpf_testmod_verifier_ops3,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = st_ops3_reg,
	.unreg = st_ops3_unreg,
	.check_member = st_ops3_check_member,
	.cfi_stubs = &__bpf_testmod_ops3,
	.name = "bpf_testmod_ops3",
	.owner = THIS_MODULE,
};

static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
{
	return 0;
}

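/* gen_prologue()/gen_epilogue() let this struct_ops inject instructions that
 * the verifier patches in around the attached program: the prologue adds 1000
 * to args->a before the first instruction runs, and the epilogue adds 10000
 * and returns twice the result.
 */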
static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	/* r6 = r1[0]; // r6 will be "struct st_ops_args *args". r1 is "u64 *ctx".
	 * r7 = r6->a;
	 * r7 += 1000;
	 * r6->a = r7;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
			       s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct st_ops_args *args"
	 * r6 = r1->a;
	 * r6 += 10000;
	 * r1->a = r6;
	 * r0 = r6;
	 * r0 *= 2;
	 * BPF_EXIT;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}

static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
				    const struct bpf_reg_state *reg,
				    int off, int size)
{
	if (off < 0 || off + size > sizeof(struct st_ops_args))
		return -EACCES;
	return 0;
}

static const struct bpf_verifier_ops st_ops_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
	.btf_struct_access = st_ops_btf_struct_access,
	.gen_prologue = st_ops_gen_prologue,
	.gen_epilogue = st_ops_gen_epilogue,
	.get_func_proto = bpf_base_func_proto,
};

static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
	.test_prologue = bpf_test_mod_st_ops__test_prologue,
	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
};

static int st_ops_reg(void *kdata, struct bpf_link *link)
{
	int err = 0;

	mutex_lock(&st_ops_mutex);
	if (st_ops) {
		pr_err("st_ops has already been registered\n");
		err = -EEXIST;
		goto unlock;
	}
	st_ops = kdata;

unlock:
	mutex_unlock(&st_ops_mutex);
	return err;
}

static void st_ops_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops = NULL;
	mutex_unlock(&st_ops_mutex);
}

static int st_ops_init(struct btf *btf)
{
	return 0;
}

static int st_ops_init_member(const struct btf_type *t,
			      const struct btf_member *member,
			      void *kdata, const void *udata)
{
	return 0;
}

static struct bpf_struct_ops testmod_st_ops = {
	.verifier_ops = &st_ops_verifier_ops,
	.init = st_ops_init,
	.init_member = st_ops_init_member,
	.reg = st_ops_reg,
	.unreg = st_ops_unreg,
	.cfi_stubs = &st_ops_cfi_stubs,
	.name = "bpf_testmod_st_ops",
	.owner = THIS_MODULE,
};

extern int bpf_fentry_test1(int a);

static int bpf_testmod_init(void)
{
	const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
		{
			.btf_id		= bpf_testmod_dtor_ids[0],
			.kfunc_btf_id	= bpf_testmod_dtor_ids[1]
		},
	};
	void **tramp;
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3);
	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
						 ARRAY_SIZE(bpf_testmod_dtors),
						 THIS_MODULE);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	sock = NULL;
	mutex_init(&sock_lock);
	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	if (ret < 0)
		return ret;
	ret = register_bpf_testmod_uprobe();
	if (ret < 0)
		return ret;

	/* Ensure nothing is between tramp_1..tramp_40 */
	BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) !=
		     offsetofend(struct bpf_testmod_ops, tramp_40));
	tramp = (void **)&__bpf_testmod_ops.tramp_1;
	while (tramp <= (void **)&__bpf_testmod_ops.tramp_40)
		*tramp++ = bpf_testmod_tramp;

	return 0;
}

static void bpf_testmod_exit(void)
{
	/* Need to wait for all references to be dropped because
	 * bpf_kfunc_call_test_release(), which currently resides in the
	 * kernel, can be called after bpf_testmod is unloaded. Once the
	 * release function is moved into the module, this wait can be
	 * removed.
	 */
	while (refcount_read(&prog_test_struct.cnt) > 1)
		msleep(20);

	bpf_kfunc_close_sock();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	unregister_bpf_testmod_uprobe();
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");