// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/nsproxy.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
#include <net/sock.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

#define CONNECT_TIMEOUT_SEC 1

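/* These typedefs are never called directly;
 * bpf_testmod_test_btf_type_tag_user_1() below forces them into the module's
 * BTF via BTF_TYPE_EMIT() so that tests can look the types up.
 */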
typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;
static DEFINE_MUTEX(sock_lock);
static struct socket *sock;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

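/* __bpf_hook_start()/__bpf_hook_end() bracket the functions below, which are
 * intentionally global so BPF programs can attach to them; among other
 * things, the markers suppress compiler diagnostics (e.g. missing-prototypes
 * warnings) for these definitions.
 */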
__bpf_hook_start();

noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	if (cnt < 0) {
		it->cnt = 0;
		return -EINVAL;
	}

	it->value = value;
	it->cnt = cnt;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}
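
/* Together, the three kfuncs above implement an open-coded iterator that
 * yields "cnt" copies of "value". A BPF program would drive it roughly like
 * this (a sketch; the exact kfunc declarations live in the selftests'
 * BPF-side headers):
 *
 *	struct bpf_iter_testmod_seq it;
 *	s64 *v, sum = 0;
 *
 *	bpf_iter_testmod_seq_new(&it, 100, 10);
 *	while ((v = bpf_iter_testmod_seq_next(&it)))
 *		sum += *v;
 *	bpf_iter_testmod_seq_destroy(&it);
 */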

__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* The primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;	/* vsyscall page address */
#endif
	default: return NULL;
	}
}

noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}
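
/* With the arguments bpf_testmod_test_read() passes below, these fentry test
 * functions return 2, 5, 15, 133 (= 16 + 17 + ... + 22) and
 * 231 (= 16 + 17 + ... + 26), respectively; the read handler checks exactly
 * those sums before setting bpf_testmod_fentry_ok.
 */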

int bpf_testmod_fentry_ok;

noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
			21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
			21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
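
/* Userspace triggers this handler by reading the sysfs file registered
 * below; a minimal sketch (error handling omitted):
 *
 *	int fd = open("/sys/kernel/bpf_testmod", O_RDONLY);
 *	char buf[64];
 *
 *	read(fd, buf, sizeof(buf));	(a len of 64 also enables the writable tp)
 *	close(fd);
 *
 * The handler always returns -EIO, so the read itself is expected to fail
 * even when all internal checks pass.
 */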

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__bpf_hook_end();

static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};

BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};
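
/* .cnt starts at 1 to account for the module's own reference;
 * bpf_testmod_exit() waits for the refcount to drop back to 1 before
 * unloading, since references taken via bpf_kfunc_call_test_acquire() may
 * still be outstanding.
 */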

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next two can't really be used for testing, except to ensure that the
 * verifier rejects the call. Acquire functions must return struct pointers,
 * so these ones fail.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
{
}

__bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
{
	int proto;
	int err;

	mutex_lock(&sock_lock);

	if (sock) {
		pr_err("%s called without releasing old sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	switch (args->af) {
	case AF_INET:
	case AF_INET6:
		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
		break;
	case AF_UNIX:
		proto = PF_UNIX;
		break;
	default:
		pr_err("invalid address family %d\n", args->af);
		err = -EINVAL;
		goto out;
	}

	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
			       proto, &sock);

	if (!err)
		/* Set timeout for call to kernel_connect() to prevent it from hanging,
		 * and consider the connection attempt failed if it returns
		 * -EINPROGRESS.
		 */
		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc void bpf_kfunc_close_sock(void)
{
	mutex_lock(&sock_lock);

	if (sock) {
		sock_release(sock);
		sock = NULL;
	}

	mutex_unlock(&sock_lock);
}

__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_connect(sock, (struct sockaddr *)&args->addr,
			     args->addrlen, 0);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_listen(sock, 128);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

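/* Unlike kernel_sendmsg() above, sock_sendmsg() does not build the iov
 * iterator for us, so this variant must initialize msg.msg_iter itself via
 * iov_iter_kvec() before sending.
 */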
__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = sock_sendmsg(sock, &msg);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)

static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}

static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

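/* struct btf_member::offset is expressed in bits, hence the "* 8" when
 * comparing it against offsetof() below.
 */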
static int bpf_testmod_ops_init_member(const struct btf_type *t,
				       const struct btf_member *member,
				       void *kdata, const void *udata)
{
	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
		/* For the data field, this callback has to copy it and
		 * return 1 to indicate that the data has been handled by
		 * the struct_ops type, or the verifier will reject the map
		 * if the value of the data field is not zero.
		 */
		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
		return 1;
	}
	return 0;
}

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static int bpf_dummy_reg(void *kdata)
{
	struct bpf_testmod_ops *ops = kdata;

	if (ops->test_1)
		ops->test_1();
	/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
	 * initialized, so we need to check for NULL.
	 */
	if (ops->test_2)
		ops->test_2(4, ops->data);

	return 0;
}

static void bpf_dummy_unreg(void *kdata)
{
}

static int bpf_testmod_test_1(void)
{
	return 0;
}

static void bpf_testmod_test_2(int a, int b)
{
}

static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}

static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
};

struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};
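
/* A BPF program would typically implement this struct_ops type roughly as
 * follows (a sketch; the real programs live in the selftests):
 *
 *	SEC("struct_ops/test_1")
 *	int BPF_PROG(test_1)
 *	{
 *		return 0;
 *	}
 *
 *	SEC(".struct_ops.link")
 *	struct bpf_testmod_ops testmod_ops = {
 *		.test_1 = (void *)test_1,
 *	};
 */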

static int bpf_dummy_reg2(void *kdata)
{
	struct bpf_testmod_ops2 *ops = kdata;

	ops->test_1();
	return 0;
}

static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};

struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};

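/* bpf_fentry_test1() is defined in the core kernel (net/bpf/test_run.c);
 * calling it from module init presumably gives the selftests a guaranteed
 * module-to-kernel call edge and verifies the symbol is reachable.
 */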
extern int bpf_fentry_test1(int a);

static int bpf_testmod_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	sock = NULL;
	mutex_init(&sock_lock);
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

static void bpf_testmod_exit(void)
{
	/* Need to wait for all references to be dropped because
	 * bpf_kfunc_call_test_release(), which currently resides in the
	 * kernel, can be called after bpf_testmod is unloaded. Once the
	 * release function is moved into the module, this wait can be
	 * removed.
	 */
	while (refcount_read(&prog_test_struct.cnt) > 1)
		msleep(20);

	bpf_kfunc_close_sock();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");