xref: /linux/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c (revision 36ec807b627b4c0a0a382f0ae48eac7187d14b2b)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/nsproxy.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
#include <net/sock.h>
#include <linux/namei.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

#define CONNECT_TIMEOUT_SEC 1

typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;
static DEFINE_MUTEX(sock_lock);
static struct socket *sock;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

struct bpf_testmod_struct_arg_5 {
	char a;
	short b;
	int c;
	long d;
};

__bpf_hook_start();

noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
			      short g, struct bpf_testmod_struct_arg_5 h, long i)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
		f + g + h.a + h.b + h.c + h.d + i;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}
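
/* A minimal BPF-side sketch (an assumption, not part of this module) of how
 * a selftest program can reach the per-CPU ksym and the kfunc above; the
 * extern declarations resolve against the module's BTF:
 *
 *	extern const int bpf_testmod_ksym_percpu __ksym;
 *	extern void bpf_testmod_test_mod_kfunc(int i) __ksym;
 *
 *	bpf_testmod_test_mod_kfunc(42);
 *	val = *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu);
 */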

__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	if (cnt < 0) {
		it->cnt = 0;
		return -EINVAL;
	}

	it->value = value;
	it->cnt = cnt;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}
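
/* A hedged BPF-side sketch (not in this file) of driving the open-coded
 * iterator above: it yields `value` exactly `cnt` times, since next()
 * never changes it->value.
 *
 *	struct bpf_iter_testmod_seq it;
 *	s64 sum = 0, *v;
 *
 *	if (!bpf_iter_testmod_seq_new(&it, 100, 3))
 *		while ((v = bpf_iter_testmod_seq_next(&it)))
 *			sum += *v;	// accumulates 300
 *	bpf_iter_testmod_seq_destroy(&it);
 */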

__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
				       struct bpf_dynptr *ptr__nullable)
{
}

__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
	struct bpf_testmod_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		*err = -ENOMEM;
		return NULL;
	}
	refcount_set(&ctx->usage, 1);

	return ctx;
}

static void testmod_free_cb(struct rcu_head *head)
{
	struct bpf_testmod_ctx *ctx;

	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
	kfree(ctx);
}

__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
{
	if (!ctx)
		return;
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, testmod_free_cb);
}
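
/* A minimal BPF-side sketch (an assumption, not part of this module) of
 * pairing the KF_ACQUIRE/KF_RELEASE kfuncs above; the verifier requires
 * every successfully acquired context to be released exactly once:
 *
 *	int err;
 *	struct bpf_testmod_ctx *ctx = bpf_testmod_ctx_create(&err);
 *
 *	if (!ctx)
 *		return err;
 *	bpf_testmod_ctx_release(ctx);
 */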

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* The primary goal of this test is to exercise LBR (last branch
	 * records). Create a lot of branches in the function so we can
	 * catch them easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;	/* vsyscall page address */
#endif
	default: return NULL;
	}
}

noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

int bpf_testmod_fentry_ok;

noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);
	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
					    21, 22, struct_arg5, 27);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
			21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
			21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__bpf_hook_end();

static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};
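
/* A userspace sketch (an assumption, not part of this file) of how the
 * selftests drive the hooks above: the bin file appears as
 * /sys/kernel/bpf_testmod, and any read triggers bpf_testmod_test_read()
 * for its side effects; the call itself always fails with -EIO by design.
 *
 *	char buf[64];
 *	int fd = open("/sys/kernel/bpf_testmod", O_RDONLY);
 *
 *	if (fd >= 0) {
 *		read(fd, buf, sizeof(buf));	// len == 64 also hits the
 *		close(fd);			// writable-tp path above
 *	}
 */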

/* The bpf_testmod_uprobe sysfs attribute is enabled for x86_64 only so far;
 * see the test_uretprobe_regs_change test.
 */
#ifdef __x86_64__

static int
uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
		   struct pt_regs *regs)
{
	regs->ax  = 0x12345678deadbeef;
	regs->cx  = 0x87654321feebdaed;
	regs->r11 = (u64) -1;
	return true;
}

struct testmod_uprobe {
	struct path path;
	loff_t offset;
	struct uprobe_consumer consumer;
};

static DEFINE_MUTEX(testmod_uprobe_mutex);

static struct testmod_uprobe uprobe = {
	.consumer.ret_handler = uprobe_ret_handler,
};

static int testmod_register_uprobe(loff_t offset)
{
	int err = -EBUSY;

	if (uprobe.offset)
		return -EBUSY;

	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.offset)
		goto out;

	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
	if (err)
		goto out;

	err = uprobe_register_refctr(d_real_inode(uprobe.path.dentry),
				     offset, 0, &uprobe.consumer);
	if (err)
		path_put(&uprobe.path);
	else
		uprobe.offset = offset;

out:
	mutex_unlock(&testmod_uprobe_mutex);
	return err;
}

static void testmod_unregister_uprobe(void)
{
	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.offset) {
		uprobe_unregister(d_real_inode(uprobe.path.dentry),
				  uprobe.offset, &uprobe.consumer);
		uprobe.offset = 0;
	}

	mutex_unlock(&testmod_uprobe_mutex);
}

static ssize_t
bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
			 struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t len)
{
	unsigned long offset = 0;
	int err = 0;

	if (kstrtoul(buf, 0, &offset))
		return -EINVAL;

	if (offset)
		err = testmod_register_uprobe(offset);
	else
		testmod_unregister_uprobe();

	return err ?: strlen(buf);
}

static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
	.write = bpf_testmod_uprobe_write,
};
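
/* A userspace sketch (an assumption): arming and disarming the test
 * uretprobe. Writing a non-zero offset registers a uretprobe at that offset
 * within the writing process's /proc/self/exe; writing 0 unregisters it.
 *
 *	int fd = open("/sys/kernel/bpf_testmod_uprobe", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		dprintf(fd, "%lu", uretprobe_offset);	// register
 *		dprintf(fd, "0");			// unregister
 *		close(fd);
 *	}
 */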

static int register_bpf_testmod_uprobe(void)
{
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

static void unregister_bpf_testmod_uprobe(void)
{
	testmod_unregister_uprobe();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

#else
static int register_bpf_testmod_uprobe(void)
{
	return 0;
}

static void unregister_bpf_testmod_uprobe(void) { }
#endif

BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)

BTF_ID_LIST(bpf_testmod_dtor_ids)
BTF_ID(struct, bpf_testmod_ctx)
BTF_ID(func, bpf_testmod_ctx_release)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next two can't really be used for testing, except to ensure that
 * the verifier rejects the call. Acquire functions must return struct
 * pointers, so these ones fail.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
{
}

__bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
{
	int proto;
	int err;

	mutex_lock(&sock_lock);

	if (sock) {
		pr_err("%s called without releasing old sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	switch (args->af) {
	case AF_INET:
	case AF_INET6:
		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
		break;
	case AF_UNIX:
		proto = PF_UNIX;
		break;
	default:
		pr_err("invalid address family %d\n", args->af);
		err = -EINVAL;
		goto out;
	}

	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
			       proto, &sock);

	if (!err)
		/* Set a timeout for the call to kernel_connect() to prevent
		 * it from hanging, and consider the connection attempt failed
		 * if it returns -EINPROGRESS.
		 */
		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc void bpf_kfunc_close_sock(void)
{
	mutex_lock(&sock_lock);

	if (sock) {
		sock_release(sock);
		sock = NULL;
	}

	mutex_unlock(&sock_lock);
}

__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_connect(sock, (struct sockaddr *)&args->addr,
			     args->addrlen, 0);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_listen(sock, 128);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = sock_sendmsg(sock, &msg);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock\n", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}
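
/* A hedged BPF-side sketch (an assumption) of the intended call sequence
 * for the sleepable socket kfuncs above, typically driven from a SYSCALL
 * program; the module keeps at most one socket at a time under sock_lock:
 *
 *	struct init_sock_args iargs = { .af = AF_INET, .type = SOCK_STREAM };
 *	struct addr_args aargs;		// addr/addrlen filled in by caller
 *
 *	if (!bpf_kfunc_init_sock(&iargs)) {
 *		bpf_kfunc_call_kernel_bind(&aargs);
 *		bpf_kfunc_call_kernel_getsockname(&aargs);
 *		bpf_kfunc_close_sock();
 *	}
 */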

BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)

static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}

static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_testmod_ops_init_member(const struct btf_type *t,
				       const struct btf_member *member,
				       void *kdata, const void *udata)
{
	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
		/* For data fields, this function has to copy the value and
		 * return 1 to indicate that it has been handled by the
		 * struct_ops type; otherwise the verifier will reject the
		 * map if the data field's value is non-zero.
		 */
		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
		return 1;
	}
	return 0;
}

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops *ops = kdata;

	if (ops->test_1)
		ops->test_1();
	/* Some test cases (e.g. struct_ops_maybe_null) may not have test_2
	 * initialized, so we need to check for NULL.
	 */
	if (ops->test_2)
		ops->test_2(4, ops->data);

	return 0;
}

static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}

static int bpf_testmod_test_1(void)
{
	return 0;
}

static void bpf_testmod_test_2(int a, int b)
{
}

static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}

static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
};

struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};
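
/* A hedged BPF-side sketch (an assumption, mirroring the selftests'
 * struct_ops conventions) of implementing bpf_testmod_ops; attaching the
 * map calls bpf_dummy_reg() above, which invokes any non-NULL callbacks:
 *
 *	SEC("struct_ops/test_1")
 *	int BPF_PROG(test_1)
 *	{
 *		return 0;
 *	}
 *
 *	SEC(".struct_ops.link")
 *	struct bpf_testmod_ops testmod_1 = {
 *		.test_1 = (void *)test_1,
 *	};
 */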

static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops2 *ops = kdata;

	ops->test_1();
	return 0;
}

static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};

struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};

extern int bpf_fentry_test1(int a);

static int bpf_testmod_init(void)
{
	const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
		{
			.btf_id		= bpf_testmod_dtor_ids[0],
			.kfunc_btf_id	= bpf_testmod_dtor_ids[1]
		},
	};
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
						 ARRAY_SIZE(bpf_testmod_dtors),
						 THIS_MODULE);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	sock = NULL;
	mutex_init(&sock_lock);
	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	if (ret < 0)
		return ret;
	ret = register_bpf_testmod_uprobe();
	if (ret < 0)
		return ret;
	return 0;
}

static void bpf_testmod_exit(void)
{
	/* Need to wait for all references to be dropped because
	 * bpf_kfunc_call_test_release(), which currently resides in the
	 * kernel, can be called after bpf_testmod is unloaded. Once the
	 * release function is moved into the module, this wait can be
	 * removed.
	 */
	while (refcount_read(&prog_test_struct.cnt) > 1)
		msleep(20);

	bpf_kfunc_close_sock();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	unregister_bpf_testmod_uprobe();
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");