// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

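/*
 * The functions between __bpf_hook_start() and __bpf_hook_end() are the
 * hooks and kfuncs exercised by the BPF selftests (fentry/fexit attach
 * targets, kfunc call tests, sysfs handlers); the markers keep the
 * compiler from warning about the ones intentionally left without
 * prototypes.
 */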
__bpf_hook_start();

noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

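/*
 * Open-coded iterator kfuncs backing the testmod_seq iterator: the
 * iterator yields 'value' exactly 'cnt' times. A BPF program is
 * expected to drive it roughly like this (sketch only):
 *
 *	struct bpf_iter_testmod_seq it;
 *	s64 *v, sum = 0;
 *
 *	bpf_iter_testmod_seq_new(&it, 100, 3);
 *	while ((v = bpf_iter_testmod_seq_next(&it)))
 *		sum += *v;
 *	bpf_iter_testmod_seq_destroy(&it);
 */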
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	if (cnt < 0) {
		it->cnt = 0;
		return -EINVAL;
	}

	it->value = value;
	it->cnt = cnt;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}

__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* the primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

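/*
 * Returns a mix of valid, invalid and specially crafted pointers so that
 * BPF programs probing this function's return value can exercise both
 * valid and faulting accesses (see the per-case comments below).
 */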
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
	default: return NULL;
	}
}

noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

int bpf_testmod_fentry_ok;

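/*
 * sysfs .read handler for the bpf_testmod file. Each read drives the
 * struct-argument, loop and fentry test functions above and fires the
 * test tracepoints; unless the writable tracepoint requested an early
 * return, it fails with -EIO (error injection is also allowed on it).
 */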
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
			21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
			21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__bpf_hook_end();

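/*
 * Binary sysfs attribute backing /sys/kernel/bpf_testmod (created under
 * kernel_kobj in bpf_testmod_init()). The selftests trigger the read and
 * write handlers above by accessing this file; a plain read is expected
 * to fail with -EIO, e.g. (sketch):
 *
 *	cat /sys/kernel/bpf_testmod	# reports an I/O error after firing the hooks
 */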
static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};

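/* Common kfuncs (the iterator triple plus a no-op test kfunc), registered
 * for all program types (BPF_PROG_TYPE_UNSPEC) in bpf_testmod_init().
 */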
BTF_SET8_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_SET8_END(bpf_testmod_common_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

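/* Refcounted singleton returned by the acquire/release test kfuncs;
 * .next deliberately points back at the object itself.
 */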
static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

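/* Reinterpret p as an int buffer of at most 2 * sizeof(int) bytes;
 * larger requests yield NULL (hence KF_RET_NULL on the wrappers below).
 */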
static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next 2 kfuncs can't really be used for anything except to ensure
 * that the verifier rejects the call. Acquire functions must return
 * struct pointers, so these are expected to fail.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

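/* kfuncs in this set are registered for SCHED_CLS, TRACING and SYSCALL
 * programs in bpf_testmod_init() below.
 */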
BTF_SET8_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_SET8_END(bpf_testmod_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};

extern int bpf_fentry_test1(int a);

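/*
 * On load: register both kfunc ID sets, call the in-kernel
 * bpf_fentry_test1() as a sanity check (and to keep a reference to it
 * from the module), then create the sysfs file the selftests poke at.
 */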
static int bpf_testmod_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

static void bpf_testmod_exit(void)
{
	return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");