xref: /linux/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c (revision d457a0e329b0bfd3a1450e0b1a18cd2b47a25a08)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <linux/btf.h>
4 #include <linux/btf_ids.h>
5 #include <linux/error-injection.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/percpu-defs.h>
9 #include <linux/sysfs.h>
10 #include <linux/tracepoint.h>
11 #include "bpf_testmod.h"
12 #include "bpf_testmod_kfunc.h"
13 
14 #define CREATE_TRACE_POINTS
15 #include "bpf_testmod-events.h"
16 
/* Function-pointer typedefs, nested three levels deep; forced into the
 * module BTF via BTF_TYPE_EMIT() in bpf_testmod_test_btf_type_tag_user_1().
 */
typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

/* Per-CPU ksym; written by bpf_testmod_test_mod_kfunc() below. */
DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
/* Accumulator written by the bpf_testmod_test_struct_arg_*() helpers. */
long bpf_testmod_test_struct_arg_result;
23 
/* Small structs passed by value to the bpf_testmod_test_struct_arg_*()
 * helpers, exercising struct-argument passing across architectures.
 */
struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

/* Trailing flexible array member; heap-allocated in bpf_testmod_test_read(). */
struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in bpf_testmod.ko BTF");
40 
/* Struct-by-value attach targets: each folds its arguments into the global
 * bpf_testmod_test_struct_arg_result and returns it, so a BPF program
 * attached here can verify both the arguments and the return value.
 * Called from bpf_testmod_test_read().
 */
noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b  + b + c;
	return bpf_testmod_test_struct_arg_result;
}
46 
/* Struct argument in the middle position. */
noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}
52 
/* Struct argument in the last position. */
noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}
58 
/* Two struct arguments of different sizes, first and last position. */
noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}
65 
/* No arguments at all; baseline for the struct-argument tests. */
noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}
71 
/* Pointer to a struct with a flexible array member; reads the first
 * element of the trailing array.
 */
noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}
77 
/* Module kfunc: stores @i into this CPU's bpf_testmod_ksym_percpu slot so a
 * test program can read it back through the per-CPU ksym.
 */
__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}
83 
/* Open-coded iterator constructor (registered with KF_ITER_NEW below).
 * The iterator yields @value exactly @cnt times.  Returns 0 on success,
 * -EINVAL for a negative @cnt.
 */
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	if (cnt < 0) {
		/* Leave the iterator in a state next() treats as exhausted. */
		it->cnt = 0;
		return -EINVAL;
	}

	it->value = value;
	it->cnt = cnt;

	return 0;
}
96 
/* Iterator step (KF_ITER_NEXT | KF_RET_NULL): returns a pointer to the
 * stored value while iterations remain, NULL once the count is exhausted.
 */
__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}
106 
/* Iterator destructor (KF_ITER_DESTROY): marks the iterator exhausted. */
__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}
111 
/* Structs for BTF type-tag tests: the __user / __percpu annotations on the
 * embedded pointers become BTF type tags in the module BTF.
 */
struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};
123 
/* Attach target taking a __user-tagged pointer argument.  NOTE(review):
 * the direct dereference of a __user pointer here only matters for the
 * BTF signature / verifier tests, not for native execution.
 */
noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	/* Force the func_proto typedefs above into the module BTF. */
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}
131 
/* Attach target whose argument points to a struct containing a
 * __user-tagged pointer member.
 */
noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}
136 
/* Attach target taking a __percpu-tagged pointer argument. */
noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}
141 
/* Attach target whose argument contains a __percpu-tagged pointer member. */
noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}
146 
/* Branch-heavy helper for LBR (last-branch-record) tests; returns the sum
 * 0 + 1 + ... + (n - 1).  Do not simplify the loop: the branches are the
 * point of the test.
 */
noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* the primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}
162 
/* Return a different (mostly bogus) pointer for each probe value @arg.
 * The while loop in bpf_testmod_test_read() keeps calling this with
 * increasing values until NULL, so every case is exercised once per read;
 * programs reading through these pointers test the kernel's fault handling.
 */
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
	default: return NULL;
	}
}
178 
/* fentry/fexit attach targets with 1, 2 and 3 arguments.  Their return
 * values are checked in bpf_testmod_test_read(), which sets
 * bpf_testmod_fentry_ok only when all three compute the expected sums.
 */
noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}
193 
__diag_pop();

/* Set to 1 by bpf_testmod_test_read() once the fentry tests above return
 * the expected values; observed by the selftests.
 */
int bpf_testmod_fentry_ok;
197 
198 noinline ssize_t
199 bpf_testmod_test_read(struct file *file, struct kobject *kobj,
200 		      struct bin_attribute *bin_attr,
201 		      char *buf, loff_t off, size_t len)
202 {
203 	struct bpf_testmod_test_read_ctx ctx = {
204 		.buf = buf,
205 		.off = off,
206 		.len = len,
207 	};
208 	struct bpf_testmod_struct_arg_1 struct_arg1 = {10};
209 	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
210 	struct bpf_testmod_struct_arg_3 *struct_arg3;
211 	int i = 1;
212 
213 	while (bpf_testmod_return_ptr(i))
214 		i++;
215 
216 	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
217 	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
218 	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
219 	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
220 	(void)bpf_testmod_test_struct_arg_5();
221 
222 	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
223 				sizeof(int)), GFP_KERNEL);
224 	if (struct_arg3 != NULL) {
225 		struct_arg3->b[0] = 1;
226 		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
227 		kfree(struct_arg3);
228 	}
229 
230 	/* This is always true. Use the check to make sure the compiler
231 	 * doesn't remove bpf_testmod_loop_test.
232 	 */
233 	if (bpf_testmod_loop_test(101) > 100)
234 		trace_bpf_testmod_test_read(current, &ctx);
235 
236 	/* Magic number to enable writable tp */
237 	if (len == 64) {
238 		struct bpf_testmod_test_writable_ctx writable = {
239 			.val = 1024,
240 		};
241 		trace_bpf_testmod_test_writable_bare(&writable);
242 		if (writable.early_ret)
243 			return snprintf(buf, len, "%d\n", writable.val);
244 	}
245 
246 	if (bpf_testmod_fentry_test1(1) != 2 ||
247 	    bpf_testmod_fentry_test2(2, 3) != 5 ||
248 	    bpf_testmod_fentry_test3(4, 5, 6) != 15)
249 		goto out;
250 
251 	bpf_testmod_fentry_ok = 1;
252 out:
253 	return -EIO; /* always fail */
254 }
255 EXPORT_SYMBOL(bpf_testmod_test_read);
256 ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
257 
/* Write handler for the sysfs bin file: fires the write tracepoint with a
 * description of the caller's buffer, then always fails with -EIO.
 */
noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);
275 
/* /sys/kernel/bpf_testmod: world read/write file backed by the two
 * always-failing handlers above.
 */
static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};
281 
/* Iterator kfuncs, registered for BPF_PROG_TYPE_UNSPEC in
 * bpf_testmod_init() so they are usable from any program type.
 */
BTF_SET8_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_SET8_END(bpf_testmod_common_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};
292 
/* Simple arithmetic kfuncs exercising argument-count, width and
 * sign-extension handling in kfunc calls.  The sk argument is unused by
 * the arithmetic ones; test3 just echoes it back.
 */
__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}
315 
/* Statically allocated object handed out by bpf_kfunc_call_test_acquire();
 * .next points back at itself and the refcount starts at 1.
 */
static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};
322 
/* Acquire kfunc (KF_ACQUIRE | KF_RET_NULL): bumps the refcount on the
 * static prog_test_struct and returns it.
 */
__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

/* The three stubs below WARN if they ever actually run: the selftests are
 * expected to only use them as verifier-rejection targets.
 */
__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}
346 
347 static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
348 {
349 	if (size > 2 * sizeof(int))
350 		return NULL;
351 
352 	return (int *)p;
353 }
354 
/* Read-write view of prog_test_struct memory (KF_RET_NULL). */
__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

/* Read-only view of the same memory (KF_RET_NULL). */
__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* the next 2 ones can't be really used for testing except to ensure
 * that the verifier rejects the call.
 * Acquire functions must return struct pointers, so these ones are
 * failing.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}
381 
/* No-op kfuncs: only their BTF signatures matter.  The selftests use them
 * to check which argument shapes (ctx pointers, struct pointers, mem+len
 * pairs) the verifier accepts ("pass") or rejects ("fail").
 */
__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

/* The "__sz" suffix marks mem__sz as the size of mem; the two "fail"
 * variants lack that annotation.
 */
__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

/* Flagged KF_DESTRUCTIVE below; callable only with appropriate caps. */
__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

/* static on purpose: checks that an unused argument of a static kfunc is
 * still handled; returns its first argument unchanged.
 */
__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}
431 
/* kfuncs exposed to SCHED_CLS, TRACING and SYSCALL programs (see
 * bpf_testmod_init()), with their verifier flags.
 */
BTF_SET8_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_SET8_END(bpf_testmod_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};
464 
/* NOTE(review): the name suggests this deliberately shadows a function of
 * the same name in vmlinux BTF for fentry-resolution tests — confirm
 * against the selftests that attach to it.
 */
noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
470 
/* Kernel-side test function; called once here as a sanity check. */
extern int bpf_fentry_test1(int a);

/* Module init: register the kfunc ID sets for the program types the
 * selftests use, sanity-check that bpf_fentry_test1() is callable, then
 * create the /sys/kernel/bpf_testmod file.  Returns 0 or a negative errno.
 */
static int bpf_testmod_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}
487 
488 static void bpf_testmod_exit(void)
489 {
490 	return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
491 }
492 
/* Standard module plumbing and metadata. */
module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");
499