xref: /linux/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c (revision 6dee402daba4eb8677a9438ebdcd8fe90ddd4326)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

__bpf_hook_start();

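/* The bpf_testmod_test_struct_arg_*() functions below exercise
 * struct-by-value arguments of different sizes and positions; each
 * stores its computed sum in bpf_testmod_test_struct_arg_result so
 * test programs can check what the trampoline passed through.
 */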
noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}
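
/* A minimal BPF-side sketch of observing the per-CPU value set above
 * (the extern __ksym declaration and the use of bpf_this_cpu_ptr() are
 * assumptions for illustration, not part of this module):
 *
 *	extern const int bpf_testmod_ksym_percpu __ksym;
 *
 *	int read_percpu(void)
 *	{
 *		return *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu);
 *	}
 */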

__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	if (cnt < 0) {
		it->cnt = 0;
		return -EINVAL;
	}

	it->value = value;
	it->cnt = cnt;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}
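
/* A minimal sketch of how a BPF program might drive this iterator,
 * following the usual open-coded iterator new/next/destroy pattern
 * (the BPF-side declarations are assumptions for illustration):
 *
 *	struct bpf_iter_testmod_seq it;
 *	s64 *v, sum = 0;
 *
 *	if (!bpf_iter_testmod_seq_new(&it, 100, 3))
 *		while ((v = bpf_iter_testmod_seq_next(&it)))
 *			sum += *v;	// 100 is yielded three times
 *	bpf_iter_testmod_seq_destroy(&it);
 */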

__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* the primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

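/* Return a spread of valid and invalid pointers; programs attached to
 * this function must cope with each class (e.g. via the extable path)
 * without crashing the kernel.
 */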
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;	/* vsyscall page address */
#endif
	default: return NULL;
	}
}

noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}
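
/* A hedged BPF-side sketch of attaching to one of the fentry targets
 * above (SEC syntax per libbpf, which resolves module functions by
 * name via module BTF; the program name is an assumption):
 *
 *	SEC("fentry/bpf_testmod_fentry_test1")
 *	int BPF_PROG(on_test1, int a)
 *	{
 *		return 0;
 *	}
 */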

int bpf_testmod_fentry_ok;

noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
			21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
			21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__bpf_hook_end();

static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};
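
/* Userspace drives the hooks above through this sysfs file; since the
 * attribute hangs off kernel_kobj, it appears as /sys/kernel/bpf_testmod.
 * A shell sketch (64 bytes is the magic length that takes the writable
 * tracepoint path in bpf_testmod_test_read()):
 *
 *	dd if=/sys/kernel/bpf_testmod of=/dev/null bs=64 count=1
 *
 * Both read and write intentionally return -EIO, so an error from the
 * command itself is expected.
 */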

BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

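/* Backing object for the acquire/release kfuncs below; note that
 * bpf_testmod_exit() waits until its refcount drops back to 1 before
 * the module can be unloaded.
 */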
static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next two can't really be used for testing, except to ensure that
 * the verifier rejects the call: acquire functions must return struct
 * pointers, so these fail.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)

static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}

static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_testmod_ops_init_member(const struct btf_type *t,
				       const struct btf_member *member,
				       void *kdata, const void *udata)
{
	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
		/* For data fields, this function has to copy it and return
		 * 1 to indicate that the data has been handled by the
		 * struct_ops type, or the verifier will reject the map if
		 * the value of the data field is not zero.
		 */
		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
		return 1;
	}
	return 0;
}
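
/* A hedged BPF-side sketch of a map that exercises the init_member
 * path above (SEC name per libbpf convention; the program names are
 * assumptions for illustration):
 *
 *	SEC(".struct_ops.link")
 *	struct bpf_testmod_ops testmod_1 = {
 *		.test_1 = (void *)test_1_prog,
 *		.test_2 = (void *)test_2_prog,
 *		.data = 0x5a,	// copied by bpf_testmod_ops_init_member()
 *	};
 */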

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static int bpf_dummy_reg(void *kdata)
{
	struct bpf_testmod_ops *ops = kdata;

	if (ops->test_1)
		ops->test_1();
	/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
	 * initialized, so we need to check for NULL.
	 */
	if (ops->test_2)
		ops->test_2(4, ops->data);

	return 0;
}

static void bpf_dummy_unreg(void *kdata)
{
}

static int bpf_testmod_test_1(void)
{
	return 0;
}

static void bpf_testmod_test_2(int a, int b)
{
}

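/* The task__nullable suffix marks this argument as possibly NULL, so
 * the verifier requires programs to check it before dereferencing.
 */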
static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}

static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
};

struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};

static int bpf_dummy_reg2(void *kdata)
{
	struct bpf_testmod_ops2 *ops = kdata;

	ops->test_1();
	return 0;
}

static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};

struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};

extern int bpf_fentry_test1(int a);

static int bpf_testmod_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

static void bpf_testmod_exit(void)
{
	/* Need to wait for all references to be dropped because
	 * bpf_kfunc_call_test_release(), which currently resides in the
	 * kernel, can be called after bpf_testmod is unloaded. Once the
	 * release function is moved into the module, this wait can be
	 * removed.
	 */
	while (refcount_read(&prog_test_struct.cnt) > 1)
		msleep(20);

	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");