// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/nsproxy.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
#include <linux/filter.h>
#include <net/sock.h>
#include <linux/namei.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

#define CONNECT_TIMEOUT_SEC 1

typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;
static DEFINE_MUTEX(sock_lock);
static struct socket *sock;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

struct bpf_testmod_struct_arg_5 {
	char a;
	short b;
	int c;
	long d;
};

__bpf_hook_start();

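/* The bpf_testmod_test_struct_arg_*() functions below exercise passing
 * struct arguments by value (in registers and on the stack) so fentry/fexit
 * selftests can verify how the BPF trampoline reads them. They are only
 * invoked from bpf_testmod_test_read() further down.
 */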
noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
			      short g, struct bpf_testmod_struct_arg_5 h, long i)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
		f + g + h.a + h.b + h.c + h.d + i;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

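/* Open-coded iterator kfuncs: new/next/destroy implement a bounded numeric
 * sequence that BPF programs drive directly. A sketch of the expected
 * calling pattern on the BPF side (the actual callers live in the
 * selftests, not in this file):
 *
 *	struct bpf_iter_testmod_seq it;
 *	s64 *v;
 *
 *	bpf_iter_testmod_seq_new(&it, 100, 3);
 *	while ((v = bpf_iter_testmod_seq_next(&it)))
 *		sum += *v;
 *	bpf_iter_testmod_seq_destroy(&it);
 */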
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	it->cnt = cnt;

	if (cnt < 0)
		return -EINVAL;

	it->value = value;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq *it__iter)
{
	if (it__iter->cnt < 0)
		return 0;

	return val + it__iter->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}

__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
				       struct bpf_dynptr *ptr__nullable)
{
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
{
	return NULL;
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
{
	return NULL;
}

__bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
{
}

__bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
	struct bpf_testmod_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		*err = -ENOMEM;
		return NULL;
	}
	refcount_set(&ctx->usage, 1);

	return ctx;
}

static void testmod_free_cb(struct rcu_head *head)
{
	struct bpf_testmod_ctx *ctx;

	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
	kfree(ctx);
}

__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
{
	if (!ctx)
		return;
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, testmod_free_cb);
}

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* the primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

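/* Return a range of mostly-bogus pointers (user addresses, non-canonical
 * addresses, a mis-aligned kernel address) so selftests can verify that
 * probing the return value of an fexit-attached program never crashes
 * the kernel.
 */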
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;	/* vsyscall page address */
#endif
	default: return NULL;
	}
}

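/* Module-side counterparts of the kernel's bpf_fentry_test*() functions,
 * used to verify fentry/fexit attachment to module code with various
 * argument counts and types.
 */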
noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

int bpf_testmod_fentry_ok;

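/* Read handler for the /sys/kernel/bpf_testmod file. Reading it drives
 * every test hook above (struct-arg calls, tracepoints, fentry targets)
 * and always returns -EIO, so the userspace read deliberately fails.
 */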
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);
	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
					    21, 22, struct_arg5, 27);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	trace_bpf_testmod_test_nullable_bare(NULL);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
			21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
			21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__bpf_hook_end();

static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};

/* bpf_testmod_uprobe sysfs attribute is so far enabled for x86_64 only,
 * please see test_uretprobe_regs_change test
 */
#ifdef __x86_64__

static int
uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
		   struct pt_regs *regs)
{
	regs->ax  = 0x12345678deadbeef;
	regs->cx  = 0x87654321feebdaed;
	regs->r11 = (u64) -1;
	return true;
}

struct testmod_uprobe {
	struct path path;
	struct uprobe *uprobe;
	struct uprobe_consumer consumer;
};

static DEFINE_MUTEX(testmod_uprobe_mutex);

static struct testmod_uprobe uprobe = {
	.consumer.ret_handler = uprobe_ret_handler,
};

static int testmod_register_uprobe(loff_t offset)
{
	int err = -EBUSY;

	if (uprobe.uprobe)
		return -EBUSY;

	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe)
		goto out;

	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
	if (err)
		goto out;

	uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry),
					offset, 0, &uprobe.consumer);
	if (IS_ERR(uprobe.uprobe)) {
		err = PTR_ERR(uprobe.uprobe);
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}
out:
	mutex_unlock(&testmod_uprobe_mutex);
	return err;
}

static void testmod_unregister_uprobe(void)
{
	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe) {
		uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
		uprobe_unregister_sync();
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}

	mutex_unlock(&testmod_uprobe_mutex);
}

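/* Write handler for the /sys/kernel/bpf_testmod_uprobe file: writing a
 * non-zero offset registers a uretprobe on /proc/self/exe at that offset,
 * and writing 0 unregisters it. For example, from a test shell:
 *	echo $OFFSET > /sys/kernel/bpf_testmod_uprobe
 */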
static ssize_t
bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
			 struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t len)
{
	unsigned long offset = 0;
	int err = 0;

	if (kstrtoul(buf, 0, &offset))
		return -EINVAL;

	if (offset)
		err = testmod_register_uprobe(offset);
	else
		testmod_unregister_uprobe();

	return err ?: strlen(buf);
}

static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
	.write = bpf_testmod_uprobe_write,
};

static int register_bpf_testmod_uprobe(void)
{
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

static void unregister_bpf_testmod_uprobe(void)
{
	testmod_unregister_uprobe();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

#else
static int register_bpf_testmod_uprobe(void)
{
	return 0;
}

static void unregister_bpf_testmod_uprobe(void) { }
#endif

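/* The BTF ID sets below declare which kfuncs this module exposes and with
 * what semantics: KF_ITER_* mark open-coded iterator constructors,
 * KF_ACQUIRE/KF_RELEASE mark reference-producing/consuming kfuncs, and
 * KF_RET_NULL tells the verifier the return value must be NULL-checked.
 */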
BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)

BTF_ID_LIST(bpf_testmod_dtor_ids)
BTF_ID(struct, bpf_testmod_ctx)
BTF_ID(func, bpf_testmod_ctx_release)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

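/* Self-referential, refcounted singleton handed out by the acquire/release
 * kfuncs below; module unload waits until its refcount drops back to 1.
 */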
static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next two kfuncs can't really be used for testing, except to ensure
 * that the verifier rejects the call.
 * Acquire functions must return struct pointers, so these ones are
 * failing.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
{
}

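/* The socket kfuncs below share one module-global socket protected by
 * sock_lock. A test first calls bpf_kfunc_init_sock(), then drives
 * connect/bind/listen/sendmsg/getsockname/getpeername through it, and
 * finally tears it down with bpf_kfunc_close_sock().
 */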
__bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
{
	int proto;
	int err;

	mutex_lock(&sock_lock);

	if (sock) {
		pr_err("%s called without releasing old sock", __func__);
		err = -EPERM;
		goto out;
	}

	switch (args->af) {
	case AF_INET:
	case AF_INET6:
		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
		break;
	case AF_UNIX:
		proto = PF_UNIX;
		break;
	default:
		pr_err("invalid address family %d\n", args->af);
		err = -EINVAL;
		goto out;
	}

	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
			       proto, &sock);

	if (!err)
		/* Set timeout for call to kernel_connect() to prevent it from hanging,
		 * and consider the connection attempt failed if it returns
		 * -EINPROGRESS.
		 */
		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc void bpf_kfunc_close_sock(void)
{
	mutex_lock(&sock_lock);

	if (sock) {
		sock_release(sock);
		sock = NULL;
	}

	mutex_unlock(&sock_lock);
}

__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_connect(sock, (struct sockaddr *)&args->addr,
			     args->addrlen, 0);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_listen(sock, 128);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = sock_sendmsg(sock, &msg);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

static DEFINE_MUTEX(st_ops_mutex);
static struct bpf_testmod_st_ops *st_ops;

__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_prologue)
		ret = st_ops->test_prologue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_epilogue)
		ret = st_ops->test_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_pro_epilogue)
		ret = st_ops->test_pro_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
{
	args->a += 10;
	return args->a;
}

BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)

static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}

static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_testmod_ops_init_member(const struct btf_type *t,
				       const struct btf_member *member,
				       void *kdata, const void *udata)
{
	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
		/* For data fields, this function has to copy it and return
		 * 1 to indicate that the data has been handled by the
		 * struct_ops type, or the verifier will reject the map if
		 * the value of the data field is not zero.
		 */
		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
		return 1;
	}
	return 0;
}

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops *ops = kdata;

	if (ops->test_1)
		ops->test_1();
	/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
	 * initialized, so we need to check for NULL.
	 */
	if (ops->test_2)
		ops->test_2(4, ops->data);

	return 0;
}

static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}

static int bpf_testmod_test_1(void)
{
	return 0;
}

static void bpf_testmod_test_2(int a, int b)
{
}

static int bpf_testmod_tramp(int value)
{
	return 0;
}

static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}

static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
};

struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};

static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops2 *ops = kdata;

	ops->test_1();
	return 0;
}

static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};

struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};

static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	/* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx".
	 * r7 = r6->a;
	 * r7 += 1000;
	 * r6->a = r7;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
			       s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
	 * r6 = r1->a;
	 * r6 += 10000;
	 * r1->a = r6;
	 * r0 = r6;
	 * r0 *= 2;
	 * BPF_EXIT;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}

static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
				    const struct bpf_reg_state *reg,
				    int off, int size)
{
	if (off < 0 || off + size > sizeof(struct st_ops_args))
		return -EACCES;
	return 0;
}

static const struct bpf_verifier_ops st_ops_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
	.btf_struct_access = st_ops_btf_struct_access,
	.gen_prologue = st_ops_gen_prologue,
	.gen_epilogue = st_ops_gen_epilogue,
	.get_func_proto = bpf_base_func_proto,
};

static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
	.test_prologue = bpf_test_mod_st_ops__test_prologue,
	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
};

static int st_ops_reg(void *kdata, struct bpf_link *link)
{
	int err = 0;

	mutex_lock(&st_ops_mutex);
	if (st_ops) {
		pr_err("st_ops has already been registered\n");
		err = -EEXIST;
		goto unlock;
	}
	st_ops = kdata;

unlock:
	mutex_unlock(&st_ops_mutex);
	return err;
}

static void st_ops_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops = NULL;
	mutex_unlock(&st_ops_mutex);
}

static int st_ops_init(struct btf *btf)
{
	return 0;
}

static int st_ops_init_member(const struct btf_type *t,
			      const struct btf_member *member,
			      void *kdata, const void *udata)
{
	return 0;
}

static struct bpf_struct_ops testmod_st_ops = {
	.verifier_ops = &st_ops_verifier_ops,
	.init = st_ops_init,
	.init_member = st_ops_init_member,
	.reg = st_ops_reg,
	.unreg = st_ops_unreg,
	.cfi_stubs = &st_ops_cfi_stubs,
	.name = "bpf_testmod_st_ops",
	.owner = THIS_MODULE,
};

extern int bpf_fentry_test1(int a);

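/* Module init: register all kfunc ID sets, the three struct_ops types and
 * the dtor kfuncs, create the sysfs files, and pre-fill the tramp_1..tramp_40
 * slots so struct_ops trampoline limits can be exercised.
 */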
static int bpf_testmod_init(void)
{
	const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
		{
			.btf_id		= bpf_testmod_dtor_ids[0],
			.kfunc_btf_id	= bpf_testmod_dtor_ids[1]
		},
	};
	void **tramp;
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
						 ARRAY_SIZE(bpf_testmod_dtors),
						 THIS_MODULE);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	sock = NULL;
	mutex_init(&sock_lock);
	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	if (ret < 0)
		return ret;
	ret = register_bpf_testmod_uprobe();
	if (ret < 0)
		return ret;

	/* Ensure nothing is between tramp_1..tramp_40 */
	BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) !=
		     offsetofend(struct bpf_testmod_ops, tramp_40));
	tramp = (void **)&__bpf_testmod_ops.tramp_1;
	while (tramp <= (void **)&__bpf_testmod_ops.tramp_40)
		*tramp++ = bpf_testmod_tramp;

	return 0;
}

static void bpf_testmod_exit(void)
{
	/* Need to wait for all references to be dropped because
	 * bpf_kfunc_call_test_release() which currently resides in kernel can
	 * be called after bpf_testmod is unloaded. Once release function is
	 * moved into the module this wait can be removed.
	 */
	while (refcount_read(&prog_test_struct.cnt) > 1)
		msleep(20);

	bpf_kfunc_close_sock();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	unregister_bpf_testmod_uprobe();
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");