1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <linux/bpf.h>
4 #include <linux/btf.h>
5 #include <linux/btf_ids.h>
6 #include <linux/delay.h>
7 #include <linux/error-injection.h>
8 #include <linux/init.h>
9 #include <linux/module.h>
10 #include <linux/percpu-defs.h>
11 #include <linux/sysfs.h>
12 #include <linux/tracepoint.h>
13 #include <linux/net.h>
14 #include <linux/socket.h>
15 #include <linux/nsproxy.h>
16 #include <linux/inet.h>
17 #include <linux/in.h>
18 #include <linux/in6.h>
19 #include <linux/un.h>
20 #include <linux/filter.h>
21 #include <net/sock.h>
22 #include <linux/namei.h>
23 #include "bpf_testmod.h"
24 #include "bpf_testmod_kfunc.h"
25
26 #define CREATE_TRACE_POINTS
27 #include "bpf_testmod-events.h"
28
29 #define CONNECT_TIMEOUT_SEC 1
30
31 typedef int (*func_proto_typedef)(long);
32 typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
33 typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);
34
35 DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
36 long bpf_testmod_test_struct_arg_result;
37 static DEFINE_MUTEX(sock_lock);
38 static struct socket *sock;
39
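/* Argument types of assorted sizes and alignments. They exist only so the
 * bpf_testmod_test_struct_arg_*() and bpf_testmod_test_union_arg_*()
 * functions below have aggregate parameters for the BTF/trampoline
 * argument-passing selftests to attach to and inspect.
 */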
40 struct bpf_testmod_struct_arg_1 {
41 int a;
42 };
43 struct bpf_testmod_struct_arg_2 {
44 long a;
45 long b;
46 };
47
48 struct bpf_testmod_struct_arg_3 {
49 int a;
50 int b[];
51 };
52
53 struct bpf_testmod_struct_arg_4 {
54 u64 a;
55 int b;
56 };
57
58 struct bpf_testmod_struct_arg_5 {
59 char a;
60 short b;
61 int c;
62 long d;
63 };
64
65 union bpf_testmod_union_arg_1 {
66 char a;
67 short b;
68 struct bpf_testmod_struct_arg_1 arg;
69 };
70
71 union bpf_testmod_union_arg_2 {
72 int a;
73 long b;
74 struct bpf_testmod_struct_arg_2 arg;
75 };
76
77 __bpf_hook_start();
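/* Functions between __bpf_hook_start() and __bpf_hook_end() are deliberately
 * non-static so they get BTF and can be attached to from the selftests; the
 * markers themselves are only expected to silence missing-prototype warnings.
 */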
78
79 noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
81 bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
82 return bpf_testmod_test_struct_arg_result;
83 }
84
85 noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
87 bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
88 return bpf_testmod_test_struct_arg_result;
89 }
90
91 noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
93 bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
94 return bpf_testmod_test_struct_arg_result;
95 }
96
97 noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
100 bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
101 return bpf_testmod_test_struct_arg_result;
102 }
103
104 noinline int
bpf_testmod_test_struct_arg_5(void) {
106 bpf_testmod_test_struct_arg_result = 1;
107 return bpf_testmod_test_struct_arg_result;
108 }
109
110 noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
112 bpf_testmod_test_struct_arg_result = a->b[0];
113 return bpf_testmod_test_struct_arg_result;
114 }
115
116 noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
119 {
120 bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
121 (long)e + f.a + f.b;
122 return bpf_testmod_test_struct_arg_result;
123 }
124
125 noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
128 {
129 bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
130 (long)e + f.a + f.b + g;
131 return bpf_testmod_test_struct_arg_result;
132 }
133
134 noinline int
bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
			      short g, struct bpf_testmod_struct_arg_5 h, long i)
137 {
138 bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
139 f + g + h.a + h.b + h.c + h.d + i;
140 return bpf_testmod_test_struct_arg_result;
141 }
142
143 noinline int
bpf_testmod_test_union_arg_1(union bpf_testmod_union_arg_1 a, int b, int c)
145 {
146 bpf_testmod_test_struct_arg_result = a.arg.a + b + c;
147 return bpf_testmod_test_struct_arg_result;
148 }
149
150 noinline int
bpf_testmod_test_union_arg_2(int a, union bpf_testmod_union_arg_2 b)
152 {
153 bpf_testmod_test_struct_arg_result = a + b.arg.a + b.arg.b;
154 return bpf_testmod_test_struct_arg_result;
155 }
156
157 noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
159 bpf_testmod_test_struct_arg_result = a->a;
160 return bpf_testmod_test_struct_arg_result;
161 }
162
__weak noinline void bpf_testmod_looooooooooooooooooooooooooooooong_name(void)
164 {
165 }
166
167 __bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
169 {
170 *(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
171 }
172
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
174 {
175 it->cnt = cnt;
176
177 if (cnt < 0)
178 return -EINVAL;
179
180 it->value = value;
181
182 return 0;
183 }
184
__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
186 {
187 if (it->cnt <= 0)
188 return NULL;
189
190 it->cnt--;
191
192 return &it->value;
193 }
194
__bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq *it__iter)
196 {
197 if (it__iter->cnt < 0)
198 return 0;
199
200 return val + it__iter->value;
201 }
202
__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
204 {
205 it->cnt = 0;
206 }
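/* Illustrative only: a BPF-side user of this open-coded iterator would pair
 * the kfuncs above roughly like below (local names are made up):
 *
 *	struct bpf_iter_testmod_seq it;
 *	s64 sum = 0, *v;
 *
 *	if (!bpf_iter_testmod_seq_new(&it, 100, 3))
 *		while ((v = bpf_iter_testmod_seq_next(&it)))
 *			sum += *v;
 *	bpf_iter_testmod_seq_destroy(&it);
 */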
207
__bpf_kfunc void bpf_kfunc_common_test(void)
209 {
210 }
211
__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
				       struct bpf_dynptr *ptr__nullable)
214 {
215 }
216
__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
218 {
219 return NULL;
220 }
221
__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
223 {
224 return NULL;
225 }
226
__bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
228 {
229 }
230
__bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
232 {
233 }
234
__bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
236 {
237 }
238
__bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
240 {
241 }
242
__bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
244 {
245 }
246
__bpf_kfunc struct task_struct *bpf_kfunc_ret_rcu_test(void)
248 {
249 return NULL;
250 }
251
__bpf_kfunc int *bpf_kfunc_ret_rcu_test_nostruct(int rdonly_buf_size)
253 {
254 return NULL;
255 }
256
257 __bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
259 {
260 struct bpf_testmod_ctx *ctx;
261
262 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
263 if (!ctx) {
264 *err = -ENOMEM;
265 return NULL;
266 }
267 refcount_set(&ctx->usage, 1);
268
269 return ctx;
270 }
271
static void testmod_free_cb(struct rcu_head *head)
273 {
274 struct bpf_testmod_ctx *ctx;
275
276 ctx = container_of(head, struct bpf_testmod_ctx, rcu);
277 kfree(ctx);
278 }
279
__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
281 {
282 if (!ctx)
283 return;
284 if (refcount_dec_and_test(&ctx->usage))
285 call_rcu(&ctx->rcu, testmod_free_cb);
286 }
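/* bpf_testmod_ctx_create() is registered below as KF_ACQUIRE | KF_RET_NULL
 * and bpf_testmod_ctx_release() as KF_RELEASE, so a BPF caller is expected to
 * NULL-check the result and either release it explicitly or store it as a map
 * kptr (the dtor registered in bpf_testmod_init() covers that path).
 */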
287
288 static struct bpf_testmod_ops3 *st_ops3;
289
static int bpf_testmod_test_3(void)
291 {
292 return 0;
293 }
294
static int bpf_testmod_test_4(void)
296 {
297 return 0;
298 }
299
300 static struct bpf_testmod_ops3 __bpf_testmod_ops3 = {
301 .test_1 = bpf_testmod_test_3,
302 .test_2 = bpf_testmod_test_4,
303 };
304
static void bpf_testmod_test_struct_ops3(void)
306 {
307 if (st_ops3)
308 st_ops3->test_1();
309 }
310
__bpf_kfunc void bpf_testmod_ops3_call_test_1(void)
312 {
313 st_ops3->test_1();
314 }
315
__bpf_kfunc void bpf_testmod_ops3_call_test_2(void)
317 {
318 st_ops3->test_2();
319 }
320
321 struct bpf_testmod_btf_type_tag_1 {
322 int a;
323 };
324
325 struct bpf_testmod_btf_type_tag_2 {
326 struct bpf_testmod_btf_type_tag_1 __user *p;
327 };
328
329 struct bpf_testmod_btf_type_tag_3 {
330 struct bpf_testmod_btf_type_tag_1 __percpu *p;
331 };
332
333 noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
335 BTF_TYPE_EMIT(func_proto_typedef);
336 BTF_TYPE_EMIT(func_proto_typedef_nested1);
337 BTF_TYPE_EMIT(func_proto_typedef_nested2);
338 return arg->a;
339 }
340
341 noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
343 return arg->p->a;
344 }
345
346 noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
348 return arg->a;
349 }
350
351 noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
353 return arg->p->a;
354 }
355
noinline int bpf_testmod_loop_test(int n)
357 {
358 /* Make sum volatile, so smart compilers, such as clang, will not
359 * optimize the code by removing the loop.
360 */
361 volatile int sum = 0;
362 int i;
363
364 /* the primary goal of this test is to test LBR. Create a lot of
365 * branches in the function, so we can catch it easily.
366 */
367 for (i = 0; i < n; i++)
368 sum += i;
369 return sum;
370 }
371
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
373 {
374 static struct file f = {};
375
376 switch (arg) {
377 case 1: return (void *)EINVAL; /* user addr */
378 case 2: return (void *)0xcafe4a11; /* user addr */
379 case 3: return (void *)-EINVAL; /* canonical, but invalid */
380 case 4: return (void *)(1ull << 60); /* non-canonical and invalid */
381 case 5: return (void *)~(1ull << 30); /* trigger extable */
382 case 6: return &f; /* valid addr */
383 case 7: return (void *)((long)&f | 1); /* kernel tricks */
384 #ifdef CONFIG_X86_64
385 case 8: return (void *)VSYSCALL_ADDR; /* vsyscall page address */
386 #endif
387 default: return NULL;
388 }
389 }
390
noinline int bpf_testmod_fentry_test1(int a)
392 {
393 return a + 1;
394 }
395
noinline int bpf_testmod_fentry_test2(int a, u64 b)
397 {
398 return a + b;
399 }
400
noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
402 {
403 return a + b + c;
404 }
405
noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
408 {
409 return a + (long)b + c + d + (long)e + f + g;
410 }
411
noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
416 {
417 return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
418 }
419
noinline void bpf_testmod_stacktrace_test(void)
421 {
422 /* used for stacktrace test as attach function */
423 asm volatile ("");
424 }
425
noinline void bpf_testmod_stacktrace_test_3(void)
427 {
428 bpf_testmod_stacktrace_test();
429 asm volatile ("");
430 }
431
noinline void bpf_testmod_stacktrace_test_2(void)
433 {
434 bpf_testmod_stacktrace_test_3();
435 asm volatile ("");
436 }
437
noinline void bpf_testmod_stacktrace_test_1(void)
439 {
440 bpf_testmod_stacktrace_test_2();
441 asm volatile ("");
442 }
443
444 int bpf_testmod_fentry_ok;
445
446 noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      const struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
450 {
451 struct bpf_testmod_test_read_ctx ctx = {
452 .buf = buf,
453 .off = off,
454 .len = len,
455 };
456 struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
457 struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
458 struct bpf_testmod_struct_arg_3 *struct_arg3;
459 struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
460 struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
461 union bpf_testmod_union_arg_1 union_arg1 = { .arg = {1} };
462 union bpf_testmod_union_arg_2 union_arg2 = { .arg = {2, 3} };
463 int i = 1;
464
465 while (bpf_testmod_return_ptr(i))
466 i++;
467
468 (void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
469 (void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
470 (void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
471 (void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
472 (void)bpf_testmod_test_struct_arg_5();
473 (void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
474 (void *)20, struct_arg4);
475 (void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
476 (void *)20, struct_arg4, 23);
477 (void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
478 21, 22, struct_arg5, 27);
479
480 (void)bpf_testmod_test_union_arg_1(union_arg1, 4, 5);
481 (void)bpf_testmod_test_union_arg_2(6, union_arg2);
482
483 (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
484
485 (void)trace_bpf_testmod_test_raw_tp_null_tp(NULL);
486
487 bpf_testmod_test_struct_ops3();
488
489 struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
490 sizeof(int)), GFP_KERNEL);
491 if (struct_arg3 != NULL) {
492 struct_arg3->b[0] = 1;
493 (void)bpf_testmod_test_struct_arg_6(struct_arg3);
494 kfree(struct_arg3);
495 }
496
497 /* This is always true. Use the check to make sure the compiler
498 * doesn't remove bpf_testmod_loop_test.
499 */
500 if (bpf_testmod_loop_test(101) > 100)
501 trace_bpf_testmod_test_read(current, &ctx);
502
503 trace_bpf_testmod_test_nullable_bare_tp(NULL);
504
505 /* Magic number to enable writable tp */
506 if (len == 64) {
507 struct bpf_testmod_test_writable_ctx writable = {
508 .val = 1024,
509 };
510 trace_bpf_testmod_test_writable_bare_tp(&writable);
511 if (writable.early_ret)
512 return snprintf(buf, len, "%d\n", writable.val);
513 }
514
515 if (bpf_testmod_fentry_test1(1) != 2 ||
516 bpf_testmod_fentry_test2(2, 3) != 5 ||
517 bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
518 bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
519 21, 22) != 133 ||
520 bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
521 21, 22, 23, 24, 25, 26) != 231)
522 goto out;
523
524 bpf_testmod_stacktrace_test_1();
525
526 bpf_testmod_fentry_ok = 1;
527 out:
528 return -EIO; /* always fail */
529 }
530 EXPORT_SYMBOL(bpf_testmod_test_read);
531 ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
532
533 noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		       const struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t len)
537 {
538 struct bpf_testmod_test_write_ctx ctx = {
539 .buf = buf,
540 .off = off,
541 .len = len,
542 };
543
544 trace_bpf_testmod_test_write_bare_tp(current, &ctx);
545
546 return -EIO; /* always fail */
547 }
548 EXPORT_SYMBOL(bpf_testmod_test_write);
549 ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);
550
noinline int bpf_fentry_shadow_test(int a)
552 {
553 return a + 2;
554 }
555 EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
556
557 __bpf_hook_end();
558
559 static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
560 .attr = { .name = "bpf_testmod", .mode = 0666, },
561 .read = bpf_testmod_test_read,
562 .write = bpf_testmod_test_write,
563 };
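/* With kernel_kobj as parent this shows up as /sys/kernel/bpf_testmod; reads
 * and writes exist only to drive the hooks and tracepoints above and normally
 * fail with -EIO on purpose.
 */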
564
565 /* bpf_testmod_uprobe sysfs attribute is so far enabled for x86_64 only,
566 * please see test_uretprobe_regs_change test
567 */
568 #ifdef __x86_64__
569
570 static int
uprobe_handler(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data)
572 {
573 regs->cx = 0x87654321feebdaed;
574 return 0;
575 }
576
577 static int
uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
		   struct pt_regs *regs, __u64 *data)
580
581 {
582 regs->ax = 0x12345678deadbeef;
583 regs->r11 = (u64) -1;
584 return 0;
585 }
586
587 struct testmod_uprobe {
588 struct path path;
589 struct uprobe *uprobe;
590 struct uprobe_consumer consumer;
591 };
592
593 static DEFINE_MUTEX(testmod_uprobe_mutex);
594
595 static struct testmod_uprobe uprobe = {
596 .consumer.handler = uprobe_handler,
597 .consumer.ret_handler = uprobe_ret_handler,
598 };
599
static int testmod_register_uprobe(loff_t offset)
601 {
602 int err = -EBUSY;
603
604 if (uprobe.uprobe)
605 return -EBUSY;
606
607 mutex_lock(&testmod_uprobe_mutex);
608
609 if (uprobe.uprobe)
610 goto out;
611
612 err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
613 if (err)
614 goto out;
615
616 uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry),
617 offset, 0, &uprobe.consumer);
618 if (IS_ERR(uprobe.uprobe)) {
619 err = PTR_ERR(uprobe.uprobe);
620 path_put(&uprobe.path);
621 uprobe.uprobe = NULL;
622 }
623 out:
624 mutex_unlock(&testmod_uprobe_mutex);
625 return err;
626 }
627
static void testmod_unregister_uprobe(void)
629 {
630 mutex_lock(&testmod_uprobe_mutex);
631
632 if (uprobe.uprobe) {
633 uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
634 uprobe_unregister_sync();
635 path_put(&uprobe.path);
636 uprobe.uprobe = NULL;
637 }
638
639 mutex_unlock(&testmod_uprobe_mutex);
640 }
641
642 static ssize_t
bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
			 const struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t len)
646 {
647 unsigned long offset = 0;
648 int err = 0;
649
650 if (kstrtoul(buf, 0, &offset))
651 return -EINVAL;
652
653 if (offset)
654 err = testmod_register_uprobe(offset);
655 else
656 testmod_unregister_uprobe();
657
658 return err ?: strlen(buf);
659 }
660
661 static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
662 .attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
663 .write = bpf_testmod_uprobe_write,
664 };
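/* Rough usage from user space (illustrative): writing a non-zero offset to
 * /sys/kernel/bpf_testmod_uprobe registers a uprobe/uretprobe at that offset
 * in the writer's /proc/self/exe, and writing 0 unregisters it again.
 */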
665
static int register_bpf_testmod_uprobe(void)
667 {
668 return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
669 }
670
static void unregister_bpf_testmod_uprobe(void)
672 {
673 testmod_unregister_uprobe();
674 sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
675 }
676
677 #else
static int register_bpf_testmod_uprobe(void)
679 {
680 return 0;
681 }
682
static void unregister_bpf_testmod_uprobe(void) { }
684 #endif
685
686 BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
687 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
688 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
689 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
690 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
691 BTF_ID_FLAGS(func, bpf_kfunc_common_test)
692 BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
693 BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
694 BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
695 BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE)
696 BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS)
697 BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS)
698 BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS)
699 BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
700 BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test, KF_RET_NULL | KF_RCU_PROTECTED)
701 BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test_nostruct, KF_RET_NULL | KF_RCU_PROTECTED)
702 BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
703 BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
704 BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1)
705 BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2)
706 BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)
707
708 BTF_ID_LIST(bpf_testmod_dtor_ids)
709 BTF_ID(struct, bpf_testmod_ctx)
710 BTF_ID(func, bpf_testmod_ctx_release)
711
712 static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
713 .owner = THIS_MODULE,
714 .set = &bpf_testmod_common_kfunc_ids,
715 };
716
__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
718 {
719 return a + b + c + d;
720 }
721
__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
723 {
724 return a + b;
725 }
726
__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
728 {
729 return sk;
730 }
731
__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
733 {
734 /* Provoke the compiler to assume that the caller has sign-extended a,
735 * b and c on platforms where this is required (e.g. s390x).
736 */
737 return (long)a + (long)b + (long)c + d;
738 }
739
740 static struct prog_test_ref_kfunc prog_test_struct = {
741 .a = 42,
742 .b = 108,
743 .next = &prog_test_struct,
744 .cnt = REFCOUNT_INIT(1),
745 };
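/* Shared refcounted object handed out by bpf_kfunc_call_test_acquire();
 * ->next points at itself so tests have a walkable pointer, and module unload
 * waits for ->cnt to drop back to 1 (see bpf_testmod_exit()).
 */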
746
747 __bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
749 {
750 refcount_inc(&prog_test_struct.cnt);
751 return &prog_test_struct;
752 }
753
__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
755 {
756 WARN_ON_ONCE(1);
757 }
758
759 __bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
761 {
762 WARN_ON_ONCE(1);
763 return NULL;
764 }
765
__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
767 {
768 WARN_ON_ONCE(1);
769 }
770
static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
772 {
773 if (size > 2 * sizeof(int))
774 return NULL;
775
776 return (int *)p;
777 }
778
__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
781 {
782 return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
783 }
784
__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
787 {
788 return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
789 }
790
/* The next two can't really be used for testing except to ensure that the
 * verifier rejects the call.
 * Acquire functions must return struct pointers, so these are expected to
 * fail.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
798 {
799 return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
800 }
801
__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
803 {
804 }
805
__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
807 {
808 }
809
__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
811 {
812 }
813
__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
815 {
816 }
817
__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
819 {
820 }
821
__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
823 {
824 }
825
__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
827 {
828 }
829
__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
831 {
832 }
833
__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
835 {
836 }
837
__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
839 {
840 }
841
__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
843 {
844 /* p != NULL, but p->cnt could be 0 */
845 }
846
__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
848 {
849 }
850
__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
852 {
853 return arg;
854 }
855
__bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
857 {
858 }
859
__bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
861 {
862 int proto;
863 int err;
864
865 mutex_lock(&sock_lock);
866
867 if (sock) {
868 pr_err("%s called without releasing old sock", __func__);
869 err = -EPERM;
870 goto out;
871 }
872
873 switch (args->af) {
874 case AF_INET:
875 case AF_INET6:
876 proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
877 break;
878 case AF_UNIX:
879 proto = PF_UNIX;
880 break;
881 default:
882 pr_err("invalid address family %d\n", args->af);
883 err = -EINVAL;
884 goto out;
885 }
886
887 err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
888 proto, &sock);
889
890 if (!err)
891 /* Set timeout for call to kernel_connect() to prevent it from hanging,
892 * and consider the connection attempt failed if it returns
893 * -EINPROGRESS.
894 */
895 sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
896 out:
897 mutex_unlock(&sock_lock);
898
899 return err;
900 }
901
__bpf_kfunc void bpf_kfunc_close_sock(void)
903 {
904 mutex_lock(&sock_lock);
905
906 if (sock) {
907 sock_release(sock);
908 sock = NULL;
909 }
910
911 mutex_unlock(&sock_lock);
912 }
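/* The socket kfuncs below operate on the single global 'sock' under
 * sock_lock: the expected sequence is bpf_kfunc_init_sock(), then any of the
 * bind/connect/listen/sendmsg/getsockname/getpeername helpers, and finally
 * bpf_kfunc_close_sock().
 */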
913
__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
915 {
916 int err;
917
918 if (args->addrlen > sizeof(args->addr))
919 return -EINVAL;
920
921 mutex_lock(&sock_lock);
922
923 if (!sock) {
924 pr_err("%s called without initializing sock", __func__);
925 err = -EPERM;
926 goto out;
927 }
928
929 err = kernel_connect(sock, (struct sockaddr *)&args->addr,
930 args->addrlen, 0);
931 out:
932 mutex_unlock(&sock_lock);
933
934 return err;
935 }
936
__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
938 {
939 int err;
940
941 if (args->addrlen > sizeof(args->addr))
942 return -EINVAL;
943
944 mutex_lock(&sock_lock);
945
946 if (!sock) {
947 pr_err("%s called without initializing sock", __func__);
948 err = -EPERM;
949 goto out;
950 }
951
952 err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
953 out:
954 mutex_unlock(&sock_lock);
955
956 return err;
957 }
958
__bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
960 {
961 int err;
962
963 mutex_lock(&sock_lock);
964
965 if (!sock) {
966 pr_err("%s called without initializing sock", __func__);
967 err = -EPERM;
968 goto out;
969 }
970
971 err = kernel_listen(sock, 128);
972 out:
973 mutex_unlock(&sock_lock);
974
975 return err;
976 }
977
__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
979 {
980 struct msghdr msg = {
981 .msg_name = &args->addr.addr,
982 .msg_namelen = args->addr.addrlen,
983 };
984 struct kvec iov;
985 int err;
986
987 if (args->addr.addrlen > sizeof(args->addr.addr) ||
988 args->msglen > sizeof(args->msg))
989 return -EINVAL;
990
991 iov.iov_base = args->msg;
992 iov.iov_len = args->msglen;
993
994 mutex_lock(&sock_lock);
995
996 if (!sock) {
997 pr_err("%s called without initializing sock", __func__);
998 err = -EPERM;
999 goto out;
1000 }
1001
1002 err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
1003 args->addr.addrlen = msg.msg_namelen;
1004 out:
1005 mutex_unlock(&sock_lock);
1006
1007 return err;
1008 }
1009
__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
1011 {
1012 struct msghdr msg = {
1013 .msg_name = &args->addr.addr,
1014 .msg_namelen = args->addr.addrlen,
1015 };
1016 struct kvec iov;
1017 int err;
1018
1019 if (args->addr.addrlen > sizeof(args->addr.addr) ||
1020 args->msglen > sizeof(args->msg))
1021 return -EINVAL;
1022
1023 iov.iov_base = args->msg;
1024 iov.iov_len = args->msglen;
1025
1026 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
1027 mutex_lock(&sock_lock);
1028
1029 if (!sock) {
1030 pr_err("%s called without initializing sock", __func__);
1031 err = -EPERM;
1032 goto out;
1033 }
1034
1035 err = sock_sendmsg(sock, &msg);
1036 args->addr.addrlen = msg.msg_namelen;
1037 out:
1038 mutex_unlock(&sock_lock);
1039
1040 return err;
1041 }
1042
__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
1044 {
1045 int err;
1046
1047 mutex_lock(&sock_lock);
1048
1049 if (!sock) {
1050 pr_err("%s called without initializing sock", __func__);
1051 err = -EPERM;
1052 goto out;
1053 }
1054
1055 err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
1056 if (err < 0)
1057 goto out;
1058
1059 args->addrlen = err;
1060 err = 0;
1061 out:
1062 mutex_unlock(&sock_lock);
1063
1064 return err;
1065 }
1066
__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
1068 {
1069 int err;
1070
1071 mutex_lock(&sock_lock);
1072
1073 if (!sock) {
1074 pr_err("%s called without initializing sock", __func__);
1075 err = -EPERM;
1076 goto out;
1077 }
1078
1079 err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
1080 if (err < 0)
1081 goto out;
1082
1083 args->addrlen = err;
1084 err = 0;
1085 out:
1086 mutex_unlock(&sock_lock);
1087
1088 return err;
1089 }
1090
1091 static DEFINE_MUTEX(st_ops_mutex);
1092 static struct bpf_testmod_st_ops *st_ops;
1093
__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
1095 {
1096 int ret = -1;
1097
1098 mutex_lock(&st_ops_mutex);
1099 if (st_ops && st_ops->test_prologue)
1100 ret = st_ops->test_prologue(args);
1101 mutex_unlock(&st_ops_mutex);
1102
1103 return ret;
1104 }
1105
__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
1107 {
1108 int ret = -1;
1109
1110 mutex_lock(&st_ops_mutex);
1111 if (st_ops && st_ops->test_epilogue)
1112 ret = st_ops->test_epilogue(args);
1113 mutex_unlock(&st_ops_mutex);
1114
1115 return ret;
1116 }
1117
__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
1119 {
1120 int ret = -1;
1121
1122 mutex_lock(&st_ops_mutex);
1123 if (st_ops && st_ops->test_pro_epilogue)
1124 ret = st_ops->test_pro_epilogue(args);
1125 mutex_unlock(&st_ops_mutex);
1126
1127 return ret;
1128 }
1129
__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
1131 {
1132 args->a += 10;
1133 return args->a;
1134 }
1135
1136 __bpf_kfunc int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id);
1137
1138 BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
1140 BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
1141 BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
1142 BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
1143 BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
1144 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
1145 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
1146 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
1147 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
1148 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
1149 BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
1150 BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
1151 BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
1152 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
1153 BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
1154 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
1155 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
1156 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
1157 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
1158 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
1159 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
1160 BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
1161 BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
1162 BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
1163 BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
1164 BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
1165 BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
1166 BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
1167 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
1168 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
1169 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
1170 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
1171 BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
1172 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
1173 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
1174 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
1175 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
1176 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
1177 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
1178 BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1, KF_TRUSTED_ARGS)
1179 BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
1180
1181 static int bpf_testmod_ops_init(struct btf *btf)
1182 {
1183 return 0;
1184 }
1185
static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
1190 {
1191 return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
1192 }
1193
static int bpf_testmod_ops_init_member(const struct btf_type *t,
					const struct btf_member *member,
					void *kdata, const void *udata)
1197 {
1198 if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
1199 /* For data fields, this function has to copy it and return
1200 * 1 to indicate that the data has been handled by the
1201 * struct_ops type, or the verifier will reject the map if
1202 * the value of the data field is not zero.
1203 */
1204 ((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
1205 return 1;
1206 }
1207 return 0;
1208 }
1209
1210 static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
1211 .owner = THIS_MODULE,
1212 .set = &bpf_testmod_check_kfunc_ids,
1213 };
1214
1215 static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
1216 .get_func_proto = bpf_base_func_proto,
1217 .is_valid_access = bpf_testmod_ops_is_valid_access,
1218 };
1219
1220 static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = {
1221 .is_valid_access = bpf_testmod_ops_is_valid_access,
1222 };
1223
static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
1225 {
1226 struct bpf_testmod_ops *ops = kdata;
1227
1228 if (ops->test_1)
1229 ops->test_1();
1230 /* Some test cases (ex. struct_ops_maybe_null) may not have test_2
1231 * initialized, so we need to check for NULL.
1232 */
1233 if (ops->test_2)
1234 ops->test_2(4, ops->data);
1235
1236 return 0;
1237 }
1238
static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
1240 {
1241 }
1242
static int bpf_testmod_test_1(void)
1244 {
1245 return 0;
1246 }
1247
static void bpf_testmod_test_2(int a, int b)
1249 {
1250 }
1251
static int bpf_testmod_tramp(int value)
1253 {
1254 return 0;
1255 }
1256
static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
1259 {
1260 return 0;
1261 }
1262
static int bpf_testmod_ops__test_refcounted(int dummy,
					    struct task_struct *task__ref)
1265 {
1266 return 0;
1267 }
1268
1269 static struct task_struct *
bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref,
				      struct cgroup *cgrp)
1272 {
1273 return NULL;
1274 }
1275
1276 static struct bpf_testmod_ops __bpf_testmod_ops = {
1277 .test_1 = bpf_testmod_test_1,
1278 .test_2 = bpf_testmod_test_2,
1279 .test_maybe_null = bpf_testmod_ops__test_maybe_null,
1280 .test_refcounted = bpf_testmod_ops__test_refcounted,
1281 .test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr,
1282 };
1283
1284 struct bpf_struct_ops bpf_bpf_testmod_ops = {
1285 .verifier_ops = &bpf_testmod_verifier_ops,
1286 .init = bpf_testmod_ops_init,
1287 .init_member = bpf_testmod_ops_init_member,
1288 .reg = bpf_dummy_reg,
1289 .unreg = bpf_dummy_unreg,
1290 .cfi_stubs = &__bpf_testmod_ops,
1291 .name = "bpf_testmod_ops",
1292 .owner = THIS_MODULE,
1293 };
1294
static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
1296 {
1297 struct bpf_testmod_ops2 *ops = kdata;
1298
1299 ops->test_1();
1300 return 0;
1301 }
1302
1303 static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
1304 .test_1 = bpf_testmod_test_1,
1305 };
1306
1307 struct bpf_struct_ops bpf_testmod_ops2 = {
1308 .verifier_ops = &bpf_testmod_verifier_ops,
1309 .init = bpf_testmod_ops_init,
1310 .init_member = bpf_testmod_ops_init_member,
1311 .reg = bpf_dummy_reg2,
1312 .unreg = bpf_dummy_unreg,
1313 .cfi_stubs = &__bpf_testmod_ops2,
1314 .name = "bpf_testmod_ops2",
1315 .owner = THIS_MODULE,
1316 };
1317
static int st_ops3_reg(void *kdata, struct bpf_link *link)
1319 {
1320 int err = 0;
1321
1322 mutex_lock(&st_ops_mutex);
1323 if (st_ops3) {
1324 pr_err("st_ops has already been registered\n");
1325 err = -EEXIST;
1326 goto unlock;
1327 }
1328 st_ops3 = kdata;
1329
1330 unlock:
1331 mutex_unlock(&st_ops_mutex);
1332 return err;
1333 }
1334
static void st_ops3_unreg(void *kdata, struct bpf_link *link)
1336 {
1337 mutex_lock(&st_ops_mutex);
1338 st_ops3 = NULL;
1339 mutex_unlock(&st_ops_mutex);
1340 }
1341
static void test_1_recursion_detected(struct bpf_prog *prog)
1343 {
1344 struct bpf_prog_stats *stats;
1345
1346 stats = this_cpu_ptr(prog->stats);
1347 printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu",
1348 u64_stats_read(&stats->misses));
1349 }
1350
static int st_ops3_check_member(const struct btf_type *t,
				const struct btf_member *member,
				const struct bpf_prog *prog)
1354 {
1355 u32 moff = __btf_member_bit_offset(t, member) / 8;
1356
1357 switch (moff) {
1358 case offsetof(struct bpf_testmod_ops3, test_1):
1359 prog->aux->priv_stack_requested = true;
1360 prog->aux->recursion_detected = test_1_recursion_detected;
1361 fallthrough;
1362 default:
1363 break;
1364 }
1365 return 0;
1366 }
1367
1368 struct bpf_struct_ops bpf_testmod_ops3 = {
1369 .verifier_ops = &bpf_testmod_verifier_ops3,
1370 .init = bpf_testmod_ops_init,
1371 .init_member = bpf_testmod_ops_init_member,
1372 .reg = st_ops3_reg,
1373 .unreg = st_ops3_unreg,
1374 .check_member = st_ops3_check_member,
1375 .cfi_stubs = &__bpf_testmod_ops3,
1376 .name = "bpf_testmod_ops3",
1377 .owner = THIS_MODULE,
1378 };
1379
static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
1381 {
1382 return 0;
1383 }
1384
static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
1386 {
1387 return 0;
1388 }
1389
static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
1391 {
1392 return 0;
1393 }
1394
1395 static int bpf_cgroup_from_id_id;
1396 static int bpf_cgroup_release_id;
1397
static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct_write,
					  const struct bpf_prog *prog)
1400 {
1401 struct bpf_insn *insn = insn_buf;
1402
1403 /* r8 = r1; // r8 will be "u64 *ctx".
1404 * r1 = 0;
1405 * r0 = bpf_cgroup_from_id(r1);
1406 * if r0 != 0 goto pc+5;
1407 * r6 = r8[0]; // r6 will be "struct st_ops *args".
1408 * r7 = r6->a;
1409 * r7 += 1000;
1410 * r6->a = r7;
1411 * goto pc+2;
1412 * r1 = r0;
1413 * bpf_cgroup_release(r1);
1414 * r1 = r8;
1415 */
1416 *insn++ = BPF_MOV64_REG(BPF_REG_8, BPF_REG_1);
1417 *insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
1418 *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
1419 *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 5);
1420 *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_8, 0);
1421 *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
1422 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
1423 *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
1424 *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
1425 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
1426 *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
1427 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8);
1428 *insn++ = prog->insnsi[0];
1429
1430 return insn - insn_buf;
1431 }
1432
static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
					  s16 ctx_stack_off)
1435 {
1436 struct bpf_insn *insn = insn_buf;
1437
1438 /* r1 = 0;
1439 * r6 = 0;
1440 * r0 = bpf_cgroup_from_id(r1);
1441 * if r0 != 0 goto pc+6;
1442 * r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
1443 * r1 = r1[0]; // r1 will be "struct st_ops *args"
1444 * r6 = r1->a;
1445 * r6 += 10000;
1446 * r1->a = r6;
1447 * goto pc+2
1448 * r1 = r0;
1449 * bpf_cgroup_release(r1);
1450 * r0 = r6;
1451 * r0 *= 2;
1452 * BPF_EXIT;
1453 */
1454 *insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
1455 *insn++ = BPF_MOV64_IMM(BPF_REG_6, 0);
1456 *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
1457 *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 6);
1458 *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
1459 *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
1460 *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
1461 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
1462 *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
1463 *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
1464 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
1465 *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
1466 *insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
1467 *insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
1468 *insn++ = BPF_EXIT_INSN();
1469
1470 return insn - insn_buf;
1471 }
1472
1473 #define KFUNC_PRO_EPI_PREFIX "test_kfunc_"
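/* Programs whose name starts with this prefix get the *_with_kfunc variants
 * above, whose generated prologue/epilogue additionally call
 * bpf_cgroup_from_id()/bpf_cgroup_release() from the emitted instructions.
 */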
static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
1476 {
1477 struct bpf_insn *insn = insn_buf;
1478
1479 if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
1480 strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
1481 return 0;
1482
1483 if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
1484 return st_ops_gen_prologue_with_kfunc(insn_buf, direct_write, prog);
1485
1486 /* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx".
1487 * r7 = r6->a;
1488 * r7 += 1000;
1489 * r6->a = r7;
1490 */
1491 *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
1492 *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
1493 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
1494 *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
1495 *insn++ = prog->insnsi[0];
1496
1497 return insn - insn_buf;
1498 }
1499
static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
			       s16 ctx_stack_off)
1502 {
1503 struct bpf_insn *insn = insn_buf;
1504
1505 if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
1506 strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
1507 return 0;
1508
1509 if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
1510 return st_ops_gen_epilogue_with_kfunc(insn_buf, prog, ctx_stack_off);
1511
1512 /* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
1513 * r1 = r1[0]; // r1 will be "struct st_ops *args"
1514 * r6 = r1->a;
1515 * r6 += 10000;
1516 * r1->a = r6;
1517 * r0 = r6;
1518 * r0 *= 2;
1519 * BPF_EXIT;
1520 */
1521 *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
1522 *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
1523 *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
1524 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
1525 *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
1526 *insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
1527 *insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
1528 *insn++ = BPF_EXIT_INSN();
1529
1530 return insn - insn_buf;
1531 }
1532
static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
				    const struct bpf_reg_state *reg,
				    int off, int size)
1536 {
1537 if (off < 0 || off + size > sizeof(struct st_ops_args))
1538 return -EACCES;
1539 return 0;
1540 }
1541
1542 static const struct bpf_verifier_ops st_ops_verifier_ops = {
1543 .is_valid_access = bpf_testmod_ops_is_valid_access,
1544 .btf_struct_access = st_ops_btf_struct_access,
1545 .gen_prologue = st_ops_gen_prologue,
1546 .gen_epilogue = st_ops_gen_epilogue,
1547 .get_func_proto = bpf_base_func_proto,
1548 };
1549
1550 static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
1551 .test_prologue = bpf_test_mod_st_ops__test_prologue,
1552 .test_epilogue = bpf_test_mod_st_ops__test_epilogue,
1553 .test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
1554 };
1555
static int st_ops_reg(void *kdata, struct bpf_link *link)
1557 {
1558 int err = 0;
1559
1560 mutex_lock(&st_ops_mutex);
1561 if (st_ops) {
1562 pr_err("st_ops has already been registered\n");
1563 err = -EEXIST;
1564 goto unlock;
1565 }
1566 st_ops = kdata;
1567
1568 unlock:
1569 mutex_unlock(&st_ops_mutex);
1570 return err;
1571 }
1572
static void st_ops_unreg(void *kdata, struct bpf_link *link)
1574 {
1575 mutex_lock(&st_ops_mutex);
1576 st_ops = NULL;
1577 mutex_unlock(&st_ops_mutex);
1578 }
1579
static int st_ops_init(struct btf *btf)
1581 {
1582 struct btf *kfunc_btf;
1583
1584 bpf_cgroup_from_id_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf);
1585 bpf_cgroup_release_id = bpf_find_btf_id("bpf_cgroup_release", BTF_KIND_FUNC, &kfunc_btf);
1586 if (bpf_cgroup_from_id_id < 0 || bpf_cgroup_release_id < 0)
1587 return -EINVAL;
1588
1589 return 0;
1590 }
1591
static int st_ops_init_member(const struct btf_type *t,
			      const struct btf_member *member,
			      void *kdata, const void *udata)
1595 {
1596 return 0;
1597 }
1598
1599 static struct bpf_struct_ops testmod_st_ops = {
1600 .verifier_ops = &st_ops_verifier_ops,
1601 .init = st_ops_init,
1602 .init_member = st_ops_init_member,
1603 .reg = st_ops_reg,
1604 .unreg = st_ops_unreg,
1605 .cfi_stubs = &st_ops_cfi_stubs,
1606 .name = "bpf_testmod_st_ops",
1607 .owner = THIS_MODULE,
1608 };
1609
1610 struct hlist_head multi_st_ops_list;
1611 static DEFINE_SPINLOCK(multi_st_ops_lock);
1612
static int multi_st_ops_init(struct btf *btf)
1614 {
1615 spin_lock_init(&multi_st_ops_lock);
1616 INIT_HLIST_HEAD(&multi_st_ops_list);
1617
1618 return 0;
1619 }
1620
static int multi_st_ops_init_member(const struct btf_type *t,
				    const struct btf_member *member,
				    void *kdata, const void *udata)
1624 {
1625 return 0;
1626 }
1627
static struct bpf_testmod_multi_st_ops *multi_st_ops_find_nolock(u32 id)
1629 {
1630 struct bpf_testmod_multi_st_ops *st_ops;
1631
1632 hlist_for_each_entry(st_ops, &multi_st_ops_list, node) {
1633 if (st_ops->id == id)
1634 return st_ops;
1635 }
1636
1637 return NULL;
1638 }
1639
int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id)
1641 {
1642 struct bpf_testmod_multi_st_ops *st_ops;
1643 unsigned long flags;
1644 int ret = -1;
1645
1646 spin_lock_irqsave(&multi_st_ops_lock, flags);
1647 st_ops = multi_st_ops_find_nolock(id);
1648 if (st_ops)
1649 ret = st_ops->test_1(args);
1650 spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1651
1652 return ret;
1653 }
1654
static int multi_st_ops_reg(void *kdata, struct bpf_link *link)
1656 {
1657 struct bpf_testmod_multi_st_ops *st_ops =
1658 (struct bpf_testmod_multi_st_ops *)kdata;
1659 unsigned long flags;
1660 int err = 0;
1661 u32 id;
1662
1663 if (!st_ops->test_1)
1664 return -EINVAL;
1665
1666 id = bpf_struct_ops_id(kdata);
1667
1668 spin_lock_irqsave(&multi_st_ops_lock, flags);
1669 if (multi_st_ops_find_nolock(id)) {
1670 pr_err("multi_st_ops(id:%d) has already been registered\n", id);
1671 err = -EEXIST;
1672 goto unlock;
1673 }
1674
1675 st_ops->id = id;
1676 hlist_add_head(&st_ops->node, &multi_st_ops_list);
1677 unlock:
1678 spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1679
1680 return err;
1681 }
1682
static void multi_st_ops_unreg(void *kdata, struct bpf_link *link)
1684 {
1685 struct bpf_testmod_multi_st_ops *st_ops;
1686 unsigned long flags;
1687 u32 id;
1688
1689 id = bpf_struct_ops_id(kdata);
1690
1691 spin_lock_irqsave(&multi_st_ops_lock, flags);
1692 st_ops = multi_st_ops_find_nolock(id);
1693 if (st_ops)
1694 hlist_del(&st_ops->node);
1695 spin_unlock_irqrestore(&multi_st_ops_lock, flags);
1696 }
1697
static int bpf_testmod_multi_st_ops__test_1(struct st_ops_args *args)
1699 {
1700 return 0;
1701 }
1702
1703 static struct bpf_testmod_multi_st_ops multi_st_ops_cfi_stubs = {
1704 .test_1 = bpf_testmod_multi_st_ops__test_1,
1705 };
1706
1707 struct bpf_struct_ops testmod_multi_st_ops = {
1708 .verifier_ops = &bpf_testmod_verifier_ops,
1709 .init = multi_st_ops_init,
1710 .init_member = multi_st_ops_init_member,
1711 .reg = multi_st_ops_reg,
1712 .unreg = multi_st_ops_unreg,
1713 .cfi_stubs = &multi_st_ops_cfi_stubs,
1714 .name = "bpf_testmod_multi_st_ops",
1715 .owner = THIS_MODULE,
1716 };
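/* Unlike bpf_testmod_st_ops above, several maps of this struct_ops can be
 * registered at the same time; each instance is keyed by bpf_struct_ops_id()
 * and kept on multi_st_ops_list so bpf_kfunc_multi_st_ops_test_1() can find
 * it by id.
 */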
1717
1718 extern int bpf_fentry_test1(int a);
1719
static int bpf_testmod_init(void)
1721 {
1722 const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
1723 {
1724 .btf_id = bpf_testmod_dtor_ids[0],
1725 .kfunc_btf_id = bpf_testmod_dtor_ids[1]
1726 },
1727 };
1728 void **tramp;
1729 int ret;
1730
1731 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
1732 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
1733 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
1734 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
1735 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
1736 ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
1737 ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
1738 ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3);
1739 ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
1740 ret = ret ?: register_bpf_struct_ops(&testmod_multi_st_ops, bpf_testmod_multi_st_ops);
1741 ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
1742 ARRAY_SIZE(bpf_testmod_dtors),
1743 THIS_MODULE);
1744 if (ret < 0)
1745 return ret;
1746 if (bpf_fentry_test1(0) < 0)
1747 return -EINVAL;
1748 sock = NULL;
1749 mutex_init(&sock_lock);
1750 ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
1751 if (ret < 0)
1752 return ret;
1753 ret = register_bpf_testmod_uprobe();
1754 if (ret < 0)
1755 return ret;
1756
1757 /* Ensure nothing is between tramp_1..tramp_40 */
1758 BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) !=
1759 offsetofend(struct bpf_testmod_ops, tramp_40));
1760 tramp = (void **)&__bpf_testmod_ops.tramp_1;
1761 while (tramp <= (void **)&__bpf_testmod_ops.tramp_40)
1762 *tramp++ = bpf_testmod_tramp;
1763
1764 return 0;
1765 }
1766
static void bpf_testmod_exit(void)
1768 {
1769 /* Need to wait for all references to be dropped because
1770 * bpf_kfunc_call_test_release() which currently resides in kernel can
1771 * be called after bpf_testmod is unloaded. Once release function is
1772 * moved into the module this wait can be removed.
1773 */
1774 while (refcount_read(&prog_test_struct.cnt) > 1)
1775 msleep(20);
1776
1777 bpf_kfunc_close_sock();
1778 sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
1779 unregister_bpf_testmod_uprobe();
1780 }
1781
1782 module_init(bpf_testmod_init);
1783 module_exit(bpf_testmod_exit);
1784
1785 MODULE_AUTHOR("Andrii Nakryiko");
1786 MODULE_DESCRIPTION("BPF selftests module");
1787 MODULE_LICENSE("Dual BSD/GPL");
1788