// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_misc.h"
#include "cpumask_common.h"

char _license[] SEC("license") = "GPL";

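/* Set by userspace before the programs are attached: the pid of the test
 * process, and the number of possible CPUs (typically taken from
 * libbpf_num_possible_cpus()).
 */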
int pid, nr_cpus;

struct kptr_nested {
	struct bpf_cpumask __kptr * mask;
};

struct kptr_nested_pair {
	struct bpf_cpumask __kptr * mask_1;
	struct bpf_cpumask __kptr * mask_2;
};

struct kptr_nested_mid {
	int dummy;
	struct kptr_nested m;
};

struct kptr_nested_deep {
	struct kptr_nested_mid ptrs[2];
	struct kptr_nested_pair ptr_pairs[3];
};

struct kptr_nested_deep_array_1_2 {
	int dummy;
	struct bpf_cpumask __kptr * mask[CPUMASK_KPTR_FIELDS_MAX];
};

struct kptr_nested_deep_array_1_1 {
	int dummy;
	struct kptr_nested_deep_array_1_2 d_2;
};

struct kptr_nested_deep_array_1 {
	long dummy;
	struct kptr_nested_deep_array_1_1 d_1;
};

struct kptr_nested_deep_array_2_2 {
	long dummy[2];
	struct bpf_cpumask __kptr * mask;
};

struct kptr_nested_deep_array_2_1 {
	int dummy;
	struct kptr_nested_deep_array_2_2 d_2[CPUMASK_KPTR_FIELDS_MAX];
};

struct kptr_nested_deep_array_2 {
	long dummy;
	struct kptr_nested_deep_array_2_1 d_1;
};

struct kptr_nested_deep_array_3_2 {
	long dummy[2];
	struct bpf_cpumask __kptr * mask;
};

struct kptr_nested_deep_array_3_1 {
	int dummy;
	struct kptr_nested_deep_array_3_2 d_2;
};

struct kptr_nested_deep_array_3 {
	long dummy;
	struct kptr_nested_deep_array_3_1 d_1[CPUMASK_KPTR_FIELDS_MAX];
};

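/* private(name) (defined in bpf_misc.h) places each variable in its own
 * ".data.<name>" section, giving it a dedicated internal map whose BTF
 * describes the kptr fields nested below.
 */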
private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
private(MASK) static struct kptr_nested global_mask_nested[2];
private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep;
private(MASK_1) static struct kptr_nested_deep_array_1 global_mask_nested_deep_array_1;
private(MASK_2) static struct kptr_nested_deep_array_2 global_mask_nested_deep_array_2;
private(MASK_3) static struct kptr_nested_deep_array_3 global_mask_nested_deep_array_3;

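/* The task_newtask tracepoint fires for every task created on the system,
 * so each program below first checks that the event came from the test
 * process itself.
 */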
static bool is_test_task(void)
{
	int cur_pid = bpf_get_current_pid_tgid() >> 32;

	return pid == cur_pid;
}

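/* Allocate four cpumasks for tests that need a full working set. On
 * partial failure, release any masks already acquired so no references
 * leak, and record which allocation failed in err.
 */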
static bool create_cpumask_set(struct bpf_cpumask **out1,
			       struct bpf_cpumask **out2,
			       struct bpf_cpumask **out3,
			       struct bpf_cpumask **out4)
{
	struct bpf_cpumask *mask1, *mask2, *mask3, *mask4;

	mask1 = create_cpumask();
	if (!mask1)
		return false;

	mask2 = create_cpumask();
	if (!mask2) {
		bpf_cpumask_release(mask1);
		err = 3;
		return false;
	}

	mask3 = create_cpumask();
	if (!mask3) {
		bpf_cpumask_release(mask1);
		bpf_cpumask_release(mask2);
		err = 4;
		return false;
	}

	mask4 = create_cpumask();
	if (!mask4) {
		bpf_cpumask_release(mask1);
		bpf_cpumask_release(mask2);
		bpf_cpumask_release(mask3);
		err = 5;
		return false;
	}

	*out1 = mask1;
	*out2 = mask2;
	*out3 = mask3;
	*out4 = mask4;

	return true;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_release(cpumask);
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_set_cpu(0, cpumask);
	if (!bpf_cpumask_test_cpu(0, cast(cpumask))) {
		err = 3;
		goto release_exit;
	}

	bpf_cpumask_clear_cpu(0, cpumask);
	if (bpf_cpumask_test_cpu(0, cast(cpumask))) {
		err = 4;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_setall(cpumask);
	if (!bpf_cpumask_full(cast(cpumask))) {
		err = 3;
		goto release_exit;
	}

	bpf_cpumask_clear(cpumask);
	if (!bpf_cpumask_empty(cast(cpumask))) {
		err = 4;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (bpf_cpumask_first(cast(cpumask)) < nr_cpus) {
		err = 3;
		goto release_exit;
	}

	if (bpf_cpumask_first_zero(cast(cpumask)) != 0) {
		bpf_printk("first zero: %d", bpf_cpumask_first_zero(cast(cpumask)));
		err = 4;
		goto release_exit;
	}

	bpf_cpumask_set_cpu(0, cpumask);
	if (bpf_cpumask_first(cast(cpumask)) != 0) {
		err = 5;
		goto release_exit;
	}

	if (bpf_cpumask_first_zero(cast(cpumask)) != 1) {
		err = 6;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2;
	u32 first;

	if (!is_test_task())
		return 0;

	mask1 = create_cpumask();
	if (!mask1)
		return 0;

	mask2 = create_cpumask();
	if (!mask2)
		goto release_exit;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);

	first = bpf_cpumask_first_and(cast(mask1), cast(mask2));
	if (first <= 1)
		err = 3;

release_exit:
	if (mask1)
		bpf_cpumask_release(mask1);
	if (mask2)
		bpf_cpumask_release(mask2);
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (bpf_cpumask_test_and_set_cpu(0, cpumask)) {
		err = 3;
		goto release_exit;
	}

	if (!bpf_cpumask_test_and_set_cpu(0, cpumask)) {
		err = 4;
		goto release_exit;
	}

	if (!bpf_cpumask_test_and_clear_cpu(0, cpumask)) {
		err = 5;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);

	if (bpf_cpumask_and(dst1, cast(mask1), cast(mask2))) {
		err = 6;
		goto release_exit;
	}
	if (!bpf_cpumask_empty(cast(dst1))) {
		err = 7;
		goto release_exit;
	}

	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
	if (!bpf_cpumask_test_cpu(0, cast(dst1))) {
		err = 8;
		goto release_exit;
	}
	if (!bpf_cpumask_test_cpu(1, cast(dst1))) {
		err = 9;
		goto release_exit;
	}

	bpf_cpumask_xor(dst2, cast(mask1), cast(mask2));
	if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
		err = 10;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);
	if (bpf_cpumask_intersects(cast(mask1), cast(mask2))) {
		err = 6;
		goto release_exit;
	}

	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
	if (!bpf_cpumask_subset(cast(mask1), cast(dst1))) {
		err = 7;
		goto release_exit;
	}

	if (!bpf_cpumask_subset(cast(mask2), cast(dst1))) {
		err = 8;
		goto release_exit;
	}

	if (bpf_cpumask_subset(cast(dst1), cast(mask1))) {
		err = 9;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
	int cpu;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);
	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));

	cpu = bpf_cpumask_any_distribute(cast(mask1));
	if (cpu != 0) {
		err = 6;
		goto release_exit;
	}

	cpu = bpf_cpumask_any_distribute(cast(dst2));
	if (cpu < nr_cpus) {
		err = 7;
		goto release_exit;
	}

	bpf_cpumask_copy(dst2, cast(dst1));
	if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
		err = 8;
		goto release_exit;
	}

	cpu = bpf_cpumask_any_distribute(cast(dst2));
	if (cpu > 1) {
		err = 9;
		goto release_exit;
	}

	cpu = bpf_cpumask_any_and_distribute(cast(mask1), cast(mask2));
	if (cpu < nr_cpus) {
		err = 10;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

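/* Insert a cpumask into the map and deliberately leave it there: the
 * reference is transferred to the map, which drops it when the entry is
 * deleted or the map is destroyed, so nothing leaks.
 */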
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask))
		err = 3;

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;
	struct __cpumask_map_value *v;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask)) {
		err = 3;
		return 0;
	}

	v = cpumask_map_value_lookup();
	if (!v) {
		err = 4;
		return 0;
	}

	cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
	if (cpumask)
		bpf_cpumask_release(cpumask);
	else
		err = 5;

	return 0;
}

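/* Global kptrs such as global_mask (declared in cpumask_common.h) are
 * RCU-protected: they may be read under bpf_rcu_read_lock() without
 * taking a reference, while bpf_kptr_xchg() is used to move ownership
 * in and out.
 */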
SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local, *prev;

	if (!is_test_task())
		return 0;

	local = create_cpumask();
	if (!local)
		return 0;

	prev = bpf_kptr_xchg(&global_mask, local);
	if (prev) {
		bpf_cpumask_release(prev);
		err = 3;
		return 0;
	}

	bpf_rcu_read_lock();
	local = global_mask;
	if (!local) {
		err = 4;
		bpf_rcu_read_unlock();
		return 0;
	}

	bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
	bpf_rcu_read_unlock();

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local, *prev;

	if (!is_test_task())
		return 0;

	/* Kptr arrays with one element are special-cased and treated just
	 * like a single pointer.
	 */

	local = create_cpumask();
	if (!local)
		return 0;

	prev = bpf_kptr_xchg(&global_mask_array_one[0], local);
	if (prev) {
		bpf_cpumask_release(prev);
		err = 3;
		return 0;
	}

	bpf_rcu_read_lock();
	local = global_mask_array_one[0];
	if (!local) {
		err = 4;
		bpf_rcu_read_unlock();
		return 0;
	}

	bpf_rcu_read_unlock();

	return 0;
}

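/* Exercise one or two kptr slots under RCU. mask1 may be NULL, in which
 * case only the single kptr behind mask0 is exercised (see the deep
 * array tests below).
 */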
static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
				  struct bpf_cpumask **mask1)
{
	struct bpf_cpumask *local;

	if (!is_test_task())
		return 0;

	/* Check that two kptrs in the array work independently of each other. */

	local = create_cpumask();
	if (!local)
		return 0;

	bpf_rcu_read_lock();

	local = bpf_kptr_xchg(mask0, local);
	if (local) {
		err = 1;
		goto err_exit;
	}

	/* [<mask 0>, *] */
	if (!*mask0) {
		err = 2;
		goto err_exit;
	}

	if (!mask1)
		goto err_exit;

	/* [*, NULL] */
	if (*mask1) {
		err = 3;
		goto err_exit;
	}

	local = create_cpumask();
	if (!local) {
		err = 9;
		goto err_exit;
	}

	local = bpf_kptr_xchg(mask1, local);
	if (local) {
		err = 10;
		goto err_exit;
	}

	/* [<mask 0>, <mask 1>] */
	if (!*mask0 || !*mask1 || *mask0 == *mask1) {
		err = 11;
		goto err_exit;
	}

err_exit:
	if (local)
		bpf_cpumask_release(local);
	bpf_rcu_read_unlock();
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
{
	return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
{
	return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags)
{
	return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask);
}

/* Ensure that field->offset has been correctly advanced from one nested
 * struct or array sub-tree to another. In the case of kptr_nested_deep,
 * it comprises two sub-trees: ptrs and ptr_pairs. By calling
 * bpf_kptr_xchg() on every single kptr in both nested sub-trees, the
 * verifier should reject the program if the field->offset of any kptr is
 * incorrect.
 *
 * For instance, if we have 10 kptrs in a nested struct and a program that
 * accesses each kptr individually with bpf_kptr_xchg(), the compiler
 * should emit instructions for 10 different offsets if it works
 * correctly. If the field->offset values of any pair of them are
 * incorrectly the same, the number of unique offsets in the btf_record
 * for this nested struct will be less than 10, and the verifier will fail
 * to match some of the offsets emitted by the compiler.
 *
 * Even if no field->offset values are duplicated, the verifier will still
 * fail to find a btf_field for an instruction accessing a kptr if the
 * corresponding field->offset points at a random incorrect offset.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
{
	int r, i;

	r = _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask,
				   &global_mask_nested_deep.ptrs[1].m.mask);
	if (r)
		return r;

	for (i = 0; i < 3; i++) {
		r = _global_mask_array_rcu(&global_mask_nested_deep.ptr_pairs[i].mask_1,
					   &global_mask_nested_deep.ptr_pairs[i].mask_2);
		if (r)
			return r;
	}
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_nested_deep_array_rcu, struct task_struct *task, u64 clone_flags)
{
	int i;

	for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
		_global_mask_array_rcu(&global_mask_nested_deep_array_1.d_1.d_2.mask[i], NULL);

	for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
		_global_mask_array_rcu(&global_mask_nested_deep_array_2.d_1.d_2[i].mask, NULL);

	for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
		_global_mask_array_rcu(&global_mask_nested_deep_array_3.d_1[i].d_2.mask, NULL);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local;

	if (!is_test_task())
		return 0;

	local = create_cpumask();
	if (!local)
		return 0;

	if (bpf_cpumask_weight(cast(local)) != 0) {
		err = 3;
		goto out;
	}

	bpf_cpumask_set_cpu(0, local);
	if (bpf_cpumask_weight(cast(local)) != 1) {
		err = 4;
		goto out;
	}

	/*
	 * Make sure that adding additional CPUs changes the weight. Only
	 * check the weight if CPU 1 was actually set, to account for
	 * running on UP machines where CPU 1 may not exist.
	 */
	bpf_cpumask_set_cpu(1, local);
	if (bpf_cpumask_test_cpu(1, cast(local)) && bpf_cpumask_weight(cast(local)) != 2) {
		err = 5;
		goto out;
	}

	bpf_cpumask_clear(local);
	if (bpf_cpumask_weight(cast(local)) != 0) {
		err = 6;
		goto out;
	}
out:
	bpf_cpumask_release(local);
	return 0;
}

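/* Annotated __success: the verifier must accept this program, proving
 * that it correctly tracks the possibly-NULL refcounted pointers
 * returned by bpf_cpumask_create() up to the NULL checks below.
 */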
SEC("tp_btf/task_newtask")
__success
int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2;

	mask1 = bpf_cpumask_create();
	mask2 = bpf_cpumask_create();

	if (!mask1 || !mask2)
		goto free_masks_return;

	bpf_cpumask_test_cpu(0, (const struct cpumask *)mask1);
	bpf_cpumask_test_cpu(0, (const struct cpumask *)mask2);

free_masks_return:
	if (mask1)
		bpf_cpumask_release(mask1);
	if (mask2)
		bpf_cpumask_release(mask2);
	return 0;
}