// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_misc.h"
#include "cpumask_common.h"

char _license[] SEC("license") = "GPL";

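/* Both values are expected to be written by the userspace side of the test
 * before these programs run: pid identifies the test task, and nr_cpus holds
 * the number of possible CPUs (used below as the "no valid CPU" threshold).
 */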
int pid, nr_cpus;

struct kptr_nested {
	struct bpf_cpumask __kptr * mask;
};

struct kptr_nested_pair {
	struct bpf_cpumask __kptr * mask_1;
	struct bpf_cpumask __kptr * mask_2;
};

struct kptr_nested_mid {
	int dummy;
	struct kptr_nested m;
};

struct kptr_nested_deep {
	struct kptr_nested_mid ptrs[2];
	struct kptr_nested_pair ptr_pairs[3];
};

private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
private(MASK) static struct kptr_nested global_mask_nested[2];
private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep;

static bool is_test_task(void)
{
	int cur_pid = bpf_get_current_pid_tgid() >> 32;

	return pid == cur_pid;
}

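/* Helper for tests that operate on several masks at once: allocate four
 * cpumasks, releasing any earlier ones on failure. err records which
 * allocation failed.
 */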
static bool create_cpumask_set(struct bpf_cpumask **out1,
			       struct bpf_cpumask **out2,
			       struct bpf_cpumask **out3,
			       struct bpf_cpumask **out4)
{
	struct bpf_cpumask *mask1, *mask2, *mask3, *mask4;

	mask1 = create_cpumask();
	if (!mask1)
		return false;

	mask2 = create_cpumask();
	if (!mask2) {
		bpf_cpumask_release(mask1);
		err = 3;
		return false;
	}

	mask3 = create_cpumask();
	if (!mask3) {
		bpf_cpumask_release(mask1);
		bpf_cpumask_release(mask2);
		err = 4;
		return false;
	}

	mask4 = create_cpumask();
	if (!mask4) {
		bpf_cpumask_release(mask1);
		bpf_cpumask_release(mask2);
		bpf_cpumask_release(mask3);
		err = 5;
		return false;
	}

	*out1 = mask1;
	*out2 = mask2;
	*out3 = mask3;
	*out4 = mask4;

	return true;
}

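/* Sanity check that a bpf_cpumask can be created and immediately released. */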
SEC("tp_btf/task_newtask")
int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_release(cpumask);
	return 0;
}

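/* Setting a CPU must be observable via bpf_cpumask_test_cpu(), and clearing
 * it must make the bit vanish again.
 */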
SEC("tp_btf/task_newtask")
int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_set_cpu(0, cpumask);
	if (!bpf_cpumask_test_cpu(0, cast(cpumask))) {
		err = 3;
		goto release_exit;
	}

	bpf_cpumask_clear_cpu(0, cpumask);
	if (bpf_cpumask_test_cpu(0, cast(cpumask))) {
		err = 4;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

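/* bpf_cpumask_setall() must yield a full mask, and bpf_cpumask_clear() an
 * empty one.
 */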
SEC("tp_btf/task_newtask")
int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_setall(cpumask);
	if (!bpf_cpumask_full(cast(cpumask))) {
		err = 3;
		goto release_exit;
	}

	bpf_cpumask_clear(cpumask);
	if (!bpf_cpumask_empty(cast(cpumask))) {
		err = 4;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

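/* On an empty mask, first() must report no valid CPU while first_zero()
 * reports CPU 0; setting CPU 0 flips both answers.
 */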
SEC("tp_btf/task_newtask")
int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (bpf_cpumask_first(cast(cpumask)) < nr_cpus) {
		err = 3;
		goto release_exit;
	}

	if (bpf_cpumask_first_zero(cast(cpumask)) != 0) {
		bpf_printk("first zero: %d", bpf_cpumask_first_zero(cast(cpumask)));
		err = 4;
		goto release_exit;
	}

	bpf_cpumask_set_cpu(0, cpumask);
	if (bpf_cpumask_first(cast(cpumask)) != 0) {
		err = 5;
		goto release_exit;
	}

	if (bpf_cpumask_first_zero(cast(cpumask)) != 1) {
		err = 6;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

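/* first_and() of two disjoint masks must not report a valid CPU. */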
SEC("tp_btf/task_newtask")
int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2;
	u32 first;

	if (!is_test_task())
		return 0;

	mask1 = create_cpumask();
	if (!mask1)
		return 0;

	mask2 = create_cpumask();
	if (!mask2)
		goto release_exit;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);

	first = bpf_cpumask_first_and(cast(mask1), cast(mask2));
	if (first <= 1)
		err = 3;

release_exit:
	if (mask1)
		bpf_cpumask_release(mask1);
	if (mask2)
		bpf_cpumask_release(mask2);
	return 0;
}

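/* test_and_set / test_and_clear must return the previous state of the bit. */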
SEC("tp_btf/task_newtask")
int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (bpf_cpumask_test_and_set_cpu(0, cpumask)) {
		err = 3;
		goto release_exit;
	}

	if (!bpf_cpumask_test_and_set_cpu(0, cpumask)) {
		err = 4;
		goto release_exit;
	}

	if (!bpf_cpumask_test_and_clear_cpu(0, cpumask)) {
		err = 5;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

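/* For two disjoint masks: AND is empty, OR contains both CPUs, and XOR
 * equals the OR.
 */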
SEC("tp_btf/task_newtask")
int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);

	if (bpf_cpumask_and(dst1, cast(mask1), cast(mask2))) {
		err = 6;
		goto release_exit;
	}
	if (!bpf_cpumask_empty(cast(dst1))) {
		err = 7;
		goto release_exit;
	}

	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
	if (!bpf_cpumask_test_cpu(0, cast(dst1))) {
		err = 8;
		goto release_exit;
	}
	if (!bpf_cpumask_test_cpu(1, cast(dst1))) {
		err = 9;
		goto release_exit;
	}

	bpf_cpumask_xor(dst2, cast(mask1), cast(mask2));
	if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
		err = 10;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

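/* Disjoint masks don't intersect; each operand of an OR is a subset of the
 * result, but the result is not a subset of either operand.
 */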
SEC("tp_btf/task_newtask")
int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);
	if (bpf_cpumask_intersects(cast(mask1), cast(mask2))) {
		err = 6;
		goto release_exit;
	}

	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
	if (!bpf_cpumask_subset(cast(mask1), cast(dst1))) {
		err = 7;
		goto release_exit;
	}

	if (!bpf_cpumask_subset(cast(mask2), cast(dst1))) {
		err = 8;
		goto release_exit;
	}

	if (bpf_cpumask_subset(cast(dst1), cast(mask1))) {
		err = 9;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

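/* any_distribute() must pick a set CPU (or report none for an empty mask),
 * and copy() must make two masks compare equal.
 */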
SEC("tp_btf/task_newtask")
int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
	int cpu;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);
	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));

	cpu = bpf_cpumask_any_distribute(cast(mask1));
	if (cpu != 0) {
		err = 6;
		goto release_exit;
	}

	cpu = bpf_cpumask_any_distribute(cast(dst2));
	if (cpu < nr_cpus) {
		err = 7;
		goto release_exit;
	}

	bpf_cpumask_copy(dst2, cast(dst1));
	if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
		err = 8;
		goto release_exit;
	}

	cpu = bpf_cpumask_any_distribute(cast(dst2));
	if (cpu > 1) {
		err = 9;
		goto release_exit;
	}

	cpu = bpf_cpumask_any_and_distribute(cast(mask1), cast(mask2));
	if (cpu < nr_cpus) {
		err = 10;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

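/* Stashing a cpumask kptr in a map transfers ownership of the reference to
 * the map, so the program may exit without releasing it.
 */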
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask))
		err = 3;

	return 0;
}

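/* A stashed kptr can be taken back out of the map with bpf_kptr_xchg(), at
 * which point the program owns the reference again and must release it.
 */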
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;
	struct __cpumask_map_value *v;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask)) {
		err = 3;
		return 0;
	}

	v = cpumask_map_value_lookup();
	if (!v) {
		err = 4;
		return 0;
	}

	cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
	if (cpumask)
		bpf_cpumask_release(cpumask);
	else
		err = 5;

	return 0;
}

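/* A global cpumask kptr published with bpf_kptr_xchg() may be dereferenced
 * under bpf_rcu_read_lock().
 */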
SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local, *prev;

	if (!is_test_task())
		return 0;

	local = create_cpumask();
	if (!local)
		return 0;

	prev = bpf_kptr_xchg(&global_mask, local);
	if (prev) {
		bpf_cpumask_release(prev);
		err = 3;
		return 0;
	}

	bpf_rcu_read_lock();
	local = global_mask;
	if (!local) {
		err = 4;
		bpf_rcu_read_unlock();
		return 0;
	}

	bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
	bpf_rcu_read_unlock();

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local, *prev;

	if (!is_test_task())
		return 0;

	/* Kptr arrays with one element are special cased, being treated
	 * just like a single pointer.
	 */

	local = create_cpumask();
	if (!local)
		return 0;

	prev = bpf_kptr_xchg(&global_mask_array_one[0], local);
	if (prev) {
		bpf_cpumask_release(prev);
		err = 3;
		return 0;
	}

	bpf_rcu_read_lock();
	local = global_mask_array_one[0];
	if (!local) {
		err = 4;
		bpf_rcu_read_unlock();
		return 0;
	}

	bpf_rcu_read_unlock();

	return 0;
}

static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
				  struct bpf_cpumask **mask1)
{
	struct bpf_cpumask *local;

	if (!is_test_task())
		return 0;

	/* Check that two kptrs in the array work independently of each other */

	local = create_cpumask();
	if (!local)
		return 0;

	bpf_rcu_read_lock();

	local = bpf_kptr_xchg(mask0, local);
	if (local) {
		err = 1;
		goto err_exit;
	}

	/* [<mask 0>, NULL] */
	if (!*mask0 || *mask1) {
		err = 2;
		goto err_exit;
	}

	local = create_cpumask();
	if (!local) {
		err = 9;
		goto err_exit;
	}

	local = bpf_kptr_xchg(mask1, local);
	if (local) {
		err = 10;
		goto err_exit;
	}

	/* [<mask 0>, <mask 1>] */
	if (!*mask0 || !*mask1 || *mask0 == *mask1) {
		err = 11;
		goto err_exit;
	}

err_exit:
	if (local)
		bpf_cpumask_release(local);
	bpf_rcu_read_unlock();
	return 0;
}

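/* Run the array scenario above over a flat kptr array, a two-dimensional
 * array, and kptrs embedded in an array of structs.
 */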
SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
{
	return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
{
	return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags)
{
	return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask);
}

/* Ensure that the field->offset has been correctly advanced from one
 * nested struct or array sub-tree to another. In the case of
 * kptr_nested_deep, it comprises two sub-trees: ptrs and ptr_pairs. By
 * calling bpf_kptr_xchg() on every single kptr in both nested sub-trees,
 * the verifier should reject the program if the field->offset of any kptr
 * is incorrect.
 *
 * For instance, if we have 10 kptrs in a nested struct and a program that
 * accesses each kptr individually with bpf_kptr_xchg(), the compiler
 * should emit instructions accessing 10 different offsets if it works
 * correctly. If the field->offset values of any pair of them are
 * incorrectly the same, the number of unique offsets in the btf_record
 * for this nested struct will be less than 10, and the verifier will fail
 * to recognize some of the offsets emitted by the compiler.
 *
 * Even if no field->offset values are duplicated, the verifier will still
 * fail to find a btf_field for an instruction accessing a kptr whenever
 * the corresponding field->offset points to a random incorrect offset.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
{
	int r, i;

	r = _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask,
				   &global_mask_nested_deep.ptrs[1].m.mask);
	if (r)
		return r;

	for (i = 0; i < 3; i++) {
		r = _global_mask_array_rcu(&global_mask_nested_deep.ptr_pairs[i].mask_1,
					   &global_mask_nested_deep.ptr_pairs[i].mask_2);
		if (r)
			return r;
	}
	return 0;
}

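/* bpf_cpumask_weight() must track the number of set bits across set and
 * clear operations.
 */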
SEC("tp_btf/task_newtask")
int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local;

	if (!is_test_task())
		return 0;

	local = create_cpumask();
	if (!local)
		return 0;

	if (bpf_cpumask_weight(cast(local)) != 0) {
		err = 3;
		goto out;
	}

	bpf_cpumask_set_cpu(0, local);
	if (bpf_cpumask_weight(cast(local)) != 1) {
		err = 4;
		goto out;
	}

	/*
	 * Make sure that setting an additional CPU changes the weight. Check
	 * that the CPU was actually set first, to account for running on UP
	 * machines where CPU 1 cannot be set.
	 */
	bpf_cpumask_set_cpu(1, local);
	if (bpf_cpumask_test_cpu(1, cast(local)) && bpf_cpumask_weight(cast(local)) != 2) {
		err = 5;
		goto out;
	}

	bpf_cpumask_clear(local);
	if (bpf_cpumask_weight(cast(local)) != 0) {
		err = 6;
		goto out;
	}
out:
	bpf_cpumask_release(local);
	return 0;
}

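/* The verifier tracks the possible NULL-ness of mask1 and mask2 separately,
 * so it must accept the guarded releases below even though either pointer
 * may be NULL.
 */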
SEC("tp_btf/task_newtask")
__success
int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2;

	mask1 = bpf_cpumask_create();
	mask2 = bpf_cpumask_create();

	if (!mask1 || !mask2)
		goto free_masks_return;

	bpf_cpumask_test_cpu(0, (const struct cpumask *)mask1);
	bpf_cpumask_test_cpu(0, (const struct cpumask *)mask2);

free_masks_return:
	if (mask1)
		bpf_cpumask_release(mask1);
	if (mask2)
		bpf_cpumask_release(mask2);
	return 0;
}
699