// SPDX-License-Identifier: GPL-2.0

#define _GNU_SOURCE
#include <linux/limits.h>
#include <sys/sysinfo.h>
#include <sys/wait.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#include "../kselftest.h"
#include "cgroup_util.h"

enum hog_clock_type {
	// Count elapsed time using the CLOCK_PROCESS_CPUTIME_ID clock.
	CPU_HOG_CLOCK_PROCESS,
	// Count elapsed time using system wallclock time.
	CPU_HOG_CLOCK_WALL,
};

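/* Per-child state for the weight tests: cgroup path, hog pid, and usage_usec. */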
struct cpu_hogger {
	char *cgroup;
	pid_t pid;
	long usage;
};

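/*
 * Arguments for hog_cpus_timed(): how many hog threads to spawn, how long
 * to run, and which clock to measure elapsed time against.
 */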
struct cpu_hog_func_param {
	int nprocs;
	struct timespec ts;
	enum hog_clock_type clock_type;
};

/*
 * This test creates two sets of nested cgroups, one with the cpu controller
 * enabled in the parent and one without, and verifies that the child cgroup
 * sees the controller only in the first case.
 */
static int test_cpucg_subtree_control(const char *root)
{
	char *parent = NULL, *child = NULL, *parent2 = NULL, *child2 = NULL;
	int ret = KSFT_FAIL;

	// Create two nested cgroups with the cpu controller enabled.
	parent = cg_name(root, "cpucg_test_0");
	if (!parent)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	child = cg_name(parent, "cpucg_test_child");
	if (!child)
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_read_strstr(child, "cgroup.controllers", "cpu"))
		goto cleanup;

	// Create two nested cgroups without enabling the cpu controller.
	parent2 = cg_name(root, "cpucg_test_1");
	if (!parent2)
		goto cleanup;

	if (cg_create(parent2))
		goto cleanup;

	child2 = cg_name(parent2, "cpucg_test_child");
	if (!child2)
		goto cleanup;

	if (cg_create(child2))
		goto cleanup;

	if (!cg_read_strstr(child2, "cgroup.controllers", "cpu"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(child);
	free(child);
	cg_destroy(child2);
	free(child2);
	cg_destroy(parent);
	free(parent);
	cg_destroy(parent2);
	free(parent2);

	return ret;
}

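/*
 * Thread body for the CPU hogs: spin forever. The threads are reclaimed when
 * the hogging process exits.
 */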
static void *hog_cpu_thread_func(void *arg)
{
	while (1)
		;

	return NULL;
}

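/* Return lhs - rhs, clamped to a zero timespec if the result would be negative. */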
static struct timespec
timespec_sub(const struct timespec *lhs, const struct timespec *rhs)
{
	struct timespec zero = {
		.tv_sec = 0,
		.tv_nsec = 0,
	};
	struct timespec ret;

	if (lhs->tv_sec < rhs->tv_sec)
		return zero;

	ret.tv_sec = lhs->tv_sec - rhs->tv_sec;

	if (lhs->tv_nsec < rhs->tv_nsec) {
		if (ret.tv_sec == 0)
			return zero;

		ret.tv_sec--;
		ret.tv_nsec = NSEC_PER_SEC - rhs->tv_nsec + lhs->tv_nsec;
	} else
		ret.tv_nsec = lhs->tv_nsec - rhs->tv_nsec;

	return ret;
}

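/*
 * Spawn param->nprocs busy-looping threads, then sleep until param->ts has
 * elapsed as measured on either the process CPU clock or the monotonic wall
 * clock, depending on param->clock_type.
 */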
static int hog_cpus_timed(const char *cgroup, void *arg)
{
	const struct cpu_hog_func_param *param =
		(struct cpu_hog_func_param *)arg;
	struct timespec ts_run = param->ts;
	struct timespec ts_remaining = ts_run;
	struct timespec ts_start;
	int i, ret;

	ret = clock_gettime(CLOCK_MONOTONIC, &ts_start);
	if (ret != 0)
		return ret;

	for (i = 0; i < param->nprocs; i++) {
		pthread_t tid;

		ret = pthread_create(&tid, NULL, &hog_cpu_thread_func, NULL);
		if (ret != 0)
			return ret;
	}

	while (ts_remaining.tv_sec > 0 || ts_remaining.tv_nsec > 0) {
		struct timespec ts_total;

		ret = nanosleep(&ts_remaining, NULL);
		if (ret && errno != EINTR)
			return ret;

		if (param->clock_type == CPU_HOG_CLOCK_PROCESS) {
			ret = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts_total);
			if (ret != 0)
				return ret;
		} else {
			struct timespec ts_current;

			ret = clock_gettime(CLOCK_MONOTONIC, &ts_current);
			if (ret != 0)
				return ret;

			ts_total = timespec_sub(&ts_current, &ts_start);
		}

		ts_remaining = timespec_sub(&ts_run, &ts_total);
	}

	return 0;
}

/*
 * Creates a cpu cgroup, burns a CPU for a few quanta, and verifies that
 * cpu.stat shows the expected output.
 */
static int test_cpucg_stats(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec, system_usec;
	long usage_seconds = 2;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *cpucg;

	cpucg = cg_name(root, "cpucg_test");
	if (!cpucg)
		goto cleanup;

	if (cg_create(cpucg))
		goto cleanup;

	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	system_usec = cg_read_key_long(cpucg, "cpu.stat", "system_usec");
	if (usage_usec != 0 || user_usec != 0 || system_usec != 0)
		goto cleanup;

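	/* Burn one CPU for usage_seconds of process CPU time, then re-read cpu.stat. */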
	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_PROCESS,
	};
	if (cg_run(cpucg, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (!values_close(usage_usec, expected_usage_usec, 1))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(cpucg);
	free(cpucg);

	return ret;
}

/*
 * Creates a niced process that consumes CPU and checks that the nice_usec
 * reported in the cgroup's cpu.stat is close to the expected runtime.
 */
static int test_cpucg_nice(const char *root)
{
	int ret = KSFT_FAIL;
	int status;
	long user_usec, nice_usec;
	long usage_seconds = 2;
	long expected_nice_usec = usage_seconds * USEC_PER_SEC;
	char *cpucg;
	pid_t pid;

	cpucg = cg_name(root, "cpucg_test");
	if (!cpucg)
		goto cleanup;

	if (cg_create(cpucg))
		goto cleanup;

	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	nice_usec = cg_read_key_long(cpucg, "cpu.stat", "nice_usec");
	if (nice_usec == -1)
		ret = KSFT_SKIP;
	if (user_usec != 0 || nice_usec != 0)
		goto cleanup;

	/*
	 * We fork here to create a new process that can be niced without
	 * polluting the nice value of other selftests
	 */
	pid = fork();
	if (pid < 0) {
		goto cleanup;
	} else if (pid == 0) {
		struct cpu_hog_func_param param = {
			.nprocs = 1,
			.ts = {
				.tv_sec = usage_seconds,
				.tv_nsec = 0,
			},
			.clock_type = CPU_HOG_CLOCK_PROCESS,
		};
		char buf[64];
		snprintf(buf, sizeof(buf), "%d", getpid());
		if (cg_write(cpucg, "cgroup.procs", buf))
			goto cleanup;

		/* Try to keep niced CPU usage as constrained to hog_cpu as possible */
		nice(1);
		hog_cpus_timed(cpucg, &param);
		exit(0);
	} else {
		waitpid(pid, &status, 0);
		if (!WIFEXITED(status))
			goto cleanup;

		user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
		nice_usec = cg_read_key_long(cpucg, "cpu.stat", "nice_usec");
		if (!values_close(nice_usec, expected_nice_usec, 1))
			goto cleanup;

		ret = KSFT_PASS;
	}

cleanup:
	cg_destroy(cpucg);
	free(cpucg);

	return ret;
}

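/*
 * Common helper for the cpu.weight tests: creates a parent cgroup with the
 * cpu controller enabled and three children with cpu.weight 50, 100 and 150,
 * starts a hog in each child via spawn_child(), waits for all of them, reads
 * each child's usage_usec and hands the results to validate().
 */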
static int
run_cpucg_weight_test(
		const char *root,
		pid_t (*spawn_child)(const struct cpu_hogger *child),
		int (*validate)(const struct cpu_hogger *children, int num_children))
{
	int ret = KSFT_FAIL, i;
	char *parent = NULL;
	struct cpu_hogger children[3] = {};

	parent = cg_name(root, "cpucg_test_0");
	if (!parent)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		children[i].cgroup = cg_name_indexed(parent, "cpucg_child", i);
		if (!children[i].cgroup)
			goto cleanup;

		if (cg_create(children[i].cgroup))
			goto cleanup;

		if (cg_write_numeric(children[i].cgroup, "cpu.weight",
					50 * (i + 1)))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		pid_t pid = spawn_child(&children[i]);
		if (pid <= 0)
			goto cleanup;
		children[i].pid = pid;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		int retcode;

		waitpid(children[i].pid, &retcode, 0);
		if (!WIFEXITED(retcode))
			goto cleanup;
		if (WEXITSTATUS(retcode))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++)
		children[i].usage = cg_read_key_long(children[i].cgroup,
				"cpu.stat", "usage_usec");

	if (validate(children, ARRAY_SIZE(children)))
		goto cleanup;

	ret = KSFT_PASS;
cleanup:
	for (i = 0; i < ARRAY_SIZE(children); i++) {
		cg_destroy(children[i].cgroup);
		free(children[i].cgroup);
	}
	cg_destroy(parent);
	free(parent);

	return ret;
}

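/*
 * Start a hog process in child->cgroup running ncpus busy threads for 10
 * seconds of wall-clock time. Returns the hog's pid, or <= 0 on failure.
 */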
static pid_t weight_hog_ncpus(const struct cpu_hogger *child, int ncpus)
{
	long usage_seconds = 10;
	struct cpu_hog_func_param param = {
		.nprocs = ncpus,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	return cg_run_nowait(child->cgroup, hog_cpus_timed, (void *)&param);
}

static pid_t weight_hog_all_cpus(const struct cpu_hogger *child)
{
	return weight_hog_ncpus(child, get_nprocs());
}

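/*
 * With more hog threads than CPUs, usage should increase with weight: each
 * child's usage should exceed the previous one's, and consecutive children
 * (weights 50/100/150) should differ by roughly the lowest child's usage.
 */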
static int
overprovision_validate(const struct cpu_hogger *children, int num_children)
{
	int ret = KSFT_FAIL, i;

	for (i = 0; i < num_children - 1; i++) {
		long delta;

		if (children[i + 1].usage <= children[i].usage)
			goto cleanup;

		delta = children[i + 1].usage - children[i].usage;
		if (!values_close(delta, children[0].usage, 35))
			goto cleanup;
	}

	ret = KSFT_PASS;
cleanup:
	return ret;
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B     cpu.weight = 50
 * A/C     cpu.weight = 100
 * A/D     cpu.weight = 150
 *
 * A separate process is then created for each child cgroup, each of which
 * spawns as many threads as there are cores and hogs every CPU as much as
 * possible for some time interval.
 *
 * Once all of the children have exited, we verify that each child cgroup
 * received runtime in proportion to its cpu.weight.
 */
static int test_cpucg_weight_overprovisioned(const char *root)
{
	return run_cpucg_weight_test(root, weight_hog_all_cpus,
			overprovision_validate);
}

static pid_t weight_hog_one_cpu(const struct cpu_hogger *child)
{
	return weight_hog_ncpus(child, 1);
}

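/*
 * With fewer hog threads than CPUs there is no contention, so every child
 * should accrue roughly the same usage regardless of its weight.
 */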
static int
underprovision_validate(const struct cpu_hogger *children, int num_children)
{
	int ret = KSFT_FAIL, i;

	for (i = 0; i < num_children - 1; i++) {
		if (!values_close(children[i + 1].usage, children[0].usage, 15))
			goto cleanup;
	}

	ret = KSFT_PASS;
cleanup:
	return ret;
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B     cpu.weight = 50
 * A/C     cpu.weight = 100
 * A/D     cpu.weight = 150
 *
 * A separate process is then created for each child cgroup, each of which
 * spawns a single thread that hogs a CPU. The testcase is only run on
 * systems that have at least one CPU available for each hog thread.
 *
 * Once all of the children have exited, we verify that each child cgroup
 * had roughly the same runtime despite having different cpu.weight.
 */
static int test_cpucg_weight_underprovisioned(const char *root)
{
	// Only run the test if there are enough cores to avoid overprovisioning
	// the system.
	if (get_nprocs() < 4)
		return KSFT_SKIP;

	return run_cpucg_weight_test(root, weight_hog_one_cpu,
			underprovision_validate);
}

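/*
 * Common helper for the nested weight tests: builds the A, A/B, A/C, A/C/D,
 * A/C/E hierarchy described below, runs a wall-clock hog in each leaf
 * (B, D and E), and compares the resulting usage_usec of the leaves and of
 * the intermediate cgroup depending on whether the machine is
 * overprovisioned.
 */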
static int
run_cpucg_nested_weight_test(const char *root, bool overprovisioned)
{
	int ret = KSFT_FAIL, i;
	char *parent = NULL, *child = NULL;
	struct cpu_hogger leaf[3] = {};
	long nested_leaf_usage, child_usage;
	int nprocs = get_nprocs();

	if (!overprovisioned) {
		if (nprocs < 4)
			/*
			 * Only run the test if there are enough cores to avoid overprovisioning
			 * the system.
			 */
			return KSFT_SKIP;
		nprocs /= 4;
	}

	parent = cg_name(root, "cpucg_test");
	child = cg_name(parent, "cpucg_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;
	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;
	if (cg_write(child, "cgroup.subtree_control", "+cpu"))
		goto cleanup;
	if (cg_write(child, "cpu.weight", "1000"))
		goto cleanup;

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		const char *ancestor;
		long weight;

		if (i == 0) {
			ancestor = parent;
			weight = 1000;
		} else {
			ancestor = child;
			weight = 5000;
		}
		leaf[i].cgroup = cg_name_indexed(ancestor, "cpucg_leaf", i);
		if (!leaf[i].cgroup)
			goto cleanup;

		if (cg_create(leaf[i].cgroup))
			goto cleanup;

		if (cg_write_numeric(leaf[i].cgroup, "cpu.weight", weight))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		pid_t pid;
		struct cpu_hog_func_param param = {
			.nprocs = nprocs,
			.ts = {
				.tv_sec = 10,
				.tv_nsec = 0,
			},
			.clock_type = CPU_HOG_CLOCK_WALL,
		};

		pid = cg_run_nowait(leaf[i].cgroup, hog_cpus_timed,
				(void *)&param);
		if (pid <= 0)
			goto cleanup;
		leaf[i].pid = pid;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		int retcode;

		waitpid(leaf[i].pid, &retcode, 0);
		if (!WIFEXITED(retcode))
			goto cleanup;
		if (WEXITSTATUS(retcode))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		leaf[i].usage = cg_read_key_long(leaf[i].cgroup,
				"cpu.stat", "usage_usec");
		if (leaf[i].usage <= 0)
			goto cleanup;
	}

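	/*
	 * Leaf B competes against the combined D+E subtree. When the system is
	 * overprovisioned they should split CPU time about 1:1; when it is
	 * underprovisioned there is no contention, so D and E together should
	 * accrue about twice B's usage.
	 */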
	nested_leaf_usage = leaf[1].usage + leaf[2].usage;
	if (overprovisioned) {
		if (!values_close(leaf[0].usage, nested_leaf_usage, 15))
			goto cleanup;
	} else if (!values_close(leaf[0].usage * 2, nested_leaf_usage, 15))
		goto cleanup;

	child_usage = cg_read_key_long(child, "cpu.stat", "usage_usec");
	if (child_usage <= 0)
		goto cleanup;
	if (!values_close(child_usage, nested_leaf_usage, 1))
		goto cleanup;

	ret = KSFT_PASS;
cleanup:
	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		cg_destroy(leaf[i].cgroup);
		free(leaf[i].cgroup);
	}
	cg_destroy(child);
	free(child);
	cg_destroy(parent);
	free(parent);

	return ret;
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B     cpu.weight = 1000
 * A/C     cpu.weight = 1000
 * A/C/D   cpu.weight = 5000
 * A/C/E   cpu.weight = 5000
 *
 * A separate process is then created for each leaf, each of which spawns
 * nproc threads that burn a CPU for a few seconds.
 *
 * Once all of those processes have exited, we verify that leaf A/B received
 * roughly the same CPU time as leaves A/C/D and A/C/E combined, and that the
 * usage reported for A/C matches the sum of its leaves.
 */
static int
test_cpucg_nested_weight_overprovisioned(const char *root)
{
	return run_cpucg_nested_weight_test(root, true);
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B     cpu.weight = 1000
 * A/C     cpu.weight = 1000
 * A/C/D   cpu.weight = 5000
 * A/C/E   cpu.weight = 5000
 *
 * A separate process is then created for each leaf, each of which spawns
 * nproc / 4 threads that burn a CPU for a few seconds.
 *
 * Once all of those processes have exited, we verify that each of the leaf
 * cgroups has roughly the same usage from cpu.stat.
 */
static int
test_cpucg_nested_weight_underprovisioned(const char *root)
{
	return run_cpucg_nested_weight_test(root, false);
}

/*
 * This test creates a cgroup with a restrictive cpu.max quota within the
 * default period and verifies that a process in the cgroup is not
 * overscheduled.
 */
static int test_cpucg_max(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec;
	long usage_seconds = 1;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *cpucg;

	cpucg = cg_name(root, "cpucg_test");
	if (!cpucg)
		goto cleanup;

	if (cg_create(cpucg))
		goto cleanup;

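	/*
	 * cpu.max takes "$MAX $PERIOD"; writing just "1000" sets the quota to
	 * 1000us against the default 100000us period, i.e. roughly 1% of one CPU.
	 */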
	if (cg_write(cpucg, "cpu.max", "1000"))
		goto cleanup;

	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	if (cg_run(cpucg, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (user_usec >= expected_usage_usec)
		goto cleanup;

	if (values_close(usage_usec, expected_usage_usec, 95))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(cpucg);
	free(cpucg);

	return ret;
}

/*
 * This test verifies that a process inside a nested cgroup whose parent
 * has a cpu.max value set is properly throttled.
 */
static int test_cpucg_max_nested(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec;
	long usage_seconds = 1;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *parent, *child;

	parent = cg_name(root, "cpucg_parent");
	child = cg_name(parent, "cpucg_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cpu.max", "1000"))
		goto cleanup;

	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	if (cg_run(child, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(child, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(child, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (user_usec >= expected_usage_usec)
		goto cleanup;

	if (values_close(usage_usec, expected_usage_usec, 95))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(child);
	free(child);
	cg_destroy(parent);
	free(parent);

	return ret;
}

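/* Table of tests: T() pairs each test function with its name for ksft reporting. */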
#define T(x) { x, #x }
struct cpucg_test {
	int (*fn)(const char *root);
	const char *name;
} tests[] = {
	T(test_cpucg_subtree_control),
	T(test_cpucg_stats),
	T(test_cpucg_nice),
	T(test_cpucg_weight_overprovisioned),
	T(test_cpucg_weight_underprovisioned),
	T(test_cpucg_nested_weight_overprovisioned),
	T(test_cpucg_nested_weight_underprovisioned),
	T(test_cpucg_max),
	T(test_cpucg_max_nested),
};
#undef T

int main(int argc, char *argv[])
{
	char root[PATH_MAX];
	int i, ret = EXIT_SUCCESS;

	if (cg_find_unified_root(root, sizeof(root), NULL))
		ksft_exit_skip("cgroup v2 isn't mounted\n");

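	/* Enable the cpu controller at the root if it is not already enabled. */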
	if (cg_read_strstr(root, "cgroup.subtree_control", "cpu"))
		if (cg_write(root, "cgroup.subtree_control", "+cpu"))
			ksft_exit_skip("Failed to set cpu controller\n");

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		switch (tests[i].fn(root)) {
		case KSFT_PASS:
			ksft_test_result_pass("%s\n", tests[i].name);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("%s\n", tests[i].name);
			break;
		default:
			ret = EXIT_FAILURE;
			ksft_test_result_fail("%s\n", tests[i].name);
			break;
		}
	}

	return ret;
}