// SPDX-License-Identifier: GPL-2.0

#define _GNU_SOURCE
#include <linux/limits.h>
#include <sys/param.h>
#include <sys/sysinfo.h>
#include <sys/wait.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#include "../kselftest.h"
#include "cgroup_util.h"

enum hog_clock_type {
	// Count elapsed time using the CLOCK_PROCESS_CPUTIME_ID clock.
	CPU_HOG_CLOCK_PROCESS,
	// Count elapsed time using system wallclock time.
	CPU_HOG_CLOCK_WALL,
};

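/*
 * Bookkeeping for one CPU-hogging child: the cgroup it runs in, its pid,
 * and the usage_usec read back from that cgroup's cpu.stat once it exits.
 */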
struct cpu_hogger {
	char *cgroup;
	pid_t pid;
	long usage;
};

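/*
 * Parameters passed to hog_cpus_timed(): how many busy-loop threads to
 * spawn, how long to run, and which clock to measure that duration with.
 */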
struct cpu_hog_func_param {
	int nprocs;
	struct timespec ts;
	enum hog_clock_type clock_type;
};

/*
 * This test creates two nested cgroups with and without enabling
 * the cpu controller.
 */
static int test_cpucg_subtree_control(const char *root)
{
	char *parent = NULL, *child = NULL, *parent2 = NULL, *child2 = NULL;
	int ret = KSFT_FAIL;

	// Create two nested cgroups with the cpu controller enabled.
	parent = cg_name(root, "cpucg_test_0");
	if (!parent)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	child = cg_name(parent, "cpucg_test_child");
	if (!child)
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

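	/*
	 * "cpu" should show up in the child's cgroup.controllers because the
	 * parent enabled it in cgroup.subtree_control above.
	 */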
	if (cg_read_strstr(child, "cgroup.controllers", "cpu"))
		goto cleanup;

	// Create two nested cgroups without enabling the cpu controller.
	parent2 = cg_name(root, "cpucg_test_1");
	if (!parent2)
		goto cleanup;

	if (cg_create(parent2))
		goto cleanup;

	child2 = cg_name(parent2, "cpucg_test_child");
	if (!child2)
		goto cleanup;

	if (cg_create(child2))
		goto cleanup;

	if (!cg_read_strstr(child2, "cgroup.controllers", "cpu"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(child);
	free(child);
	cg_destroy(child2);
	free(child2);
	cg_destroy(parent);
	free(parent);
	cg_destroy(parent2);
	free(parent2);

	return ret;
}

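/* Busy-loop forever; hog_cpus_timed() decides how long the process runs. */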
static void *hog_cpu_thread_func(void *arg)
{
	while (1)
		;

	return NULL;
}

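/* Return *lhs - *rhs, or zero if the difference would be negative. */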
static struct timespec
timespec_sub(const struct timespec *lhs, const struct timespec *rhs)
{
	struct timespec zero = {
		.tv_sec = 0,
		.tv_nsec = 0,
	};
	struct timespec ret;

	if (lhs->tv_sec < rhs->tv_sec)
		return zero;

	ret.tv_sec = lhs->tv_sec - rhs->tv_sec;

	if (lhs->tv_nsec < rhs->tv_nsec) {
		if (ret.tv_sec == 0)
			return zero;

		ret.tv_sec--;
		ret.tv_nsec = NSEC_PER_SEC - rhs->tv_nsec + lhs->tv_nsec;
	} else
		ret.tv_nsec = lhs->tv_nsec - rhs->tv_nsec;

	return ret;
}

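/*
 * Spawn param->nprocs busy-looping threads, then return once param->ts has
 * elapsed on the clock selected by param->clock_type (per-process CPU time
 * or wall-clock time).
 */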
static int hog_cpus_timed(const char *cgroup, void *arg)
{
	const struct cpu_hog_func_param *param =
		(struct cpu_hog_func_param *)arg;
	struct timespec ts_run = param->ts;
	struct timespec ts_remaining = ts_run;
	struct timespec ts_start;
	int i, ret;

	ret = clock_gettime(CLOCK_MONOTONIC, &ts_start);
	if (ret != 0)
		return ret;

	for (i = 0; i < param->nprocs; i++) {
		pthread_t tid;

		ret = pthread_create(&tid, NULL, &hog_cpu_thread_func, NULL);
		if (ret != 0)
			return ret;
	}

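	/*
	 * Sleep until the requested run time has elapsed on the chosen clock.
	 * nanosleep() may be interrupted, and elapsed process CPU time need
	 * not match wall-clock time, so recompute the remainder every pass.
	 */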
	while (ts_remaining.tv_sec > 0 || ts_remaining.tv_nsec > 0) {
		struct timespec ts_total;

		ret = nanosleep(&ts_remaining, NULL);
		if (ret && errno != EINTR)
			return ret;

		if (param->clock_type == CPU_HOG_CLOCK_PROCESS) {
			ret = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts_total);
			if (ret != 0)
				return ret;
		} else {
			struct timespec ts_current;

			ret = clock_gettime(CLOCK_MONOTONIC, &ts_current);
			if (ret != 0)
				return ret;

			ts_total = timespec_sub(&ts_current, &ts_start);
		}

		ts_remaining = timespec_sub(&ts_run, &ts_total);
	}

	return 0;
}

/*
 * Creates a cpu cgroup, burns a CPU for a few quanta, and verifies that
 * cpu.stat shows the expected output.
 */
static int test_cpucg_stats(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec, system_usec;
	long usage_seconds = 2;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *cpucg;

	cpucg = cg_name(root, "cpucg_test");
	if (!cpucg)
		goto cleanup;

	if (cg_create(cpucg))
		goto cleanup;

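	/* A freshly created cgroup should report zero usage in cpu.stat. */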
	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	system_usec = cg_read_key_long(cpucg, "cpu.stat", "system_usec");
	if (usage_usec != 0 || user_usec != 0 || system_usec != 0)
		goto cleanup;

	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_PROCESS,
	};
	if (cg_run(cpucg, hog_cpus_timed, (void *)&param))
		goto cleanup;

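	/*
	 * Having hogged a CPU for usage_seconds of process time, usage_usec
	 * should be within 1% of the expected value and user_usec nonzero.
	 */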
	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (!values_close_report(usage_usec, expected_usage_usec, 1))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(cpucg);
	free(cpucg);

	return ret;
}

/*
 * Creates a niced process that consumes CPU and checks that the elapsed
 * usertime in the cgroup is close to the expected time.
 */
static int test_cpucg_nice(const char *root)
{
	int ret = KSFT_FAIL;
	int status;
	long user_usec, nice_usec;
	long usage_seconds = 2;
	long expected_nice_usec = usage_seconds * USEC_PER_SEC;
	char *cpucg;
	pid_t pid;

	cpucg = cg_name(root, "cpucg_test");
	if (!cpucg)
		goto cleanup;

	if (cg_create(cpucg))
		goto cleanup;

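	/*
	 * The new cgroup should report no usage yet. A nice_usec of -1 means
	 * the key is absent from cpu.stat, i.e. this kernel does not report
	 * niced time, so the test is skipped.
	 */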
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	nice_usec = cg_read_key_long(cpucg, "cpu.stat", "nice_usec");
	if (nice_usec == -1)
		ret = KSFT_SKIP;
	if (user_usec != 0 || nice_usec != 0)
		goto cleanup;

	/*
	 * We fork here to create a new process that can be niced without
	 * polluting the nice value of other selftests
	 */
	pid = fork();
	if (pid < 0) {
		goto cleanup;
	} else if (pid == 0) {
		struct cpu_hog_func_param param = {
			.nprocs = 1,
			.ts = {
				.tv_sec = usage_seconds,
				.tv_nsec = 0,
			},
			.clock_type = CPU_HOG_CLOCK_PROCESS,
		};
		char buf[64];
		snprintf(buf, sizeof(buf), "%d", getpid());
		if (cg_write(cpucg, "cgroup.procs", buf))
			goto cleanup;

		/* Try to keep niced CPU usage as constrained to hog_cpu as possible */
		nice(1);
		hog_cpus_timed(cpucg, &param);
		exit(0);
	} else {
		waitpid(pid, &status, 0);
		if (!WIFEXITED(status))
			goto cleanup;

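		/*
		 * The niced hog burned usage_seconds of CPU time, so
		 * nice_usec should be within 1% of that.
		 */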
		user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
		nice_usec = cg_read_key_long(cpucg, "cpu.stat", "nice_usec");
		if (!values_close_report(nice_usec, expected_nice_usec, 1))
			goto cleanup;

		ret = KSFT_PASS;
	}

cleanup:
	cg_destroy(cpucg);
	free(cpucg);

	return ret;
}

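/*
 * Shared harness for the cpu.weight tests: create a parent cgroup with three
 * children weighted 50, 100 and 150, start one CPU hogger per child via
 * spawn_child(), wait for them all, then pass the measured usage_usec values
 * to validate().
 */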
static int
run_cpucg_weight_test(
		const char *root,
		pid_t (*spawn_child)(const struct cpu_hogger *child),
		int (*validate)(const struct cpu_hogger *children, int num_children))
{
	int ret = KSFT_FAIL, i;
	char *parent = NULL;
	struct cpu_hogger children[3] = {};

	parent = cg_name(root, "cpucg_test_0");
	if (!parent)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		children[i].cgroup = cg_name_indexed(parent, "cpucg_child", i);
		if (!children[i].cgroup)
			goto cleanup;

		if (cg_create(children[i].cgroup))
			goto cleanup;

		if (cg_write_numeric(children[i].cgroup, "cpu.weight",
					50 * (i + 1)))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		pid_t pid = spawn_child(&children[i]);
		if (pid <= 0)
			goto cleanup;
		children[i].pid = pid;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		int retcode;

		waitpid(children[i].pid, &retcode, 0);
		if (!WIFEXITED(retcode))
			goto cleanup;
		if (WEXITSTATUS(retcode))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++)
		children[i].usage = cg_read_key_long(children[i].cgroup,
				"cpu.stat", "usage_usec");

	if (validate(children, ARRAY_SIZE(children)))
		goto cleanup;

	ret = KSFT_PASS;
cleanup:
	for (i = 0; i < ARRAY_SIZE(children); i++) {
		cg_destroy(children[i].cgroup);
		free(children[i].cgroup);
	}
	cg_destroy(parent);
	free(parent);

	return ret;
}

static pid_t weight_hog_ncpus(const struct cpu_hogger *child, int ncpus)
{
	long usage_seconds = 10;
	struct cpu_hog_func_param param = {
		.nprocs = ncpus,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	return cg_run_nowait(child->cgroup, hog_cpus_timed, (void *)&param);
}

static pid_t weight_hog_all_cpus(const struct cpu_hogger *child)
{
	return weight_hog_ncpus(child, get_nprocs());
}

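/*
 * With every child able to saturate all CPUs, usage should scale with
 * cpu.weight: each child is expected to use roughly children[0].usage more
 * than the previous one (weights 50, 100, 150).
 */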
static int
overprovision_validate(const struct cpu_hogger *children, int num_children)
{
	int ret = KSFT_FAIL, i;

	for (i = 0; i < num_children - 1; i++) {
		long delta;

		if (children[i + 1].usage <= children[i].usage)
			goto cleanup;

		delta = children[i + 1].usage - children[i].usage;
		if (!values_close_report(delta, children[0].usage, 35))
			goto cleanup;
	}

	ret = KSFT_PASS;
cleanup:
	return ret;
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B	cpu.weight = 50
 * A/C	cpu.weight = 100
 * A/D	cpu.weight = 150
 *
 * A separate process is then created for each child cgroup which spawns as
 * many threads as there are cores, and hogs each CPU as much as possible
 * for some time interval.
 *
 * Once all of the children have exited, we verify that each child cgroup
 * was given proportional runtime as informed by their cpu.weight.
 */
static int test_cpucg_weight_overprovisioned(const char *root)
{
	return run_cpucg_weight_test(root, weight_hog_all_cpus,
			overprovision_validate);
}

static pid_t weight_hog_one_cpu(const struct cpu_hogger *child)
{
	return weight_hog_ncpus(child, 1);
}

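/*
 * With spare CPU capacity, cpu.weight should not matter: every child is
 * expected to get roughly the same usage as the first one.
 */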
static int
underprovision_validate(const struct cpu_hogger *children, int num_children)
{
	int ret = KSFT_FAIL, i;

	for (i = 0; i < num_children - 1; i++) {
		if (!values_close_report(children[i + 1].usage, children[0].usage, 15))
			goto cleanup;
	}

	ret = KSFT_PASS;
cleanup:
	return ret;
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B	cpu.weight = 50
 * A/C	cpu.weight = 100
 * A/D	cpu.weight = 150
 *
 * A separate process is then created for each child cgroup which spawns a
 * single thread that hogs a CPU. The testcase is only run on systems that
 * have at least one core per hogging thread.
 *
 * Once all of the children have exited, we verify that each child cgroup
 * had roughly the same runtime despite having different cpu.weight.
 */
static int test_cpucg_weight_underprovisioned(const char *root)
{
	// Only run the test if there are enough cores to avoid overprovisioning
	// the system.
	if (get_nprocs() < 4)
		return KSFT_SKIP;

	return run_cpucg_weight_test(root, weight_hog_one_cpu,
			underprovision_validate);
}

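/*
 * Shared harness for the nested cpu.weight tests: build a parent with one
 * directly attached leaf plus a nested child holding two more leaves, run a
 * wall-clock CPU hogger in every leaf, then check how CPU time was split
 * between the top-level leaf and the nested ones.
 */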
static int
run_cpucg_nested_weight_test(const char *root, bool overprovisioned)
{
	int ret = KSFT_FAIL, i;
	char *parent = NULL, *child = NULL;
	struct cpu_hogger leaf[3] = {};
	long nested_leaf_usage, child_usage;
	int nprocs = get_nprocs();

	if (!overprovisioned) {
		if (nprocs < 4)
			/*
			 * Only run the test if there are enough cores to avoid overprovisioning
			 * the system.
			 */
			return KSFT_SKIP;
		nprocs /= 4;
	}

	parent = cg_name(root, "cpucg_test");
	child = cg_name(parent, "cpucg_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;
	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;
	if (cg_write(child, "cgroup.subtree_control", "+cpu"))
		goto cleanup;
	if (cg_write(child, "cpu.weight", "1000"))
		goto cleanup;

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		const char *ancestor;
		long weight;

		if (i == 0) {
			ancestor = parent;
			weight = 1000;
		} else {
			ancestor = child;
			weight = 5000;
		}
		leaf[i].cgroup = cg_name_indexed(ancestor, "cpucg_leaf", i);
		if (!leaf[i].cgroup)
			goto cleanup;

		if (cg_create(leaf[i].cgroup))
			goto cleanup;

		if (cg_write_numeric(leaf[i].cgroup, "cpu.weight", weight))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		pid_t pid;
		struct cpu_hog_func_param param = {
			.nprocs = nprocs,
			.ts = {
				.tv_sec = 10,
				.tv_nsec = 0,
			},
			.clock_type = CPU_HOG_CLOCK_WALL,
		};

		pid = cg_run_nowait(leaf[i].cgroup, hog_cpus_timed,
				(void *)&param);
		if (pid <= 0)
			goto cleanup;
		leaf[i].pid = pid;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		int retcode;

		waitpid(leaf[i].pid, &retcode, 0);
		if (!WIFEXITED(retcode))
			goto cleanup;
		if (WEXITSTATUS(retcode))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		leaf[i].usage = cg_read_key_long(leaf[i].cgroup,
				"cpu.stat", "usage_usec");
		if (leaf[i].usage <= 0)
			goto cleanup;
	}

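	/*
	 * The top-level leaf and the nested child carry equal weight. When
	 * overprovisioned, the two nested leaves together should therefore
	 * get about as much CPU as the top-level leaf; when underprovisioned
	 * nothing is contended, so every leaf runs at full demand and the
	 * nested pair should get about twice the top-level leaf's usage.
	 */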
	nested_leaf_usage = leaf[1].usage + leaf[2].usage;
	if (overprovisioned) {
		if (!values_close_report(leaf[0].usage, nested_leaf_usage, 15))
			goto cleanup;
	} else if (!values_close_report(leaf[0].usage * 2, nested_leaf_usage, 15))
		goto cleanup;

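	/*
	 * The nested child's own cpu.stat should account almost exactly for
	 * the combined usage of its two leaves.
	 */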
	child_usage = cg_read_key_long(child, "cpu.stat", "usage_usec");
	if (child_usage <= 0)
		goto cleanup;
	if (!values_close_report(child_usage, nested_leaf_usage, 1))
		goto cleanup;

	ret = KSFT_PASS;
cleanup:
	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		cg_destroy(leaf[i].cgroup);
		free(leaf[i].cgroup);
	}
	cg_destroy(child);
	free(child);
	cg_destroy(parent);
	free(parent);

	return ret;
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B	cpu.weight = 1000
 * A/C	cpu.weight = 1000
 * A/C/D	cpu.weight = 5000
 * A/C/E	cpu.weight = 5000
 *
 * A separate process is then created for each leaf, which spawns nproc
 * threads that each burn a CPU for a few seconds.
 *
 * Once all of those processes have exited, we verify that A/B was given
 * about the same CPU time as its sibling subtree A/C (i.e. A/C/D and
 * A/C/E combined), as dictated by their equal cpu.weight.
 */
static int
test_cpucg_nested_weight_overprovisioned(const char *root)
{
	return run_cpucg_nested_weight_test(root, true);
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B	cpu.weight = 1000
 * A/C	cpu.weight = 1000
 * A/C/D	cpu.weight = 5000
 * A/C/E	cpu.weight = 5000
 *
 * A separate process is then created for each leaf, which spawns nproc / 4
 * threads that each burn a CPU for a few seconds.
 *
 * Once all of those processes have exited, we verify that each of the leaf
 * cgroups has roughly the same usage from cpu.stat.
 */
static int
test_cpucg_nested_weight_underprovisioned(const char *root)
{
	return run_cpucg_nested_weight_test(root, false);
}

641
642 /*
643 * This test creates a cgroup with some maximum value within a period, and
644 * verifies that a process in the cgroup is not overscheduled.
645 */
test_cpucg_max(const char * root)646 static int test_cpucg_max(const char *root)
647 {
648 int ret = KSFT_FAIL;
649 long quota_usec = 1000;
650 long default_period_usec = 100000; /* cpu.max's default period */
651 long duration_seconds = 1;
652
653 long duration_usec = duration_seconds * USEC_PER_SEC;
654 long usage_usec, n_periods, remainder_usec, expected_usage_usec;
655 char *cpucg;
656 char quota_buf[32];
657
658 snprintf(quota_buf, sizeof(quota_buf), "%ld", quota_usec);
659
660 cpucg = cg_name(root, "cpucg_test");
661 if (!cpucg)
662 goto cleanup;
663
664 if (cg_create(cpucg))
665 goto cleanup;
666
667 if (cg_write(cpucg, "cpu.max", quota_buf))
668 goto cleanup;
669
670 struct cpu_hog_func_param param = {
671 .nprocs = 1,
672 .ts = {
673 .tv_sec = duration_seconds,
674 .tv_nsec = 0,
675 },
676 .clock_type = CPU_HOG_CLOCK_WALL,
677 };
678 if (cg_run(cpucg, hog_cpus_timed, (void *)¶m))
679 goto cleanup;
680
681 usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
682 if (usage_usec <= 0)
683 goto cleanup;
684
685 /*
686 * The following calculation applies only since
687 * the cpu hog is set to run as per wall-clock time
688 */
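	/*
	 * For example, with the defaults above, 1 second of wall time spans
	 * 10 full periods of 100000 usec, each capped at 1000 usec of quota,
	 * so expected_usage_usec comes out to 10 * 1000 = 10000 usec.
	 */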
	n_periods = duration_usec / default_period_usec;
	remainder_usec = duration_usec - n_periods * default_period_usec;
	expected_usage_usec
		= n_periods * quota_usec + MIN(remainder_usec, quota_usec);

	if (!values_close_report(usage_usec, expected_usage_usec, 10))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(cpucg);
	free(cpucg);

	return ret;
}

/*
 * This test verifies that a process inside a nested cgroup whose parent
 * has a cpu.max value set is properly throttled.
 */
static int test_cpucg_max_nested(const char *root)
{
	int ret = KSFT_FAIL;
	long quota_usec = 1000;
	long default_period_usec = 100000; /* cpu.max's default period */
	long duration_seconds = 1;

	long duration_usec = duration_seconds * USEC_PER_SEC;
	long usage_usec, n_periods, remainder_usec, expected_usage_usec;
	char *parent, *child;
	char quota_buf[32];

	snprintf(quota_buf, sizeof(quota_buf), "%ld", quota_usec);

	parent = cg_name(root, "cpucg_parent");
	child = cg_name(parent, "cpucg_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cpu.max", quota_buf))
		goto cleanup;

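	/*
	 * Note that the quota is written to the parent only; the hog below
	 * runs in the child and should still be throttled by it.
	 */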
	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = duration_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	if (cg_run(child, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(child, "cpu.stat", "usage_usec");
	if (usage_usec <= 0)
		goto cleanup;

	/*
	 * The following calculation applies only because the cpu hog is set
	 * to run for a fixed amount of wall-clock time.
	 */
	n_periods = duration_usec / default_period_usec;
	remainder_usec = duration_usec - n_periods * default_period_usec;
	expected_usage_usec
		= n_periods * quota_usec + MIN(remainder_usec, quota_usec);

	if (!values_close_report(usage_usec, expected_usage_usec, 10))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(child);
	free(child);
	cg_destroy(parent);
	free(parent);

	return ret;
}

#define T(x) { x, #x }
struct cpucg_test {
	int (*fn)(const char *root);
	const char *name;
} tests[] = {
	T(test_cpucg_subtree_control),
	T(test_cpucg_stats),
	T(test_cpucg_nice),
	T(test_cpucg_weight_overprovisioned),
	T(test_cpucg_weight_underprovisioned),
	T(test_cpucg_nested_weight_overprovisioned),
	T(test_cpucg_nested_weight_underprovisioned),
	T(test_cpucg_max),
	T(test_cpucg_max_nested),
};
#undef T

int main(int argc, char *argv[])
{
	char root[PATH_MAX];
	int i, ret = EXIT_SUCCESS;

	if (cg_find_unified_root(root, sizeof(root), NULL))
		ksft_exit_skip("cgroup v2 isn't mounted\n");

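	/*
	 * Make sure the cpu controller is available for delegation from the
	 * root, since every test below needs it in its own subtree.
	 */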
	if (cg_read_strstr(root, "cgroup.subtree_control", "cpu"))
		if (cg_write(root, "cgroup.subtree_control", "+cpu"))
			ksft_exit_skip("Failed to set cpu controller\n");

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		switch (tests[i].fn(root)) {
		case KSFT_PASS:
			ksft_test_result_pass("%s\n", tests[i].name);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("%s\n", tests[i].name);
			break;
		default:
			ret = EXIT_FAILURE;
			ksft_test_result_fail("%s\n", tests[i].name);
			break;
		}
	}

	return ret;
}