1 /* SPDX-License-Identifier: GPL-2.0 */
2
3 #define _GNU_SOURCE
4 #include <linux/limits.h>
5 #include <linux/sched.h>
6 #include <sys/types.h>
7 #include <sys/mman.h>
8 #include <sys/mount.h>
9 #include <sys/stat.h>
10 #include <sys/wait.h>
11 #include <unistd.h>
12 #include <fcntl.h>
13 #include <sched.h>
14 #include <stdio.h>
15 #include <errno.h>
16 #include <signal.h>
17 #include <string.h>
18 #include <pthread.h>
19
20 #include "kselftest.h"
21 #include "cgroup_util.h"
22
23 static bool nsdelegate;
24 #ifndef CLONE_NEWCGROUP
25 #define CLONE_NEWCGROUP 0
26 #endif
27
/* Fill @size bytes at @buf with data read from /dev/urandom; 0 on success. */
static int touch_anon(char *buf, size_t size)
{
	char *cursor = buf;
	size_t remaining = size;
	int fd = open("/dev/urandom", O_RDONLY);

	if (fd < 0)
		return -1;

	while (remaining > 0) {
		ssize_t nread = read(fd, cursor, remaining);

		if (nread >= 0) {
			cursor += nread;
			remaining -= nread;
		} else if (errno != EINTR) {
			/* real read error: give up, don't leak the fd */
			close(fd);
			return -1;
		}
	}

	close(fd);
	return 0;
}
54
alloc_and_touch_anon_noexit(const char * cgroup,void * arg)55 static int alloc_and_touch_anon_noexit(const char *cgroup, void *arg)
56 {
57 int ppid = getppid();
58 size_t size = (size_t)arg;
59 void *buf;
60
61 buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
62 0, 0);
63 if (buf == MAP_FAILED)
64 return -1;
65
66 if (touch_anon((char *)buf, size)) {
67 munmap(buf, size);
68 return -1;
69 }
70
71 while (getppid() == ppid)
72 sleep(1);
73
74 munmap(buf, size);
75 return 0;
76 }
77
78 /*
79 * Create a child process that allocates and touches 100MB, then waits to be
80 * killed. Wait until the child is attached to the cgroup, kill all processes
81 * in that cgroup and wait until "cgroup.procs" is empty. At this point try to
82 * destroy the empty cgroup. The test helps detect race conditions between
83 * dying processes leaving the cgroup and cgroup destruction path.
84 */
static int test_cgcore_destroy(const char *root)
{
	int ret = KSFT_FAIL;
	char *cg_test = NULL;
	int child_pid;
	char buf[PAGE_SIZE];

	cg_test = cg_name(root, "cg_test");

	if (!cg_test)
		goto cleanup;

	/* Repeat to improve the odds of hitting the exit-vs-rmdir race. */
	for (int i = 0; i < 10; i++) {
		if (cg_create(cg_test))
			goto cleanup;

		/* Child allocates/touches 100MB, then idles until reparented. */
		child_pid = cg_run_nowait(cg_test, alloc_and_touch_anon_noexit,
					  (void *) MB(100));

		if (child_pid < 0)
			goto cleanup;

		/* wait for the child to enter cgroup */
		if (cg_wait_for_proc_count(cg_test, 1))
			goto cleanup;

		if (cg_killall(cg_test))
			goto cleanup;

		/* wait for cgroup to be empty */
		while (1) {
			if (cg_read(cg_test, "cgroup.procs", buf, sizeof(buf)))
				goto cleanup;
			if (buf[0] == '\0')
				break;
			usleep(1000);
		}

		/* Removing the now-empty cgroup must succeed immediately. */
		if (rmdir(cg_test))
			goto cleanup;

		/* Reap the killed child so it doesn't linger as a zombie. */
		if (waitpid(child_pid, NULL, 0) < 0)
			goto cleanup;
	}
	ret = KSFT_PASS;
cleanup:
	if (cg_test)
		cg_destroy(cg_test);
	free(cg_test);
	return ret;
}
136
137 /*
138 * A(0) - B(0) - C(1)
139 * \ D(0)
140 *
 * A, B and C's "populated" fields would be 1 while D's 0.
 * Test that after the one process in C is moved to root,
 * A, B and C's "populated" fields flip to "0" and file
 * modified events are generated on the "cgroup.events"
 * files of all three cgroups.
146 */
static int test_cgcore_populated(const char *root)
{
	int ret = KSFT_FAIL;
	int err;
	char *cg_test_a = NULL, *cg_test_b = NULL;
	char *cg_test_c = NULL, *cg_test_d = NULL;
	int cgroup_fd = -EBADF;
	pid_t pid;

	/* "cgroup.events" is a v2-only interface file. */
	if (cg_test_v1_named)
		return KSFT_SKIP;

	cg_test_a = cg_name(root, "cg_test_a");
	cg_test_b = cg_name(root, "cg_test_a/cg_test_b");
	cg_test_c = cg_name(root, "cg_test_a/cg_test_b/cg_test_c");
	cg_test_d = cg_name(root, "cg_test_a/cg_test_b/cg_test_d");

	if (!cg_test_a || !cg_test_b || !cg_test_c || !cg_test_d)
		goto cleanup;

	if (cg_create(cg_test_a))
		goto cleanup;

	if (cg_create(cg_test_b))
		goto cleanup;

	if (cg_create(cg_test_c))
		goto cleanup;

	if (cg_create(cg_test_d))
		goto cleanup;

	/* Enter C: A, B and C become populated, sibling D stays empty. */
	if (cg_enter_current(cg_test_c))
		goto cleanup;

	if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 1\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 1\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 1\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
		goto cleanup;

	/* Move back to the root: every test cgroup must depopulate. */
	if (cg_enter_current(root))
		goto cleanup;

	if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
		goto cleanup;

	/* Test that we can directly clone into a new cgroup. */
	cgroup_fd = dirfd_open_opath(cg_test_d);
	if (cgroup_fd < 0)
		goto cleanup;

	pid = clone_into_cgroup(cgroup_fd);
	if (pid < 0) {
		/* Kernel without CLONE_INTO_CGROUP: nothing more to verify. */
		if (errno == ENOSYS)
			goto cleanup_pass;
		goto cleanup;
	}

	if (pid == 0) {
		/* Child: stop so the parent can sample "populated 1". */
		if (raise(SIGSTOP))
			exit(EXIT_FAILURE);
		exit(EXIT_SUCCESS);
	}

	err = cg_read_strcmp(cg_test_d, "cgroup.events", "populated 1\n");

	/* Resume and reap the child before acting on the comparison result. */
	(void)clone_reap(pid, WSTOPPED);
	(void)kill(pid, SIGCONT);
	(void)clone_reap(pid, WEXITED);

	if (err)
		goto cleanup;

	if (cg_read_strcmp_wait(cg_test_d, "cgroup.events",
				"populated 0\n"))
		goto cleanup;

	/* Remove cgroup. */
	if (cg_test_d) {
		cg_destroy(cg_test_d);
		free(cg_test_d);
		cg_test_d = NULL;
	}

	/* Cloning into the removed cgroup via the stale fd must now fail. */
	pid = clone_into_cgroup(cgroup_fd);
	if (pid < 0)
		goto cleanup_pass;
	if (pid == 0)
		exit(EXIT_SUCCESS);
	(void)clone_reap(pid, WEXITED);
	goto cleanup;

cleanup_pass:
	ret = KSFT_PASS;

cleanup:
	if (cg_test_d)
		cg_destroy(cg_test_d);
	if (cg_test_c)
		cg_destroy(cg_test_c);
	if (cg_test_b)
		cg_destroy(cg_test_b);
	if (cg_test_a)
		cg_destroy(cg_test_a);
	free(cg_test_d);
	free(cg_test_c);
	free(cg_test_b);
	free(cg_test_a);
	if (cgroup_fd >= 0)
		close(cgroup_fd);
	return ret;
}
275
276 /*
277 * A (domain threaded) - B (threaded) - C (domain)
278 *
279 * test that C can't be used until it is turned into a
280 * threaded cgroup. "cgroup.type" file will report "domain (invalid)" in
281 * these cases. Operations which fail due to invalid topology use
282 * EOPNOTSUPP as the errno.
283 */
static int test_cgcore_invalid_domain(const char *root)
{
	int ret = KSFT_FAIL;
	char *grandparent = NULL, *parent = NULL, *child = NULL;

	/* "cgroup.type" is a v2-only interface file. */
	if (cg_test_v1_named)
		return KSFT_SKIP;

	grandparent = cg_name(root, "cg_test_grandparent");
	parent = cg_name(root, "cg_test_grandparent/cg_test_parent");
	child = cg_name(root, "cg_test_grandparent/cg_test_parent/cg_test_child");
	if (!parent || !child || !grandparent)
		goto cleanup;

	if (cg_create(grandparent))
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	/* Turning the parent threaded invalidates its pre-existing domain child. */
	if (cg_write(parent, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_read_strcmp(child, "cgroup.type", "domain invalid\n"))
		goto cleanup;

	/* Attaching to an invalid domain must fail, with errno EOPNOTSUPP. */
	if (!cg_enter_current(child))
		goto cleanup;

	if (errno != EOPNOTSUPP)
		goto cleanup;

	/* Same for CLONE_INTO_CGROUP, unless the kernel lacks it (ENOSYS). */
	if (!clone_into_cgroup_run_wait(child))
		goto cleanup;

	if (errno == ENOSYS)
		goto cleanup_pass;

	if (errno != EOPNOTSUPP)
		goto cleanup;

cleanup_pass:
	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	if (grandparent)
		cg_destroy(grandparent);
	free(child);
	free(parent);
	free(grandparent);
	return ret;
}
344
345 /*
346 * Test that when a child becomes threaded
347 * the parent type becomes domain threaded.
348 */
test_cgcore_parent_becomes_threaded(const char * root)349 static int test_cgcore_parent_becomes_threaded(const char *root)
350 {
351 int ret = KSFT_FAIL;
352 char *parent = NULL, *child = NULL;
353
354 if (cg_test_v1_named)
355 return KSFT_SKIP;
356
357 parent = cg_name(root, "cg_test_parent");
358 child = cg_name(root, "cg_test_parent/cg_test_child");
359 if (!parent || !child)
360 goto cleanup;
361
362 if (cg_create(parent))
363 goto cleanup;
364
365 if (cg_create(child))
366 goto cleanup;
367
368 if (cg_write(child, "cgroup.type", "threaded"))
369 goto cleanup;
370
371 if (cg_read_strcmp(parent, "cgroup.type", "domain threaded\n"))
372 goto cleanup;
373
374 ret = KSFT_PASS;
375
376 cleanup:
377 if (child)
378 cg_destroy(child);
379 if (parent)
380 cg_destroy(parent);
381 free(child);
382 free(parent);
383 return ret;
384
385 }
386
387 /*
388 * Test that there's no internal process constrain on threaded cgroups.
389 * You can add threads/processes on a parent with a controller enabled.
390 */
static int test_cgcore_no_internal_process_constraint_on_threads(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	/* Needs v2 threaded mode and an available "cpu" controller. */
	if (cg_test_v1_named ||
	    cg_read_strstr(root, "cgroup.controllers", "cpu") ||
	    cg_write(root, "cgroup.subtree_control", "+cpu")) {
		ret = KSFT_SKIP;
		goto cleanup;
	}

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_write(child, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	/* Unlike a domain cgroup, the threaded parent may hold tasks here. */
	if (cg_enter_current(parent))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	/* NOTE(review): duplicated call below — presumably redundant, confirm. */
	cg_enter_current(root);
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}
439
440 /*
441 * Test that you can't enable a controller on a child if it's not enabled
442 * on the parent.
443 */
test_cgcore_top_down_constraint_enable(const char * root)444 static int test_cgcore_top_down_constraint_enable(const char *root)
445 {
446 int ret = KSFT_FAIL;
447 char *parent = NULL, *child = NULL;
448
449 if (cg_test_v1_named)
450 return KSFT_SKIP;
451
452 parent = cg_name(root, "cg_test_parent");
453 child = cg_name(root, "cg_test_parent/cg_test_child");
454 if (!parent || !child)
455 goto cleanup;
456
457 if (cg_create(parent))
458 goto cleanup;
459
460 if (cg_create(child))
461 goto cleanup;
462
463 if (!cg_write(child, "cgroup.subtree_control", "+memory"))
464 goto cleanup;
465
466 ret = KSFT_PASS;
467
468 cleanup:
469 if (child)
470 cg_destroy(child);
471 if (parent)
472 cg_destroy(parent);
473 free(child);
474 free(parent);
475 return ret;
476 }
477
478 /*
479 * Test that you can't disable a controller on a parent
480 * if it's enabled in a child.
481 */
test_cgcore_top_down_constraint_disable(const char * root)482 static int test_cgcore_top_down_constraint_disable(const char *root)
483 {
484 int ret = KSFT_FAIL;
485 char *parent = NULL, *child = NULL;
486
487 if (cg_test_v1_named)
488 return KSFT_SKIP;
489
490 parent = cg_name(root, "cg_test_parent");
491 child = cg_name(root, "cg_test_parent/cg_test_child");
492 if (!parent || !child)
493 goto cleanup;
494
495 if (cg_create(parent))
496 goto cleanup;
497
498 if (cg_create(child))
499 goto cleanup;
500
501 if (cg_write(parent, "cgroup.subtree_control", "+memory"))
502 goto cleanup;
503
504 if (cg_write(child, "cgroup.subtree_control", "+memory"))
505 goto cleanup;
506
507 if (!cg_write(parent, "cgroup.subtree_control", "-memory"))
508 goto cleanup;
509
510 ret = KSFT_PASS;
511
512 cleanup:
513 if (child)
514 cg_destroy(child);
515 if (parent)
516 cg_destroy(parent);
517 free(child);
518 free(parent);
519 return ret;
520 }
521
522 /*
523 * Test internal process constraint.
524 * You can't add a pid to a domain parent if a controller is enabled.
525 */
test_cgcore_internal_process_constraint(const char * root)526 static int test_cgcore_internal_process_constraint(const char *root)
527 {
528 int ret = KSFT_FAIL;
529 char *parent = NULL, *child = NULL;
530
531 if (cg_test_v1_named)
532 return KSFT_SKIP;
533
534 parent = cg_name(root, "cg_test_parent");
535 child = cg_name(root, "cg_test_parent/cg_test_child");
536 if (!parent || !child)
537 goto cleanup;
538
539 if (cg_create(parent))
540 goto cleanup;
541
542 if (cg_create(child))
543 goto cleanup;
544
545 if (cg_write(parent, "cgroup.subtree_control", "+memory"))
546 goto cleanup;
547
548 if (!cg_enter_current(parent))
549 goto cleanup;
550
551 if (!clone_into_cgroup_run_wait(parent))
552 goto cleanup;
553
554 ret = KSFT_PASS;
555
556 cleanup:
557 if (child)
558 cg_destroy(child);
559 if (parent)
560 cg_destroy(parent);
561 free(child);
562 free(parent);
563 return ret;
564 }
565
/* Thread body that simply blocks in pause() until cancelled. */
static void *dummy_thread_fn(void *arg)
{
	int rc = pause();

	(void)arg;
	return (void *)(size_t)rc;
}
570
571 /*
572 * Test threadgroup migration.
573 * All threads of a process are migrated together.
574 */
static int test_cgcore_proc_migration(const char *root)
{
	int ret = KSFT_FAIL;
	int t, c_threads = 0, n_threads = 13;
	char *src = NULL, *dst = NULL;
	pthread_t threads[n_threads];

	src = cg_name(root, "cg_src");
	dst = cg_name(root, "cg_dst");
	if (!src || !dst)
		goto cleanup;

	if (cg_create(src))
		goto cleanup;
	if (cg_create(dst))
		goto cleanup;

	if (cg_enter_current(src))
		goto cleanup;

	/* Spawn extra threads while the process sits in src. */
	for (c_threads = 0; c_threads < n_threads; ++c_threads) {
		if (pthread_create(&threads[c_threads], NULL, dummy_thread_fn, NULL))
			goto cleanup;
	}

	/* Process-level migration must carry every thread along:
	 * dst's threads file must list n_threads + the main thread. */
	cg_enter_current(dst);
	if (cg_read_lc(dst, CG_THREADS_FILE) != n_threads + 1)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	/* Cancel, then join, only the threads that were actually created. */
	for (t = 0; t < c_threads; ++t) {
		pthread_cancel(threads[t]);
	}

	for (t = 0; t < c_threads; ++t) {
		pthread_join(threads[t], NULL);
	}

	cg_enter_current(root);

	if (dst)
		cg_destroy(dst);
	if (src)
		cg_destroy(src);
	free(dst);
	free(src);
	return ret;
}
625
/*
 * Thread body for test_cgcore_thread_migration(): repeatedly migrate this
 * thread between grps[1] and grps[2], verifying each move through /proc.
 * Returns NULL on success, (void *)-1 on a mismatch.
 */
static void *migrating_thread_fn(void *arg)
{
	int g, i, n_iterations = 1000;
	char **grps = arg;
	char lines[3][PATH_MAX];

	/* Precompute the expected "cgroup" lines; grps[0] is the mount root,
	 * so grps[g] + strlen(grps[0]) is the hierarchy-relative path. */
	for (g = 1; g < 3; ++g)
		snprintf(lines[g], sizeof(lines[g]), CG_PATH_FORMAT, grps[g] + strlen(grps[0]));

	for (i = 0; i < n_iterations; ++i) {
		/* Alternate between the two sub-cgroups. */
		cg_enter_current_thread(grps[(i % 2) + 1]);

		/* Confirm via /proc that this thread actually moved. */
		if (proc_read_strstr(0, 1, "cgroup", lines[(i % 2) + 1]))
			return (void *)-1;
	}
	return NULL;
}
643
644 /*
645 * Test single thread migration.
646 * Threaded cgroups allow successful migration of a thread.
647 */
static int test_cgcore_thread_migration(const char *root)
{
	int ret = KSFT_FAIL;
	char *dom = NULL;
	char line[PATH_MAX];
	char *grps[3] = { (char *)root, NULL, NULL };	/* [0]=root, [1]=src, [2]=dst */
	pthread_t thr;
	void *retval;

	dom = cg_name(root, "cg_dom");
	grps[1] = cg_name(root, "cg_dom/cg_src");
	grps[2] = cg_name(root, "cg_dom/cg_dst");
	if (!grps[1] || !grps[2] || !dom)
		goto cleanup;

	if (cg_create(dom))
		goto cleanup;
	if (cg_create(grps[1]))
		goto cleanup;
	if (cg_create(grps[2]))
		goto cleanup;

	/* On v2, single-thread migration requires both leaves be threaded. */
	if (!cg_test_v1_named) {
		if (cg_write(grps[1], "cgroup.type", "threaded"))
			goto cleanup;
		if (cg_write(grps[2], "cgroup.type", "threaded"))
			goto cleanup;
	}

	if (cg_enter_current(grps[1]))
		goto cleanup;

	/* The worker bounces between src and dst; see migrating_thread_fn(). */
	if (pthread_create(&thr, NULL, migrating_thread_fn, grps))
		goto cleanup;

	if (pthread_join(thr, &retval))
		goto cleanup;

	if (retval)
		goto cleanup;

	/* The main thread itself must still be in src (grps[1]). */
	snprintf(line, sizeof(line), CG_PATH_FORMAT, grps[1] + strlen(grps[0]));
	if (proc_read_strstr(0, 1, "cgroup", line))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (grps[2])
		cg_destroy(grps[2]);
	if (grps[1])
		cg_destroy(grps[1]);
	if (dom)
		cg_destroy(dom);
	free(grps[2]);
	free(grps[1]);
	free(dom);
	return ret;
}
708
709 /*
710 * cgroup migration permission check should be performed based on the
711 * credentials at the time of open instead of write.
712 */
static int test_cgcore_lesser_euid_open(const char *root)
{
	const uid_t test_euid = TEST_UID;
	int ret = KSFT_FAIL;
	char *cg_test_a = NULL, *cg_test_b = NULL;
	char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
	int cg_test_b_procs_fd = -1;
	uid_t saved_uid;

	cg_test_a = cg_name(root, "cg_test_a");
	cg_test_b = cg_name(root, "cg_test_b");

	if (!cg_test_a || !cg_test_b)
		goto cleanup;

	cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
	cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");

	if (!cg_test_a_procs || !cg_test_b_procs)
		goto cleanup;

	if (cg_create(cg_test_a) || cg_create(cg_test_b))
		goto cleanup;

	if (cg_enter_current(cg_test_a))
		goto cleanup;

	/* Hand both procs files to the unprivileged test uid. */
	if (chown(cg_test_a_procs, test_euid, -1) ||
	    chown(cg_test_b_procs, test_euid, -1))
		goto cleanup;

	/* Open b's procs file while running with the lesser euid... */
	saved_uid = geteuid();
	if (seteuid(test_euid))
		goto cleanup;

	cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR);

	if (seteuid(saved_uid))
		goto cleanup;

	if (cg_test_b_procs_fd < 0)
		goto cleanup;

	/* ...then write as root: must fail EACCES since the permission
	 * check is based on the open-time credentials. */
	if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (cg_test_b_procs_fd >= 0)
		close(cg_test_b_procs_fd);
	if (cg_test_b)
		cg_destroy(cg_test_b);
	if (cg_test_a)
		cg_destroy(cg_test_a);
	free(cg_test_b_procs);
	free(cg_test_a_procs);
	free(cg_test_b);
	free(cg_test_a);
	return ret;
}
775
/* Arguments/results shared with lesser_ns_open_thread_fn() via CLONE_VM. */
struct lesser_ns_open_thread_arg {
	const char *path;	/* in: cgroup.procs path to open */
	int fd;			/* out: resulting fd, negative on failure */
	int err;		/* out: errno captured right after open() */
};
781
/*
 * clone()d child running in its own cgroup namespace (CLONE_NEWCGROUP):
 * open targ->path from inside that namespace and report the fd and errno
 * back through the shared argument block.
 */
static int lesser_ns_open_thread_fn(void *arg)
{
	struct lesser_ns_open_thread_arg *targ = arg;

	targ->fd = open(targ->path, O_RDWR);
	targ->err = errno;	/* only meaningful when fd < 0 */
	return 0;
}
790
791 /*
792 * cgroup migration permission check should be performed based on the cgroup
793 * namespace at the time of open instead of write.
794 */
static int test_cgcore_lesser_ns_open(const char *root)
{
	static char stack[65536];
	const uid_t test_euid = 65534;	/* usually nobody, any !root is fine */
	int ret = KSFT_FAIL;
	char *cg_test_a = NULL, *cg_test_b = NULL;
	char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
	int cg_test_b_procs_fd = -1;
	struct lesser_ns_open_thread_arg targ = { .fd = -1 };
	pid_t pid;
	int status;

	/* Namespace-based delegation only applies with the nsdelegate mount option. */
	if (!nsdelegate)
		return KSFT_SKIP;

	cg_test_a = cg_name(root, "cg_test_a");
	cg_test_b = cg_name(root, "cg_test_b");

	if (!cg_test_a || !cg_test_b)
		goto cleanup;

	cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
	cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");

	if (!cg_test_a_procs || !cg_test_b_procs)
		goto cleanup;

	if (cg_create(cg_test_a) || cg_create(cg_test_b))
		goto cleanup;

	/* Reside in b so the child's cgroup namespace is rooted at b. */
	if (cg_enter_current(cg_test_b))
		goto cleanup;

	if (chown(cg_test_a_procs, test_euid, -1) ||
	    chown(cg_test_b_procs, test_euid, -1))
		goto cleanup;

	/* Child opens b's procs file from a fresh cgroup namespace; the fd
	 * is visible here thanks to CLONE_FILES | CLONE_VM. */
	targ.path = cg_test_b_procs;
	pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack),
		    CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD,
		    &targ);
	if (pid < 0)
		goto cleanup;

	if (waitpid(pid, &status, 0) < 0)
		goto cleanup;

	if (!WIFEXITED(status))
		goto cleanup;

	cg_test_b_procs_fd = targ.fd;
	if (cg_test_b_procs_fd < 0)
		goto cleanup;

	/* Move to a so the migration below crosses the child ns's root. */
	if (cg_enter_current(cg_test_a))
		goto cleanup;

	/* Writing from this ns must fail ENOENT: the check is based on the
	 * cgroup namespace at open time, not at write time. */
	if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (cg_test_b_procs_fd >= 0)
		close(cg_test_b_procs_fd);
	if (cg_test_b)
		cg_destroy(cg_test_b);
	if (cg_test_a)
		cg_destroy(cg_test_a);
	free(cg_test_b_procs);
	free(cg_test_a_procs);
	free(cg_test_b);
	free(cg_test_a);
	return ret;
}
871
/*
 * Mount a named cgroup v1 hierarchy (name=@name, no controllers) at
 * /mnt/cg_selftest and write that path into @root (capacity @len).
 *
 * Returns 0 on success and a nonzero value on failure; -1 is returned
 * when either snprintf() would truncate, which the original check
 * (r < 0 only) missed — a truncated path must never be mkdir'd/mounted.
 */
static int setup_named_v1_root(char *root, size_t len, const char *name)
{
	char options[PATH_MAX];
	int r;

	r = snprintf(root, len, "/mnt/cg_selftest");
	if (r < 0 || (size_t)r >= len)	/* encoding error or truncation */
		return -1;

	r = snprintf(options, sizeof(options), "none,name=%s", name);
	if (r < 0 || (size_t)r >= sizeof(options))
		return -1;

	/* An existing mountpoint directory is fine. */
	r = mkdir(root, 0755);
	if (r < 0 && errno != EEXIST)
		return r;

	r = mount("none", root, "cgroup", 0, options);
	if (r < 0)
		return r;

	return 0;
}
895
cleanup_named_v1_root(char * root)896 static void cleanup_named_v1_root(char *root)
897 {
898 if (!cg_test_v1_named)
899 return;
900 umount(root);
901 rmdir(root);
902 }
903
#define T(x) { x, #x }
/* Registry of all core-cgroup tests; T() pairs each function pointer with
 * its printable name for the kselftest report lines in main(). */
struct corecg_test {
	int (*fn)(const char *root);
	const char *name;
} tests[] = {
	T(test_cgcore_internal_process_constraint),
	T(test_cgcore_top_down_constraint_enable),
	T(test_cgcore_top_down_constraint_disable),
	T(test_cgcore_no_internal_process_constraint_on_threads),
	T(test_cgcore_parent_becomes_threaded),
	T(test_cgcore_invalid_domain),
	T(test_cgcore_populated),
	T(test_cgcore_proc_migration),
	T(test_cgcore_thread_migration),
	T(test_cgcore_destroy),
	T(test_cgcore_lesser_euid_open),
	T(test_cgcore_lesser_ns_open),
};
#undef T
923
int main(int argc, char *argv[])
{
	char root[PATH_MAX];
	int i;

	ksft_print_header();
	ksft_set_plan(ARRAY_SIZE(tests));
	/* No unified (v2) hierarchy: fall back to a named v1 hierarchy. */
	if (cg_find_unified_root(root, sizeof(root), &nsdelegate)) {
		if (setup_named_v1_root(root, sizeof(root), CG_NAMED_NAME))
			ksft_exit_skip("cgroup v2 isn't mounted and could not setup named v1 hierarchy\n");
		cg_test_v1_named = true;
		goto post_v2_setup;
	}

	/* v2: make sure the memory controller is enabled for subtrees. */
	if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
		if (cg_write(root, "cgroup.subtree_control", "+memory"))
			ksft_exit_skip("Failed to set memory controller\n");

post_v2_setup:
	/* Run every registered test and report each outcome individually. */
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		switch (tests[i].fn(root)) {
		case KSFT_PASS:
			ksft_test_result_pass("%s\n", tests[i].name);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("%s\n", tests[i].name);
			break;
		default:
			ksft_test_result_fail("%s\n", tests[i].name);
			break;
		}
	}

	cleanup_named_v1_root(root);
	ksft_finished();
}
960