/* SPDX-License-Identifier: GPL-2.0 */

#define _GNU_SOURCE
#include <linux/limits.h>
#include <linux/sched.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <pthread.h>

#include "../kselftest.h"
#include "cgroup_util.h"

static bool nsdelegate;

#ifndef CLONE_NEWCGROUP
#define CLONE_NEWCGROUP 0
#endif

static int touch_anon(char *buf, size_t size)
{
	int fd;
	char *pos = buf;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd < 0)
		return -1;

	while (size > 0) {
		ssize_t ret = read(fd, pos, size);

		if (ret < 0) {
			if (errno != EINTR) {
				close(fd);
				return -1;
			}
		} else {
			pos += ret;
			size -= ret;
		}
	}
	close(fd);

	return 0;
}

static int alloc_and_touch_anon_noexit(const char *cgroup, void *arg)
{
	int ppid = getppid();
	size_t size = (size_t)arg;
	void *buf;

	buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
		   -1, 0);
	if (buf == MAP_FAILED)
		return -1;

	if (touch_anon((char *)buf, size)) {
		munmap(buf, size);
		return -1;
	}

	/* Linger until the parent goes away, then clean up and exit. */
	while (getppid() == ppid)
		sleep(1);

	munmap(buf, size);
	return 0;
}

/*
 * Create a child process that allocates and touches 100MB, then waits to be
 * killed. Wait until the child is attached to the cgroup, kill all processes
 * in that cgroup and wait until "cgroup.procs" is empty. At this point try to
 * destroy the empty cgroup. The test helps detect race conditions between
 * dying processes leaving the cgroup and the cgroup destruction path.
 */
static int test_cgcore_destroy(const char *root)
{
	int ret = KSFT_FAIL;
	char *cg_test = NULL;
	int child_pid;
	char buf[PAGE_SIZE];

	cg_test = cg_name(root, "cg_test");

	if (!cg_test)
		goto cleanup;

	for (int i = 0; i < 10; i++) {
		if (cg_create(cg_test))
			goto cleanup;

		child_pid = cg_run_nowait(cg_test, alloc_and_touch_anon_noexit,
					  (void *) MB(100));

		if (child_pid < 0)
			goto cleanup;

		/* wait for the child to enter cgroup */
		if (cg_wait_for_proc_count(cg_test, 1))
			goto cleanup;

		if (cg_killall(cg_test))
			goto cleanup;

		/* wait for cgroup to be empty */
		while (1) {
			if (cg_read(cg_test, "cgroup.procs", buf, sizeof(buf)))
				goto cleanup;
			if (buf[0] == '\0')
				break;
			usleep(1000);
		}

		if (rmdir(cg_test))
			goto cleanup;

		if (waitpid(child_pid, NULL, 0) < 0)
			goto cleanup;
	}
	ret = KSFT_PASS;
cleanup:
	if (cg_test)
		cg_destroy(cg_test);
	free(cg_test);
	return ret;
}
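/*
 * Illustrative sketch, not used by the tests in this file: busy-polling
 * "cgroup.procs" (as test_cgcore_destroy() does above) can instead be a
 * wait for POLLPRI on "cgroup.events", which the kernel raises whenever
 * the populated state flips. The helper name and the timeout are
 * assumptions for illustration only.
 */
#include <poll.h>

static int __attribute__((unused)) wait_unpopulated_sketch(const char *events_path)
{
	char buf[PAGE_SIZE];
	int fd = open(events_path, O_RDONLY);

	if (fd < 0)
		return -1;

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI };
		ssize_t len = pread(fd, buf, sizeof(buf) - 1, 0);

		if (len < 0)
			break;
		buf[len] = '\0';
		if (strstr(buf, "populated 0")) {
			close(fd);
			return 0;
		}
		/* Block up to 10s for a modification event, then re-read. */
		if (poll(&pfd, 1, 10000) <= 0)
			break;
	}
	close(fd);
	return -1;
}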
/*
 * A(0) - B(0) - C(1)
 *        \ D(0)
 *
 * A, B and C's "populated" fields would be 1 while D's is 0.
 * Test that after the one process in C is moved to root,
 * A, B and C's "populated" fields flip to "0" and file
 * modified events are generated on the "cgroup.events"
 * files of all three cgroups.
 */
static int test_cgcore_populated(const char *root)
{
	int ret = KSFT_FAIL;
	int err;
	char *cg_test_a = NULL, *cg_test_b = NULL;
	char *cg_test_c = NULL, *cg_test_d = NULL;
	int cgroup_fd = -EBADF;
	pid_t pid;

	if (cg_test_v1_named)
		return KSFT_SKIP;

	cg_test_a = cg_name(root, "cg_test_a");
	cg_test_b = cg_name(root, "cg_test_a/cg_test_b");
	cg_test_c = cg_name(root, "cg_test_a/cg_test_b/cg_test_c");
	cg_test_d = cg_name(root, "cg_test_a/cg_test_b/cg_test_d");

	if (!cg_test_a || !cg_test_b || !cg_test_c || !cg_test_d)
		goto cleanup;

	if (cg_create(cg_test_a))
		goto cleanup;

	if (cg_create(cg_test_b))
		goto cleanup;

	if (cg_create(cg_test_c))
		goto cleanup;

	if (cg_create(cg_test_d))
		goto cleanup;

	if (cg_enter_current(cg_test_c))
		goto cleanup;

	if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 1\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 1\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 1\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_enter_current(root))
		goto cleanup;

	if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
		goto cleanup;

	/* Test that we can directly clone into a new cgroup. */
	cgroup_fd = dirfd_open_opath(cg_test_d);
	if (cgroup_fd < 0)
		goto cleanup;

	pid = clone_into_cgroup(cgroup_fd);
	if (pid < 0) {
		if (errno == ENOSYS)
			goto cleanup_pass;
		goto cleanup;
	}

	if (pid == 0) {
		if (raise(SIGSTOP))
			exit(EXIT_FAILURE);
		exit(EXIT_SUCCESS);
	}

	err = cg_read_strcmp(cg_test_d, "cgroup.events", "populated 1\n");

	(void)clone_reap(pid, WSTOPPED);
	(void)kill(pid, SIGCONT);
	(void)clone_reap(pid, WEXITED);

	if (err)
		goto cleanup;

	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
		goto cleanup;

	/* Remove cgroup. */
	if (cg_test_d) {
		cg_destroy(cg_test_d);
		free(cg_test_d);
		cg_test_d = NULL;
	}

	/* Cloning into a removed cgroup must fail. */
	pid = clone_into_cgroup(cgroup_fd);
	if (pid < 0)
		goto cleanup_pass;
	if (pid == 0)
		exit(EXIT_SUCCESS);
	(void)clone_reap(pid, WEXITED);
	goto cleanup;

cleanup_pass:
	ret = KSFT_PASS;

cleanup:
	if (cg_test_d)
		cg_destroy(cg_test_d);
	if (cg_test_c)
		cg_destroy(cg_test_c);
	if (cg_test_b)
		cg_destroy(cg_test_b);
	if (cg_test_a)
		cg_destroy(cg_test_a);
	free(cg_test_d);
	free(cg_test_c);
	free(cg_test_b);
	free(cg_test_a);
	if (cgroup_fd >= 0)
		close(cgroup_fd);
	return ret;
}
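/*
 * Illustrative sketch of roughly what a clone_into_cgroup()-style helper
 * reduces to: clone3() with CLONE_INTO_CGROUP spawns the child directly
 * inside the cgroup referred to by an O_PATH directory fd, with no window
 * in which it is visible in the parent's cgroup. This is a hedged sketch,
 * not the cgroup_util implementation; it assumes <linux/sched.h> provides
 * struct clone_args and CLONE_INTO_CGROUP (Linux >= 5.7).
 */
#include <sys/syscall.h>

#ifdef CLONE_INTO_CGROUP
static pid_t __attribute__((unused)) clone3_into_cgroup_sketch(int cgroup_fd)
{
	struct clone_args args = {
		.flags		= CLONE_INTO_CGROUP,
		.exit_signal	= SIGCHLD,
		.cgroup		= (__u64)cgroup_fd,
	};

	/* Returns 0 in the child; the child starts life inside cgroup_fd. */
	return syscall(__NR_clone3, &args, sizeof(args));
}
#endif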
/*
 * A (domain threaded) - B (threaded) - C (domain)
 *
 * Test that C can't be used until it is turned into a threaded cgroup.
 * The "cgroup.type" file will report "domain invalid" in these cases.
 * Operations which fail due to invalid topology use EOPNOTSUPP as the
 * errno.
 */
static int test_cgcore_invalid_domain(const char *root)
{
	int ret = KSFT_FAIL;
	char *grandparent = NULL, *parent = NULL, *child = NULL;

	if (cg_test_v1_named)
		return KSFT_SKIP;

	grandparent = cg_name(root, "cg_test_grandparent");
	parent = cg_name(root, "cg_test_grandparent/cg_test_parent");
	child = cg_name(root, "cg_test_grandparent/cg_test_parent/cg_test_child");
	if (!parent || !child || !grandparent)
		goto cleanup;

	if (cg_create(grandparent))
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_read_strcmp(child, "cgroup.type", "domain invalid\n"))
		goto cleanup;

	if (!cg_enter_current(child))
		goto cleanup;

	if (errno != EOPNOTSUPP)
		goto cleanup;

	if (!clone_into_cgroup_run_wait(child))
		goto cleanup;

	if (errno == ENOSYS)
		goto cleanup_pass;

	if (errno != EOPNOTSUPP)
		goto cleanup;

cleanup_pass:
	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	if (grandparent)
		cg_destroy(grandparent);
	free(child);
	free(parent);
	free(grandparent);
	return ret;
}

/*
 * Test that when a child becomes threaded
 * the parent type becomes domain threaded.
 */
static int test_cgcore_parent_becomes_threaded(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	if (cg_test_v1_named)
		return KSFT_SKIP;

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(child, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_read_strcmp(parent, "cgroup.type", "domain threaded\n"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}
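/*
 * Sketch, not called by the tests: switching a cgroup to the threaded mode
 * is a single write to "cgroup.type", and, as
 * test_cgcore_parent_becomes_threaded() shows, the parent then reports
 * "domain threaded" as a side effect. This raw-file variant is an
 * assumption-level illustration of what cg_write() presumably does.
 */
static int __attribute__((unused)) make_threaded_sketch(const char *cg)
{
	char path[PATH_MAX];
	int fd, ret = -1;

	snprintf(path, sizeof(path), "%s/cgroup.type", cg);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, "threaded", strlen("threaded")) > 0)
		ret = 0;
	close(fd);
	return ret;
}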
/*
 * Test that there's no internal process constraint on threaded cgroups.
 * You can add threads/processes to a parent with a controller enabled.
 */
static int test_cgcore_no_internal_process_constraint_on_threads(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	if (cg_test_v1_named ||
	    cg_read_strstr(root, "cgroup.controllers", "cpu") ||
	    cg_write(root, "cgroup.subtree_control", "+cpu")) {
		ret = KSFT_SKIP;
		goto cleanup;
	}

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_write(child, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_enter_current(parent))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}

/*
 * Test that you can't enable a controller on a child if it's not enabled
 * on the parent.
 */
static int test_cgcore_top_down_constraint_enable(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	if (cg_test_v1_named)
		return KSFT_SKIP;

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (!cg_write(child, "cgroup.subtree_control", "+memory"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}

/*
 * Test that you can't disable a controller on a parent
 * if it's enabled in a child.
 */
static int test_cgcore_top_down_constraint_disable(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	if (cg_test_v1_named)
		return KSFT_SKIP;

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+memory"))
		goto cleanup;

	if (cg_write(child, "cgroup.subtree_control", "+memory"))
		goto cleanup;

	if (!cg_write(parent, "cgroup.subtree_control", "-memory"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}
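/*
 * Sketch of the top-down rule with raw file I/O: writing "+memory" to a
 * child whose parent has not enabled memory in its own
 * "cgroup.subtree_control" is rejected (with ENOENT on current kernels;
 * the tests above rely only on the write failing, not on the errno).
 * Hypothetical helper, not part of cgroup_util.
 */
static int __attribute__((unused)) subtree_control_write_sketch(const char *cg,
								const char *tok)
{
	char path[PATH_MAX];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "%s/cgroup.subtree_control", cg);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, tok, strlen(tok));
	close(fd);
	return n > 0 ? 0 : -1;
}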
/*
 * Test the internal process constraint.
 * You can't add a PID to a domain parent if a controller is enabled.
 */
static int test_cgcore_internal_process_constraint(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	if (cg_test_v1_named)
		return KSFT_SKIP;

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+memory"))
		goto cleanup;

	if (!cg_enter_current(parent))
		goto cleanup;

	if (!clone_into_cgroup_run_wait(parent))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}

static void *dummy_thread_fn(void *arg)
{
	return (void *)(size_t)pause();
}

/*
 * Test threadgroup migration.
 * All threads of a process are migrated together.
 */
static int test_cgcore_proc_migration(const char *root)
{
	int ret = KSFT_FAIL;
	int t, c_threads = 0, n_threads = 13;
	char *src = NULL, *dst = NULL;
	pthread_t threads[n_threads];

	src = cg_name(root, "cg_src");
	dst = cg_name(root, "cg_dst");
	if (!src || !dst)
		goto cleanup;

	if (cg_create(src))
		goto cleanup;
	if (cg_create(dst))
		goto cleanup;

	if (cg_enter_current(src))
		goto cleanup;

	for (c_threads = 0; c_threads < n_threads; ++c_threads) {
		if (pthread_create(&threads[c_threads], NULL, dummy_thread_fn, NULL))
			goto cleanup;
	}

	if (cg_enter_current(dst))
		goto cleanup;

	/* Expect all spawned threads plus the main thread in dst. */
	if (cg_read_lc(dst, CG_THREADS_FILE) != n_threads + 1)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	for (t = 0; t < c_threads; ++t)
		pthread_cancel(threads[t]);

	for (t = 0; t < c_threads; ++t)
		pthread_join(threads[t], NULL);

	cg_enter_current(root);

	if (dst)
		cg_destroy(dst);
	if (src)
		cg_destroy(src);
	free(dst);
	free(src);
	return ret;
}

static void *migrating_thread_fn(void *arg)
{
	int g, i, n_iterations = 1000;
	char **grps = arg;
	char lines[3][PATH_MAX];

	for (g = 1; g < 3; ++g)
		snprintf(lines[g], sizeof(lines[g]), CG_PATH_FORMAT,
			 grps[g] + strlen(grps[0]));

	for (i = 0; i < n_iterations; ++i) {
		cg_enter_current_thread(grps[(i % 2) + 1]);

		if (proc_read_strstr(0, 1, "cgroup", lines[(i % 2) + 1]))
			return (void *)-1;
	}
	return NULL;
}
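/*
 * Sketch, not used by the tests: a whole process migrates by writing a PID
 * to "cgroup.procs", whereas a single thread (within a threaded subtree on
 * cgroup v2) moves by writing its TID to "cgroup.threads".
 * cg_enter_current_thread() above presumably wraps the latter; this raw
 * variant is an assumption-level illustration.
 */
static int __attribute__((unused)) move_tid_sketch(const char *cg, pid_t tid)
{
	char path[PATH_MAX], buf[64];
	int fd, ret = -1;

	snprintf(path, sizeof(path), "%s/cgroup.threads", cg);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	snprintf(buf, sizeof(buf), "%d", (int)tid);
	if (write(fd, buf, strlen(buf)) > 0)
		ret = 0;
	close(fd);
	return ret;
}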
/*
 * Test single thread migration.
 * Threaded cgroups allow successful migration of a thread.
 */
static int test_cgcore_thread_migration(const char *root)
{
	int ret = KSFT_FAIL;
	char *dom = NULL;
	char line[PATH_MAX];
	char *grps[3] = { (char *)root, NULL, NULL };
	pthread_t thr;
	void *retval;

	dom = cg_name(root, "cg_dom");
	grps[1] = cg_name(root, "cg_dom/cg_src");
	grps[2] = cg_name(root, "cg_dom/cg_dst");
	if (!grps[1] || !grps[2] || !dom)
		goto cleanup;

	if (cg_create(dom))
		goto cleanup;
	if (cg_create(grps[1]))
		goto cleanup;
	if (cg_create(grps[2]))
		goto cleanup;

	if (!cg_test_v1_named) {
		if (cg_write(grps[1], "cgroup.type", "threaded"))
			goto cleanup;
		if (cg_write(grps[2], "cgroup.type", "threaded"))
			goto cleanup;
	}

	if (cg_enter_current(grps[1]))
		goto cleanup;

	if (pthread_create(&thr, NULL, migrating_thread_fn, grps))
		goto cleanup;

	if (pthread_join(thr, &retval))
		goto cleanup;

	if (retval)
		goto cleanup;

	snprintf(line, sizeof(line), CG_PATH_FORMAT, grps[1] + strlen(grps[0]));
	if (proc_read_strstr(0, 1, "cgroup", line))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (grps[2])
		cg_destroy(grps[2]);
	if (grps[1])
		cg_destroy(grps[1]);
	if (dom)
		cg_destroy(dom);
	free(grps[2]);
	free(grps[1]);
	free(dom);
	return ret;
}

/*
 * cgroup migration permission check should be performed based on the
 * credentials at the time of open instead of write.
 */
static int test_cgcore_lesser_euid_open(const char *root)
{
	const uid_t test_euid = TEST_UID;
	int ret = KSFT_FAIL;
	char *cg_test_a = NULL, *cg_test_b = NULL;
	char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
	int cg_test_b_procs_fd = -1;
	uid_t saved_uid;

	cg_test_a = cg_name(root, "cg_test_a");
	cg_test_b = cg_name(root, "cg_test_b");

	if (!cg_test_a || !cg_test_b)
		goto cleanup;

	cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
	cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");

	if (!cg_test_a_procs || !cg_test_b_procs)
		goto cleanup;

	if (cg_create(cg_test_a) || cg_create(cg_test_b))
		goto cleanup;

	if (cg_enter_current(cg_test_a))
		goto cleanup;

	if (chown(cg_test_a_procs, test_euid, -1) ||
	    chown(cg_test_b_procs, test_euid, -1))
		goto cleanup;

	saved_uid = geteuid();
	if (seteuid(test_euid))
		goto cleanup;

	cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR);

	if (seteuid(saved_uid))
		goto cleanup;

	if (cg_test_b_procs_fd < 0)
		goto cleanup;

	if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (cg_test_b_procs_fd >= 0)
		close(cg_test_b_procs_fd);
	if (cg_test_b)
		cg_destroy(cg_test_b);
	if (cg_test_a)
		cg_destroy(cg_test_a);
	free(cg_test_b_procs);
	free(cg_test_a_procs);
	free(cg_test_b);
	free(cg_test_a);
	return ret;
}

struct lesser_ns_open_thread_arg {
	const char	*path;
	int		fd;
	int		err;
};

static int lesser_ns_open_thread_fn(void *arg)
{
	struct lesser_ns_open_thread_arg *targ = arg;

	targ->fd = open(targ->path, O_RDWR);
	targ->err = errno;
	return 0;
}
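/*
 * Sketch: because migration permissions are captured when "cgroup.procs"
 * is opened (see test_cgcore_lesser_euid_open() above and
 * test_cgcore_lesser_ns_open() below), a later, more privileged writer
 * still fails. Assumes "fd" was opened under the lesser credentials;
 * hypothetical helper for illustration only.
 */
static int __attribute__((unused)) write_self_expect_eacces_sketch(int fd)
{
	/* "0" means "the writing task itself" for cgroup.procs. */
	if (write(fd, "0", 1) >= 0)
		return -1;	/* unexpectedly allowed */
	return errno == EACCES ? 0 : -1;
}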
/*
 * cgroup migration permission check should be performed based on the cgroup
 * namespace at the time of open instead of write.
 */
static int test_cgcore_lesser_ns_open(const char *root)
{
	static char stack[65536];
	const uid_t test_euid = 65534;	/* usually nobody, any !root is fine */
	int ret = KSFT_FAIL;
	char *cg_test_a = NULL, *cg_test_b = NULL;
	char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
	int cg_test_b_procs_fd = -1;
	struct lesser_ns_open_thread_arg targ = { .fd = -1 };
	pid_t pid;
	int status;

	if (!nsdelegate)
		return KSFT_SKIP;

	cg_test_a = cg_name(root, "cg_test_a");
	cg_test_b = cg_name(root, "cg_test_b");

	if (!cg_test_a || !cg_test_b)
		goto cleanup;

	cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
	cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");

	if (!cg_test_a_procs || !cg_test_b_procs)
		goto cleanup;

	if (cg_create(cg_test_a) || cg_create(cg_test_b))
		goto cleanup;

	if (cg_enter_current(cg_test_b))
		goto cleanup;

	if (chown(cg_test_a_procs, test_euid, -1) ||
	    chown(cg_test_b_procs, test_euid, -1))
		goto cleanup;

	targ.path = cg_test_b_procs;
	pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack),
		    CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD,
		    &targ);
	if (pid < 0)
		goto cleanup;

	if (waitpid(pid, &status, 0) < 0)
		goto cleanup;

	if (!WIFEXITED(status))
		goto cleanup;

	cg_test_b_procs_fd = targ.fd;
	if (cg_test_b_procs_fd < 0)
		goto cleanup;

	if (cg_enter_current(cg_test_a))
		goto cleanup;

	if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (cg_test_b_procs_fd >= 0)
		close(cg_test_b_procs_fd);
	if (cg_test_b)
		cg_destroy(cg_test_b);
	if (cg_test_a)
		cg_destroy(cg_test_a);
	free(cg_test_b_procs);
	free(cg_test_a_procs);
	free(cg_test_b);
	free(cg_test_a);
	return ret;
}

static int setup_named_v1_root(char *root, size_t len, const char *name)
{
	char options[PATH_MAX];
	int r;

	r = snprintf(root, len, "/mnt/cg_selftest");
	if (r < 0 || (size_t)r >= len)
		return -1;

	r = snprintf(options, sizeof(options), "none,name=%s", name);
	if (r < 0 || (size_t)r >= sizeof(options))
		return -1;

	r = mkdir(root, 0755);
	if (r < 0 && errno != EEXIST)
		return r;

	r = mount("none", root, "cgroup", 0, options);
	if (r < 0)
		return r;

	return 0;
}

static void cleanup_named_v1_root(char *root)
{
	if (!cg_test_v1_named)
		return;
	umount(root);
	rmdir(root);
}

#define T(x) { x, #x }
struct corecg_test {
	int (*fn)(const char *root);
	const char *name;
} tests[] = {
	T(test_cgcore_internal_process_constraint),
	T(test_cgcore_top_down_constraint_enable),
	T(test_cgcore_top_down_constraint_disable),
	T(test_cgcore_no_internal_process_constraint_on_threads),
	T(test_cgcore_parent_becomes_threaded),
	T(test_cgcore_invalid_domain),
	T(test_cgcore_populated),
	T(test_cgcore_proc_migration),
	T(test_cgcore_thread_migration),
	T(test_cgcore_destroy),
	T(test_cgcore_lesser_euid_open),
	T(test_cgcore_lesser_ns_open),
};
#undef T
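/*
 * Sketch of how a unified-hierarchy (cgroup v2) mount can be recognized;
 * cg_find_unified_root() in cgroup_util presumably performs a similar
 * check. CGROUP2_SUPER_MAGIC is documented in statfs(2). Hypothetical
 * helper, not used by main() below.
 */
#include <sys/vfs.h>
#include <linux/magic.h>

static int __attribute__((unused)) is_cgroup2_sketch(const char *path)
{
	struct statfs fs;

	if (statfs(path, &fs) < 0)
		return 0;
	return fs.f_type == CGROUP2_SUPER_MAGIC;
}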
int main(int argc, char *argv[])
{
	char root[PATH_MAX];
	int i, ret = EXIT_SUCCESS;

	if (cg_find_unified_root(root, sizeof(root), &nsdelegate)) {
		if (setup_named_v1_root(root, sizeof(root), CG_NAMED_NAME))
			ksft_exit_skip("cgroup v2 isn't mounted and a named v1 hierarchy could not be set up\n");
		cg_test_v1_named = true;
		goto post_v2_setup;
	}

	if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
		if (cg_write(root, "cgroup.subtree_control", "+memory"))
			ksft_exit_skip("Failed to enable memory controller\n");

post_v2_setup:
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		switch (tests[i].fn(root)) {
		case KSFT_PASS:
			ksft_test_result_pass("%s\n", tests[i].name);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("%s\n", tests[i].name);
			break;
		default:
			ret = EXIT_FAILURE;
			ksft_test_result_fail("%s\n", tests[i].name);
			break;
		}
	}

	cleanup_named_v1_root(root);
	return ret;
}
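/*
 * Usage sketch (assuming the standard kselftest layout; adjust paths as
 * needed):
 *
 *   make -C tools/testing/selftests TARGETS=cgroup run_tests
 *
 * or build and run this binary directly from
 * tools/testing/selftests/cgroup/ as root.
 */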