/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020-2021 Kyle Evans <kevans@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/jail.h>
#include <sys/procdesc.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/wait.h>

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <atf-c.h>

#define	SP_PARENT	0
#define	SP_CHILD	1

struct jail_test_info {
	cpuset_t	jail_tidmask;
	cpusetid_t	jail_cpuset;
	cpusetid_t	jail_child_cpuset;
};

struct jail_test_cb_params {
	struct jail_test_info	info;
	cpuset_t		mask;
	cpusetid_t		rootid;
	cpusetid_t		setid;
};

typedef void (*jail_test_cb)(struct jail_test_cb_params *);

/* Exit codes used by forked children to report failures back to the parent. */
#define	FAILURE_JAIL		42
#define	FAILURE_MASK		43
#define	FAILURE_JAILSET		44
#define	FAILURE_PIDSET		45
#define	FAILURE_SEND		46
#define	FAILURE_DEADLK		47
#define	FAILURE_ATTACH		48
#define	FAILURE_BADAFFIN	49
#define	FAILURE_SUCCESS		50

static const char *
do_jail_errstr(int error)
{

	switch (error) {
	case FAILURE_JAIL:
		return ("jail_set(2) failed");
	case FAILURE_MASK:
		return ("Failed to get the thread cpuset mask");
	case FAILURE_JAILSET:
		return ("Failed to get the jail setid");
	case FAILURE_PIDSET:
		return ("Failed to get the pid setid");
	case FAILURE_SEND:
		return ("Failed to send(2) cpuset information");
	case FAILURE_DEADLK:
		return ("Deadlock hit trying to attach to jail");
	case FAILURE_ATTACH:
		return ("jail_attach(2) failed");
	case FAILURE_BADAFFIN:
		return ("Unexpected post-attach affinity");
	case FAILURE_SUCCESS:
		return ("jail_attach(2) succeeded, but should have failed.");
	default:
		return (NULL);
	}
}
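
/*
 * Skip the test unless at least ncpu CPUs are available to the test process;
 * the current cpuset-level mask is returned for the caller's use.
 */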
static void
skip_ltncpu(int ncpu, cpuset_t *mask)
{

	CPU_ZERO(mask);
	ATF_REQUIRE_EQ(0, cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID,
	    -1, sizeof(*mask), mask));
	if (CPU_COUNT(mask) < ncpu)
		atf_tc_skip("Test requires %d or more cores.", ncpu);
}

ATF_TC(newset);
ATF_TC_HEAD(newset, tc)
{
	atf_tc_set_md_var(tc, "descr", "Test cpuset(2)");
}
ATF_TC_BODY(newset, tc)
{
	cpusetid_t nsetid, setid, qsetid;

	/* Obtain our initial set id. */
	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_TID, -1,
	    &setid));

	/* Create a new one. */
	ATF_REQUIRE_EQ(0, cpuset(&nsetid));
	ATF_CHECK(nsetid != setid);

	/* Query id again, make sure it's equal to the one we just got. */
	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_TID, -1,
	    &qsetid));
	ATF_CHECK_EQ(nsetid, qsetid);
}

ATF_TC(transient);
ATF_TC_HEAD(transient, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test that transient cpusets are freed.");
}
ATF_TC_BODY(transient, tc)
{
	cpusetid_t isetid, scratch, setid;

	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
	    &isetid));

	ATF_REQUIRE_EQ(0, cpuset(&setid));
	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_CPUSET,
	    setid, &scratch));

	/*
	 * Return back to our initial cpuset; the kernel should free the cpuset
	 * we just created.
	 */
	ATF_REQUIRE_EQ(0, cpuset_setid(CPU_WHICH_PID, -1, isetid));
	ATF_REQUIRE_EQ(-1, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_CPUSET,
	    setid, &scratch));
	ATF_CHECK_EQ(ESRCH, errno);
}

ATF_TC(deadlk);
ATF_TC_HEAD(deadlk, tc)
{
	atf_tc_set_md_var(tc, "descr", "Test against disjoint cpusets.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(deadlk, tc)
{
	cpusetid_t setid;
	cpuset_t dismask, mask, omask;
	int fcpu, i, found, ncpu, second;

	/* Make sure we have 3 cpus, so we test partial overlap. */
	skip_ltncpu(3, &omask);

	ATF_REQUIRE_EQ(0, cpuset(&setid));
	CPU_ZERO(&mask);
	CPU_ZERO(&dismask);
	CPU_COPY(&omask, &mask);
	CPU_COPY(&omask, &dismask);
	fcpu = CPU_FFS(&mask);
	ncpu = CPU_COUNT(&mask);

	/*
	 * Turn off all but the first two for mask, turn off the first for
	 * dismask and turn them all off for both after the third.
	 */
	for (i = fcpu - 1, found = 0; i < CPU_MAXSIZE && found != ncpu; i++) {
		if (CPU_ISSET(i, &omask)) {
			found++;
			if (found == 1) {
				CPU_CLR(i, &dismask);
			} else if (found == 2) {
				second = i;
			} else if (found >= 3) {
				CPU_CLR(i, &mask);
				if (found > 3)
					CPU_CLR(i, &dismask);
			}
		}
	}

	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID,
	    -1, sizeof(mask), &mask));

	/* Must be a strict subset! */
	ATF_REQUIRE_EQ(-1, cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    -1, sizeof(dismask), &dismask));
	ATF_REQUIRE_EQ(EINVAL, errno);

	/*
	 * We'll set our anonymous set to the 0,1 set that currently matches
	 * the process.  If we then set the process to the 1,2 set that's in
	 * dismask, we should then personally be restricted down to the single
	 * overlapping CPU.
	 */
	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    -1, sizeof(mask), &mask));
	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID,
	    -1, sizeof(dismask), &dismask));
	ATF_REQUIRE_EQ(0, cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    -1, sizeof(mask), &mask));
	ATF_REQUIRE_EQ(1, CPU_COUNT(&mask));
	ATF_REQUIRE(CPU_ISSET(second, &mask));

	/*
	 * Finally, clearing the overlap and attempting to set the process
	 * cpuset to a completely disjoint mask should fail, because this
	 * process will then not have anything to run on.
	 */
	CPU_CLR(second, &dismask);
	ATF_REQUIRE_EQ(-1, cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID,
	    -1, sizeof(dismask), &dismask));
	ATF_REQUIRE_EQ(EDEADLK, errno);
}
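
/*
 * Child-side helper for do_jail_test(): create a uniquely named jail
 * ("cpuset_<pid>") and attach to it, record the thread's affinity mask along
 * with its root and current cpuset ids, then send that back to the parent
 * over the socketpair.  Returns 0 on success or one of the FAILURE_* codes,
 * which the parent maps to a message via do_jail_errstr().
 */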
static int
do_jail(int sock)
{
	struct jail_test_info info;
	struct iovec iov[2];
	char *name;
	int error;

	if (asprintf(&name, "cpuset_%d", getpid()) == -1)
		_exit(42);

	iov[0].iov_base = "name";
	iov[0].iov_len = 5;

	iov[1].iov_base = name;
	iov[1].iov_len = strlen(name) + 1;

	if (jail_set(iov, 2, JAIL_CREATE | JAIL_ATTACH) < 0)
		return (FAILURE_JAIL);

	/* Record parameters, kick them over, then make a swift exit. */
	CPU_ZERO(&info.jail_tidmask);
	error = cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    -1, sizeof(info.jail_tidmask), &info.jail_tidmask);
	if (error != 0)
		return (FAILURE_MASK);

	error = cpuset_getid(CPU_LEVEL_ROOT, CPU_WHICH_TID, -1,
	    &info.jail_cpuset);
	if (error != 0)
		return (FAILURE_JAILSET);
	error = cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_TID, -1,
	    &info.jail_child_cpuset);
	if (error != 0)
		return (FAILURE_PIDSET);
	if (send(sock, &info, sizeof(info), 0) != sizeof(info))
		return (FAILURE_SEND);
	return (0);
}
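
/*
 * Common harness for the jail_attach_* tests.  The optional prologue runs in
 * the parent before forking (e.g., to tweak affinity or skip the test), the
 * forked child jails itself via do_jail() and reports its cpuset information,
 * and the epilogue then validates that information in the parent.  With
 * newset, the parent first moves itself into a freshly created cpuset.
 */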
static void
do_jail_test(int ncpu, bool newset, jail_test_cb prologue,
    jail_test_cb epilogue)
{
	struct jail_test_cb_params cbp;
	const char *errstr;
	pid_t pid;
	int error, sock, sockpair[2], status;

	memset(&cbp.info, '\0', sizeof(cbp.info));

	skip_ltncpu(ncpu, &cbp.mask);

	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_ROOT, CPU_WHICH_PID, -1,
	    &cbp.rootid));
	if (newset)
		ATF_REQUIRE_EQ(0, cpuset(&cbp.setid));
	else
		ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_PID,
		    -1, &cbp.setid));
	/* Special hack for prison0; it uses cpuset 1 as the root. */
	if (cbp.rootid == 0)
		cbp.rootid = 1;

	/* Not every test needs early setup. */
	if (prologue != NULL)
		(*prologue)(&cbp);

	ATF_REQUIRE_EQ(0, socketpair(PF_UNIX, SOCK_STREAM, 0, sockpair));
	ATF_REQUIRE((pid = fork()) != -1);

	if (pid == 0) {
		/* Child */
		close(sockpair[SP_PARENT]);
		sock = sockpair[SP_CHILD];

		_exit(do_jail(sock));
	} else {
		/* Parent */
		sock = sockpair[SP_PARENT];
		close(sockpair[SP_CHILD]);

		while ((error = waitpid(pid, &status, 0)) == -1 &&
		    errno == EINTR) {
		}

		ATF_REQUIRE_EQ(sizeof(cbp.info), recv(sock, &cbp.info,
		    sizeof(cbp.info), 0));

		/* Sanity check the exit info. */
		ATF_REQUIRE_EQ(pid, error);
		ATF_REQUIRE(WIFEXITED(status));
		if (WEXITSTATUS(status) != 0) {
			errstr = do_jail_errstr(WEXITSTATUS(status));
			if (errstr != NULL)
				atf_tc_fail("%s", errstr);
			else
				atf_tc_fail("Unknown error '%d'",
				    WEXITSTATUS(status));
		}

		epilogue(&cbp);
	}
}

static void
jail_attach_mutate_pro(struct jail_test_cb_params *cbp)
{
	cpuset_t *mask;
	int count;

	mask = &cbp->mask;

	/* Knock out the first cpu. */
	count = CPU_COUNT(mask);
	CPU_CLR(CPU_FFS(mask) - 1, mask);
	ATF_REQUIRE_EQ(count - 1, CPU_COUNT(mask));
	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    -1, sizeof(*mask), mask));
}

static void
jail_attach_newbase_epi(struct jail_test_cb_params *cbp)
{
	struct jail_test_info *info;
	cpuset_t *mask;

	info = &cbp->info;
	mask = &cbp->mask;

	/*
	 * The rootid test has been thrown in because a bug was discovered
	 * where any newly derived cpuset during attach would be parented to
	 * the wrong cpuset.  Otherwise, we should observe that a new cpuset
	 * has been created for this process.
	 */
	ATF_REQUIRE(info->jail_cpuset != cbp->rootid);
	ATF_REQUIRE(info->jail_cpuset != cbp->setid);
	ATF_REQUIRE(info->jail_cpuset != info->jail_child_cpuset);
	ATF_REQUIRE_EQ(0, CPU_CMP(mask, &info->jail_tidmask));
}

ATF_TC(jail_attach_newbase);
ATF_TC_HEAD(jail_attach_newbase, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test jail attachment effect on affinity with a new base cpuset.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(jail_attach_newbase, tc)
{

	/* Need >= 2 cpus to test restriction. */
	do_jail_test(2, true, &jail_attach_mutate_pro,
	    &jail_attach_newbase_epi);
}

ATF_TC(jail_attach_newbase_plain);
ATF_TC_HEAD(jail_attach_newbase_plain, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test jail attachment effect on affinity with a new, unmodified base cpuset.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(jail_attach_newbase_plain, tc)
{

	do_jail_test(2, true, NULL, &jail_attach_newbase_epi);
}

/*
 * Generic epilogue for tests that are expecting to use the jail's root cpuset
 * with their own mask, whether that's been modified or not.
 */
static void
jail_attach_jset_epi(struct jail_test_cb_params *cbp)
{
	struct jail_test_info *info;
	cpuset_t *mask;

	info = &cbp->info;
	mask = &cbp->mask;

	ATF_REQUIRE(info->jail_cpuset != cbp->setid);
	ATF_REQUIRE_EQ(info->jail_cpuset, info->jail_child_cpuset);
	ATF_REQUIRE_EQ(0, CPU_CMP(mask, &info->jail_tidmask));
}

ATF_TC(jail_attach_prevbase);
ATF_TC_HEAD(jail_attach_prevbase, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test jail attachment effect on affinity without a new base.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(jail_attach_prevbase, tc)
{

	do_jail_test(2, false, &jail_attach_mutate_pro, &jail_attach_jset_epi);
}

static void
jail_attach_plain_pro(struct jail_test_cb_params *cbp)
{

	if (cbp->setid != cbp->rootid)
		atf_tc_skip("Must be running with the root cpuset.");
}

ATF_TC(jail_attach_plain);
ATF_TC_HEAD(jail_attach_plain, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test jail attachment effect on affinity without specialization.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(jail_attach_plain, tc)
{

	do_jail_test(1, false, &jail_attach_plain_pro, &jail_attach_jset_epi);
}
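
/*
 * Helper for the jail_attach_disjoint test: the pdfork(2)'d child creates and
 * attaches to a new jail, writes the jid back over the socketpair so the
 * parent can proceed, then spins; it is reaped when the process descriptor is
 * closed at test exit, since pdfork(2) without PD_DAEMON kills the child on
 * the last close.
 */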
static int
jail_attach_disjoint_newjail(int fd)
{
	struct iovec iov[2];
	char *name;
	int jid;

	if (asprintf(&name, "cpuset_%d", getpid()) == -1)
		_exit(42);

	iov[0].iov_base = "name";
	iov[0].iov_len = sizeof("name");

	iov[1].iov_base = name;
	iov[1].iov_len = strlen(name) + 1;

	if ((jid = jail_set(iov, 2, JAIL_CREATE | JAIL_ATTACH)) < 0)
		return (FAILURE_JAIL);

	/* Signal that we're ready. */
	write(fd, &jid, sizeof(jid));
	for (;;) {
		/* Spin */
	}
}

static int
wait_jail(int fd, int pfd)
{
	fd_set lset;
	struct timeval tv;
	int error, jid, maxfd;

	FD_ZERO(&lset);
	FD_SET(fd, &lset);
	FD_SET(pfd, &lset);

	maxfd = MAX(fd, pfd);

	tv.tv_sec = 5;
	tv.tv_usec = 0;

	/* Wait for jid to be written. */
	do {
		error = select(maxfd + 1, &lset, NULL, NULL, &tv);
	} while (error == -1 && errno == EINTR);

	if (error == 0) {
		atf_tc_fail("Jail creator did not respond in time.");
	}

	ATF_REQUIRE_MSG(error > 0, "Unexpected error %d from select()", errno);

	if (FD_ISSET(pfd, &lset)) {
		/* Process died */
		atf_tc_fail("Jail creator died unexpectedly.");
	}

	ATF_REQUIRE(FD_ISSET(fd, &lset));
	ATF_REQUIRE_EQ(sizeof(jid), recv(fd, &jid, sizeof(jid), 0));

	return (jid);
}
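
/*
 * Child and parent halves of the attach check: the forked child attaches to
 * the given jail and verifies its post-attach mask against expected_mask,
 * reporting problems via the FAILURE_* exit codes; the parent in try_attach()
 * waits for the child and translates any failure into a test failure.
 */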
static int
try_attach_child(int jid, cpuset_t *expected_mask)
{
	cpuset_t mask;

	if (jail_attach(jid) == -1) {
		if (errno == EDEADLK)
			return (FAILURE_DEADLK);
		return (FAILURE_ATTACH);
	}

	if (expected_mask == NULL)
		return (FAILURE_SUCCESS);

	/* If we had an expected mask, check it against the new process mask. */
	CPU_ZERO(&mask);
	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID,
	    -1, sizeof(mask), &mask) != 0) {
		return (FAILURE_MASK);
	}

	if (CPU_CMP(expected_mask, &mask) != 0)
		return (FAILURE_BADAFFIN);

	return (0);
}

static void
try_attach(int jid, cpuset_t *expected_mask)
{
	const char *errstr;
	pid_t pid;
	int error, fail, status;

	ATF_REQUIRE(expected_mask != NULL);
	ATF_REQUIRE((pid = fork()) != -1);
	if (pid == 0)
		_exit(try_attach_child(jid, expected_mask));

	while ((error = waitpid(pid, &status, 0)) == -1 && errno == EINTR) {
		/* Try again. */
	}

	/* Sanity check the exit info. */
	ATF_REQUIRE_EQ(pid, error);
	ATF_REQUIRE(WIFEXITED(status));
	if ((fail = WEXITSTATUS(status)) != 0) {
		errstr = do_jail_errstr(fail);
		if (errstr != NULL)
			atf_tc_fail("%s", errstr);
		else
			atf_tc_fail("Unknown error '%d'", WEXITSTATUS(status));
	}
}

ATF_TC(jail_attach_disjoint);
ATF_TC_HEAD(jail_attach_disjoint, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test root attachment into completely disjoint jail cpuset.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(jail_attach_disjoint, tc)
{
	cpuset_t smask, jmask;
	int sockpair[2];
	cpusetid_t setid;
	pid_t pid;
	int fcpu, jid, pfd, sock, scpu;

	ATF_REQUIRE_EQ(0, cpuset(&setid));

	skip_ltncpu(2, &jmask);
	fcpu = CPU_FFS(&jmask) - 1;
	ATF_REQUIRE_EQ(0, socketpair(PF_UNIX, SOCK_STREAM, 0, sockpair));

	/* We'll wait on the procdesc, too, so we can fail faster if it dies. */
	ATF_REQUIRE((pid = pdfork(&pfd, 0)) != -1);

	if (pid == 0) {
		/* First child sets up the jail. */
		sock = sockpair[SP_CHILD];
		close(sockpair[SP_PARENT]);

		_exit(jail_attach_disjoint_newjail(sock));
	}

	close(sockpair[SP_CHILD]);
	sock = sockpair[SP_PARENT];

	ATF_REQUIRE((jid = wait_jail(sock, pfd)) > 0);

	/*
	 * This process will be clamped down to the first cpu, while the jail
	 * will simply have the first CPU removed to make it a completely
	 * disjoint operation.
	 */
	CPU_ZERO(&smask);
	CPU_SET(fcpu, &smask);
	CPU_CLR(fcpu, &jmask);

	/*
	 * We'll test with the first and second cpu set as well.  Only the
	 * second cpu should be used.
	 */
	scpu = CPU_FFS(&jmask) - 1;

	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_ROOT, CPU_WHICH_JAIL,
	    jid, sizeof(jmask), &jmask));
	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_CPUSET,
	    setid, sizeof(smask), &smask));

	try_attach(jid, &jmask);

	CPU_SET(scpu, &smask);
	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_CPUSET,
	    setid, sizeof(smask), &smask));

	CPU_CLR(fcpu, &smask);
	try_attach(jid, &smask);
}
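
/*
 * Parent-assignment check: create a new set, restrict the thread's anonymous
 * mask, then move the process back to its original set and verify that the
 * thread now reports that original set id.
 */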
ATF_TC(badparent);
ATF_TC_HEAD(badparent, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test parent assignment when assigning a new cpuset.");
}
ATF_TC_BODY(badparent, tc)
{
	cpuset_t mask;
	cpusetid_t finalsetid, origsetid, setid;

	/* Need to mask off at least one CPU. */
	skip_ltncpu(2, &mask);

	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_TID, -1,
	    &origsetid));

	ATF_REQUIRE_EQ(0, cpuset(&setid));

	/*
	 * Mask off the first CPU, then we'll reparent ourselves to our
	 * original set.
	 */
	CPU_CLR(CPU_FFS(&mask) - 1, &mask);
	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    -1, sizeof(mask), &mask));

	ATF_REQUIRE_EQ(0, cpuset_setid(CPU_WHICH_PID, -1, origsetid));
	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_TID, -1,
	    &finalsetid));

	ATF_REQUIRE_EQ(finalsetid, origsetid);
}

ATF_TP_ADD_TCS(tp)
{

	ATF_TP_ADD_TC(tp, newset);
	ATF_TP_ADD_TC(tp, transient);
	ATF_TP_ADD_TC(tp, deadlk);
	ATF_TP_ADD_TC(tp, jail_attach_newbase);
	ATF_TP_ADD_TC(tp, jail_attach_newbase_plain);
	ATF_TP_ADD_TC(tp, jail_attach_prevbase);
	ATF_TP_ADD_TC(tp, jail_attach_plain);
	ATF_TP_ADD_TC(tp, jail_attach_disjoint);
	ATF_TP_ADD_TC(tp, badparent);
	return (atf_no_error());
}