/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set; by default this is set 1.
 * Each thread may further restrict the cpus it may run on to a subset of
 * this named set.  This creates an anonymous set which other threads and
 * processes may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred
 * to as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base'
 * here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new
 * mask.  Modifying a pid or tid's mask applies only to that tid but must
 * still exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
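
/*
 * Illustrative only (a sketch, not part of the kernel build): a minimal
 * userland program following the simple-application pattern described
 * above.  It assumes the cpuset_getaffinity(2)/cpuset_setaffinity(2)
 * system calls and the CPU_* macros from <sys/cpuset.h>; error handling
 * is abbreviated.  The program asks which cpus are available to the
 * current process' base set and then binds the calling thread to the
 * first one it finds:
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *	#include <err.h>
 *
 *	int
 *	main(void)
 *	{
 *		cpuset_t mask;
 *		int cpu;
 *
 *		CPU_ZERO(&mask);
 *		if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *		    sizeof(mask), &mask) != 0)
 *			err(1, "cpuset_getaffinity");
 *		for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
 *			if (CPU_ISSET(cpu, &mask))
 *				break;
 *		if (cpu == CPU_SETSIZE)
 *			errx(1, "no cpus in base set");
 *		CPU_ZERO(&mask);
 *		CPU_SET(cpu, &mask);
 *		if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *		    sizeof(mask), &mask) != 0)
 *			err(1, "cpuset_setaffinity");
 *		return (0);
 *	}
 */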

static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD,
    0, sizeof(cpuset_t), "sizeof(cpuset_t)");

cpuset_t *cpuset_root;

/*
 * Acquire a reference to a cpuset; all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail but may still allow to
	 * change child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask);
	if (error)
		goto out;
	cpuset_update(set, mask);
	CPU_COPY(mask, &set->cs_mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		td = tdfind(id, -1);
		if (td == NULL)
			return (ESRCH);
		p = td->td_proc;
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent, otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	set->cs_mask.__bits[0] = -1;
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator.  0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}

/*
 * Create a cpuset, which would be cpuset_create() but
 * mark the new 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp locked with a reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
#ifdef SMP
	mask.__bits[0] = all_cpus;
#else
	mask.__bits[0] = 1;
#endif
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
		return (EINVAL);
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}

	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(uap->which, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB
DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;
	int cpu, once;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf(" mask=");
		for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
			if (CPU_ISSET(cpu, &set->cs_mask)) {
				if (once == 0) {
					db_printf("%d", cpu);
					once = 1;
				} else
					db_printf(",%d", cpu);
			}
		}
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */
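
/*
 * Illustrative only (userland sketch, not part of the kernel build): one
 * way the set-management syscalls implemented in this file fit together,
 * assuming the cpuset(2), cpuset_setid(2) and cpuset_getid(2) wrappers
 * declared in <sys/cpuset.h>.  'other' stands for a hypothetical pid of
 * an existing process and error handling is omitted.  cpuset(2) creates
 * a new base set as a child of the caller's root and moves the calling
 * process into it; cpuset_setid(2) then moves 'other' into the same set,
 * which cpuset_getid(2) can confirm:
 *
 *	cpusetid_t setid;
 *	pid_t other;
 *
 *	cpuset(&setid);
 *	cpuset_setid(CPU_WHICH_PID, other, setid);
 *	cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_PID, other, &setid);
 */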