/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/capsicum.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
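 *
 * As an illustrative sketch only (a userland consumer of the
 * cpuset_setaffinity(2) interface, not code from this file, assuming
 * <sys/cpuset.h> and <err.h>), a thread that wants such a private mask
 * limited to CPU 2 could do:
 *
 *	cpuset_t m;
 *
 *	CPU_ZERO(&m);
 *	CPU_SET(2, &m);
 *	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(m), &m) != 0)
 *		err(1, "cpuset_setaffinity");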
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero, *cpuset_default;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD,
    SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");

cpuset_t *cpuset_root;
cpuset_t cpuset_domain[MAXMEMDOM];

/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
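 *
 * The typical pattern, as used by cpuset_setproc() below, is to queue sets
 * on a local list with cpuset_rel_defer() while thread locks are held and
 * to run cpuset_rel_complete() on each queued entry once the locks have
 * been dropped.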
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
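 *
 * 'check_mask' is 0 at the top level because cpuset_modify() replaces that
 * set's mask outright; recursive calls pass 1 so each child is checked for
 * overlap with, and further narrowed by, the mask inherited from above.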
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int check_mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (check_mask) {
		if (!CPU_OVERLAP(&set->cs_mask, mask))
			return (EDEADLK);
		CPU_COPY(&set->cs_mask, &newmask);
		CPU_AND(&newmask, mask);
	} else
		CPU_COPY(mask, &newmask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail but may still allow to
	 * change child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask, 0);
	if (error)
		goto out;
	CPU_COPY(mask, &set->cs_mask);
	cpuset_update(set, mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
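 *
 * For example, cpuset_which(CPU_WHICH_PID, -1, &p, &td, &set) returns with
 * 'p' set to curproc (locked) and 'td' set to its first thread; the caller
 * is responsible for the matching PROC_UNLOCK().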
 */
int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		td = tdfind(id, -1);
		if (td == NULL)
			return (ESRCH);
		p = td->td_proc;
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
	case CPU_WHICH_INTRHANDLER:
	case CPU_WHICH_ITHREAD:
	case CPU_WHICH_DOMAIN:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, const cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;

	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Return a string representing a valid layout for a cpuset_t object.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
char *
cpusetobj_strprint(char *buf, const cpuset_t *set)
{
	char *tbuf;
	size_t i, bytesp, bufsiz;

	tbuf = buf;
	bytesp = 0;
	bufsiz = CPUSETBUFSIZ;

	for (i = 0; i < (_NCPUWORDS - 1); i++) {
		bytesp = snprintf(tbuf, bufsiz, "%lx,", set->__bits[i]);
		bufsiz -= bytesp;
		tbuf += bytesp;
	}
	snprintf(tbuf, bufsiz, "%lx", set->__bits[_NCPUWORDS - 1]);
	return (buf);
}

/*
 * Build a valid cpuset_t object from a string representation.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
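 *
 * As an illustrative example, on a kernel where _NCPUWORDS is 4 a set
 * containing only CPU 1 is rendered by cpusetobj_strprint() as "2,0,0,0";
 * cpusetobj_strscan() accepts both that form and the shorter "2".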
 */
int
cpusetobj_strscan(cpuset_t *set, const char *buf)
{
	u_int nwords;
	int i, ret;

	if (strlen(buf) > CPUSETBUFSIZ - 1)
		return (-1);

	/* Allow to pass a shorter version of the mask when necessary. */
	nwords = 1;
	for (i = 0; buf[i] != '\0'; i++)
		if (buf[i] == ',')
			nwords++;
	if (nwords > _NCPUWORDS)
		return (-1);

	CPU_ZERO(set);
	for (i = 0; i < (nwords - 1); i++) {
		ret = sscanf(buf, "%lx,", &set->__bits[i]);
		if (ret == 0 || ret == -1)
			return (-1);
		buf = strstr(buf, ",");
		if (buf == NULL)
			return (-1);
		buf++;
	}
	ret = sscanf(buf, "%lx", &set->__bits[nwords - 1]);
	if (ret == 0 || ret == -1)
		return (-1);
	return (0);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Apply new cpumask to the ithread.
 */
int
cpuset_setithread(lwpid_t id, int cpu)
{
	struct cpuset *nset, *rset;
	struct cpuset *parent, *old_set;
	struct thread *td;
	struct proc *p;
	cpusetid_t cs_id;
	cpuset_t mask;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	rset = uma_zalloc(cpuset_zone, M_WAITOK);
	cs_id = CPUSET_INVALID;

	CPU_ZERO(&mask);
	if (cpu == NOCPU)
		CPU_COPY(cpuset_root, &mask);
	else
		CPU_SET(cpu, &mask);

	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &old_set);
	if (error != 0 || ((cs_id = alloc_unr(cpuset_unr)) == CPUSET_INVALID))
		goto out;

	/* cpuset_which() returns with PROC_LOCK held. */
	old_set = td->td_cpuset;

	if (cpu == NOCPU) {

		/*
		 * roll back to default set.  We're not using cpuset_shadow()
		 * here because we can fail CPU_SUBSET() check.  This can
		 * happen if default set does not contain all CPUs.
		 */
		error = _cpuset_create(nset, cpuset_default, &mask,
		    CPUSET_INVALID);

		goto applyset;
	}

	if (old_set->cs_id == 1 || (old_set->cs_id == CPUSET_INVALID &&
	    old_set->cs_parent->cs_id == 1)) {

		/*
		 * Current set is either default (1) or
		 * shadowed version of default set.
		 *
		 * Allocate new root set to be able to shadow it
		 * with any mask.
		 */
		error = _cpuset_create(rset, cpuset_zero,
		    &cpuset_zero->cs_mask, cs_id);
		if (error != 0) {
			PROC_UNLOCK(p);
			goto out;
		}
		rset->cs_flags |= CPU_SET_ROOT;
		parent = rset;
		rset = NULL;
		cs_id = CPUSET_INVALID;
	} else {
		/* Assume existing set was already allocated by previous call */
		parent = old_set;
		old_set = NULL;
	}

	error = cpuset_shadow(parent, nset, &mask);
applyset:
	if (error == 0) {
		thread_lock(td);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
		nset = NULL;
	} else
		old_set = NULL;
	PROC_UNLOCK(p);
	if (old_set != NULL)
		cpuset_rel(old_set);
out:
	if (nset != NULL)
		uma_zfree(cpuset_zone, nset);
	if (rset != NULL)
		uma_zfree(cpuset_zone, rset);
	if (cs_id != CPUSET_INVALID)
		free_unr(cpuset_unr, cs_id);
	return (error);
}


/*
 * Creates system-wide cpusets and the cpuset for thread0 including two
 * sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error, i;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);

	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_FILL(&set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;

	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	cpuset_default = set;

	/*
	 * Initialize the unit allocator.  0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	/*
	 * If MD code has not initialized per-domain cpusets, place all
	 * CPUs in domain 0.
	 */
	for (i = 0; i < MAXMEMDOM; i++)
		if (!CPU_EMPTY(&cpuset_domain[i]))
			goto domains_set;
	CPU_COPY(&all_cpus, &cpuset_domain[0]);
domains_set:

	return (set);
}

/*
 * Create a cpuset, which would be cpuset_create() but
 * mark the new 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp locked with a reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	mask = all_cpus;
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{

	return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid));
}

int
kern_cpuset_setid(struct thread *td, cpuwhich_t which,
    id_t id, cpusetid_t setid)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{

	return (kern_cpuset_getid(td, uap->level, uap->which, uap->id,
	    uap->setid));
}

int
kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, cpusetid_t *setid)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t tmpid;
	int error;

	if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
	case CPU_WHICH_INTRHANDLER:
	case CPU_WHICH_ITHREAD:
	case CPU_WHICH_DOMAIN:
		return (EINVAL);
	}
	switch (level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	tmpid = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&tmpid, setid, sizeof(tmpid));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{

	return (kern_cpuset_getaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask));
}

int
kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, cpuset_t *maskp)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	/* In Capability mode, you can only get your own CPU set. */
	if (IN_CAPABILITY_MODE(td)) {
		if (level != CPU_LEVEL_WHICH)
			return (ECAPMODE);
		if (which != CPU_WHICH_TID && which != CPU_WHICH_PID)
			return (ECAPMODE);
		if (id != -1)
			return (ECAPMODE);
	}
	size = cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_getaffinity(id, which, mask);
			break;
		case CPU_WHICH_DOMAIN:
			if (id < 0 || id >= MAXMEMDOM)
				error = ESRCH;
			else
				CPU_COPY(&cpuset_domain[id], mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, maskp, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{

	return (kern_cpuset_setaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask));
}

int
kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, const cpuset_t *maskp)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	/* In Capability mode, you can only set your own CPU set. */
	if (IN_CAPABILITY_MODE(td)) {
		if (level != CPU_LEVEL_WHICH)
			return (ECAPMODE);
		if (which != CPU_WHICH_TID && which != CPU_WHICH_PID)
			return (ECAPMODE);
		if (id != -1)
			return (ECAPMODE);
	}
	mask = malloc(cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(maskp, mask, cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
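	 * A caller may pass a cpusetsize larger than the kernel's cpuset_t;
	 * in that case every byte beyond sizeof(cpuset_t) must be zero, since
	 * CPUs outside the kernel's CPU_SETSIZE cannot be honored.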
	 */
	if (cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}

	}
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_setaffinity(id, which, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB
void
ddb_display_cpuset(const cpuset_t *set)
{
	int cpu, once;

	for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (CPU_ISSET(cpu, set)) {
			if (once == 0) {
				db_printf("%d", cpu);
				once = 1;
			} else
				db_printf(",%d", cpu);
		}
	}
	if (once == 0)
		db_printf("<none>");
}

DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf(" mask=");
		ddb_display_cpuset(&set->cs_mask);
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */