/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/capsicum.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
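/*
 * Illustrative sketch (not part of the kernel build): roughly how a simple
 * userland program could use the api described above, first querying the
 * cpus permitted by its base set and then installing an anonymous mask for
 * the current thread.  Error handling is omitted for brevity.
 *
 *	cpuset_t mask;
 *	int cpu;
 *
 *	cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);
 *	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
 *		if (CPU_ISSET(cpu, &mask))
 *			break;
 *	CPU_ZERO(&mask);
 *	CPU_SET(cpu, &mask);
 *	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);
 */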
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero, *cpuset_default;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD,
    SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");

cpuset_t *cpuset_root;
cpuset_t cpuset_domain[MAXMEMDOM];

/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number cannot be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int check_mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (check_mask) {
		if (!CPU_OVERLAP(&set->cs_mask, mask))
			return (EDEADLK);
		CPU_COPY(&set->cs_mask, &newmask);
		CPU_AND(&newmask, mask);
	} else
		CPU_COPY(mask, &newmask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail,
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail, but may still allow
	 * changing child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask, 0);
	if (error)
		goto out;
	CPU_COPY(mask, &set->cs_mask);
	cpuset_update(set, mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		td = tdfind(id, -1);
		if (td == NULL)
			return (ESRCH);
		p = td->td_proc;
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
	case CPU_WHICH_DOMAIN:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, const cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;

	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Return a string representing a valid layout for a cpuset_t object.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
char *
cpusetobj_strprint(char *buf, const cpuset_t *set)
{
	char *tbuf;
	size_t i, bytesp, bufsiz;

	tbuf = buf;
	bytesp = 0;
	bufsiz = CPUSETBUFSIZ;

	for (i = 0; i < (_NCPUWORDS - 1); i++) {
		bytesp = snprintf(tbuf, bufsiz, "%lx,", set->__bits[i]);
		bufsiz -= bytesp;
		tbuf += bytesp;
	}
	snprintf(tbuf, bufsiz, "%lx", set->__bits[_NCPUWORDS - 1]);
	return (buf);
}
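/*
 * Example of the string layout produced above and parsed below: with 64-bit
 * longs and a CPU_MAXSIZE of 256 (_NCPUWORDS == 4), a set containing only
 * cpus 0-3 prints as "f,0,0,0".  cpusetobj_strscan() also accepts a shorter
 * form such as "f"; omitted trailing words are treated as zero.
 */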
/*
 * Build a valid cpuset_t object from a string representation.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
int
cpusetobj_strscan(cpuset_t *set, const char *buf)
{
	u_int nwords;
	int i, ret;

	if (strlen(buf) > CPUSETBUFSIZ - 1)
		return (-1);

	/* Allow a shorter version of the mask to be passed when necessary. */
	nwords = 1;
	for (i = 0; buf[i] != '\0'; i++)
		if (buf[i] == ',')
			nwords++;
	if (nwords > _NCPUWORDS)
		return (-1);

	CPU_ZERO(set);
	for (i = 0; i < (nwords - 1); i++) {
		ret = sscanf(buf, "%lx,", &set->__bits[i]);
		if (ret == 0 || ret == -1)
			return (-1);
		buf = strstr(buf, ",");
		if (buf == NULL)
			return (-1);
		buf++;
	}
	ret = sscanf(buf, "%lx", &set->__bits[nwords - 1]);
	if (ret == 0 || ret == -1)
		return (-1);
	return (0);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Apply new cpumask to the ithread.
 */
int
cpuset_setithread(lwpid_t id, int cpu)
{
	struct cpuset *nset, *rset;
	struct cpuset *parent, *old_set;
	struct thread *td;
	struct proc *p;
	cpusetid_t cs_id;
	cpuset_t mask;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	rset = uma_zalloc(cpuset_zone, M_WAITOK);
	cs_id = CPUSET_INVALID;

	CPU_ZERO(&mask);
	if (cpu == NOCPU)
		CPU_COPY(cpuset_root, &mask);
	else
		CPU_SET(cpu, &mask);

	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &old_set);
	if (error != 0 || ((cs_id = alloc_unr(cpuset_unr)) == CPUSET_INVALID))
		goto out;

	/* cpuset_which() returns with PROC_LOCK held. */
	old_set = td->td_cpuset;

	if (cpu == NOCPU) {

		/*
		 * Roll back to the default set.  We're not using
		 * cpuset_shadow() here because the CPU_SUBSET() check can
		 * fail if the default set does not contain all CPUs.
		 */
		error = _cpuset_create(nset, cpuset_default, &mask,
		    CPUSET_INVALID);

		goto applyset;
	}

	if (old_set->cs_id == 1 || (old_set->cs_id == CPUSET_INVALID &&
	    old_set->cs_parent->cs_id == 1)) {

		/*
		 * Current set is either default (1) or
		 * shadowed version of default set.
		 *
		 * Allocate new root set to be able to shadow it
		 * with any mask.
		 */
		error = _cpuset_create(rset, cpuset_zero,
		    &cpuset_zero->cs_mask, cs_id);
		if (error != 0) {
			PROC_UNLOCK(p);
			goto out;
		}
		rset->cs_flags |= CPU_SET_ROOT;
		parent = rset;
		rset = NULL;
		cs_id = CPUSET_INVALID;
	} else {
		/* Assume existing set was already allocated by previous call */
		parent = old_set;
		old_set = NULL;
	}

	error = cpuset_shadow(parent, nset, &mask);
applyset:
	if (error == 0) {
		thread_lock(td);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
		nset = NULL;
	} else
		old_set = NULL;
	PROC_UNLOCK(p);
	if (old_set != NULL)
		cpuset_rel(old_set);
out:
	if (nset != NULL)
		uma_zfree(cpuset_zone, nset);
	if (rset != NULL)
		uma_zfree(cpuset_zone, rset);
	if (cs_id != CPUSET_INVALID)
		free_unr(cpuset_unr, cs_id);
	return (error);
}


/*
 * Creates system-wide cpusets and the cpuset for thread0 including two
 * sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error, i;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);

	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_FILL(&set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;

	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	cpuset_default = set;

	/*
	 * Initialize the unit allocator. 0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	/*
	 * If MD code has not initialized per-domain cpusets, place all
	 * CPUs in domain 0.
	 */
	for (i = 0; i < MAXMEMDOM; i++)
		if (!CPU_EMPTY(&cpuset_domain[i]))
			goto domains_set;
	CPU_COPY(&all_cpus, &cpuset_domain[0]);
domains_set:

	return (set);
}

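/*
 * A rough sketch of the hierarchy built above (per-jail root sets created
 * by cpuset_create_root() below attach to it as well):
 *
 *	set 0 - root, all cpus, marked read-only by cpuset_init()
 *	    set 1 - default, the base set of all processes until changed
 *	        anonymous masks - per-thread shadow sets, not joinable by id
 */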
/*
 * Create a cpuset, which would be cpuset_create() but
 * mark the new 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp locked with a reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	mask = all_cpus;
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{

	return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid));
}

int
kern_cpuset_setid(struct thread *td, cpuwhich_t which,
    id_t id, cpusetid_t setid)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{

	return (kern_cpuset_getid(td, uap->level, uap->which, uap->id,
	    uap->setid));
}

int
kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, cpusetid_t *setid)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t tmpid;
	int error;

	if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
	case CPU_WHICH_DOMAIN:
		return (EINVAL);
	}
	switch (level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	tmpid = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&tmpid, setid, sizeof(tmpid));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{

	return (kern_cpuset_getaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask));
}

int
kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, cpuset_t *maskp)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	/* In Capability mode, you can only get your own CPU set. */
	if (IN_CAPABILITY_MODE(td)) {
		if (level != CPU_LEVEL_WHICH)
			return (ECAPMODE);
		if (which != CPU_WHICH_TID && which != CPU_WHICH_PID)
			return (ECAPMODE);
		if (id != -1)
			return (ECAPMODE);
	}
	size = cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_getaffinity(id, which, mask);
			break;
		case CPU_WHICH_DOMAIN:
			if (id < 0 || id >= MAXMEMDOM)
				error = ESRCH;
			else
				CPU_COPY(&cpuset_domain[id], mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, maskp, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{

	return (kern_cpuset_setaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask));
}

int
kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, const cpuset_t *maskp)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	/* In Capability mode, you can only set your own CPU set. */
	if (IN_CAPABILITY_MODE(td)) {
		if (level != CPU_LEVEL_WHICH)
			return (ECAPMODE);
		if (which != CPU_WHICH_TID && which != CPU_WHICH_PID)
			return (ECAPMODE);
		if (id != -1)
			return (ECAPMODE);
	}
	mask = malloc(cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(maskp, mask, cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}

	}
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_setaffinity(id, which, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB
void
ddb_display_cpuset(const cpuset_t *set)
{
	int cpu, once;

	for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (CPU_ISSET(cpu, set)) {
			if (once == 0) {
				db_printf("%d", cpu);
				once = 1;
			} else
				db_printf(",%d", cpu);
		}
	}
	if (once == 0)
		db_printf("<none>");
}

DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  mask=");
		ddb_display_cpuset(&set->cs_mask);
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */