/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set; by default this is set 1.
 * Each thread may further restrict the cpus it may run on to a subset of
 * this named set.  This creates an anonymous set which other threads and
 * processes may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall API these are referred
 * to as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base'
 * here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall APIs a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new
 * mask.  Modifying a pid or tid's mask applies only to that tid, but the
 * mask must still fall within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
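
/*
 * As a minimal sketch of the simple case described above (userland
 * perspective, not part of this file's kernel logic): a program that only
 * wants to pin its current thread to cpu 0 could use the
 * cpuset_setaffinity(2) interface roughly as follows, with -1 as the id
 * meaning 'curthread':
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_setaffinity");
 */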
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero, *cpuset_default;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");

cpuset_t *cpuset_root;

/*
 * Acquire a reference to a cpuset; all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}
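
/*
 * A note on the deferred-release pattern (illustrative; see cpuset_setproc()
 * below for the in-tree consumer): while thread locks are held, unreferenced
 * sets are parked on a caller-supplied list with cpuset_rel_defer(); once
 * the locks are dropped, the caller walks that list and finishes each
 * release with cpuset_rel_complete(), which may safely free memory.
 */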

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int check_mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (check_mask) {
		if (!CPU_OVERLAP(&set->cs_mask, mask))
			return (EDEADLK);
		CPU_COPY(&set->cs_mask, &newmask);
		CPU_AND(&newmask, mask);
	} else
		CPU_COPY(mask, &newmask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail, we do not allow
	 * modifying the dedicated root cpuset of the jail, but we may
	 * still allow changing child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask, 0);
	if (error)
		goto out;
	CPU_COPY(mask, &set->cs_mask);
	cpuset_update(set, mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset APIs.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		td = tdfind(id, -1);
		if (td == NULL)
			return (ESRCH);
		p = td->td_proc;
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent; otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, const cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Return a string representing a valid layout for a cpuset_t object.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
char *
cpusetobj_strprint(char *buf, const cpuset_t *set)
{
	char *tbuf;
	size_t i, bytesp, bufsiz;

	tbuf = buf;
	bytesp = 0;
	bufsiz = CPUSETBUFSIZ;

	for (i = 0; i < (_NCPUWORDS - 1); i++) {
		bytesp = snprintf(tbuf, bufsiz, "%lx,", set->__bits[i]);
		bufsiz -= bytesp;
		tbuf += bytesp;
	}
	snprintf(tbuf, bufsiz, "%lx", set->__bits[_NCPUWORDS - 1]);
	return (buf);
}
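
/*
 * As a concrete illustration of the format produced above (and parsed
 * below), assuming a kernel configured so that _NCPUWORDS == 2 with 64-bit
 * words: a set containing only CPUs 0 and 64 renders as "1,1", and the
 * empty set renders as "0,0".  Word 0 (CPUs 0-63) is printed first.
 */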

/*
 * Build a valid cpuset_t object from a string representation.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
int
cpusetobj_strscan(cpuset_t *set, const char *buf)
{
	u_int nwords;
	int i, ret;

	if (strlen(buf) > CPUSETBUFSIZ - 1)
		return (-1);

	/* Allow passing a shorter version of the mask when necessary. */
	nwords = 1;
	for (i = 0; buf[i] != '\0'; i++)
		if (buf[i] == ',')
			nwords++;
	if (nwords > _NCPUWORDS)
		return (-1);

	CPU_ZERO(set);
	for (i = 0; i < (nwords - 1); i++) {
		ret = sscanf(buf, "%lx,", &set->__bits[i]);
		if (ret == 0 || ret == -1)
			return (-1);
		buf = strstr(buf, ",");
		if (buf == NULL)
			return (-1);
		buf++;
	}
	ret = sscanf(buf, "%lx", &set->__bits[nwords - 1]);
	if (ret == 0 || ret == -1)
		return (-1);
	return (0);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Apply new cpumask to the ithread.
 */
int
cpuset_setithread(lwpid_t id, int cpu)
{
	struct cpuset *nset, *rset;
	struct cpuset *parent, *old_set;
	struct thread *td;
	struct proc *p;
	cpusetid_t cs_id;
	cpuset_t mask;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	rset = uma_zalloc(cpuset_zone, M_WAITOK);
	cs_id = CPUSET_INVALID;

	CPU_ZERO(&mask);
	if (cpu == NOCPU)
		CPU_COPY(cpuset_root, &mask);
	else
		CPU_SET(cpu, &mask);

	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &old_set);
	if (error != 0 || ((cs_id = alloc_unr(cpuset_unr)) == CPUSET_INVALID))
		goto out;

	/* cpuset_which() returns with PROC_LOCK held. */
	old_set = td->td_cpuset;

	if (cpu == NOCPU) {
		/*
		 * Roll back to the default set.  We're not using
		 * cpuset_shadow() here because we could fail the
		 * CPU_SUBSET() check; this can happen if the default set
		 * does not contain all CPUs.
		 */
		error = _cpuset_create(nset, cpuset_default, &mask,
		    CPUSET_INVALID);

		goto applyset;
	}

	if (old_set->cs_id == 1 || (old_set->cs_id == CPUSET_INVALID &&
	    old_set->cs_parent->cs_id == 1)) {
		/*
		 * The current set is either the default (1) or a shadowed
		 * version of the default set.
		 *
		 * Allocate a new root set so that it can be shadowed with
		 * any mask.
		 */
		error = _cpuset_create(rset, cpuset_zero,
		    &cpuset_zero->cs_mask, cs_id);
		if (error != 0) {
			PROC_UNLOCK(p);
			goto out;
		}
		rset->cs_flags |= CPU_SET_ROOT;
		parent = rset;
		rset = NULL;
		cs_id = CPUSET_INVALID;
	} else {
		/* Assume the existing set was already allocated by a previous call. */
		parent = old_set;
		old_set = NULL;
	}

	error = cpuset_shadow(parent, nset, &mask);
applyset:
	if (error == 0) {
		thread_lock(td);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
		nset = NULL;
	} else
		old_set = NULL;
	PROC_UNLOCK(p);
	if (old_set != NULL)
		cpuset_rel(old_set);
out:
	if (nset != NULL)
		uma_zfree(cpuset_zone, nset);
	if (rset != NULL)
		uma_zfree(cpuset_zone, rset);
	if (cs_id != CPUSET_INVALID)
		free_unr(cpuset_unr, cs_id);
	return (error);
}

/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);

	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_FILL(&set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;

	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	cpuset_default = set;

	/*
	 * Initialize the unit allocator.  0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}

/*
 * Create a cpuset as with cpuset_create(), but additionally mark the new
 * 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * On success, returns the set in *setp with a reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	mask = all_cpus;
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
		return (EINVAL);
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(uap->which, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB
void
ddb_display_cpuset(const cpuset_t *set)
{
	int cpu, once;

	for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (CPU_ISSET(cpu, set)) {
			if (once == 0) {
				db_printf("%d", cpu);
				once = 1;
			} else
				db_printf(",%d", cpu);
		}
	}
	if (once == 0)
		db_printf("<none>");
}

DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  mask=");
		ddb_display_cpuset(&set->cs_mask);
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */