/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set; by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
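
/*
 * An illustrative userland sketch of the simple case described above (not
 * part of this file; it assumes the libc cpuset_getaffinity(2) and
 * cpuset_setaffinity(2) wrappers and the CPU_*() macros from <sys/cpuset.h>):
 * discover the cpus available in the current process' base set, then pin
 * the calling thread to the first one found.
 *
 *	cpuset_t avail, one;
 *	int cpu;
 *
 *	cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(avail), &avail);
 *	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
 *		if (CPU_ISSET(cpu, &avail))
 *			break;
 *	CPU_ZERO(&one);
 *	CPU_SET(cpu, &one);
 *	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(one), &one);
 */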

static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero, *cpuset_default;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD,
    0, sizeof(cpuset_t), "sizeof(cpuset_t)");

cpuset_t *cpuset_root;

/*
 * Acquire a reference to a cpuset; all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}
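
/*
 * A minimal sketch of the defer/complete idiom (cpuset_setproc() below is
 * the real consumer; 'droplist', 'td' and 'newset' are illustrative names):
 *
 *	struct setlist droplist;
 *
 *	LIST_INIT(&droplist);
 *	thread_lock(td);
 *	cpuset_rel_defer(&droplist, td->td_cpuset);	(no allocation here)
 *	td->td_cpuset = newset;
 *	thread_unlock(td);
 *	while (!LIST_EMPTY(&droplist))
 *		cpuset_rel_complete(LIST_FIRST(&droplist));
 */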

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int check_mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (check_mask) {
		if (!CPU_OVERLAP(&set->cs_mask, mask))
			return (EDEADLK);
		CPU_COPY(&set->cs_mask, &newmask);
		CPU_AND(&newmask, mask);
	} else
		CPU_COPY(mask, &newmask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail but may still allow
	 * changing child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask, 0);
	if (error)
		goto out;
	CPU_COPY(mask, &set->cs_mask);
	cpuset_update(set, mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		td = tdfind(id, -1);
		if (td == NULL)
			return (ESRCH);
		p = td->td_proc;
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous, we use its parent; otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, const cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Return a string representing a valid layout for a cpuset_t object.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
char *
cpusetobj_strprint(char *buf, const cpuset_t *set)
{
	char *tbuf;
	size_t i, bytesp, bufsiz;

	tbuf = buf;
	bytesp = 0;
	bufsiz = CPUSETBUFSIZ;

	for (i = 0; i < (_NCPUWORDS - 1); i++) {
		bytesp = snprintf(tbuf, bufsiz, "%lx,", set->__bits[i]);
		bufsiz -= bytesp;
		tbuf += bytesp;
	}
	snprintf(tbuf, bufsiz, "%lx", set->__bits[_NCPUWORDS - 1]);
	return (buf);
}
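
/*
 * For example (illustrative; assumes 64-bit longs and a kernel built with
 * CPU_MAXSIZE == 128, so _NCPUWORDS == 2): a set containing only CPUs 0
 * and 65 has __bits[0] == 0x1 and __bits[1] == 0x2 and prints as "1,2".
 * The parser below accepts the same layout, and also a shorter string such
 * as "1", which leaves the remaining words zeroed.
 */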

/*
 * Build a valid cpuset_t object from a string representation.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
int
cpusetobj_strscan(cpuset_t *set, const char *buf)
{
	u_int nwords;
	int i, ret;

	if (strlen(buf) > CPUSETBUFSIZ - 1)
		return (-1);

	/* Allow passing a shorter version of the mask when necessary. */
	nwords = 1;
	for (i = 0; buf[i] != '\0'; i++)
		if (buf[i] == ',')
			nwords++;
	if (nwords > _NCPUWORDS)
		return (-1);

	CPU_ZERO(set);
	for (i = 0; i < (nwords - 1); i++) {
		ret = sscanf(buf, "%lx,", &set->__bits[i]);
		if (ret == 0 || ret == -1)
			return (-1);
		buf = strstr(buf, ",");
		if (buf == NULL)
			return (-1);
		buf++;
	}
	ret = sscanf(buf, "%lx", &set->__bits[nwords - 1]);
	if (ret == 0 || ret == -1)
		return (-1);
	return (0);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Apply new cpumask to the ithread.
 */
int
cpuset_setithread(lwpid_t id, u_char cpu)
{
	struct cpuset *nset, *rset;
	struct cpuset *parent, *old_set;
	struct thread *td;
	struct proc *p;
	cpusetid_t cs_id;
	cpuset_t mask;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	rset = uma_zalloc(cpuset_zone, M_WAITOK);

	CPU_ZERO(&mask);
	if (cpu == NOCPU)
		CPU_COPY(cpuset_root, &mask);
	else
		CPU_SET(cpu, &mask);

	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &old_set);
	if (((cs_id = alloc_unr(cpuset_unr)) == CPUSET_INVALID) || error != 0)
		goto out;

	thread_lock(td);
	old_set = td->td_cpuset;

	if (cpu == NOCPU) {
		/*
		 * Roll back to the default set.  We are not using
		 * cpuset_shadow() here because the CPU_SUBSET() check can
		 * fail; this can happen if the default set does not contain
		 * all CPUs.
		 */
		error = _cpuset_create(nset, cpuset_default, &mask,
		    CPUSET_INVALID);

		goto applyset;
	}

	if (old_set->cs_id == 1 || (old_set->cs_id == CPUSET_INVALID &&
	    old_set->cs_parent->cs_id == 1)) {
		/* Default mask; we need to use a new root set */
		error = _cpuset_create(rset, cpuset_zero,
		    &cpuset_zero->cs_mask, cs_id);
		if (error != 0) {
			PROC_UNLOCK(p);
			goto out;
		}
		rset->cs_flags |= CPU_SET_ROOT;
		parent = rset;
		rset = NULL;
		cs_id = CPUSET_INVALID;
	} else {
		/* Assume existing set was already allocated by previous call */
		parent = td->td_cpuset;
		old_set = NULL;
	}

	error = cpuset_shadow(parent, nset, &mask);
applyset:
	if (error == 0) {
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (old_set != NULL)
		cpuset_rel(old_set);
out:
	if (nset != NULL)
		uma_zfree(cpuset_zone, nset);
	if (rset != NULL)
		uma_zfree(cpuset_zone, rset);
	if (cs_id != CPUSET_INVALID)
		free_unr(cpuset_unr, cs_id);
	return (error);
}


/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);

	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_FILL(&set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;

	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	cpuset_default = set;

	/*
	 * Initialize the unit allocator.  0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}

/*
 * Create a cpuset, which would be cpuset_create() but
 * mark the new 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp locked with a reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	mask = all_cpus;
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}
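
/*
 * Userland sketch for the syscall above (illustrative only; assumes the
 * libc cpuset(2) wrapper and <sys/cpuset.h>): create a new numbered set
 * rooted at the caller's root, move the calling process into it and report
 * the new id.
 *
 *	cpusetid_t id;
 *
 *	if (cpuset(&id) != 0)
 *		err(1, "cpuset");
 *	printf("now running in cpuset %d\n", (int)id);
 */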

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
		return (EINVAL);
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}

	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(uap->which, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB
void
ddb_display_cpuset(const cpuset_t *set)
{
	int cpu, once;

	for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (CPU_ISSET(cpu, set)) {
			if (once == 0) {
				db_printf("%d", cpu);
				once = 1;
			} else
				db_printf(",%d", cpu);
		}
	}
	if (once == 0)
		db_printf("<none>");
}

DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  mask=");
		ddb_display_cpuset(&set->cs_mask);
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */