/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set; by default this is set 1.
 * Each thread may further restrict the cpus it may run on to a subset of
 * this named set.  This creates an anonymous set which other threads and
 * processes may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall API these are referred
 * to as the ROOT, CPUSET, and MASK levels, where CPUSET is called 'base'
 * here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall APIs a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new
 * mask.  Modifying a pid or tid's mask applies only to that tid, but the
 * new mask must still fall within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
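/*
 * To make the recommended usage above concrete, a minimal userland sketch
 * (a hypothetical example program, not part of this file; it relies only
 * on the documented cpuset_getaffinity(2)/cpuset_setaffinity(2) wrappers
 * and assumes cpu 0 is present in the base set):
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *	#include <err.h>
 *
 *	cpuset_t mask;
 *
 *	(query the cpus available to this process's base set)
 *	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_getaffinity");
 *	(restrict the current thread to cpu 0 via an anonymous mask)
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_setaffinity");
 */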
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD,
    0, sizeof(cpuset_t), "sizeof(cpuset_t)");

cpuset_t *cpuset_root;

/*
 * Acquire a reference to a cpuset; all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}
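/*
 * A sketch of the intended pairing (cpuset_setproc() below is the real
 * consumer): drop references with cpuset_rel_defer() onto a caller-owned
 * list while thread locks are held, then drain the list with
 * cpuset_rel_complete() once the locks are released and freeing is safe.
 *
 *	struct setlist droplist;
 *	struct cpuset *nset;
 *
 *	LIST_INIT(&droplist);
 *	(... with thread_lock() held ...)
 *	cpuset_rel_defer(&droplist, oldset);
 *	(... after all locks are dropped ...)
 *	while ((nset = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(nset);
 */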
/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}
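/*
 * A small worked example of the check above (illustrative only): given a
 * set S with mask {0,1} and a child C with mask {1}, testing the mask {0}
 * succeeds at S because the masks overlap, but fails at C with EDEADLK:
 * the intersection of {0} with {1} is empty, so C would be left with no
 * cpu to run on.
 */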
/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail we do not allow
	 * modifying the dedicated root cpuset of the jail, but we may
	 * still allow modification of child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask);
	if (error)
		goto out;
	cpuset_update(set, mask);
	CPU_COPY(mask, &set->cs_mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}
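/*
 * For instance (illustrative): with a parent mask of {0,1,2,3}, calling
 * cpuset_modify() on a child with the mask {4,5} fails with EINVAL because
 * the new mask is not a subset of the parent's; calling it on a jail's
 * dedicated root set from within that jail fails with EPERM regardless of
 * the mask.
 */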
/*
 * Resolve the 'which' parameter of several cpuset APIs.
 *
 * For CPU_WHICH_PID and CPU_WHICH_TID return a locked proc and valid
 * proc/tid.  Also checks for permission via p_cansched().
 *
 * For CPU_WHICH_CPUSET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		td = tdfind(id, -1);
		if (td == NULL)
			return (ESRCH);
		p = td->td_proc;
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, const cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}
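/*
 * For example (illustrative): a thread in base set 2 that applies a mask
 * gets an anonymous shadow set parented to set 2; if it later narrows its
 * mask again, the replacement shadow is parented to set 2 as well, never
 * to the old shadow.  This is what preserves the illusion of a private,
 * mutable per-thread mask described at the top of the file.
 */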
/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Calculate the ffs() of the cpuset.
 */
int
cpusetobj_ffs(const cpuset_t *set)
{
	size_t i;
	int cbit;

	cbit = 0;
	for (i = 0; i < _NCPUWORDS; i++) {
		if (set->__bits[i] != 0) {
			cbit = ffsl(set->__bits[i]);
			cbit += i * _NCPUBITS;
			break;
		}
	}
	return (cbit);
}

/*
 * Return a string representing a valid layout for a cpuset_t object.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
char *
cpusetobj_strprint(char *buf, const cpuset_t *set)
{
	char *tbuf;
	size_t i, bytesp, bufsiz;

	tbuf = buf;
	bytesp = 0;
	bufsiz = CPUSETBUFSIZ;

	for (i = _NCPUWORDS - 1; i > 0; i--) {
		bytesp = snprintf(tbuf, bufsiz, "%lx, ", set->__bits[i]);
		bufsiz -= bytesp;
		tbuf += bytesp;
	}
	snprintf(tbuf, bufsiz, "%lx", set->__bits[0]);
	return (buf);
}

/*
 * Build a valid cpuset_t object from a string representation.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
int
cpusetobj_strscan(cpuset_t *set, const char *buf)
{
	u_int nwords;
	int i, ret;

	if (strlen(buf) > CPUSETBUFSIZ - 1)
		return (-1);

	/* Allow passing a shorter version of the mask when necessary. */
	nwords = 1;
	for (i = 0; buf[i] != '\0'; i++)
		if (buf[i] == ',')
			nwords++;
	if (nwords > _NCPUWORDS)
		return (-1);

	CPU_ZERO(set);
	for (i = nwords - 1; i > 0; i--) {
		ret = sscanf(buf, "%lx, ", &set->__bits[i]);
		if (ret == 0 || ret == -1)
			return (-1);
		buf = strstr(buf, " ");
		if (buf == NULL)
			return (-1);
		buf++;
	}
	ret = sscanf(buf, "%lx", &set->__bits[0]);
	if (ret == 0 || ret == -1)
		return (-1);
	return (0);
}
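/*
 * For example (illustrative, assuming 4 _NCPUWORDS): a mask with only
 * cpu 0 set prints as "0, 0, 0, 1", most significant word first, each
 * word in hex, words separated by ", ".  cpusetobj_strscan() accepts the
 * same layout and, as noted above, also shorter strings such as "1" that
 * omit leading zero words.
 */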
/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_FILL(&set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator.  0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}
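/*
 * The initial hierarchy built above thus looks like (sketch):
 *
 *	set 0 (CPU_SET_ROOT, mask = all cpus, read-only after cpuset_init())
 *	  `-- set 1 (default; every process starts here)
 *
 * Jail roots created by cpuset_create_root() below are grafted in as
 * further CPU_SET_ROOT children of the creating prison's set.
 */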
/*
 * Create a cpuset, which would be cpuset_create() but
 * mark the new 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp locked with a reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	mask = all_cpus;
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}
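/*
 * Userland view of the syscall above and the one that follows (a sketch
 * using the documented cpuset(2) and cpuset_setid(2) wrappers):
 *
 *	#include <sys/cpuset.h>
 *
 *	cpusetid_t setid;
 *
 *	cpuset(&setid) creates a new numbered set as a child of the
 *	caller's root, moves the calling process into it, and returns the
 *	new id in 'setid'.  Another process may later be moved into the
 *	same set with cpuset_setid(CPU_WHICH_PID, pid, setid).
 */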
#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
		return (EINVAL);
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}
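/*
 * Note the CPU_LEVEL_WHICH/CPU_WHICH_PID case above: the per-process
 * answer is the union (CPU_OR) of every thread's mask.  For example, a
 * process with one thread restricted to cpu 0 and another restricted to
 * cpu 2 reports {0, 2}, even though no single thread may run on both.
 */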
#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(uap->which, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB
DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;
	int cpu, once;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  mask=");
		for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
			if (CPU_ISSET(cpu, &set->cs_mask)) {
				if (once == 0) {
					db_printf("%d", cpu);
					once = 1;
				} else
					db_printf(",%d", cpu);
			}
		}
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */
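/*
 * The DDB command above is invoked from the debugger prompt as
 * "show cpusets".  Each entry reports one numbered set: its address, id,
 * reference count, flags, parent id, and a comma-separated list of the
 * cpus in its mask (output shape as implied by the db_printf format
 * strings above).
 */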