/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
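
/*
 * Illustrative userland sketch of the simple case described above.  This is
 * an assumption added for documentation purposes only and is not compiled as
 * part of the kernel; cpuset_getaffinity(2) and cpuset_setaffinity(2) are the
 * userland entry points for the handlers near the bottom of this file.
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	(void)cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);	(query the cpus available to us)
 *	CPU_CLR(1, &mask);		(drop cpu 1, for example)
 *	(void)cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);	(bind curthread to the remainder)
 */
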
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD,
    0, sizeof(cpuset_t), "sizeof(cpuset_t)");

cpuset_t *cpuset_root;

/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int check_mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (check_mask) {
		if (!CPU_OVERLAP(&set->cs_mask, mask))
			return (EDEADLK);
		CPU_COPY(&set->cs_mask, &newmask);
		CPU_AND(&newmask, mask);
	} else
		CPU_COPY(mask, &newmask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail but may still allow changing
	 * child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask, 0);
	if (error)
		goto out;
	CPU_COPY(mask, &set->cs_mask);
	cpuset_update(set, mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		td = tdfind(id, -1);
		if (td == NULL)
			return (ESRCH);
		p = td->td_proc;
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, const cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Return a string representing a valid layout for a cpuset_t object.
 * It expects an incoming buffer at least CPUSETBUFSIZ bytes in size.
 */
char *
cpusetobj_strprint(char *buf, const cpuset_t *set)
{
	char *tbuf;
	size_t i, bytesp, bufsiz;

	tbuf = buf;
	bytesp = 0;
	bufsiz = CPUSETBUFSIZ;

	for (i = 0; i < (_NCPUWORDS - 1); i++) {
		bytesp = snprintf(tbuf, bufsiz, "%lx,", set->__bits[i]);
		bufsiz -= bytesp;
		tbuf += bytesp;
	}
	snprintf(tbuf, bufsiz, "%lx", set->__bits[_NCPUWORDS - 1]);
	return (buf);
}
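
/*
 * For illustration (values assumed, not taken from a real system): with
 * _NCPUWORDS == 2 and only cpus 0 and 65 set, the layout produced above and
 * parsed below is the string "1,2" -- one comma-separated hexadecimal word
 * per cpuset_t word, with the lowest-numbered cpus in the first word.
 */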

/*
 * Build a valid cpuset_t object from a string representation.
 * It expects an incoming buffer at least CPUSETBUFSIZ bytes in size.
 */
int
cpusetobj_strscan(cpuset_t *set, const char *buf)
{
	u_int nwords;
	int i, ret;

	if (strlen(buf) > CPUSETBUFSIZ - 1)
		return (-1);

	/* Allow passing a shorter version of the mask when necessary. */
	nwords = 1;
	for (i = 0; buf[i] != '\0'; i++)
		if (buf[i] == ',')
			nwords++;
	if (nwords > _NCPUWORDS)
		return (-1);

	CPU_ZERO(set);
	for (i = 0; i < (nwords - 1); i++) {
		ret = sscanf(buf, "%lx,", &set->__bits[i]);
		if (ret == 0 || ret == -1)
			return (-1);
		buf = strstr(buf, ",");
		if (buf == NULL)
			return (-1);
		buf++;
	}
	ret = sscanf(buf, "%lx", &set->__bits[nwords - 1]);
	if (ret == 0 || ret == -1)
		return (-1);
	return (0);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_FILL(&set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator.  0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}

/*
 * Create a cpuset, which would be cpuset_create() except that it also
 * marks the new 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp locked with a reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	mask = all_cpus;
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}
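
/*
 * Illustrative userland sketch (an assumption for documentation purposes,
 * not compiled here; 'pid' stands for some target process id): cpuset(2)
 * above creates a new numbered set and moves the caller into it, while
 * cpuset_setid(2) below moves an existing process into a numbered set:
 *
 *	cpusetid_t id;
 *
 *	if (cpuset(&id) == 0)
 *		(void)cpuset_setid(CPU_WHICH_PID, pid, id);
 */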

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
		return (EINVAL);
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}

	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(uap->which, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB
void
ddb_display_cpuset(const cpuset_t *set)
{
	int cpu, once;

	for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (CPU_ISSET(cpu, set)) {
			if (once == 0) {
				db_printf("%d", cpu);
				once = 1;
			} else
				db_printf(",%d", cpu);
		}
	}
	if (once == 0)
		db_printf("<none>");
}

DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  mask=");
		ddb_display_cpuset(&set->cs_mask);
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */