/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/jail.h>		/* Must come after sys/proc.h */

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1. Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set. This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask. In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not. This means that anonymous sets are immutable because they may be
 * shared. To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set. This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid. Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process. This is to remove ambiguity when the setid is queried with
 * a pid argument. There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'. It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
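/*
 * As a rough illustration of the api described above (a userland sketch,
 * not code that belongs to this kernel file), a simple application might
 * query the cpus available to it and then bind the current thread to one
 * of them; cpu 0 is only an example and is assumed to be in the mask:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	(void)cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	(void)cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);
 */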
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero;

cpuset_t *cpuset_root;

/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root. Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'. Returns this set
 * referenced. May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free. This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release. Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}
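/*
 * A minimal sketch (not code from this file) of how the deferred release
 * pair above is meant to be used by a caller such as cpuset_setproc()
 * below: defer while the thread lock is held, then complete once it is
 * safe to free again. 'droplist', 'td' and 'newset' are assumed locals:
 *
 *	struct setlist droplist;
 *	struct cpuset *old;
 *
 *	LIST_INIT(&droplist);
 *	thread_lock(td);
 *	cpuset_rel_defer(&droplist, td->td_cpuset);
 *	td->td_cpuset = newset;
 *	sched_affinity(td);
 *	thread_unlock(td);
 *	while ((old = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(old);
 */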
/*
 * Find a set based on an id. Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *rset, *jset;
		struct prison *pr;

		rset = cpuset_refroot(set);

		pr = td->td_ucred->cr_prison;
		mtx_lock(&pr->pr_mtx);
		cpuset_ref(pr->pr_cpuset);
		jset = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);

		if (jset->cs_id != rset->cs_id) {
			cpuset_rel(set);
			set = NULL;
		}
		cpuset_rel(jset);
		cpuset_rel(rset);
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref. May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask. May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'. Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}
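/*
 * A small worked example of the check above (illustrative only): given a
 * set with mask {0,1,2,3} and children {0,1} and {2,3}, testing the new
 * mask {2,3} fails with EDEADLK because the {0,1} child would be left
 * with no cpu, while the new mask {1,2,3} passes and a later update would
 * narrow that child to {1}.
 */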
/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided. Apply this new
 * mask to restrict all children in the tree. Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask);
	if (error)
		goto out;
	cpuset_update(set, mask);
	CPU_COPY(mask, &set->cs_mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid. Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread. May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, td)
				if (td->td_tid == id)
					break;
			if (td != NULL)
				break;
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (td == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find(id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		if (jailed(curthread->td_ucred)) {
			if (curthread->td_ucred->cr_prison == pr) {
				cpuset_ref(pr->pr_cpuset);
				set = pr->pr_cpuset;
			}
		} else {
			cpuset_ref(pr->pr_cpuset);
			set = pr->pr_cpuset;
		}
		mtx_unlock(&pr->pr_mtx);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	}
	case CPU_WHICH_IRQ:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'. If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null. This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null. This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them. Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes. The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on. It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release. We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set. That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Creates the cpuset for thread0. We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system. It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs. This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine. Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	set->cs_mask.__bits[0] = -1;
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator. 0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}

/*
 * Create a cpuset, which would be cpuset_create() but
 * mark the new 'set' as root.
 *
 * We are not going to reparent the td to it. Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp locked with a reference.
 */
int
cpuset_create_root(struct thread *td, struct cpuset **setp)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	KASSERT(td != NULL, ("[%s:%d] invalid td", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);

	error = cpuset_create(setp, td->td_cpuset, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}
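/*
 * A minimal sketch (an assumed caller, not code from this file) of the two
 * helpers above: create a new root set below the caller's root and then
 * move a whole process into it. 'td', 'p' and the error handling belong to
 * the hypothetical caller; the process's threads keep their own references
 * once reparented, so the caller's reference may be dropped afterwards:
 *
 *	struct cpuset *set;
 *
 *	error = cpuset_create_root(td, &set);
 *	if (error)
 *		return (error);
 *	error = cpuset_setproc_update_set(p, set);
 *	cpuset_rel(set);
 */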
/*
 * This is called once the final set of system cpus is known. Modifies
 * the root set and all children and marks the root readonly.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
#ifdef SMP
	mask.__bits[0] = all_cpus;
#else
	mask.__bits[0] = 1;
#endif
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
		return (EINVAL);
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}
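/*
 * A userland sketch (illustrative only, not part of this file) of the two
 * syscalls above: fetch the base set id of the current process, then
 * assign another process, 'pid' (assumed), to that same numbered set:
 *
 *	cpusetid_t setid;
 *
 *	if (cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, &setid) == 0)
 *		(void)cpuset_setid(CPU_WHICH_PID, pid, setid);
 */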
#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}

	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(uap->which, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB
DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;
	int cpu, once;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf(" mask=");
		for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
			if (CPU_ISSET(cpu, &set->cs_mask)) {
				if (once == 0) {
					db_printf("%d", cpu);
					once = 1;
				} else
					db_printf(",%d", cpu);
			}
		}
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */
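/*
 * Usage note for the debugger command defined above: at the ddb prompt it
 * is invoked as "show cpusets" and walks cpuset_ids, printing each numbered
 * set's id, reference count, flags, parent id and cpu mask.
 */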