/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/ctype.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/capsicum.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/vmmeter.h>
#include <sys/ktrace.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
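
/*
 * For illustration only (not part of the original source): a minimal
 * userland sketch of the simple-application pattern described above,
 * assuming cpu 0 is present in the caller's base set:
 *
 *	cpuset_t mask;
 *
 *	error = cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);		(query the base set's cpus)
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	error = cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);		(pin curthread to cpu 0; this
 *						 yields an anonymous mask, not
 *						 a new numbered set)
 */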

LIST_HEAD(domainlist, domainset);
struct domainset __read_mostly domainset_firsttouch;
struct domainset __read_mostly domainset_fixed[MAXMEMDOM];
struct domainset __read_mostly domainset_interleave;
struct domainset __read_mostly domainset_prefer[MAXMEMDOM];
struct domainset __read_mostly domainset_roundrobin;

static uma_zone_t cpuset_zone;
static uma_zone_t domainset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct domainlist cpuset_domains;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero, *cpuset_default, *cpuset_kernel;
static struct domainset *domainset0, *domainset2;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD,
    SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");

cpuset_t *cpuset_root;
cpuset_t cpuset_domain[MAXMEMDOM];

static int domainset_valid(const struct domainset *, const struct domainset *);

/*
 * Find the first non-anonymous set starting from 'set'.
 */
static struct cpuset *
cpuset_getbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.
 */
static struct cpuset *
cpuset_getroot(struct cpuset *set)
{

	while ((set->cs_flags & CPU_SET_ROOT) == 0 && set->cs_parent != NULL)
		set = set->cs_parent;
	return (set);
}

/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	return (cpuset_ref(cpuset_getroot(set)));
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	return (cpuset_ref(cpuset_getbase(set)));
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release_if_not_last(&set->cs_ref))
		return;
	mtx_lock_spin(&cpuset_lock);
	if (!refcount_release(&set->cs_ref)) {
		mtx_unlock_spin(&cpuset_lock);
		return;
	}
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release_if_not_last(&set->cs_ref))
		return;
	mtx_lock_spin(&cpuset_lock);
	if (!refcount_release(&set->cs_ref)) {
		mtx_unlock_spin(&cpuset_lock);
		return;
	}
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	cpusetid_t id;

	id = set->cs_id;
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}
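
/*
 * Illustrative sketch (not a verbatim caller): the defer/complete pair
 * above is meant to bracket a section that runs under thread locks, as
 * cpuset_setproc() below does:
 *
 *	struct setlist droplist;
 *
 *	LIST_INIT(&droplist);
 *	thread_lock(td);
 *	cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset));
 *	thread_unlock(td);
 *	while ((set = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(set);
 */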

/*
 * Initialize a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
cpuset_init(struct cpuset *set, struct cpuset *parent,
    const cpuset_t *mask, struct domainset *domain, cpusetid_t id)
{

	if (domain == NULL)
		domain = parent->cs_domain;
	if (mask == NULL)
		mask = &parent->cs_mask;
	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	/* The domain must be prepared ahead of time. */
	if (!domainset_valid(parent->cs_domain, domain))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	set->cs_domain = domain;
	CPU_AND(&set->cs_mask, &set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 *
 * If *setp is not NULL, then it will be used as-is.  The caller must take
 * into account that *setp will be inserted at the head of cpuset_ids and
 * plan any potentially conflicting cs_link usage accordingly.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;
	bool dofree;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	dofree = (*setp == NULL);
	if (*setp != NULL)
		set = *setp;
	else
		*setp = set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	error = cpuset_init(set, parent, mask, NULL, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	if (dofree)
		uma_zfree(cpuset_zone, set);

	return (error);
}

static void
cpuset_freelist_add(struct setlist *list, int count)
{
	struct cpuset *set;
	int i;

	for (i = 0; i < count; i++) {
		set = uma_zalloc(cpuset_zone, M_ZERO | M_WAITOK);
		LIST_INSERT_HEAD(list, set, cs_link);
	}
}

static void
cpuset_freelist_init(struct setlist *list, int count)
{

	LIST_INIT(list);
	cpuset_freelist_add(list, count);
}

static void
cpuset_freelist_free(struct setlist *list)
{
	struct cpuset *set;

	while ((set = LIST_FIRST(list)) != NULL) {
		LIST_REMOVE(set, cs_link);
		uma_zfree(cpuset_zone, set);
	}
}

static void
domainset_freelist_add(struct domainlist *list, int count)
{
	struct domainset *set;
	int i;

	for (i = 0; i < count; i++) {
		set = uma_zalloc(domainset_zone, M_ZERO | M_WAITOK);
		LIST_INSERT_HEAD(list, set, ds_link);
	}
}

static void
domainset_freelist_init(struct domainlist *list, int count)
{

	LIST_INIT(list);
	domainset_freelist_add(list, count);
}

static void
domainset_freelist_free(struct domainlist *list)
{
	struct domainset *set;

	while ((set = LIST_FIRST(list)) != NULL) {
		LIST_REMOVE(set, ds_link);
		uma_zfree(domainset_zone, set);
	}
}
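
/*
 * The freelists above implement the file-wide convention of allocating
 * before locking: cpuset_lock is a spin mutex, so uma_zalloc(M_WAITOK)
 * may not be called while it is held.  A sketch of the intended use:
 *
 *	struct setlist sets;
 *
 *	cpuset_freelist_init(&sets, needed);	(may sleep)
 *	mtx_lock_spin(&cpuset_lock);
 *	... consume entries via LIST_FIRST() and LIST_REMOVE() ...
 *	mtx_unlock_spin(&cpuset_lock);
 *	cpuset_freelist_free(&sets);		(release any leftovers)
 */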

/* Copy a domainset preserving mask and policy. */
static void
domainset_copy(const struct domainset *from, struct domainset *to)
{

	DOMAINSET_COPY(&from->ds_mask, &to->ds_mask);
	to->ds_policy = from->ds_policy;
	to->ds_prefer = from->ds_prefer;
}

/* Return 1 if mask and policy are equal, otherwise 0. */
static int
domainset_equal(const struct domainset *one, const struct domainset *two)
{

	return (DOMAINSET_CMP(&one->ds_mask, &two->ds_mask) == 0 &&
	    one->ds_policy == two->ds_policy &&
	    one->ds_prefer == two->ds_prefer);
}

/* Return 1 if child is a valid subset of parent. */
static int
domainset_valid(const struct domainset *parent, const struct domainset *child)
{
	if (child->ds_policy != DOMAINSET_POLICY_PREFER)
		return (DOMAINSET_SUBSET(&parent->ds_mask, &child->ds_mask));
	return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask));
}

static int
domainset_restrict(const struct domainset *parent,
    const struct domainset *child)
{
	if (child->ds_policy != DOMAINSET_POLICY_PREFER)
		return (DOMAINSET_OVERLAP(&parent->ds_mask, &child->ds_mask));
	return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask));
}
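
/*
 * For example (illustrative), with a parent mask covering domains 0 and 1:
 * a ROUNDROBIN child with mask {1} is valid (a subset); a PREFER child with
 * ds_prefer == 1 is valid even if its fallback mask strays outside the
 * parent, because only the preferred domain is checked; and a PREFER child
 * with ds_prefer == 2 is not valid.
 */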

/*
 * Lookup or create a domainset.  The key is provided in ds_mask and
 * ds_policy.  If the domainset does not yet exist the storage in
 * 'domain' is used to insert.  Otherwise this storage is freed to the
 * domainset_zone and the existing domainset is returned.
 */
static struct domainset *
_domainset_create(struct domainset *domain, struct domainlist *freelist)
{
	struct domainset *ndomain;
	int i, j;

	KASSERT(domain->ds_cnt <= vm_ndomains,
	    ("invalid domain count in domainset %p", domain));
	KASSERT(domain->ds_policy != DOMAINSET_POLICY_PREFER ||
	    domain->ds_prefer < vm_ndomains,
	    ("invalid preferred domain in domains %p", domain));

	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(ndomain, &cpuset_domains, ds_link)
		if (domainset_equal(ndomain, domain))
			break;
	/*
	 * If the domain does not yet exist we insert it and initialize
	 * various iteration helpers which are not part of the key.
	 */
	if (ndomain == NULL) {
		LIST_INSERT_HEAD(&cpuset_domains, domain, ds_link);
		domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask);
		for (i = 0, j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++)
			if (DOMAINSET_ISSET(i, &domain->ds_mask))
				domain->ds_order[j++] = i;
	}
	mtx_unlock_spin(&cpuset_lock);
	if (ndomain == NULL)
		return (domain);
	if (freelist != NULL)
		LIST_INSERT_HEAD(freelist, domain, ds_link);
	else
		uma_zfree(domainset_zone, domain);
	return (ndomain);
}

/*
 * Are any of the domains in the mask empty?  If so, silently
 * remove them and update the domainset accordingly.  If only empty
 * domains are present, we must return failure.
 */
static bool
domainset_empty_vm(struct domainset *domain)
{
	domainset_t empty;
	int i, j;

	DOMAINSET_ZERO(&empty);
	for (i = 0; i < vm_ndomains; i++)
		if (VM_DOMAIN_EMPTY(i))
			DOMAINSET_SET(i, &empty);
	if (DOMAINSET_SUBSET(&empty, &domain->ds_mask))
		return (true);

	/* Remove empty domains from the set and recompute. */
	DOMAINSET_ANDNOT(&domain->ds_mask, &empty);
	domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask);
	for (i = j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++)
		if (DOMAINSET_ISSET(i, &domain->ds_mask))
			domain->ds_order[j++] = i;

	/* Convert a PREFER policy referencing an empty domain to RR. */
	if (domain->ds_policy == DOMAINSET_POLICY_PREFER &&
	    DOMAINSET_ISSET(domain->ds_prefer, &empty)) {
		domain->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
		domain->ds_prefer = -1;
	}

	return (false);
}

/*
 * Create or lookup a domainset based on the key held in 'domain'.
 */
struct domainset *
domainset_create(const struct domainset *domain)
{
	struct domainset *ndomain;

	/*
	 * Validate the policy.  It must specify a usable policy number with
	 * only valid domains.  Preferred must include the preferred domain
	 * in the mask.
	 */
	if (domain->ds_policy <= DOMAINSET_POLICY_INVALID ||
	    domain->ds_policy > DOMAINSET_POLICY_MAX)
		return (NULL);
	if (domain->ds_policy == DOMAINSET_POLICY_PREFER &&
	    !DOMAINSET_ISSET(domain->ds_prefer, &domain->ds_mask))
		return (NULL);
	if (!DOMAINSET_SUBSET(&domainset0->ds_mask, &domain->ds_mask))
		return (NULL);
	ndomain = uma_zalloc(domainset_zone, M_WAITOK | M_ZERO);
	domainset_copy(domain, ndomain);
	return _domainset_create(ndomain, NULL);
}
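
/*
 * A typical use (sketch, assuming at least two memory domains are
 * configured) is to build a key on the stack and let domainset_create()
 * hand back the canonical, shared instance:
 *
 *	struct domainset key;
 *
 *	memset(&key, 0, sizeof(key));
 *	DOMAINSET_SET(0, &key.ds_mask);
 *	DOMAINSET_SET(1, &key.ds_mask);
 *	key.ds_policy = DOMAINSET_POLICY_PREFER;
 *	key.ds_prefer = 1;
 *	dset = domainset_create(&key);		(NULL if the key is invalid)
 */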

/*
 * Update thread domainset pointers.
 */
static void
domainset_notify(void)
{
	struct thread *td;
	struct proc *p;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW) {
			PROC_UNLOCK(p);
			continue;
		}
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			td->td_domain.dr_policy = td->td_cpuset->cs_domain;
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	kernel_object->domain.dr_policy = cpuset_kernel->cs_domain;
}

/*
 * Create a new set that is a subset of a parent.
 */
static struct domainset *
domainset_shadow(const struct domainset *pdomain,
    const struct domainset *domain, struct domainlist *freelist)
{
	struct domainset *ndomain;

	ndomain = LIST_FIRST(freelist);
	LIST_REMOVE(ndomain, ds_link);

	/*
	 * Initialize the key from the request.
	 */
	domainset_copy(domain, ndomain);

	/*
	 * Restrict the key by the parent.
	 */
	DOMAINSET_AND(&ndomain->ds_mask, &pdomain->ds_mask);

	return _domainset_create(ndomain, freelist);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int augment_mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (augment_mask) {
		CPU_AND(&newmask, &set->cs_mask, mask);
	} else
		CPU_COPY(mask, &newmask);

	if (CPU_EMPTY(&newmask))
		return (EDEADLK);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, &set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail,
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail but may still allow to
	 * change child sets, including subordinate jails'
	 * roots.
	 */
	if ((set->cs_flags & CPU_SET_ROOT) != 0 &&
	    jailed(curthread->td_ucred) &&
	    set == curthread->td_ucred->cr_prison->pr_cpuset)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	if ((set->cs_flags & (CPU_SET_ROOT | CPU_SET_RDONLY)) == CPU_SET_ROOT) {
		KASSERT(set->cs_parent != NULL,
		    ("jail.cpuset=%d is not a proper child of parent jail's root.",
		    set->cs_id));

		/*
		 * cpuset_getroot() cannot work here due to how top-level jail
		 * roots are constructed.  Top-level jails are parented to
		 * thread0's cpuset (i.e. cpuset 1) rather than the system root.
		 */
		root = set->cs_parent;
	} else {
		root = cpuset_getroot(set);
	}
	mtx_lock_spin(&cpuset_lock);
	if (root && !CPU_SUBSET(&root->cs_mask, mask)) {
		error = EINVAL;
		goto out;
	}
	error = cpuset_testupdate(set, mask, 0);
	if (error)
		goto out;
	CPU_COPY(mask, &set->cs_mask);
	cpuset_update(set, mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate_domain(struct cpuset *set, struct domainset *dset,
    struct domainset *orig, int *count, int augment_mask __unused)
{
	struct cpuset *nset;
	struct domainset *domain;
	struct domainset newset;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	domain = set->cs_domain;
	domainset_copy(domain, &newset);
	if (!domainset_equal(domain, orig)) {
		if (!domainset_restrict(domain, dset))
			return (EDEADLK);
		DOMAINSET_AND(&newset.ds_mask, &dset->ds_mask);
		/* Count the number of domains that are changing. */
		(*count)++;
	}
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate_domain(nset, &newset, domain,
		    count, 1)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update_domain(struct cpuset *set, struct domainset *domain,
    struct domainset *orig, struct domainlist *domains)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	/*
	 * If this domainset has changed from the parent we must calculate
	 * a new set.  Otherwise it simply inherits from the parent.  When
	 * we inherit from the parent we get a new mask and policy.  If the
	 * set is modified from the parent we keep the policy and only
	 * update the mask.
	 */
	if (set->cs_domain != orig) {
		orig = set->cs_domain;
		set->cs_domain = domainset_shadow(domain, orig, domains);
	} else
		set->cs_domain = domain;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update_domain(nset, set->cs_domain, orig, domains);

	return;
}

/*
 * Modify the set 'set' to use a copy of the domainset provided.  Apply
 * this new mask to restrict all children in the tree.  Checks for validity
 * before applying the changes.
 */
static int
cpuset_modify_domain(struct cpuset *set, struct domainset *domain)
{
	struct domainlist domains;
	struct domainset temp;
	struct domainset *dset;
	struct cpuset *root;
	int ndomains, needed;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail but may still allow to
	 * change child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	domainset_freelist_init(&domains, 0);
	domain = domainset_create(domain);
	ndomains = 0;

	mtx_lock_spin(&cpuset_lock);
	for (;;) {
		root = cpuset_getroot(set);
		dset = root->cs_domain;
		/*
		 * Verify that we have access to this set of domains.
		 */
		if (!domainset_valid(dset, domain)) {
			error = EINVAL;
			goto out;
		}
		/*
		 * If applying prefer we keep the current set as the fallback.
		 */
		if (domain->ds_policy == DOMAINSET_POLICY_PREFER)
			DOMAINSET_COPY(&set->cs_domain->ds_mask,
			    &domain->ds_mask);
		/*
		 * Determine whether we can apply this set of domains and
		 * how many new domain structures it will require.
		 */
		domainset_copy(domain, &temp);
		needed = 0;
		error = cpuset_testupdate_domain(set, &temp, set->cs_domain,
		    &needed, 0);
		if (error)
			goto out;
		if (ndomains >= needed)
			break;

		/* Dropping the lock; we'll need to re-evaluate again. */
		mtx_unlock_spin(&cpuset_lock);
		domainset_freelist_add(&domains, needed - ndomains);
		ndomains = needed;
		mtx_lock_spin(&cpuset_lock);
	}
	dset = set->cs_domain;
	cpuset_update_domain(set, domain, dset, &domains);
out:
	mtx_unlock_spin(&cpuset_lock);
	domainset_freelist_free(&domains);
	if (error == 0)
		domainset_notify();

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		td = tdfind(id, -1);
		if (td == NULL)
			return (ESRCH);
		p = td->td_proc;
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
	case CPU_WHICH_DOMAIN:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}
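
/*
 * A sketch of the caller contract for the PID/TID cases (see
 * kern_cpuset_getid() below for a real caller): the returned proc is
 * locked and must be unlocked by the caller.
 *
 *	error = cpuset_which(CPU_WHICH_TID, -1, &p, &td, &set);
 *	if (error)
 *		return (error);
 *	... 'td' is curthread here and 'p' is locked ...
 *	PROC_UNLOCK(p);
 */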

static int
cpuset_testshadow(struct cpuset *set, const cpuset_t *mask,
    const struct domainset *domain)
{
	struct cpuset *parent;
	struct domainset *dset;

	parent = cpuset_getbase(set);
	/*
	 * If we are restricting a cpu mask it must be a subset of the
	 * parent or invalid CPUs have been specified.
	 */
	if (mask != NULL && !CPU_SUBSET(&parent->cs_mask, mask))
		return (EINVAL);

	/*
	 * If we are restricting a domain mask it must be a subset of the
	 * parent or invalid domains have been specified.
	 */
	dset = parent->cs_domain;
	if (domain != NULL && !domainset_valid(dset, domain))
		return (EINVAL);

	return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'nset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset **nsetp,
    const cpuset_t *mask, const struct domainset *domain,
    struct setlist *cpusets, struct domainlist *domains)
{
	struct cpuset *parent;
	struct cpuset *nset;
	struct domainset *dset;
	struct domainset *d;
	int error;

	error = cpuset_testshadow(set, mask, domain);
	if (error)
		return (error);

	parent = cpuset_getbase(set);
	dset = parent->cs_domain;
	if (mask == NULL)
		mask = &set->cs_mask;
	if (domain != NULL)
		d = domainset_shadow(dset, domain, domains);
	else
		d = set->cs_domain;
	nset = LIST_FIRST(cpusets);
	error = cpuset_init(nset, parent, mask, d, CPUSET_INVALID);
	if (error == 0) {
		LIST_REMOVE(nset, cs_link);
		*nsetp = nset;
	}
	return (error);
}

static struct cpuset *
cpuset_update_thread(struct thread *td, struct cpuset *nset)
{
	struct cpuset *tdset;

	tdset = td->td_cpuset;
	td->td_cpuset = nset;
	td->td_domain.dr_policy = nset->cs_domain;
	sched_affinity(td);

	return (tdset);
}

static int
cpuset_setproc_test_maskthread(struct cpuset *tdset, cpuset_t *mask,
    struct domainset *domain)
{
	struct cpuset *parent;

	parent = cpuset_getbase(tdset);
	if (mask == NULL)
		mask = &tdset->cs_mask;
	if (domain == NULL)
		domain = tdset->cs_domain;
	return cpuset_testshadow(parent, mask, domain);
}

static int
cpuset_setproc_maskthread(struct cpuset *tdset, cpuset_t *mask,
    struct domainset *domain, struct cpuset **nsetp,
    struct setlist *freelist, struct domainlist *domainlist)
{
	struct cpuset *parent;

	parent = cpuset_getbase(tdset);
	if (mask == NULL)
		mask = &tdset->cs_mask;
	if (domain == NULL)
		domain = tdset->cs_domain;
	return cpuset_shadow(parent, nsetp, mask, domain, freelist,
	    domainlist);
}

static int
cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set,
    cpuset_t *mask, struct domainset *domain)
{
	struct cpuset *parent;

	parent = cpuset_getbase(tdset);

	/*
	 * If the thread restricted its mask then apply that same
	 * restriction to the new set, otherwise take it wholesale.
	 */
	if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) {
		CPU_AND(mask, &tdset->cs_mask, &set->cs_mask);
	} else
		CPU_COPY(&set->cs_mask, mask);

	/*
	 * If the thread restricted the domain then we apply the
	 * restriction to the new set but retain the policy.
	 */
	if (tdset->cs_domain != parent->cs_domain) {
		domainset_copy(tdset->cs_domain, domain);
		DOMAINSET_AND(&domain->ds_mask, &set->cs_domain->ds_mask);
	} else
		domainset_copy(set->cs_domain, domain);

	if (CPU_EMPTY(mask) || DOMAINSET_EMPTY(&domain->ds_mask))
		return (EDEADLK);

	return (0);
}

static int
cpuset_setproc_test_setthread(struct cpuset *tdset, struct cpuset *set)
{
	struct domainset domain;
	cpuset_t mask;

	if (tdset->cs_id != CPUSET_INVALID)
		return (0);
	return cpuset_setproc_setthread_mask(tdset, set, &mask, &domain);
}

static int
cpuset_setproc_setthread(struct cpuset *tdset, struct cpuset *set,
    struct cpuset **nsetp, struct setlist *freelist,
    struct domainlist *domainlist)
{
	struct domainset domain;
	cpuset_t mask;
	int error;

	/*
	 * If we're replacing on a thread that has not constrained the
	 * original set we can simply accept the new set.
	 */
	if (tdset->cs_id != CPUSET_INVALID) {
		*nsetp = cpuset_ref(set);
		return (0);
	}
	error = cpuset_setproc_setthread_mask(tdset, set, &mask, &domain);
	if (error)
		return (error);

	return cpuset_shadow(set, nsetp, &mask, &domain, freelist,
	    domainlist);
}

static int
cpuset_setproc_newbase(struct thread *td, struct cpuset *set,
    struct cpuset *nroot, struct cpuset **nsetp,
    struct setlist *cpusets, struct domainlist *domainlist)
{
	struct domainset ndomain;
	cpuset_t nmask;
	struct cpuset *pbase;
	int error;

	pbase = cpuset_getbase(td->td_cpuset);

	/* Copy process mask, then further apply the new root mask. */
	CPU_AND(&nmask, &pbase->cs_mask, &nroot->cs_mask);

	domainset_copy(pbase->cs_domain, &ndomain);
	DOMAINSET_AND(&ndomain.ds_mask, &set->cs_domain->ds_mask);

	/* Policy is too restrictive, will not work. */
	if (CPU_EMPTY(&nmask) || DOMAINSET_EMPTY(&ndomain.ds_mask))
		return (EDEADLK);

	/*
	 * Remove pbase from the freelist in advance, it'll be pushed to
	 * cpuset_ids on success.  We assume here that cpuset_create() will not
	 * touch pbase on failure, and we just enqueue it back to the freelist
	 * to remain in a consistent state.
	 */
	pbase = LIST_FIRST(cpusets);
	LIST_REMOVE(pbase, cs_link);
	error = cpuset_create(&pbase, set, &nmask);
	if (error != 0) {
		LIST_INSERT_HEAD(cpusets, pbase, cs_link);
		return (error);
	}

	/* Duplicates some work from above... oh well. */
	pbase->cs_domain = domainset_shadow(set->cs_domain, &ndomain,
	    domainlist);
	*nsetp = pbase;
	return (0);
}

/*
 * Handle four cases for updating an entire process.
 *
 * 1) Set is non-null and the process is not rebasing onto a new root.  This
 *    reparents all anonymous sets to the provided set and replaces all
 *    non-anonymous td_cpusets with the provided set.
 * 2) Set is non-null and the process is rebasing onto a new root.  This
 *    creates a new base set if the process previously had its own base set,
 *    then reparents all anonymous sets either to that set or the provided set
 *    if one was not created.  Non-anonymous sets are similarly replaced.
 * 3) Mask is non-null.  This replaces or creates anonymous sets for every
 *    thread with the existing base as a parent.
 * 4) domain is non-null.  This creates anonymous sets for every thread
 *    and replaces the domain set.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask,
    struct domainset *domain, bool rebase)
{
	struct setlist freelist;
	struct setlist droplist;
	struct domainlist domainlist;
	struct cpuset *base, *nset, *nroot, *tdroot;
	struct thread *td;
	struct proc *p;
	int needed;
	int nfree;
	int error;

	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	cpuset_freelist_init(&freelist, 1);
	domainset_freelist_init(&domainlist, 1);
	LIST_INIT(&droplist);
	nfree = 0;
	base = set;
	nroot = NULL;
	if (set != NULL)
		nroot = cpuset_getroot(set);
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		tdroot = cpuset_getroot(td->td_cpuset);
		needed = p->p_numthreads;
		if (set != NULL && rebase && tdroot != nroot)
			needed++;
		if (nfree >= needed)
			break;
		PROC_UNLOCK(p);
		if (nfree < needed) {
			cpuset_freelist_add(&freelist, needed - nfree);
			domainset_freelist_add(&domainlist, needed - nfree);
			nfree = needed;
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * If we're changing roots and the root set is what has been specified
	 * as the parent, then we'll check if the process was previously using
	 * the root set and, if it wasn't, create a new base with the process's
	 * mask applied to it.
	 *
	 * If the new root is incompatible with the existing mask, then we allow
	 * the process to take on the new root if and only if they have
	 * privilege to widen their mask anyways.  Unprivileged processes get
	 * rejected with EDEADLK.
	 */
	if (set != NULL && rebase && nroot != tdroot) {
		cpusetid_t base_id, root_id;

		root_id = td->td_ucred->cr_prison->pr_cpuset->cs_id;
		base_id = cpuset_getbase(td->td_cpuset)->cs_id;

		if (base_id != root_id) {
			error = cpuset_setproc_newbase(td, set, nroot, &base,
			    &freelist, &domainlist);
			if (error == EDEADLK &&
			    priv_check(td, PRIV_SCHED_CPUSET) == 0)
				error = 0;
			if (error != 0)
				goto unlock_out;
		}
	}

	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (set != NULL)
			error = cpuset_setproc_test_setthread(td->td_cpuset,
			    base);
		else
			error = cpuset_setproc_test_maskthread(td->td_cpuset,
			    mask, domain);
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}

	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (set != NULL)
			error = cpuset_setproc_setthread(td->td_cpuset, base,
			    &nset, &freelist, &domainlist);
		else
			error = cpuset_setproc_maskthread(td->td_cpuset, mask,
			    domain, &nset, &freelist, &domainlist);
		if (error) {
			thread_unlock(td);
			break;
		}
		cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset));
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	if (base != NULL && base != set)
		cpuset_rel(base);
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	cpuset_freelist_free(&freelist);
	domainset_freelist_free(&domainlist);
	return (error);
}

static int
bitset_strprint(char *buf, size_t bufsiz, const struct bitset *set, int setlen)
{
	size_t bytes;
	int i, once;
	char *p;

	once = 0;
	p = buf;
	for (i = 0; i < __bitset_words(setlen); i++) {
		if (once != 0) {
			if (bufsiz < 1)
				return (0);
			*p = ',';
			p++;
			bufsiz--;
		} else
			once = 1;
		if (bufsiz < sizeof(__STRING(ULONG_MAX)))
			return (0);
		bytes = snprintf(p, bufsiz, "%lx", set->__bits[i]);
		p += bytes;
		bufsiz -= bytes;
	}
	return (p - buf);
}

static int
bitset_strscan(struct bitset *set, int setlen, const char *buf)
{
	int i, ret;
	const char *p;

	BIT_ZERO(setlen, set);
	p = buf;
	for (i = 0; i < __bitset_words(setlen); i++) {
		if (*p == ',') {
			p++;
			continue;
		}
		ret = sscanf(p, "%lx", &set->__bits[i]);
		if (ret == 0 || ret == -1)
			break;
		while (isxdigit(*p))
			p++;
	}
	return (p - buf);
}

/*
 * Return a string representing a valid layout for a cpuset_t object.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
char *
cpusetobj_strprint(char *buf, const cpuset_t *set)
{

	bitset_strprint(buf, CPUSETBUFSIZ, (const struct bitset *)set,
	    CPU_SETSIZE);
	return (buf);
}

/*
 * Build a valid cpuset_t object from a string representation.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
int
cpusetobj_strscan(cpuset_t *set, const char *buf)
{
	char p;

	if (strlen(buf) > CPUSETBUFSIZ - 1)
		return (-1);

	p = buf[bitset_strscan((struct bitset *)set, CPU_SETSIZE, buf)];
	if (p != '\0')
		return (-1);

	return (0);
}
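
/*
 * For example (assuming 64-bit longs and a CPU_SETSIZE of 256), a set
 * containing only cpus 0 and 1 round-trips through the routines above as
 * the string "3,0,0,0": one hex word per long in the set, lowest numbered
 * cpus in the first word.
 *
 *	char buf[CPUSETBUFSIZ];
 *	cpuset_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	CPU_SET(1, &set);
 *	cpusetobj_strprint(buf, &set);		(buf now holds "3,0,0,0")
 *	error = cpusetobj_strscan(&set, buf);	(parses it back, returns 0)
 */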

/*
 * Handle a domainset specifier in the sysctl tree.  A pointer to a pointer
 * to a domainset is in arg1.  If the user specifies a valid domainset the
 * pointer is updated.
 *
 * Format is:
 * hex mask word 0,hex mask word 1,...:decimal policy:decimal preferred
 */
int
sysctl_handle_domainset(SYSCTL_HANDLER_ARGS)
{
	char buf[DOMAINSETBUFSIZ];
	struct domainset *dset;
	struct domainset key;
	int policy, prefer, error;
	char *p;

	dset = *(struct domainset **)arg1;
	error = 0;

	if (dset != NULL) {
		p = buf + bitset_strprint(buf, DOMAINSETBUFSIZ,
		    (const struct bitset *)&dset->ds_mask, DOMAINSET_SETSIZE);
		sprintf(p, ":%d:%d", dset->ds_policy, dset->ds_prefer);
	} else
		sprintf(buf, "<NULL>");
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/*
	 * Read in and validate the string.
	 */
	memset(&key, 0, sizeof(key));
	p = &buf[bitset_strscan((struct bitset *)&key.ds_mask,
	    DOMAINSET_SETSIZE, buf)];
	if (p == buf)
		return (EINVAL);
	if (sscanf(p, ":%d:%d", &policy, &prefer) != 2)
		return (EINVAL);
	key.ds_policy = policy;
	key.ds_prefer = prefer;

	/* domainset_create() validates the policy. */
	dset = domainset_create(&key);
	if (dset == NULL)
		return (EINVAL);
	*(struct domainset **)arg1 = dset;

	return (error);
}
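
/*
 * For example (illustrative; policy constants per <sys/domainset.h>, where
 * DOMAINSET_POLICY_PREFER is 3), on a machine with at least two memory
 * domains, writing the string "3:3:1" to such a sysctl requests a PREFER
 * policy with a fallback mask of domains 0 and 1 and domain 1 preferred.
 * Reading the sysctl back produces a string in the same
 * mask:policy:prefer format.
 */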

/*
 * Apply an anonymous mask or a domain to a single thread.
 */
static int
_cpuset_setthread(lwpid_t id, cpuset_t *mask, struct domainset *domain)
{
	struct setlist cpusets;
	struct domainlist domainlist;
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	cpuset_freelist_init(&cpusets, 1);
	domainset_freelist_init(&domainlist, domain != NULL);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, &nset, mask, domain,
	    &cpusets, &domainlist);
	if (error == 0)
		set = cpuset_update_thread(td, nset);
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	cpuset_freelist_free(&cpusets);
	domainset_freelist_free(&domainlist);
	return (error);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{

	return _cpuset_setthread(id, mask, NULL);
}

/*
 * Apply new cpumask to the ithread.
 */
int
cpuset_setithread(lwpid_t id, int cpu)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
	if (cpu == NOCPU)
		CPU_COPY(cpuset_root, &mask);
	else
		CPU_SET(cpu, &mask);
	return _cpuset_setthread(id, &mask, NULL);
}

/*
 * Initialize static domainsets after NUMA information is available.  This is
 * called before memory allocators are initialized.
 */
void
domainset_init(void)
{
	struct domainset *dset;
	int i;

	dset = &domainset_firsttouch;
	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
	dset->ds_policy = DOMAINSET_POLICY_FIRSTTOUCH;
	dset->ds_prefer = -1;
	_domainset_create(dset, NULL);

	dset = &domainset_interleave;
	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
	dset->ds_policy = DOMAINSET_POLICY_INTERLEAVE;
	dset->ds_prefer = -1;
	_domainset_create(dset, NULL);

	dset = &domainset_roundrobin;
	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
	dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
	dset->ds_prefer = -1;
	_domainset_create(dset, NULL);

	for (i = 0; i < vm_ndomains; i++) {
		dset = &domainset_fixed[i];
		DOMAINSET_ZERO(&dset->ds_mask);
		DOMAINSET_SET(i, &dset->ds_mask);
		dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
		_domainset_create(dset, NULL);

		dset = &domainset_prefer[i];
		DOMAINSET_COPY(&all_domains, &dset->ds_mask);
		dset->ds_policy = DOMAINSET_POLICY_PREFER;
		dset->ds_prefer = i;
		_domainset_create(dset, NULL);
	}
}

/*
 * Define the domainsets for cpuset 0, 1 and cpuset 2.
 */
void
domainset_zero(void)
{
	struct domainset *dset, *tmp;

	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);

	domainset0 = &domainset_firsttouch;
	curthread->td_domain.dr_policy = domainset0;

	domainset2 = &domainset_interleave;
	kernel_object->domain.dr_policy = domainset2;

	/* Remove empty domains from the global policies. */
	LIST_FOREACH_SAFE(dset, &cpuset_domains, ds_link, tmp)
		if (domainset_empty_vm(dset))
			LIST_REMOVE(dset, ds_link);
}
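
/*
 * Kernel consumers normally name the canonical sets built above through
 * the DOMAINSET_FIXED()/DOMAINSET_PREF()/DOMAINSET_RR() macros from
 * <sys/domainset.h>.  An illustrative allocation that prefers domain 0
 * but may fall back to any other domain (a sketch, not from this file):
 *
 *	buf = malloc_domainset(size, M_TEMP, DOMAINSET_PREF(0), M_WAITOK);
 */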

/*
 * Creates system-wide cpusets and the cpuset for thread0 including three
 * sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 * 2 - The kernel set which allows restriction and policy to be applied only
 *     to kernel threads and the kernel_object.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int i;
	int error __unused;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_CACHE, 0);
	domainset_zone = uma_zcreate("domainset", sizeof(struct domainset),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

	/*
	 * Create the root system set (0) for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_COPY(&all_cpus, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = CPU_SET_ROOT | CPU_SET_RDONLY;
	set->cs_domain = domainset0;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;

	/*
	 * Now derive a default (1), modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	error = cpuset_init(set, cpuset_zero, NULL, NULL, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	cpuset_default = set;

	/*
	 * Create the kernel set (2).
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	error = cpuset_init(set, cpuset_zero, NULL, NULL, 2);
	KASSERT(error == 0, ("Error creating kernel set: %d\n", error));
	set->cs_domain = domainset2;
	cpuset_kernel = set;

	/*
	 * Initialize the unit allocator.  0 through 2 are allocated above.
	 */
	cpuset_unr = new_unrhdr(3, INT_MAX, NULL);

	/*
	 * If MD code has not initialized per-domain cpusets, place all
	 * CPUs in domain 0.
	 */
	for (i = 0; i < MAXMEMDOM; i++)
		if (!CPU_EMPTY(&cpuset_domain[i]))
			goto domains_set;
	CPU_COPY(&all_cpus, &cpuset_domain[0]);
domains_set:

	return (cpuset_default);
}

void
cpuset_kernthread(struct thread *td)
{
	struct cpuset *set;

	thread_lock(td);
	set = td->td_cpuset;
	td->td_cpuset = cpuset_ref(cpuset_kernel);
	thread_unlock(td);
	cpuset_rel(set);
}

/*
 * Create a cpuset, which would be cpuset_create() but
 * mark the new 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp locked with a reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	set = NULL;
	error = cpuset_create(&set, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(set != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set->cs_flags |= CPU_SET_ROOT;
	*setp = set;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL, NULL, true);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * In Capability mode, the only accesses that are permitted are to the current
 * thread and process' CPU and domain sets.
 */
static int
cpuset_check_capabilities(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id)
{
	if (IN_CAPABILITY_MODE(td)) {
		if (level != CPU_LEVEL_WHICH)
			return (ECAPMODE);
		if (which != CPU_WHICH_TID && which != CPU_WHICH_PID)
			return (ECAPMODE);
		if (id != -1 &&
		    !(which == CPU_WHICH_TID && id == td->td_tid) &&
		    !(which == CPU_WHICH_PID && id == td->td_proc->p_pid))
			return (ECAPMODE);
	}
	return (0);
}
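
/*
 * An illustrative consequence for a sandboxed userland process (sketch):
 *
 *	cap_enter();
 *	cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);		(allowed: the caller's thread)
 *	cpuset_getaffinity(CPU_LEVEL_ROOT, CPU_WHICH_CPUSET, 1,
 *	    sizeof(mask), &mask);		(rejected with ECAPMODE)
 */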

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	set = NULL;
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL, NULL, false);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{

	return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid));
}

int
kern_cpuset_setid(struct thread *td, cpuwhich_t which,
    id_t id, cpusetid_t setid)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(id, set, NULL, NULL, false);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{

	return (kern_cpuset_getid(td, uap->level, uap->which, uap->id,
	    uap->setid));
}

int
kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, cpusetid_t *setid)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t tmpid;
	int error;

	if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
	case CPU_WHICH_DOMAIN:
		return (EINVAL);
	}
	switch (level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	tmpid = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&tmpid, setid, sizeof(tmpid));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{

	return (kern_cpuset_getaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask));
}

int
kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, cpuset_t *maskp)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_getaffinity(id, which, mask);
			break;
		case CPU_WHICH_DOMAIN:
			if (id < 0 || id >= MAXMEMDOM)
				error = ESRCH;
			else
				CPU_COPY(&cpuset_domain[id], mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0) {
		if (cpusetsize < howmany(CPU_FLS(mask), NBBY)) {
			error = ERANGE;
			goto out;
		}
		size = min(cpusetsize, sizeof(cpuset_t));
		error = copyout(mask, maskp, size);
		if (error != 0)
			goto out;
		if (cpusetsize > size) {
			char *end;
			char *cp;
			int rv;

			end = cp = (char *)&maskp->__bits;
			end += cpusetsize;
			cp += size;
			while (cp != end) {
				rv = subyte(cp, 0);
				if (rv == -1) {
					error = EFAULT;
					goto out;
				}
				cp++;
			}
		}
#ifdef KTRACE
		if (KTRPOINT(td, KTR_STRUCT))
			ktrcpuset(mask, size);
#endif
	}
out:
	free(mask, M_TEMP);
	return (error);
}
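
/*
 * The size handling above permits userland buffers that differ from the
 * kernel's cpuset_t: a larger buffer is accepted and zero-padded, while a
 * smaller one fails with ERANGE only if a set cpu would be truncated.  A
 * conventional caller simply passes its own cpuset_t (sketch):
 *
 *	cpuset_t mask;
 *
 *	error = cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);
 */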
#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{

	return (user_cpuset_setaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask));
}

int
kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	int error;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktrcpuset(mask, sizeof(cpuset_t));
#endif
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	if (CPU_EMPTY(mask))
		return (EDEADLK);
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			return (EINVAL);
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, mask, NULL, false);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_setaffinity(id, which, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
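/*
 * Illustrative userland sketch (not compiled as part of this file): pin the
 * calling thread to CPU 0 with cpuset_setaffinity(2).  An empty mask would
 * fail with EDEADLK, per the check above.  Assumes <sys/param.h>,
 * <sys/cpuset.h>, and <err.h>.
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_setaffinity");
 */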
int
user_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, const cpuset_t *maskp)
{
	cpuset_t *mask;
	int error;
	size_t size;

	size = min(cpusetsize, sizeof(cpuset_t));
	mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(maskp, mask, size);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (cpusetsize > sizeof(cpuset_t)) {
		const char *end, *cp;
		int val;

		end = cp = (const char *)&maskp->__bits;
		end += cpusetsize;
		cp += sizeof(cpuset_t);

		while (cp != end) {
			val = fubyte(cp);
			if (val == -1) {
				error = EFAULT;
				goto out;
			}
			if (val != 0) {
				error = EINVAL;
				goto out;
			}
			cp++;
		}
	}
	error = kern_cpuset_setaffinity(td, level, which, id, mask);

out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getdomain_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		domainsetsize;
	domainset_t	*mask;
	int		*policy;
};
#endif
int
sys_cpuset_getdomain(struct thread *td, struct cpuset_getdomain_args *uap)
{

	return (kern_cpuset_getdomain(td, uap->level, uap->which,
	    uap->id, uap->domainsetsize, uap->mask, uap->policy));
}
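/*
 * Illustrative userland sketch (not compiled as part of this file): read
 * the memory domain mask and allocation policy of the calling thread with
 * cpuset_getdomain(2).  Assumes <sys/param.h>, <sys/cpuset.h>,
 * <sys/domainset.h>, and <err.h>.
 *
 *	domainset_t dmask;
 *	int policy;
 *
 *	if (cpuset_getdomain(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(dmask), &dmask, &policy) != 0)
 *		err(1, "cpuset_getdomain");
 */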
int
kern_cpuset_getdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t domainsetsize, domainset_t *maskp, int *policyp)
{
	struct domainset outset;
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct domainset *dset;
	struct proc *p;
	domainset_t *mask;
	int error;

	if (domainsetsize < sizeof(domainset_t) ||
	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
		return (ERANGE);
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
	bzero(&outset, sizeof(outset));
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		domainset_copy(nset->cs_domain, &outset);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			domainset_copy(ttd->td_cpuset->cs_domain, &outset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				dset = ttd->td_cpuset->cs_domain;
				/* Show all domains in the proc. */
				DOMAINSET_OR(&outset.ds_mask, &dset->ds_mask);
				/* Last policy wins. */
				outset.ds_policy = dset->ds_policy;
				outset.ds_prefer = dset->ds_prefer;
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			domainset_copy(set->cs_domain, &outset);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	/*
	 * Translate prefer into a set containing only the preferred domain,
	 * not the entire fallback set.
	 */
	if (outset.ds_policy == DOMAINSET_POLICY_PREFER) {
		DOMAINSET_ZERO(&outset.ds_mask);
		DOMAINSET_SET(outset.ds_prefer, &outset.ds_mask);
	}
	DOMAINSET_COPY(&outset.ds_mask, mask);
	if (error == 0)
		error = copyout(mask, maskp, domainsetsize);
	if (error == 0)
		if (suword32(policyp, outset.ds_policy) != 0)
			error = EFAULT;
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setdomain_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		domainsetsize;
	domainset_t	*mask;
	int		policy;
};
#endif
int
sys_cpuset_setdomain(struct thread *td, struct cpuset_setdomain_args *uap)
{

	return (kern_cpuset_setdomain(td, uap->level, uap->which,
	    uap->id, uap->domainsetsize, uap->mask, uap->policy));
}
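/*
 * Illustrative userland sketch (not compiled as part of this file): prefer
 * domain 0 for the calling thread's allocations via cpuset_setdomain(2).
 * DOMAINSET_POLICY_PREFER requires exactly one domain in the mask, as
 * enforced below.  Assumes <sys/param.h>, <sys/cpuset.h>,
 * <sys/domainset.h>, and <err.h>.
 *
 *	domainset_t dmask;
 *
 *	DOMAINSET_ZERO(&dmask);
 *	DOMAINSET_SET(0, &dmask);
 *	if (cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(dmask), &dmask, DOMAINSET_POLICY_PREFER) != 0)
 *		err(1, "cpuset_setdomain");
 */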
int
kern_cpuset_setdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t domainsetsize, const domainset_t *maskp, int policy)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	struct domainset domain;
	domainset_t *mask;
	int error;

	if (domainsetsize < sizeof(domainset_t) ||
	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
		return (ERANGE);
	if (policy <= DOMAINSET_POLICY_INVALID ||
	    policy > DOMAINSET_POLICY_MAX)
		return (EINVAL);
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	memset(&domain, 0, sizeof(domain));
	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(maskp, mask, domainsetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (domainsetsize > sizeof(domainset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += domainsetsize;
		cp += sizeof(domainset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	if (DOMAINSET_EMPTY(mask)) {
		error = EDEADLK;
		goto out;
	}
	DOMAINSET_COPY(mask, &domain.ds_mask);
	domain.ds_policy = policy;

	/*
	 * Sanitize the provided mask.
	 */
	if (!DOMAINSET_SUBSET(&all_domains, &domain.ds_mask)) {
		error = EINVAL;
		goto out;
	}

	/* Translate preferred policy into a mask and fallback. */
	if (policy == DOMAINSET_POLICY_PREFER) {
		/* Only support a single preferred domain. */
		if (DOMAINSET_COUNT(&domain.ds_mask) != 1) {
			error = EINVAL;
			goto out;
		}
		domain.ds_prefer = DOMAINSET_FFS(&domain.ds_mask) - 1;
		/* This will be constrained by domainset_shadow(). */
		DOMAINSET_COPY(&all_domains, &domain.ds_mask);
	}

	/*
	 * When given an impossible policy, fall back to interleaving
	 * across all domains.
	 */
	if (domainset_empty_vm(&domain))
		domainset_copy(domainset2, &domain);

	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify_domain(nset, &domain);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = _cpuset_setthread(id, NULL, &domain);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, NULL, &domain, false);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify_domain(set, &domain);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB

static void
ddb_display_bitset(const struct bitset *set, int size)
{
	int bit, once;

	for (once = 0, bit = 0; bit < size; bit++) {
		if (CPU_ISSET(bit, set)) {
			if (once == 0) {
				db_printf("%d", bit);
				once = 1;
			} else
				db_printf(",%d", bit);
		}
	}
	if (once == 0)
		db_printf("<none>");
}

void
ddb_display_cpuset(const cpuset_t *set)
{
	ddb_display_bitset((const struct bitset *)set, CPU_SETSIZE);
}

static void
ddb_display_domainset(const domainset_t *set)
{
	ddb_display_bitset((const struct bitset *)set, DOMAINSET_SETSIZE);
}

DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, refcount_load(&set->cs_ref), set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  cpu mask=");
		ddb_display_cpuset(&set->cs_mask);
		db_printf("\n");
		db_printf("  domain policy %d prefer %d mask=",
		    set->cs_domain->ds_policy, set->cs_domain->ds_prefer);
		ddb_display_domainset(&set->cs_domain->ds_mask);
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}

DB_SHOW_COMMAND(domainsets, db_show_domainsets)
{
	struct domainset *set;

	LIST_FOREACH(set, &cpuset_domains, ds_link) {
		db_printf("set=%p policy %d prefer %d cnt %d\n",
		    set, set->ds_policy, set->ds_prefer, set->ds_cnt);
		db_printf("  mask =");
		ddb_display_domainset(&set->ds_mask);
		db_printf("\n");
	}
}
#endif /* DDB */
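/*
 * The DB_SHOW_COMMAND entries above are reachable from the in-kernel
 * debugger, e.g. (illustrative; output format is as printed by the
 * handlers above):
 *
 *	db> show cpusets
 *	db> show domainsets
 */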