/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/ctype.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/capsicum.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/vmmeter.h>
#include <sys/ktrace.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
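/*
 * Illustrative userspace sketch (for exposition only, not part of the kernel
 * build): pin the calling thread to CPU 0 and read back the mask that took
 * effect, following the CPU_WHICH_TID/-1 convention described above.  Error
 * handling is omitted.
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	(void)cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);
 *	(void)cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);
 */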
LIST_HEAD(domainlist, domainset);
struct domainset __read_mostly domainset_firsttouch;
struct domainset __read_mostly domainset_fixed[MAXMEMDOM];
struct domainset __read_mostly domainset_interleave;
struct domainset __read_mostly domainset_prefer[MAXMEMDOM];
struct domainset __read_mostly domainset_roundrobin;

static uma_zone_t cpuset_zone;
static uma_zone_t domainset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct domainlist cpuset_domains;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero, *cpuset_default, *cpuset_kernel;
static struct domainset *domainset0, *domainset2;
u_int cpusetsizemin = 1;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD,
    SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");

/* Return the minimum size of cpuset_t allowed by the kernel */
SYSCTL_UINT(_kern_sched, OID_AUTO, cpusetsizemin,
    CTLFLAG_RD | CTLFLAG_CAPRD, &cpusetsizemin, 0,
    "The minimum size of cpuset_t allowed by the kernel");

cpuset_t *cpuset_root;
cpuset_t cpuset_domain[MAXMEMDOM];

static int cpuset_which2(cpuwhich_t *, id_t, struct proc **, struct thread **,
    struct cpuset **);
static int domainset_valid(const struct domainset *, const struct domainset *);

/*
 * Find the first non-anonymous set starting from 'set'.
 */
static struct cpuset *
cpuset_getbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.
 */
static struct cpuset *
cpuset_getroot(struct cpuset *set)
{

	while ((set->cs_flags & CPU_SET_ROOT) == 0 && set->cs_parent != NULL)
		set = set->cs_parent;
	return (set);
}

/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.
181 */ 182 struct cpuset * 183 cpuset_ref(struct cpuset *set) 184 { 185 186 refcount_acquire(&set->cs_ref); 187 return (set); 188 } 189 190 /* 191 * Walks up the tree from 'set' to find the root. Returns the root 192 * referenced. 193 */ 194 static struct cpuset * 195 cpuset_refroot(struct cpuset *set) 196 { 197 198 return (cpuset_ref(cpuset_getroot(set))); 199 } 200 201 /* 202 * Find the first non-anonymous set starting from 'set'. Returns this set 203 * referenced. May return the passed in set with an extra ref if it is 204 * not anonymous. 205 */ 206 static struct cpuset * 207 cpuset_refbase(struct cpuset *set) 208 { 209 210 return (cpuset_ref(cpuset_getbase(set))); 211 } 212 213 /* 214 * Release a reference in a context where it is safe to allocate. 215 */ 216 void 217 cpuset_rel(struct cpuset *set) 218 { 219 cpusetid_t id; 220 221 if (refcount_release_if_not_last(&set->cs_ref)) 222 return; 223 mtx_lock_spin(&cpuset_lock); 224 if (!refcount_release(&set->cs_ref)) { 225 mtx_unlock_spin(&cpuset_lock); 226 return; 227 } 228 LIST_REMOVE(set, cs_siblings); 229 id = set->cs_id; 230 if (id != CPUSET_INVALID) 231 LIST_REMOVE(set, cs_link); 232 mtx_unlock_spin(&cpuset_lock); 233 cpuset_rel(set->cs_parent); 234 uma_zfree(cpuset_zone, set); 235 if (id != CPUSET_INVALID) 236 free_unr(cpuset_unr, id); 237 } 238 239 /* 240 * Deferred release must be used when in a context that is not safe to 241 * allocate/free. This places any unreferenced sets on the list 'head'. 242 */ 243 static void 244 cpuset_rel_defer(struct setlist *head, struct cpuset *set) 245 { 246 247 if (refcount_release_if_not_last(&set->cs_ref)) 248 return; 249 mtx_lock_spin(&cpuset_lock); 250 if (!refcount_release(&set->cs_ref)) { 251 mtx_unlock_spin(&cpuset_lock); 252 return; 253 } 254 LIST_REMOVE(set, cs_siblings); 255 if (set->cs_id != CPUSET_INVALID) 256 LIST_REMOVE(set, cs_link); 257 LIST_INSERT_HEAD(head, set, cs_link); 258 mtx_unlock_spin(&cpuset_lock); 259 } 260 261 /* 262 * Complete a deferred release. Removes the set from the list provided to 263 * cpuset_rel_defer. 264 */ 265 static void 266 cpuset_rel_complete(struct cpuset *set) 267 { 268 cpusetid_t id; 269 270 id = set->cs_id; 271 LIST_REMOVE(set, cs_link); 272 cpuset_rel(set->cs_parent); 273 uma_zfree(cpuset_zone, set); 274 if (id != CPUSET_INVALID) 275 free_unr(cpuset_unr, id); 276 } 277 278 /* 279 * Find a set based on an id. Returns it with a ref. 280 */ 281 static struct cpuset * 282 cpuset_lookup(cpusetid_t setid, struct thread *td) 283 { 284 struct cpuset *set; 285 286 if (setid == CPUSET_INVALID) 287 return (NULL); 288 mtx_lock_spin(&cpuset_lock); 289 LIST_FOREACH(set, &cpuset_ids, cs_link) 290 if (set->cs_id == setid) 291 break; 292 if (set) 293 cpuset_ref(set); 294 mtx_unlock_spin(&cpuset_lock); 295 296 KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__)); 297 if (set != NULL && jailed(td->td_ucred)) { 298 struct cpuset *jset, *tset; 299 300 jset = td->td_ucred->cr_prison->pr_cpuset; 301 for (tset = set; tset != NULL; tset = tset->cs_parent) 302 if (tset == jset) 303 break; 304 if (tset == NULL) { 305 cpuset_rel(set); 306 set = NULL; 307 } 308 } 309 310 return (set); 311 } 312 313 /* 314 * Initialize a set in the space provided in 'set' with the provided parameters. 315 * The set is returned with a single ref. May return EDEADLK if the set 316 * will have no valid cpu based on restrictions from the parent. 
317 */ 318 static int 319 cpuset_init(struct cpuset *set, struct cpuset *parent, 320 const cpuset_t *mask, struct domainset *domain, cpusetid_t id) 321 { 322 323 if (domain == NULL) 324 domain = parent->cs_domain; 325 if (mask == NULL) 326 mask = &parent->cs_mask; 327 if (!CPU_OVERLAP(&parent->cs_mask, mask)) 328 return (EDEADLK); 329 /* The domain must be prepared ahead of time. */ 330 if (!domainset_valid(parent->cs_domain, domain)) 331 return (EDEADLK); 332 CPU_COPY(mask, &set->cs_mask); 333 LIST_INIT(&set->cs_children); 334 refcount_init(&set->cs_ref, 1); 335 set->cs_flags = 0; 336 mtx_lock_spin(&cpuset_lock); 337 set->cs_domain = domain; 338 CPU_AND(&set->cs_mask, &set->cs_mask, &parent->cs_mask); 339 set->cs_id = id; 340 set->cs_parent = cpuset_ref(parent); 341 LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings); 342 if (set->cs_id != CPUSET_INVALID) 343 LIST_INSERT_HEAD(&cpuset_ids, set, cs_link); 344 mtx_unlock_spin(&cpuset_lock); 345 346 return (0); 347 } 348 349 /* 350 * Create a new non-anonymous set with the requested parent and mask. May 351 * return failures if the mask is invalid or a new number can not be 352 * allocated. 353 * 354 * If *setp is not NULL, then it will be used as-is. The caller must take 355 * into account that *setp will be inserted at the head of cpuset_ids and 356 * plan any potentially conflicting cs_link usage accordingly. 357 */ 358 static int 359 cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask) 360 { 361 struct cpuset *set; 362 cpusetid_t id; 363 int error; 364 bool dofree; 365 366 id = alloc_unr(cpuset_unr); 367 if (id == -1) 368 return (ENFILE); 369 dofree = (*setp == NULL); 370 if (*setp != NULL) 371 set = *setp; 372 else 373 *setp = set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); 374 error = cpuset_init(set, parent, mask, NULL, id); 375 if (error == 0) 376 return (0); 377 free_unr(cpuset_unr, id); 378 if (dofree) 379 uma_zfree(cpuset_zone, set); 380 381 return (error); 382 } 383 384 static void 385 cpuset_freelist_add(struct setlist *list, int count) 386 { 387 struct cpuset *set; 388 int i; 389 390 for (i = 0; i < count; i++) { 391 set = uma_zalloc(cpuset_zone, M_ZERO | M_WAITOK); 392 LIST_INSERT_HEAD(list, set, cs_link); 393 } 394 } 395 396 static void 397 cpuset_freelist_init(struct setlist *list, int count) 398 { 399 400 LIST_INIT(list); 401 cpuset_freelist_add(list, count); 402 } 403 404 static void 405 cpuset_freelist_free(struct setlist *list) 406 { 407 struct cpuset *set; 408 409 while ((set = LIST_FIRST(list)) != NULL) { 410 LIST_REMOVE(set, cs_link); 411 uma_zfree(cpuset_zone, set); 412 } 413 } 414 415 static void 416 domainset_freelist_add(struct domainlist *list, int count) 417 { 418 struct domainset *set; 419 int i; 420 421 for (i = 0; i < count; i++) { 422 set = uma_zalloc(domainset_zone, M_ZERO | M_WAITOK); 423 LIST_INSERT_HEAD(list, set, ds_link); 424 } 425 } 426 427 static void 428 domainset_freelist_init(struct domainlist *list, int count) 429 { 430 431 LIST_INIT(list); 432 domainset_freelist_add(list, count); 433 } 434 435 static void 436 domainset_freelist_free(struct domainlist *list) 437 { 438 struct domainset *set; 439 440 while ((set = LIST_FIRST(list)) != NULL) { 441 LIST_REMOVE(set, ds_link); 442 uma_zfree(domainset_zone, set); 443 } 444 } 445 446 /* Copy a domainset preserving mask and policy. 
*/ 447 static void 448 domainset_copy(const struct domainset *from, struct domainset *to) 449 { 450 451 DOMAINSET_COPY(&from->ds_mask, &to->ds_mask); 452 to->ds_policy = from->ds_policy; 453 to->ds_prefer = from->ds_prefer; 454 } 455 456 /* Return 1 if mask and policy are equal, otherwise 0. */ 457 static int 458 domainset_equal(const struct domainset *one, const struct domainset *two) 459 { 460 461 return (DOMAINSET_CMP(&one->ds_mask, &two->ds_mask) == 0 && 462 one->ds_policy == two->ds_policy && 463 one->ds_prefer == two->ds_prefer); 464 } 465 466 /* Return 1 if child is a valid subset of parent. */ 467 static int 468 domainset_valid(const struct domainset *parent, const struct domainset *child) 469 { 470 if (child->ds_policy != DOMAINSET_POLICY_PREFER) 471 return (DOMAINSET_SUBSET(&parent->ds_mask, &child->ds_mask)); 472 return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask)); 473 } 474 475 static int 476 domainset_restrict(const struct domainset *parent, 477 const struct domainset *child) 478 { 479 if (child->ds_policy != DOMAINSET_POLICY_PREFER) 480 return (DOMAINSET_OVERLAP(&parent->ds_mask, &child->ds_mask)); 481 return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask)); 482 } 483 484 /* 485 * Lookup or create a domainset. The key is provided in ds_mask and 486 * ds_policy. If the domainset does not yet exist the storage in 487 * 'domain' is used to insert. Otherwise this storage is freed to the 488 * domainset_zone and the existing domainset is returned. 489 */ 490 static struct domainset * 491 _domainset_create(struct domainset *domain, struct domainlist *freelist) 492 { 493 struct domainset *ndomain; 494 int i, j; 495 496 KASSERT(domain->ds_cnt <= vm_ndomains, 497 ("invalid domain count in domainset %p", domain)); 498 KASSERT(domain->ds_policy != DOMAINSET_POLICY_PREFER || 499 domain->ds_prefer < vm_ndomains, 500 ("invalid preferred domain in domains %p", domain)); 501 502 mtx_lock_spin(&cpuset_lock); 503 LIST_FOREACH(ndomain, &cpuset_domains, ds_link) 504 if (domainset_equal(ndomain, domain)) 505 break; 506 /* 507 * If the domain does not yet exist we insert it and initialize 508 * various iteration helpers which are not part of the key. 509 */ 510 if (ndomain == NULL) { 511 LIST_INSERT_HEAD(&cpuset_domains, domain, ds_link); 512 domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask); 513 for (i = 0, j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++) 514 if (DOMAINSET_ISSET(i, &domain->ds_mask)) 515 domain->ds_order[j++] = i; 516 } 517 mtx_unlock_spin(&cpuset_lock); 518 if (ndomain == NULL) 519 return (domain); 520 if (freelist != NULL) 521 LIST_INSERT_HEAD(freelist, domain, ds_link); 522 else 523 uma_zfree(domainset_zone, domain); 524 return (ndomain); 525 526 } 527 528 /* 529 * Are any of the domains in the mask empty? If so, silently 530 * remove them and update the domainset accordingly. If only empty 531 * domains are present, we must return failure. 532 */ 533 static bool 534 domainset_empty_vm(struct domainset *domain) 535 { 536 domainset_t empty; 537 int i, j; 538 539 DOMAINSET_ZERO(&empty); 540 for (i = 0; i < vm_ndomains; i++) 541 if (VM_DOMAIN_EMPTY(i)) 542 DOMAINSET_SET(i, &empty); 543 if (DOMAINSET_SUBSET(&empty, &domain->ds_mask)) 544 return (true); 545 546 /* Remove empty domains from the set and recompute. 
*/ 547 DOMAINSET_ANDNOT(&domain->ds_mask, &empty); 548 domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask); 549 for (i = j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++) 550 if (DOMAINSET_ISSET(i, &domain->ds_mask)) 551 domain->ds_order[j++] = i; 552 553 /* Convert a PREFER policy referencing an empty domain to RR. */ 554 if (domain->ds_policy == DOMAINSET_POLICY_PREFER && 555 DOMAINSET_ISSET(domain->ds_prefer, &empty)) { 556 domain->ds_policy = DOMAINSET_POLICY_ROUNDROBIN; 557 domain->ds_prefer = -1; 558 } 559 560 return (false); 561 } 562 563 /* 564 * Create or lookup a domainset based on the key held in 'domain'. 565 */ 566 struct domainset * 567 domainset_create(const struct domainset *domain) 568 { 569 struct domainset *ndomain; 570 571 /* 572 * Validate the policy. It must specify a useable policy number with 573 * only valid domains. Preferred must include the preferred domain 574 * in the mask. 575 */ 576 if (domain->ds_policy <= DOMAINSET_POLICY_INVALID || 577 domain->ds_policy > DOMAINSET_POLICY_MAX) 578 return (NULL); 579 if (domain->ds_policy == DOMAINSET_POLICY_PREFER && 580 !DOMAINSET_ISSET(domain->ds_prefer, &domain->ds_mask)) 581 return (NULL); 582 if (!DOMAINSET_SUBSET(&domainset0->ds_mask, &domain->ds_mask)) 583 return (NULL); 584 ndomain = uma_zalloc(domainset_zone, M_WAITOK | M_ZERO); 585 domainset_copy(domain, ndomain); 586 return _domainset_create(ndomain, NULL); 587 } 588 589 /* 590 * Update thread domainset pointers. 591 */ 592 static void 593 domainset_notify(void) 594 { 595 struct thread *td; 596 struct proc *p; 597 598 sx_slock(&allproc_lock); 599 FOREACH_PROC_IN_SYSTEM(p) { 600 PROC_LOCK(p); 601 if (p->p_state == PRS_NEW) { 602 PROC_UNLOCK(p); 603 continue; 604 } 605 FOREACH_THREAD_IN_PROC(p, td) { 606 thread_lock(td); 607 td->td_domain.dr_policy = td->td_cpuset->cs_domain; 608 thread_unlock(td); 609 } 610 PROC_UNLOCK(p); 611 } 612 sx_sunlock(&allproc_lock); 613 kernel_object->domain.dr_policy = cpuset_kernel->cs_domain; 614 } 615 616 /* 617 * Create a new set that is a subset of a parent. 618 */ 619 static struct domainset * 620 domainset_shadow(const struct domainset *pdomain, 621 const struct domainset *domain, struct domainlist *freelist) 622 { 623 struct domainset *ndomain; 624 625 ndomain = LIST_FIRST(freelist); 626 LIST_REMOVE(ndomain, ds_link); 627 628 /* 629 * Initialize the key from the request. 630 */ 631 domainset_copy(domain, ndomain); 632 633 /* 634 * Restrict the key by the parent. 635 */ 636 DOMAINSET_AND(&ndomain->ds_mask, &pdomain->ds_mask); 637 638 return _domainset_create(ndomain, freelist); 639 } 640 641 /* 642 * Recursively check for errors that would occur from applying mask to 643 * the tree of sets starting at 'set'. Checks for sets that would become 644 * empty as well as RDONLY flags. 645 */ 646 static int 647 cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int augment_mask) 648 { 649 struct cpuset *nset; 650 cpuset_t newmask; 651 int error; 652 653 mtx_assert(&cpuset_lock, MA_OWNED); 654 if (set->cs_flags & CPU_SET_RDONLY) 655 return (EPERM); 656 if (augment_mask) { 657 CPU_AND(&newmask, &set->cs_mask, mask); 658 } else 659 CPU_COPY(mask, &newmask); 660 661 if (CPU_EMPTY(&newmask)) 662 return (EDEADLK); 663 error = 0; 664 LIST_FOREACH(nset, &set->cs_children, cs_siblings) 665 if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0) 666 break; 667 return (error); 668 } 669 670 /* 671 * Applies the mask 'mask' without checking for empty sets or permissions. 
672 */ 673 static void 674 cpuset_update(struct cpuset *set, cpuset_t *mask) 675 { 676 struct cpuset *nset; 677 678 mtx_assert(&cpuset_lock, MA_OWNED); 679 CPU_AND(&set->cs_mask, &set->cs_mask, mask); 680 LIST_FOREACH(nset, &set->cs_children, cs_siblings) 681 cpuset_update(nset, &set->cs_mask); 682 683 return; 684 } 685 686 /* 687 * Modify the set 'set' to use a copy of the mask provided. Apply this new 688 * mask to restrict all children in the tree. Checks for validity before 689 * applying the changes. 690 */ 691 static int 692 cpuset_modify(struct cpuset *set, cpuset_t *mask) 693 { 694 struct cpuset *root; 695 int error; 696 697 error = priv_check(curthread, PRIV_SCHED_CPUSET); 698 if (error) 699 return (error); 700 /* 701 * In case we are called from within the jail, 702 * we do not allow modifying the dedicated root 703 * cpuset of the jail but may still allow to 704 * change child sets, including subordinate jails' 705 * roots. 706 */ 707 if ((set->cs_flags & CPU_SET_ROOT) != 0 && 708 jailed(curthread->td_ucred) && 709 set == curthread->td_ucred->cr_prison->pr_cpuset) 710 return (EPERM); 711 /* 712 * Verify that we have access to this set of 713 * cpus. 714 */ 715 if ((set->cs_flags & (CPU_SET_ROOT | CPU_SET_RDONLY)) == CPU_SET_ROOT) { 716 KASSERT(set->cs_parent != NULL, 717 ("jail.cpuset=%d is not a proper child of parent jail's root.", 718 set->cs_id)); 719 720 /* 721 * cpuset_getroot() cannot work here due to how top-level jail 722 * roots are constructed. Top-level jails are parented to 723 * thread0's cpuset (i.e. cpuset 1) rather than the system root. 724 */ 725 root = set->cs_parent; 726 } else { 727 root = cpuset_getroot(set); 728 } 729 mtx_lock_spin(&cpuset_lock); 730 if (root && !CPU_SUBSET(&root->cs_mask, mask)) { 731 error = EINVAL; 732 goto out; 733 } 734 error = cpuset_testupdate(set, mask, 0); 735 if (error) 736 goto out; 737 CPU_COPY(mask, &set->cs_mask); 738 cpuset_update(set, mask); 739 out: 740 mtx_unlock_spin(&cpuset_lock); 741 742 return (error); 743 } 744 745 /* 746 * Recursively check for errors that would occur from applying mask to 747 * the tree of sets starting at 'set'. Checks for sets that would become 748 * empty as well as RDONLY flags. 749 */ 750 static int 751 cpuset_testupdate_domain(struct cpuset *set, struct domainset *dset, 752 struct domainset *orig, int *count, int augment_mask __unused) 753 { 754 struct cpuset *nset; 755 struct domainset *domain; 756 struct domainset newset; 757 int error; 758 759 mtx_assert(&cpuset_lock, MA_OWNED); 760 if (set->cs_flags & CPU_SET_RDONLY) 761 return (EPERM); 762 domain = set->cs_domain; 763 domainset_copy(domain, &newset); 764 if (!domainset_equal(domain, orig)) { 765 if (!domainset_restrict(domain, dset)) 766 return (EDEADLK); 767 DOMAINSET_AND(&newset.ds_mask, &dset->ds_mask); 768 /* Count the number of domains that are changing. */ 769 (*count)++; 770 } 771 error = 0; 772 LIST_FOREACH(nset, &set->cs_children, cs_siblings) 773 if ((error = cpuset_testupdate_domain(nset, &newset, domain, 774 count, 1)) != 0) 775 break; 776 return (error); 777 } 778 779 /* 780 * Applies the mask 'mask' without checking for empty sets or permissions. 781 */ 782 static void 783 cpuset_update_domain(struct cpuset *set, struct domainset *domain, 784 struct domainset *orig, struct domainlist *domains) 785 { 786 struct cpuset *nset; 787 788 mtx_assert(&cpuset_lock, MA_OWNED); 789 /* 790 * If this domainset has changed from the parent we must calculate 791 * a new set. Otherwise it simply inherits from the parent. 
When 792 * we inherit from the parent we get a new mask and policy. If the 793 * set is modified from the parent we keep the policy and only 794 * update the mask. 795 */ 796 if (set->cs_domain != orig) { 797 orig = set->cs_domain; 798 set->cs_domain = domainset_shadow(domain, orig, domains); 799 } else 800 set->cs_domain = domain; 801 LIST_FOREACH(nset, &set->cs_children, cs_siblings) 802 cpuset_update_domain(nset, set->cs_domain, orig, domains); 803 804 return; 805 } 806 807 /* 808 * Modify the set 'set' to use a copy the domainset provided. Apply this new 809 * mask to restrict all children in the tree. Checks for validity before 810 * applying the changes. 811 */ 812 static int 813 cpuset_modify_domain(struct cpuset *set, struct domainset *domain) 814 { 815 struct domainlist domains; 816 struct domainset temp; 817 struct domainset *dset; 818 struct cpuset *root; 819 int ndomains, needed; 820 int error; 821 822 error = priv_check(curthread, PRIV_SCHED_CPUSET); 823 if (error) 824 return (error); 825 /* 826 * In case we are called from within the jail 827 * we do not allow modifying the dedicated root 828 * cpuset of the jail but may still allow to 829 * change child sets. 830 */ 831 if (jailed(curthread->td_ucred) && 832 set->cs_flags & CPU_SET_ROOT) 833 return (EPERM); 834 domainset_freelist_init(&domains, 0); 835 domain = domainset_create(domain); 836 ndomains = 0; 837 838 mtx_lock_spin(&cpuset_lock); 839 for (;;) { 840 root = cpuset_getroot(set); 841 dset = root->cs_domain; 842 /* 843 * Verify that we have access to this set of domains. 844 */ 845 if (!domainset_valid(dset, domain)) { 846 error = EINVAL; 847 goto out; 848 } 849 /* 850 * If applying prefer we keep the current set as the fallback. 851 */ 852 if (domain->ds_policy == DOMAINSET_POLICY_PREFER) 853 DOMAINSET_COPY(&set->cs_domain->ds_mask, 854 &domain->ds_mask); 855 /* 856 * Determine whether we can apply this set of domains and 857 * how many new domain structures it will require. 858 */ 859 domainset_copy(domain, &temp); 860 needed = 0; 861 error = cpuset_testupdate_domain(set, &temp, set->cs_domain, 862 &needed, 0); 863 if (error) 864 goto out; 865 if (ndomains >= needed) 866 break; 867 868 /* Dropping the lock; we'll need to re-evaluate again. */ 869 mtx_unlock_spin(&cpuset_lock); 870 domainset_freelist_add(&domains, needed - ndomains); 871 ndomains = needed; 872 mtx_lock_spin(&cpuset_lock); 873 } 874 dset = set->cs_domain; 875 cpuset_update_domain(set, domain, dset, &domains); 876 out: 877 mtx_unlock_spin(&cpuset_lock); 878 domainset_freelist_free(&domains); 879 if (error == 0) 880 domainset_notify(); 881 882 return (error); 883 } 884 885 /* 886 * Resolve the 'which' parameter of several cpuset apis. 887 * 888 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid. Also 889 * checks for permission via p_cansched(). 890 * 891 * For WHICH_SET returns a valid set with a new reference. 892 * 893 * -1 may be supplied for any argument to mean the current proc/thread or 894 * the base set of the current thread. May fail with ESRCH/EPERM. 
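 *
 * For example (a sketch of the common case): cpuset_which(CPU_WHICH_PID, -1,
 * &p, &td, &set) returns 0 with 'p' pointing to the locked curproc, 'td'
 * pointing to its first thread, and 'set' left NULL.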
895 */ 896 int 897 cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp, 898 struct cpuset **setp) 899 { 900 struct cpuset *set; 901 struct thread *td; 902 struct proc *p; 903 int error; 904 905 *pp = p = NULL; 906 *tdp = td = NULL; 907 *setp = set = NULL; 908 switch (which) { 909 case CPU_WHICH_PID: 910 if (id == -1) { 911 PROC_LOCK(curproc); 912 p = curproc; 913 break; 914 } 915 if ((p = pfind(id)) == NULL) 916 return (ESRCH); 917 break; 918 case CPU_WHICH_TID: 919 if (id == -1) { 920 PROC_LOCK(curproc); 921 p = curproc; 922 td = curthread; 923 break; 924 } 925 td = tdfind(id, -1); 926 if (td == NULL) 927 return (ESRCH); 928 p = td->td_proc; 929 break; 930 case CPU_WHICH_TIDPID: 931 if (id == -1) { 932 PROC_LOCK(curproc); 933 td = curthread; 934 p = curproc; 935 } else if (id > PID_MAX) { 936 td = tdfind(id, -1); 937 if (td == NULL) 938 return (ESRCH); 939 p = td->td_proc; 940 } else { 941 p = pfind(id); 942 if (p == NULL) 943 return (ESRCH); 944 } 945 break; 946 case CPU_WHICH_CPUSET: 947 if (id == -1) { 948 thread_lock(curthread); 949 set = cpuset_refbase(curthread->td_cpuset); 950 thread_unlock(curthread); 951 } else 952 set = cpuset_lookup(id, curthread); 953 if (set) { 954 *setp = set; 955 return (0); 956 } 957 return (ESRCH); 958 case CPU_WHICH_JAIL: 959 { 960 /* Find `set' for prison with given id. */ 961 struct prison *pr; 962 963 sx_slock(&allprison_lock); 964 pr = prison_find_child(curthread->td_ucred->cr_prison, id); 965 sx_sunlock(&allprison_lock); 966 if (pr == NULL) 967 return (ESRCH); 968 cpuset_ref(pr->pr_cpuset); 969 *setp = pr->pr_cpuset; 970 mtx_unlock(&pr->pr_mtx); 971 return (0); 972 } 973 case CPU_WHICH_IRQ: 974 case CPU_WHICH_DOMAIN: 975 return (0); 976 default: 977 return (EINVAL); 978 } 979 error = p_cansched(curthread, p); 980 if (error) { 981 PROC_UNLOCK(p); 982 return (error); 983 } 984 if (td == NULL) 985 td = FIRST_THREAD_IN_PROC(p); 986 *pp = p; 987 *tdp = td; 988 return (0); 989 } 990 991 static int 992 cpuset_which2(cpuwhich_t *which, id_t id, struct proc **pp, struct thread **tdp, 993 struct cpuset **setp) 994 { 995 996 if (*which == CPU_WHICH_TIDPID) { 997 if (id == -1 || id > PID_MAX) 998 *which = CPU_WHICH_TID; 999 else 1000 *which = CPU_WHICH_PID; 1001 } 1002 return (cpuset_which(*which, id, pp, tdp, setp)); 1003 } 1004 1005 static int 1006 cpuset_testshadow(struct cpuset *set, const cpuset_t *mask, 1007 const struct domainset *domain) 1008 { 1009 struct cpuset *parent; 1010 struct domainset *dset; 1011 1012 parent = cpuset_getbase(set); 1013 /* 1014 * If we are restricting a cpu mask it must be a subset of the 1015 * parent or invalid CPUs have been specified. 1016 */ 1017 if (mask != NULL && !CPU_SUBSET(&parent->cs_mask, mask)) 1018 return (EINVAL); 1019 1020 /* 1021 * If we are restricting a domain mask it must be a subset of the 1022 * parent or invalid domains have been specified. 1023 */ 1024 dset = parent->cs_domain; 1025 if (domain != NULL && !domainset_valid(dset, domain)) 1026 return (EINVAL); 1027 1028 return (0); 1029 } 1030 1031 /* 1032 * Create an anonymous set with the provided mask in the space provided by 1033 * 'nset'. If the passed in set is anonymous we use its parent otherwise 1034 * the new set is a child of 'set'. 
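 *
 * This is how the "private per-thread mask" illusion described at the top of
 * the file is implemented: the resulting anonymous set carries the requested
 * mask but hangs off the same non-anonymous base set as its siblings.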
1035 */ 1036 static int 1037 cpuset_shadow(struct cpuset *set, struct cpuset **nsetp, 1038 const cpuset_t *mask, const struct domainset *domain, 1039 struct setlist *cpusets, struct domainlist *domains) 1040 { 1041 struct cpuset *parent; 1042 struct cpuset *nset; 1043 struct domainset *dset; 1044 struct domainset *d; 1045 int error; 1046 1047 error = cpuset_testshadow(set, mask, domain); 1048 if (error) 1049 return (error); 1050 1051 parent = cpuset_getbase(set); 1052 dset = parent->cs_domain; 1053 if (mask == NULL) 1054 mask = &set->cs_mask; 1055 if (domain != NULL) 1056 d = domainset_shadow(dset, domain, domains); 1057 else 1058 d = set->cs_domain; 1059 nset = LIST_FIRST(cpusets); 1060 error = cpuset_init(nset, parent, mask, d, CPUSET_INVALID); 1061 if (error == 0) { 1062 LIST_REMOVE(nset, cs_link); 1063 *nsetp = nset; 1064 } 1065 return (error); 1066 } 1067 1068 static struct cpuset * 1069 cpuset_update_thread(struct thread *td, struct cpuset *nset) 1070 { 1071 struct cpuset *tdset; 1072 1073 tdset = td->td_cpuset; 1074 td->td_cpuset = nset; 1075 td->td_domain.dr_policy = nset->cs_domain; 1076 sched_affinity(td); 1077 1078 return (tdset); 1079 } 1080 1081 static int 1082 cpuset_setproc_test_maskthread(struct cpuset *tdset, cpuset_t *mask, 1083 struct domainset *domain) 1084 { 1085 struct cpuset *parent; 1086 1087 parent = cpuset_getbase(tdset); 1088 if (mask == NULL) 1089 mask = &tdset->cs_mask; 1090 if (domain == NULL) 1091 domain = tdset->cs_domain; 1092 return cpuset_testshadow(parent, mask, domain); 1093 } 1094 1095 static int 1096 cpuset_setproc_maskthread(struct cpuset *tdset, cpuset_t *mask, 1097 struct domainset *domain, struct cpuset **nsetp, 1098 struct setlist *freelist, struct domainlist *domainlist) 1099 { 1100 struct cpuset *parent; 1101 1102 parent = cpuset_getbase(tdset); 1103 if (mask == NULL) 1104 mask = &tdset->cs_mask; 1105 if (domain == NULL) 1106 domain = tdset->cs_domain; 1107 return cpuset_shadow(parent, nsetp, mask, domain, freelist, 1108 domainlist); 1109 } 1110 1111 static int 1112 cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set, 1113 cpuset_t *mask, struct domainset *domain) 1114 { 1115 struct cpuset *parent; 1116 1117 parent = cpuset_getbase(tdset); 1118 1119 /* 1120 * If the thread restricted its mask then apply that same 1121 * restriction to the new set, otherwise take it wholesale. 1122 */ 1123 if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) { 1124 CPU_AND(mask, &tdset->cs_mask, &set->cs_mask); 1125 } else 1126 CPU_COPY(&set->cs_mask, mask); 1127 1128 /* 1129 * If the thread restricted the domain then we apply the 1130 * restriction to the new set but retain the policy. 
1131 */ 1132 if (tdset->cs_domain != parent->cs_domain) { 1133 domainset_copy(tdset->cs_domain, domain); 1134 DOMAINSET_AND(&domain->ds_mask, &set->cs_domain->ds_mask); 1135 } else 1136 domainset_copy(set->cs_domain, domain); 1137 1138 if (CPU_EMPTY(mask) || DOMAINSET_EMPTY(&domain->ds_mask)) 1139 return (EDEADLK); 1140 1141 return (0); 1142 } 1143 1144 static int 1145 cpuset_setproc_test_setthread(struct cpuset *tdset, struct cpuset *set) 1146 { 1147 struct domainset domain; 1148 cpuset_t mask; 1149 1150 if (tdset->cs_id != CPUSET_INVALID) 1151 return (0); 1152 return cpuset_setproc_setthread_mask(tdset, set, &mask, &domain); 1153 } 1154 1155 static int 1156 cpuset_setproc_setthread(struct cpuset *tdset, struct cpuset *set, 1157 struct cpuset **nsetp, struct setlist *freelist, 1158 struct domainlist *domainlist) 1159 { 1160 struct domainset domain; 1161 cpuset_t mask; 1162 int error; 1163 1164 /* 1165 * If we're replacing on a thread that has not constrained the 1166 * original set we can simply accept the new set. 1167 */ 1168 if (tdset->cs_id != CPUSET_INVALID) { 1169 *nsetp = cpuset_ref(set); 1170 return (0); 1171 } 1172 error = cpuset_setproc_setthread_mask(tdset, set, &mask, &domain); 1173 if (error) 1174 return (error); 1175 1176 return cpuset_shadow(set, nsetp, &mask, &domain, freelist, 1177 domainlist); 1178 } 1179 1180 static int 1181 cpuset_setproc_newbase(struct thread *td, struct cpuset *set, 1182 struct cpuset *nroot, struct cpuset **nsetp, 1183 struct setlist *cpusets, struct domainlist *domainlist) 1184 { 1185 struct domainset ndomain; 1186 cpuset_t nmask; 1187 struct cpuset *pbase; 1188 int error; 1189 1190 pbase = cpuset_getbase(td->td_cpuset); 1191 1192 /* Copy process mask, then further apply the new root mask. */ 1193 CPU_AND(&nmask, &pbase->cs_mask, &nroot->cs_mask); 1194 1195 domainset_copy(pbase->cs_domain, &ndomain); 1196 DOMAINSET_AND(&ndomain.ds_mask, &set->cs_domain->ds_mask); 1197 1198 /* Policy is too restrictive, will not work. */ 1199 if (CPU_EMPTY(&nmask) || DOMAINSET_EMPTY(&ndomain.ds_mask)) 1200 return (EDEADLK); 1201 1202 /* 1203 * Remove pbase from the freelist in advance, it'll be pushed to 1204 * cpuset_ids on success. We assume here that cpuset_create() will not 1205 * touch pbase on failure, and we just enqueue it back to the freelist 1206 * to remain in a consistent state. 1207 */ 1208 pbase = LIST_FIRST(cpusets); 1209 LIST_REMOVE(pbase, cs_link); 1210 error = cpuset_create(&pbase, set, &nmask); 1211 if (error != 0) { 1212 LIST_INSERT_HEAD(cpusets, pbase, cs_link); 1213 return (error); 1214 } 1215 1216 /* Duplicates some work from above... oh well. */ 1217 pbase->cs_domain = domainset_shadow(set->cs_domain, &ndomain, 1218 domainlist); 1219 *nsetp = pbase; 1220 return (0); 1221 } 1222 1223 /* 1224 * Handle four cases for updating an entire process. 1225 * 1226 * 1) Set is non-null and the process is not rebasing onto a new root. This 1227 * reparents all anonymous sets to the provided set and replaces all 1228 * non-anonymous td_cpusets with the provided set. 1229 * 2) Set is non-null and the process is rebasing onto a new root. This 1230 * creates a new base set if the process previously had its own base set, 1231 * then reparents all anonymous sets either to that set or the provided set 1232 * if one was not created. Non-anonymous sets are similarly replaced. 1233 * 3) Mask is non-null. This replaces or creates anonymous sets for every 1234 * thread with the existing base as a parent. 1235 * 4) domain is non-null. 
This creates anonymous sets for every thread 1236 * and replaces the domain set. 1237 * 1238 * This is overly complicated because we can't allocate while holding a 1239 * spinlock and spinlocks must be held while changing and examining thread 1240 * state. 1241 */ 1242 static int 1243 cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask, 1244 struct domainset *domain, bool rebase) 1245 { 1246 struct setlist freelist; 1247 struct setlist droplist; 1248 struct domainlist domainlist; 1249 struct cpuset *base, *nset, *nroot, *tdroot; 1250 struct thread *td; 1251 struct proc *p; 1252 int needed; 1253 int nfree; 1254 int error; 1255 1256 /* 1257 * The algorithm requires two passes due to locking considerations. 1258 * 1259 * 1) Lookup the process and acquire the locks in the required order. 1260 * 2) If enough cpusets have not been allocated release the locks and 1261 * allocate them. Loop. 1262 */ 1263 cpuset_freelist_init(&freelist, 1); 1264 domainset_freelist_init(&domainlist, 1); 1265 nfree = 1; 1266 LIST_INIT(&droplist); 1267 nfree = 0; 1268 base = set; 1269 nroot = NULL; 1270 if (set != NULL) 1271 nroot = cpuset_getroot(set); 1272 for (;;) { 1273 error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset); 1274 if (error) 1275 goto out; 1276 tdroot = cpuset_getroot(td->td_cpuset); 1277 needed = p->p_numthreads; 1278 if (set != NULL && rebase && tdroot != nroot) 1279 needed++; 1280 if (nfree >= needed) 1281 break; 1282 PROC_UNLOCK(p); 1283 if (nfree < needed) { 1284 cpuset_freelist_add(&freelist, needed - nfree); 1285 domainset_freelist_add(&domainlist, needed - nfree); 1286 nfree = needed; 1287 } 1288 } 1289 PROC_LOCK_ASSERT(p, MA_OWNED); 1290 1291 /* 1292 * If we're changing roots and the root set is what has been specified 1293 * as the parent, then we'll check if the process was previously using 1294 * the root set and, if it wasn't, create a new base with the process's 1295 * mask applied to it. 1296 * 1297 * If the new root is incompatible with the existing mask, then we allow 1298 * the process to take on the new root if and only if they have 1299 * privilege to widen their mask anyways. Unprivileged processes get 1300 * rejected with EDEADLK. 1301 */ 1302 if (set != NULL && rebase && nroot != tdroot) { 1303 cpusetid_t base_id, root_id; 1304 1305 root_id = td->td_ucred->cr_prison->pr_cpuset->cs_id; 1306 base_id = cpuset_getbase(td->td_cpuset)->cs_id; 1307 1308 if (base_id != root_id) { 1309 error = cpuset_setproc_newbase(td, set, nroot, &base, 1310 &freelist, &domainlist); 1311 if (error == EDEADLK && 1312 priv_check(td, PRIV_SCHED_CPUSET) == 0) 1313 error = 0; 1314 if (error != 0) 1315 goto unlock_out; 1316 } 1317 } 1318 1319 /* 1320 * Now that the appropriate locks are held and we have enough cpusets, 1321 * make sure the operation will succeed before applying changes. The 1322 * proc lock prevents td_cpuset from changing between calls. 1323 */ 1324 error = 0; 1325 FOREACH_THREAD_IN_PROC(p, td) { 1326 thread_lock(td); 1327 if (set != NULL) 1328 error = cpuset_setproc_test_setthread(td->td_cpuset, 1329 base); 1330 else 1331 error = cpuset_setproc_test_maskthread(td->td_cpuset, 1332 mask, domain); 1333 thread_unlock(td); 1334 if (error) 1335 goto unlock_out; 1336 } 1337 /* 1338 * Replace each thread's cpuset while using deferred release. We 1339 * must do this because the thread lock must be held while operating 1340 * on the thread and this limits the type of operations allowed. 
1341 */ 1342 FOREACH_THREAD_IN_PROC(p, td) { 1343 thread_lock(td); 1344 if (set != NULL) 1345 error = cpuset_setproc_setthread(td->td_cpuset, base, 1346 &nset, &freelist, &domainlist); 1347 else 1348 error = cpuset_setproc_maskthread(td->td_cpuset, mask, 1349 domain, &nset, &freelist, &domainlist); 1350 if (error) { 1351 thread_unlock(td); 1352 break; 1353 } 1354 cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset)); 1355 thread_unlock(td); 1356 } 1357 unlock_out: 1358 PROC_UNLOCK(p); 1359 out: 1360 if (base != NULL && base != set) 1361 cpuset_rel(base); 1362 while ((nset = LIST_FIRST(&droplist)) != NULL) 1363 cpuset_rel_complete(nset); 1364 cpuset_freelist_free(&freelist); 1365 domainset_freelist_free(&domainlist); 1366 return (error); 1367 } 1368 1369 static int 1370 bitset_strprint(char *buf, size_t bufsiz, const struct bitset *set, int setlen) 1371 { 1372 size_t bytes; 1373 int i, once; 1374 char *p; 1375 1376 once = 0; 1377 p = buf; 1378 for (i = 0; i < __bitset_words(setlen); i++) { 1379 if (once != 0) { 1380 if (bufsiz < 1) 1381 return (0); 1382 *p = ','; 1383 p++; 1384 bufsiz--; 1385 } else 1386 once = 1; 1387 if (bufsiz < sizeof(__STRING(ULONG_MAX))) 1388 return (0); 1389 bytes = snprintf(p, bufsiz, "%lx", set->__bits[i]); 1390 p += bytes; 1391 bufsiz -= bytes; 1392 } 1393 return (p - buf); 1394 } 1395 1396 static int 1397 bitset_strscan(struct bitset *set, int setlen, const char *buf) 1398 { 1399 int i, ret; 1400 const char *p; 1401 1402 BIT_ZERO(setlen, set); 1403 p = buf; 1404 for (i = 0; i < __bitset_words(setlen); i++) { 1405 if (*p == ',') { 1406 p++; 1407 continue; 1408 } 1409 ret = sscanf(p, "%lx", &set->__bits[i]); 1410 if (ret == 0 || ret == -1) 1411 break; 1412 while (isxdigit(*p)) 1413 p++; 1414 } 1415 return (p - buf); 1416 } 1417 1418 /* 1419 * Return a string representing a valid layout for a cpuset_t object. 1420 * It expects an incoming buffer at least sized as CPUSETBUFSIZ. 1421 */ 1422 char * 1423 cpusetobj_strprint(char *buf, const cpuset_t *set) 1424 { 1425 1426 bitset_strprint(buf, CPUSETBUFSIZ, (const struct bitset *)set, 1427 CPU_SETSIZE); 1428 return (buf); 1429 } 1430 1431 /* 1432 * Build a valid cpuset_t object from a string representation. 1433 * It expects an incoming buffer at least sized as CPUSETBUFSIZ. 1434 */ 1435 int 1436 cpusetobj_strscan(cpuset_t *set, const char *buf) 1437 { 1438 char p; 1439 1440 if (strlen(buf) > CPUSETBUFSIZ - 1) 1441 return (-1); 1442 1443 p = buf[bitset_strscan((struct bitset *)set, CPU_SETSIZE, buf)]; 1444 if (p != '\0') 1445 return (-1); 1446 1447 return (0); 1448 } 1449 1450 /* 1451 * Handle a domainset specifier in the sysctl tree. A poiner to a pointer to 1452 * a domainset is in arg1. If the user specifies a valid domainset the 1453 * pointer is updated. 
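 *
 * As an illustrative value in the format described below, "3:1:-1" would be
 * parsed as mask word 0x3 (domains 0 and 1), policy 1, and no preferred
 * domain; domainset_create() rejects values whose policy or mask is invalid.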
1454 * 1455 * Format is: 1456 * hex mask word 0,hex mask word 1,...:decimal policy:decimal preferred 1457 */ 1458 int 1459 sysctl_handle_domainset(SYSCTL_HANDLER_ARGS) 1460 { 1461 char buf[DOMAINSETBUFSIZ]; 1462 struct domainset *dset; 1463 struct domainset key; 1464 int policy, prefer, error; 1465 char *p; 1466 1467 dset = *(struct domainset **)arg1; 1468 error = 0; 1469 1470 if (dset != NULL) { 1471 p = buf + bitset_strprint(buf, DOMAINSETBUFSIZ, 1472 (const struct bitset *)&dset->ds_mask, DOMAINSET_SETSIZE); 1473 sprintf(p, ":%d:%d", dset->ds_policy, dset->ds_prefer); 1474 } else 1475 sprintf(buf, "<NULL>"); 1476 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 1477 if (error != 0 || req->newptr == NULL) 1478 return (error); 1479 1480 /* 1481 * Read in and validate the string. 1482 */ 1483 memset(&key, 0, sizeof(key)); 1484 p = &buf[bitset_strscan((struct bitset *)&key.ds_mask, 1485 DOMAINSET_SETSIZE, buf)]; 1486 if (p == buf) 1487 return (EINVAL); 1488 if (sscanf(p, ":%d:%d", &policy, &prefer) != 2) 1489 return (EINVAL); 1490 key.ds_policy = policy; 1491 key.ds_prefer = prefer; 1492 1493 /* Domainset_create() validates the policy.*/ 1494 dset = domainset_create(&key); 1495 if (dset == NULL) 1496 return (EINVAL); 1497 *(struct domainset **)arg1 = dset; 1498 1499 return (error); 1500 } 1501 1502 /* 1503 * Apply an anonymous mask or a domain to a single thread. 1504 */ 1505 static int 1506 _cpuset_setthread(lwpid_t id, cpuset_t *mask, struct domainset *domain) 1507 { 1508 struct setlist cpusets; 1509 struct domainlist domainlist; 1510 struct cpuset *nset; 1511 struct cpuset *set; 1512 struct thread *td; 1513 struct proc *p; 1514 int error; 1515 1516 cpuset_freelist_init(&cpusets, 1); 1517 domainset_freelist_init(&domainlist, domain != NULL); 1518 error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set); 1519 if (error) 1520 goto out; 1521 set = NULL; 1522 thread_lock(td); 1523 error = cpuset_shadow(td->td_cpuset, &nset, mask, domain, 1524 &cpusets, &domainlist); 1525 if (error == 0) 1526 set = cpuset_update_thread(td, nset); 1527 thread_unlock(td); 1528 PROC_UNLOCK(p); 1529 if (set) 1530 cpuset_rel(set); 1531 out: 1532 cpuset_freelist_free(&cpusets); 1533 domainset_freelist_free(&domainlist); 1534 return (error); 1535 } 1536 1537 /* 1538 * Apply an anonymous mask to a single thread. 1539 */ 1540 int 1541 cpuset_setthread(lwpid_t id, cpuset_t *mask) 1542 { 1543 1544 return _cpuset_setthread(id, mask, NULL); 1545 } 1546 1547 /* 1548 * Apply new cpumask to the ithread. 1549 */ 1550 int 1551 cpuset_setithread(lwpid_t id, int cpu) 1552 { 1553 cpuset_t mask; 1554 1555 CPU_ZERO(&mask); 1556 if (cpu == NOCPU) 1557 CPU_COPY(cpuset_root, &mask); 1558 else 1559 CPU_SET(cpu, &mask); 1560 return _cpuset_setthread(id, &mask, NULL); 1561 } 1562 1563 /* 1564 * Initialize static domainsets after NUMA information is available. This is 1565 * called before memory allocators are initialized. 
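 *
 * For example, on a two-domain machine this creates domainset_fixed[1]
 * (allocate from domain 1 only), domainset_prefer[1] (prefer domain 1 but
 * fall back to any), and first-touch, interleave and round-robin policies
 * spanning all domains.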
1566 */ 1567 void 1568 domainset_init(void) 1569 { 1570 struct domainset *dset; 1571 int i; 1572 1573 dset = &domainset_firsttouch; 1574 DOMAINSET_COPY(&all_domains, &dset->ds_mask); 1575 dset->ds_policy = DOMAINSET_POLICY_FIRSTTOUCH; 1576 dset->ds_prefer = -1; 1577 _domainset_create(dset, NULL); 1578 1579 dset = &domainset_interleave; 1580 DOMAINSET_COPY(&all_domains, &dset->ds_mask); 1581 dset->ds_policy = DOMAINSET_POLICY_INTERLEAVE; 1582 dset->ds_prefer = -1; 1583 _domainset_create(dset, NULL); 1584 1585 dset = &domainset_roundrobin; 1586 DOMAINSET_COPY(&all_domains, &dset->ds_mask); 1587 dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN; 1588 dset->ds_prefer = -1; 1589 _domainset_create(dset, NULL); 1590 1591 for (i = 0; i < vm_ndomains; i++) { 1592 dset = &domainset_fixed[i]; 1593 DOMAINSET_ZERO(&dset->ds_mask); 1594 DOMAINSET_SET(i, &dset->ds_mask); 1595 dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN; 1596 _domainset_create(dset, NULL); 1597 1598 dset = &domainset_prefer[i]; 1599 DOMAINSET_COPY(&all_domains, &dset->ds_mask); 1600 dset->ds_policy = DOMAINSET_POLICY_PREFER; 1601 dset->ds_prefer = i; 1602 _domainset_create(dset, NULL); 1603 } 1604 } 1605 1606 /* 1607 * Define the domainsets for cpuset 0, 1 and cpuset 2. 1608 */ 1609 void 1610 domainset_zero(void) 1611 { 1612 struct domainset *dset, *tmp; 1613 1614 mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE); 1615 1616 domainset0 = &domainset_firsttouch; 1617 curthread->td_domain.dr_policy = domainset0; 1618 1619 domainset2 = &domainset_interleave; 1620 kernel_object->domain.dr_policy = domainset2; 1621 1622 /* Remove empty domains from the global policies. */ 1623 LIST_FOREACH_SAFE(dset, &cpuset_domains, ds_link, tmp) 1624 if (domainset_empty_vm(dset)) 1625 LIST_REMOVE(dset, ds_link); 1626 } 1627 1628 /* 1629 * Creates system-wide cpusets and the cpuset for thread0 including three 1630 * sets: 1631 * 1632 * 0 - The root set which should represent all valid processors in the 1633 * system. This set is immutable. 1634 * 1 - The default set which all processes are a member of until changed. 1635 * This allows an administrator to move all threads off of given cpus to 1636 * dedicate them to high priority tasks or save power etc. 1637 * 2 - The kernel set which allows restriction and policy to be applied only 1638 * to kernel threads and the kernel_object. 1639 */ 1640 struct cpuset * 1641 cpuset_thread0(void) 1642 { 1643 struct cpuset *set; 1644 int i; 1645 int error __unused; 1646 1647 cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL, 1648 NULL, NULL, UMA_ALIGN_CACHE, 0); 1649 domainset_zone = uma_zcreate("domainset", sizeof(struct domainset), 1650 NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 1651 1652 /* 1653 * Create the root system set (0) for the whole machine. Doesn't use 1654 * cpuset_create() due to NULL parent. 1655 */ 1656 set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); 1657 CPU_COPY(&all_cpus, &set->cs_mask); 1658 LIST_INIT(&set->cs_children); 1659 LIST_INSERT_HEAD(&cpuset_ids, set, cs_link); 1660 refcount_init(&set->cs_ref, 1); 1661 set->cs_flags = CPU_SET_ROOT | CPU_SET_RDONLY; 1662 set->cs_domain = domainset0; 1663 cpuset_zero = set; 1664 cpuset_root = &set->cs_mask; 1665 1666 /* 1667 * Now derive a default (1), modifiable set from that to give out. 
1668 */ 1669 set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); 1670 error = cpuset_init(set, cpuset_zero, NULL, NULL, 1); 1671 KASSERT(error == 0, ("Error creating default set: %d\n", error)); 1672 cpuset_default = set; 1673 /* 1674 * Create the kernel set (2). 1675 */ 1676 set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); 1677 error = cpuset_init(set, cpuset_zero, NULL, NULL, 2); 1678 KASSERT(error == 0, ("Error creating kernel set: %d\n", error)); 1679 set->cs_domain = domainset2; 1680 cpuset_kernel = set; 1681 1682 /* 1683 * Initialize the unit allocator. 0 and 1 are allocated above. 1684 */ 1685 cpuset_unr = new_unrhdr(3, INT_MAX, NULL); 1686 1687 /* 1688 * If MD code has not initialized per-domain cpusets, place all 1689 * CPUs in domain 0. 1690 */ 1691 for (i = 0; i < MAXMEMDOM; i++) 1692 if (!CPU_EMPTY(&cpuset_domain[i])) 1693 goto domains_set; 1694 CPU_COPY(&all_cpus, &cpuset_domain[0]); 1695 domains_set: 1696 1697 return (cpuset_default); 1698 } 1699 1700 void 1701 cpuset_kernthread(struct thread *td) 1702 { 1703 struct cpuset *set; 1704 1705 thread_lock(td); 1706 set = td->td_cpuset; 1707 td->td_cpuset = cpuset_ref(cpuset_kernel); 1708 thread_unlock(td); 1709 cpuset_rel(set); 1710 } 1711 1712 /* 1713 * Create a cpuset, which would be cpuset_create() but 1714 * mark the new 'set' as root. 1715 * 1716 * We are not going to reparent the td to it. Use cpuset_setproc_update_set() 1717 * for that. 1718 * 1719 * In case of no error, returns the set in *setp locked with a reference. 1720 */ 1721 int 1722 cpuset_create_root(struct prison *pr, struct cpuset **setp) 1723 { 1724 struct cpuset *set; 1725 int error; 1726 1727 KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__)); 1728 KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__)); 1729 1730 set = NULL; 1731 error = cpuset_create(&set, pr->pr_cpuset, &pr->pr_cpuset->cs_mask); 1732 if (error) 1733 return (error); 1734 1735 KASSERT(set != NULL, ("[%s:%d] cpuset_create returned invalid data", 1736 __func__, __LINE__)); 1737 1738 /* Mark the set as root. */ 1739 set->cs_flags |= CPU_SET_ROOT; 1740 *setp = set; 1741 1742 return (0); 1743 } 1744 1745 int 1746 cpuset_setproc_update_set(struct proc *p, struct cpuset *set) 1747 { 1748 int error; 1749 1750 KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__)); 1751 KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__)); 1752 1753 cpuset_ref(set); 1754 error = cpuset_setproc(p->p_pid, set, NULL, NULL, true); 1755 if (error) 1756 return (error); 1757 cpuset_rel(set); 1758 return (0); 1759 } 1760 1761 /* 1762 * In Capability mode, the only accesses that are permitted are to the current 1763 * thread and process' CPU and domain sets. 
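 *
 * For example, a thread in capability mode may still call
 * cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, ...) on itself,
 * but any other level, or an id naming another thread or process, fails
 * with ECAPMODE.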
1764 */ 1765 static int 1766 cpuset_check_capabilities(struct thread *td, cpulevel_t level, cpuwhich_t which, 1767 id_t id) 1768 { 1769 if (IN_CAPABILITY_MODE(td)) { 1770 if (level != CPU_LEVEL_WHICH) 1771 return (ECAPMODE); 1772 if (which != CPU_WHICH_TID && which != CPU_WHICH_PID && 1773 which != CPU_WHICH_TIDPID) 1774 return (ECAPMODE); 1775 if (id != -1 && which == CPU_WHICH_TIDPID && 1776 id != td->td_tid && id != td->td_proc->p_pid) 1777 return (ECAPMODE); 1778 if (id != -1 && 1779 !(which == CPU_WHICH_TID && id == td->td_tid) && 1780 !(which == CPU_WHICH_PID && id == td->td_proc->p_pid)) 1781 return (ECAPMODE); 1782 } 1783 return (0); 1784 } 1785 1786 #if defined(__powerpc__) 1787 /* 1788 * TODO: At least powerpc64 and powerpc64le kernels panic with 1789 * exception 0x480 (instruction segment exception) when copyin/copyout, 1790 * are set as a function pointer in cpuset_copy_cb struct and called by 1791 * an external module (like pfsync). Tip: copyin/copyout have an ifunc 1792 * resolver function. 1793 * 1794 * Bisect of LLVM shows that the behavior changed on LLVM 10.0 with 1795 * https://reviews.llvm.org/rGdc06b0bc9ad055d06535462d91bfc2a744b2f589 1796 * 1797 * This is a hack/workaround while problem is being discussed with LLVM 1798 * community 1799 */ 1800 static int 1801 cpuset_copyin(const void *uaddr, void *kaddr, size_t len) 1802 { 1803 return(copyin(uaddr, kaddr, len)); 1804 } 1805 1806 static int 1807 cpuset_copyout(const void *kaddr, void *uaddr, size_t len) 1808 { 1809 return(copyout(kaddr, uaddr, len)); 1810 } 1811 1812 static const struct cpuset_copy_cb copy_set = { 1813 .cpuset_copyin = cpuset_copyin, 1814 .cpuset_copyout = cpuset_copyout 1815 }; 1816 #else 1817 static const struct cpuset_copy_cb copy_set = { 1818 .cpuset_copyin = copyin, 1819 .cpuset_copyout = copyout 1820 }; 1821 #endif 1822 1823 #ifndef _SYS_SYSPROTO_H_ 1824 struct cpuset_args { 1825 cpusetid_t *setid; 1826 }; 1827 #endif 1828 int 1829 sys_cpuset(struct thread *td, struct cpuset_args *uap) 1830 { 1831 struct cpuset *root; 1832 struct cpuset *set; 1833 int error; 1834 1835 thread_lock(td); 1836 root = cpuset_refroot(td->td_cpuset); 1837 thread_unlock(td); 1838 set = NULL; 1839 error = cpuset_create(&set, root, &root->cs_mask); 1840 cpuset_rel(root); 1841 if (error) 1842 return (error); 1843 error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id)); 1844 if (error == 0) 1845 error = cpuset_setproc(-1, set, NULL, NULL, false); 1846 cpuset_rel(set); 1847 return (error); 1848 } 1849 1850 #ifndef _SYS_SYSPROTO_H_ 1851 struct cpuset_setid_args { 1852 cpuwhich_t which; 1853 id_t id; 1854 cpusetid_t setid; 1855 }; 1856 #endif 1857 int 1858 sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap) 1859 { 1860 1861 return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid)); 1862 } 1863 1864 int 1865 kern_cpuset_setid(struct thread *td, cpuwhich_t which, 1866 id_t id, cpusetid_t setid) 1867 { 1868 struct cpuset *set; 1869 int error; 1870 1871 /* 1872 * Presently we only support per-process sets. 
1873 */ 1874 if (which != CPU_WHICH_PID) 1875 return (EINVAL); 1876 set = cpuset_lookup(setid, td); 1877 if (set == NULL) 1878 return (ESRCH); 1879 error = cpuset_setproc(id, set, NULL, NULL, false); 1880 cpuset_rel(set); 1881 return (error); 1882 } 1883 1884 #ifndef _SYS_SYSPROTO_H_ 1885 struct cpuset_getid_args { 1886 cpulevel_t level; 1887 cpuwhich_t which; 1888 id_t id; 1889 cpusetid_t *setid; 1890 }; 1891 #endif 1892 int 1893 sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap) 1894 { 1895 1896 return (kern_cpuset_getid(td, uap->level, uap->which, uap->id, 1897 uap->setid)); 1898 } 1899 1900 int 1901 kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which, 1902 id_t id, cpusetid_t *setid) 1903 { 1904 struct cpuset *nset; 1905 struct cpuset *set; 1906 struct thread *ttd; 1907 struct proc *p; 1908 cpusetid_t tmpid; 1909 int error; 1910 1911 if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET) 1912 return (EINVAL); 1913 error = cpuset_which(which, id, &p, &ttd, &set); 1914 if (error) 1915 return (error); 1916 switch (which) { 1917 case CPU_WHICH_TID: 1918 case CPU_WHICH_PID: 1919 case CPU_WHICH_TIDPID: 1920 thread_lock(ttd); 1921 set = cpuset_refbase(ttd->td_cpuset); 1922 thread_unlock(ttd); 1923 PROC_UNLOCK(p); 1924 break; 1925 case CPU_WHICH_CPUSET: 1926 case CPU_WHICH_JAIL: 1927 break; 1928 case CPU_WHICH_IRQ: 1929 case CPU_WHICH_DOMAIN: 1930 return (EINVAL); 1931 } 1932 switch (level) { 1933 case CPU_LEVEL_ROOT: 1934 nset = cpuset_refroot(set); 1935 cpuset_rel(set); 1936 set = nset; 1937 break; 1938 case CPU_LEVEL_CPUSET: 1939 break; 1940 case CPU_LEVEL_WHICH: 1941 break; 1942 } 1943 tmpid = set->cs_id; 1944 cpuset_rel(set); 1945 if (error == 0) 1946 error = copyout(&tmpid, setid, sizeof(tmpid)); 1947 1948 return (error); 1949 } 1950 1951 #ifndef _SYS_SYSPROTO_H_ 1952 struct cpuset_getaffinity_args { 1953 cpulevel_t level; 1954 cpuwhich_t which; 1955 id_t id; 1956 size_t cpusetsize; 1957 cpuset_t *mask; 1958 }; 1959 #endif 1960 int 1961 sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap) 1962 { 1963 1964 return (user_cpuset_getaffinity(td, uap->level, uap->which, 1965 uap->id, uap->cpusetsize, uap->mask, ©_set)); 1966 } 1967 1968 int 1969 kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, 1970 id_t id, size_t cpusetsize, cpuset_t *mask) 1971 { 1972 struct thread *ttd; 1973 struct cpuset *nset; 1974 struct cpuset *set; 1975 struct proc *p; 1976 int error; 1977 1978 error = cpuset_check_capabilities(td, level, which, id); 1979 if (error != 0) 1980 return (error); 1981 error = cpuset_which2(&which, id, &p, &ttd, &set); 1982 if (error != 0) 1983 return (error); 1984 switch (level) { 1985 case CPU_LEVEL_ROOT: 1986 case CPU_LEVEL_CPUSET: 1987 switch (which) { 1988 case CPU_WHICH_TID: 1989 case CPU_WHICH_PID: 1990 thread_lock(ttd); 1991 set = cpuset_ref(ttd->td_cpuset); 1992 thread_unlock(ttd); 1993 break; 1994 case CPU_WHICH_CPUSET: 1995 case CPU_WHICH_JAIL: 1996 break; 1997 case CPU_WHICH_IRQ: 1998 case CPU_WHICH_INTRHANDLER: 1999 case CPU_WHICH_ITHREAD: 2000 case CPU_WHICH_DOMAIN: 2001 return (EINVAL); 2002 } 2003 if (level == CPU_LEVEL_ROOT) 2004 nset = cpuset_refroot(set); 2005 else 2006 nset = cpuset_refbase(set); 2007 CPU_COPY(&nset->cs_mask, mask); 2008 cpuset_rel(nset); 2009 break; 2010 case CPU_LEVEL_WHICH: 2011 switch (which) { 2012 case CPU_WHICH_TID: 2013 thread_lock(ttd); 2014 CPU_COPY(&ttd->td_cpuset->cs_mask, mask); 2015 thread_unlock(ttd); 2016 break; 2017 case CPU_WHICH_PID: 2018 
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_getaffinity(id, which, mask);
			break;
		case CPU_WHICH_DOMAIN:
			if (id < 0 || id >= MAXMEMDOM)
				error = ESRCH;
			else
				CPU_COPY(&cpuset_domain[id], mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0) {
		if (cpusetsize < howmany(CPU_FLS(mask), NBBY))
			return (ERANGE);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_STRUCT))
			ktrcpuset(mask, cpusetsize);
#endif
	}
	return (error);
}

int
user_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, cpuset_t *maskp, const struct cpuset_copy_cb *cb)
{
	cpuset_t *mask;
	size_t size;
	int error;

	mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
	size = min(cpusetsize, sizeof(cpuset_t));
	error = kern_cpuset_getaffinity(td, level, which, id, size, mask);
	if (error == 0) {
		error = cb->cpuset_copyout(mask, maskp, size);
		if (error != 0)
			goto out;
		if (cpusetsize > size) {
			char *end;
			char *cp;
			int rv;

			end = cp = (char *)&maskp->__bits;
			end += cpusetsize;
			cp += size;
			while (cp != end) {
				rv = subyte(cp, 0);
				if (rv == -1) {
					error = EFAULT;
					goto out;
				}
				cp++;
			}
		}
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{

	return (user_cpuset_setaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask, &copy_set));
}

int
kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	int error;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktrcpuset(mask, sizeof(cpuset_t));
#endif
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	if (CPU_EMPTY(mask))
		return (EDEADLK);
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
		case CPU_WHICH_TIDPID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			return (EINVAL);
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
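		/*
		 * Drop both references: 'nset' from cpuset_refroot() or
		 * cpuset_refbase() above, and 'set' taken when the target
		 * was resolved.
		 */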
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, mask, NULL, false);
			break;
		case CPU_WHICH_TIDPID:
			if (id > PID_MAX || id == -1)
				error = cpuset_setthread(id, mask);
			else
				error = cpuset_setproc(id, NULL, mask, NULL,
				    false);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_setaffinity(id, which, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
user_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, const cpuset_t *maskp, const struct cpuset_copy_cb *cb)
{
	cpuset_t *mask;
	int error;
	size_t size;

	size = min(cpusetsize, sizeof(cpuset_t));
	mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
	error = cb->cpuset_copyin(maskp, mask, size);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (cpusetsize > sizeof(cpuset_t)) {
		const char *end, *cp;
		int val;
		end = cp = (const char *)&maskp->__bits;
		end += cpusetsize;
		cp += sizeof(cpuset_t);

		while (cp != end) {
			val = fubyte(cp);
			if (val == -1) {
				error = EFAULT;
				goto out;
			}
			if (val != 0) {
				error = EINVAL;
				goto out;
			}
			cp++;
		}
	}
	error = kern_cpuset_setaffinity(td, level, which, id, mask);

out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getdomain_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		domainsetsize;
	domainset_t	*mask;
	int		*policy;
};
#endif
int
sys_cpuset_getdomain(struct thread *td, struct cpuset_getdomain_args *uap)
{

	return (kern_cpuset_getdomain(td, uap->level, uap->which,
	    uap->id, uap->domainsetsize, uap->mask, uap->policy, &copy_set));
}

int
kern_cpuset_getdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t domainsetsize, domainset_t *maskp, int *policyp,
    const struct cpuset_copy_cb *cb)
{
	struct domainset outset;
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct domainset *dset;
	struct proc *p;
	domainset_t *mask;
	int error;

	if (domainsetsize < sizeof(domainset_t) ||
	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
		return (ERANGE);
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
	bzero(&outset, sizeof(outset));
	error = cpuset_which2(&which, id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
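			/*
			 * cpuset_which2() already returned a referenced set
			 * for numbered sets and jails; it is released at the
			 * end of this function.
			 */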
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		domainset_copy(nset->cs_domain, &outset);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			domainset_copy(ttd->td_cpuset->cs_domain, &outset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				dset = ttd->td_cpuset->cs_domain;
				/* Show all domains in the proc. */
				DOMAINSET_OR(&outset.ds_mask, &dset->ds_mask);
				/* Last policy wins. */
				outset.ds_policy = dset->ds_policy;
				outset.ds_prefer = dset->ds_prefer;
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			domainset_copy(set->cs_domain, &outset);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	/*
	 * Translate prefer into a set containing only the preferred domain,
	 * not the entire fallback set.
	 */
	if (outset.ds_policy == DOMAINSET_POLICY_PREFER) {
		DOMAINSET_ZERO(&outset.ds_mask);
		DOMAINSET_SET(outset.ds_prefer, &outset.ds_mask);
	}
	DOMAINSET_COPY(&outset.ds_mask, mask);
	if (error == 0)
		error = cb->cpuset_copyout(mask, maskp, domainsetsize);
	if (error == 0)
		if (suword32(policyp, outset.ds_policy) != 0)
			error = EFAULT;
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setdomain_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		domainsetsize;
	domainset_t	*mask;
	int		policy;
};
#endif
int
sys_cpuset_setdomain(struct thread *td, struct cpuset_setdomain_args *uap)
{

	return (kern_cpuset_setdomain(td, uap->level, uap->which,
	    uap->id, uap->domainsetsize, uap->mask, uap->policy, &copy_set));
}

int
kern_cpuset_setdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t domainsetsize, const domainset_t *maskp, int policy,
    const struct cpuset_copy_cb *cb)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	struct domainset domain;
	domainset_t *mask;
	int error;

	if (domainsetsize < sizeof(domainset_t) ||
	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
		return (ERANGE);
	if (policy <= DOMAINSET_POLICY_INVALID ||
	    policy > DOMAINSET_POLICY_MAX)
		return (EINVAL);
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	memset(&domain, 0, sizeof(domain));
	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = cb->cpuset_copyin(maskp, mask, domainsetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
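	 * A userland domainset_t may be larger than the kernel's; every byte
	 * copied in beyond sizeof(domainset_t) must be zero, otherwise the
	 * request names domains this kernel cannot represent.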
	 */
	if (domainsetsize > sizeof(domainset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += domainsetsize;
		cp += sizeof(domainset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	if (DOMAINSET_EMPTY(mask)) {
		error = EDEADLK;
		goto out;
	}
	DOMAINSET_COPY(mask, &domain.ds_mask);
	domain.ds_policy = policy;

	/*
	 * Sanitize the provided mask.
	 */
	if (!DOMAINSET_SUBSET(&all_domains, &domain.ds_mask)) {
		error = EINVAL;
		goto out;
	}

	/* Translate preferred policy into a mask and fallback. */
	if (policy == DOMAINSET_POLICY_PREFER) {
		/* Only support a single preferred domain. */
		if (DOMAINSET_COUNT(&domain.ds_mask) != 1) {
			error = EINVAL;
			goto out;
		}
		domain.ds_prefer = DOMAINSET_FFS(&domain.ds_mask) - 1;
		/* This will be constrained by domainset_shadow(). */
		DOMAINSET_COPY(&all_domains, &domain.ds_mask);
	}

	/*
	 * When given an impossible policy, fall back to interleaving
	 * across all domains.
	 */
	if (domainset_empty_vm(&domain))
		domainset_copy(domainset2, &domain);

	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
		case CPU_WHICH_TIDPID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify_domain(nset, &domain);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = _cpuset_setthread(id, NULL, &domain);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, NULL, &domain, false);
			break;
		case CPU_WHICH_TIDPID:
			if (id > PID_MAX || id == -1)
				error = _cpuset_setthread(id, NULL, &domain);
			else
				error = cpuset_setproc(id, NULL, NULL, &domain,
				    false);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify_domain(set, &domain);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB

static void
ddb_display_bitset(const struct bitset *set, int size)
{
	int bit, once;

	for (once = 0, bit = 0; bit < size; bit++) {
		if (CPU_ISSET(bit, set)) {
			if (once == 0) {
				db_printf("%d", bit);
				once = 1;
			} else
				db_printf(",%d", bit);
		}
	}
	if (once == 0)
		db_printf("<none>");
}

void
ddb_display_cpuset(const cpuset_t *set)
{
	ddb_display_bitset((const struct bitset *)set, CPU_SETSIZE);
}

static void
ddb_display_domainset(const domainset_t *set)
{
	ddb_display_bitset((const struct bitset *)set, DOMAINSET_SETSIZE);
}

DB_SHOW_COMMAND_FLAGS(cpusets, db_show_cpusets, DB_CMD_MEMSAFE)
{
	struct cpuset *set;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, refcount_load(&set->cs_ref), set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf(" cpu mask=");
		ddb_display_cpuset(&set->cs_mask);
		db_printf("\n");
		db_printf(" domain policy %d prefer %d mask=",
		    set->cs_domain->ds_policy, set->cs_domain->ds_prefer);
		ddb_display_domainset(&set->cs_domain->ds_mask);
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}

DB_SHOW_COMMAND_FLAGS(domainsets, db_show_domainsets, DB_CMD_MEMSAFE)
{
	struct domainset *set;

	LIST_FOREACH(set, &cpuset_domains, ds_link) {
		db_printf("set=%p policy %d prefer %d cnt %d\n",
		    set, set->ds_policy, set->ds_prefer, set->ds_cnt);
		db_printf(" mask =");
		ddb_display_domainset(&set->ds_mask);
		db_printf("\n");
	}
}
#endif /* DDB */