/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/ctype.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/capsicum.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/vmmeter.h>
#include <sys/ktrace.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1. Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set. This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask. In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not. This means that anonymous sets are immutable because they may be
 * shared. To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set. This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid. Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process. This is to remove ambiguity when the setid is queried with
 * a pid argument. There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'. It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */

LIST_HEAD(domainlist, domainset);
struct domainset __read_mostly domainset_firsttouch;
struct domainset __read_mostly domainset_fixed[MAXMEMDOM];
struct domainset __read_mostly domainset_interleave;
struct domainset __read_mostly domainset_prefer[MAXMEMDOM];
struct domainset __read_mostly domainset_roundrobin;

static uma_zone_t cpuset_zone;
static uma_zone_t domainset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct domainlist cpuset_domains;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero, *cpuset_default, *cpuset_kernel;
static struct domainset *domainset0, *domainset2;
u_int cpusetsizemin = 1;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD,
    SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");

/* Return the minimum size of cpuset_t allowed by the kernel */
SYSCTL_UINT(_kern_sched, OID_AUTO, cpusetsizemin,
    CTLFLAG_RD | CTLFLAG_CAPRD, &cpusetsizemin, 0,
    "The minimum size of cpuset_t allowed by the kernel");

cpuset_t *cpuset_root;
cpuset_t cpuset_domain[MAXMEMDOM];

static int domainset_valid(const struct domainset *, const struct domainset *);

/*
 * Find the first non-anonymous set starting from 'set'.
 */
static struct cpuset *
cpuset_getbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.
 */
static struct cpuset *
cpuset_getroot(struct cpuset *set)
{

	while ((set->cs_flags & CPU_SET_ROOT) == 0 && set->cs_parent != NULL)
		set = set->cs_parent;
	return (set);
}
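
/*
 * Illustrative userland sketch (not part of this file): the simple usage
 * recommended in the comment above. A thread queries the cpus available to
 * its base set and then restricts only itself, leaving named sets alone.
 * The helper name is hypothetical and the block is not compiled.
 */
#if 0	/* illustrative example only */
#include <sys/param.h>
#include <sys/cpuset.h>

static int
example_pin_self(int cpu)
{
	cpuset_t mask;

	/* Which cpus may this process's base set use? */
	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
	    sizeof(mask), &mask) != 0)
		return (-1);
	if (!CPU_ISSET(cpu, &mask))
		return (-1);
	/* Restrict only the calling thread (-1 tid means 'curthread'). */
	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	return (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask));
}
#endif	/* illustrative example only */

/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.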
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root. Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	return (cpuset_ref(cpuset_getroot(set)));
}

/*
 * Find the first non-anonymous set starting from 'set'. Returns this set
 * referenced. May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	return (cpuset_ref(cpuset_getbase(set)));
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release_if_not_last(&set->cs_ref))
		return;
	mtx_lock_spin(&cpuset_lock);
	if (!refcount_release(&set->cs_ref)) {
		mtx_unlock_spin(&cpuset_lock);
		return;
	}
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free. This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release_if_not_last(&set->cs_ref))
		return;
	mtx_lock_spin(&cpuset_lock);
	if (!refcount_release(&set->cs_ref)) {
		mtx_unlock_spin(&cpuset_lock);
		return;
	}
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release. Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	cpusetid_t id;

	id = set->cs_id;
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Find a set based on an id. Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}
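
/*
 * Illustrative sketch (not part of this file): the deferred release pattern
 * above. A caller that must drop references while it cannot sleep or free
 * (e.g. while thread locks are held) queues them on a private list and
 * completes the releases afterwards. The helper name is hypothetical.
 */
#if 0	/* illustrative example only */
static void
example_drop_refs(struct cpuset **sets, int count)
{
	struct setlist droplist;
	struct cpuset *tmp;
	int i;

	LIST_INIT(&droplist);
	/* Unsafe-to-free context: only defer the releases. */
	for (i = 0; i < count; i++)
		cpuset_rel_defer(&droplist, sets[i]);
	/* Back in a safe context: complete them. */
	while ((tmp = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(tmp);
}
#endif	/* illustrative example only */

/*
 * Initialize a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref. May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.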
317 */ 318 static int 319 cpuset_init(struct cpuset *set, struct cpuset *parent, 320 const cpuset_t *mask, struct domainset *domain, cpusetid_t id) 321 { 322 323 if (domain == NULL) 324 domain = parent->cs_domain; 325 if (mask == NULL) 326 mask = &parent->cs_mask; 327 if (!CPU_OVERLAP(&parent->cs_mask, mask)) 328 return (EDEADLK); 329 /* The domain must be prepared ahead of time. */ 330 if (!domainset_valid(parent->cs_domain, domain)) 331 return (EDEADLK); 332 CPU_COPY(mask, &set->cs_mask); 333 LIST_INIT(&set->cs_children); 334 refcount_init(&set->cs_ref, 1); 335 set->cs_flags = 0; 336 mtx_lock_spin(&cpuset_lock); 337 set->cs_domain = domain; 338 CPU_AND(&set->cs_mask, &set->cs_mask, &parent->cs_mask); 339 set->cs_id = id; 340 set->cs_parent = cpuset_ref(parent); 341 LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings); 342 if (set->cs_id != CPUSET_INVALID) 343 LIST_INSERT_HEAD(&cpuset_ids, set, cs_link); 344 mtx_unlock_spin(&cpuset_lock); 345 346 return (0); 347 } 348 349 /* 350 * Create a new non-anonymous set with the requested parent and mask. May 351 * return failures if the mask is invalid or a new number can not be 352 * allocated. 353 * 354 * If *setp is not NULL, then it will be used as-is. The caller must take 355 * into account that *setp will be inserted at the head of cpuset_ids and 356 * plan any potentially conflicting cs_link usage accordingly. 357 */ 358 static int 359 cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask) 360 { 361 struct cpuset *set; 362 cpusetid_t id; 363 int error; 364 bool dofree; 365 366 id = alloc_unr(cpuset_unr); 367 if (id == -1) 368 return (ENFILE); 369 dofree = (*setp == NULL); 370 if (*setp != NULL) 371 set = *setp; 372 else 373 *setp = set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); 374 error = cpuset_init(set, parent, mask, NULL, id); 375 if (error == 0) 376 return (0); 377 free_unr(cpuset_unr, id); 378 if (dofree) 379 uma_zfree(cpuset_zone, set); 380 381 return (error); 382 } 383 384 static void 385 cpuset_freelist_add(struct setlist *list, int count) 386 { 387 struct cpuset *set; 388 int i; 389 390 for (i = 0; i < count; i++) { 391 set = uma_zalloc(cpuset_zone, M_ZERO | M_WAITOK); 392 LIST_INSERT_HEAD(list, set, cs_link); 393 } 394 } 395 396 static void 397 cpuset_freelist_init(struct setlist *list, int count) 398 { 399 400 LIST_INIT(list); 401 cpuset_freelist_add(list, count); 402 } 403 404 static void 405 cpuset_freelist_free(struct setlist *list) 406 { 407 struct cpuset *set; 408 409 while ((set = LIST_FIRST(list)) != NULL) { 410 LIST_REMOVE(set, cs_link); 411 uma_zfree(cpuset_zone, set); 412 } 413 } 414 415 static void 416 domainset_freelist_add(struct domainlist *list, int count) 417 { 418 struct domainset *set; 419 int i; 420 421 for (i = 0; i < count; i++) { 422 set = uma_zalloc(domainset_zone, M_ZERO | M_WAITOK); 423 LIST_INSERT_HEAD(list, set, ds_link); 424 } 425 } 426 427 static void 428 domainset_freelist_init(struct domainlist *list, int count) 429 { 430 431 LIST_INIT(list); 432 domainset_freelist_add(list, count); 433 } 434 435 static void 436 domainset_freelist_free(struct domainlist *list) 437 { 438 struct domainset *set; 439 440 while ((set = LIST_FIRST(list)) != NULL) { 441 LIST_REMOVE(set, ds_link); 442 uma_zfree(domainset_zone, set); 443 } 444 } 445 446 /* Copy a domainset preserving mask and policy. 
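
/*
 * Illustrative sketch (not part of this file): the freelist helpers above
 * exist so that sets can be allocated with M_WAITOK before entering a
 * non-sleepable section, with any leftovers returned afterwards. The helper
 * name is hypothetical.
 */
#if 0	/* illustrative example only */
static void
example_prealloc(int needed)
{
	struct setlist freelist;

	cpuset_freelist_init(&freelist, needed);	/* may sleep */
	/*
	 * ... acquire locks and consume entries with LIST_FIRST() /
	 * LIST_REMOVE() instead of calling uma_zalloc() here ...
	 */
	cpuset_freelist_free(&freelist);		/* release leftovers */
}
#endif	/* illustrative example only */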
*/ 447 static void 448 domainset_copy(const struct domainset *from, struct domainset *to) 449 { 450 451 DOMAINSET_COPY(&from->ds_mask, &to->ds_mask); 452 to->ds_policy = from->ds_policy; 453 to->ds_prefer = from->ds_prefer; 454 } 455 456 /* Return 1 if mask and policy are equal, otherwise 0. */ 457 static int 458 domainset_equal(const struct domainset *one, const struct domainset *two) 459 { 460 461 return (DOMAINSET_CMP(&one->ds_mask, &two->ds_mask) == 0 && 462 one->ds_policy == two->ds_policy && 463 one->ds_prefer == two->ds_prefer); 464 } 465 466 /* Return 1 if child is a valid subset of parent. */ 467 static int 468 domainset_valid(const struct domainset *parent, const struct domainset *child) 469 { 470 if (child->ds_policy != DOMAINSET_POLICY_PREFER) 471 return (DOMAINSET_SUBSET(&parent->ds_mask, &child->ds_mask)); 472 return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask)); 473 } 474 475 static int 476 domainset_restrict(const struct domainset *parent, 477 const struct domainset *child) 478 { 479 if (child->ds_policy != DOMAINSET_POLICY_PREFER) 480 return (DOMAINSET_OVERLAP(&parent->ds_mask, &child->ds_mask)); 481 return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask)); 482 } 483 484 /* 485 * Lookup or create a domainset. The key is provided in ds_mask and 486 * ds_policy. If the domainset does not yet exist the storage in 487 * 'domain' is used to insert. Otherwise this storage is freed to the 488 * domainset_zone and the existing domainset is returned. 489 */ 490 static struct domainset * 491 _domainset_create(struct domainset *domain, struct domainlist *freelist) 492 { 493 struct domainset *ndomain; 494 int i, j; 495 496 KASSERT(domain->ds_cnt <= vm_ndomains, 497 ("invalid domain count in domainset %p", domain)); 498 KASSERT(domain->ds_policy != DOMAINSET_POLICY_PREFER || 499 domain->ds_prefer < vm_ndomains, 500 ("invalid preferred domain in domains %p", domain)); 501 502 mtx_lock_spin(&cpuset_lock); 503 LIST_FOREACH(ndomain, &cpuset_domains, ds_link) 504 if (domainset_equal(ndomain, domain)) 505 break; 506 /* 507 * If the domain does not yet exist we insert it and initialize 508 * various iteration helpers which are not part of the key. 509 */ 510 if (ndomain == NULL) { 511 LIST_INSERT_HEAD(&cpuset_domains, domain, ds_link); 512 domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask); 513 for (i = 0, j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++) 514 if (DOMAINSET_ISSET(i, &domain->ds_mask)) 515 domain->ds_order[j++] = i; 516 } 517 mtx_unlock_spin(&cpuset_lock); 518 if (ndomain == NULL) 519 return (domain); 520 if (freelist != NULL) 521 LIST_INSERT_HEAD(freelist, domain, ds_link); 522 else 523 uma_zfree(domainset_zone, domain); 524 return (ndomain); 525 526 } 527 528 /* 529 * Are any of the domains in the mask empty? If so, silently 530 * remove them and update the domainset accordingly. If only empty 531 * domains are present, we must return failure. 532 */ 533 static bool 534 domainset_empty_vm(struct domainset *domain) 535 { 536 domainset_t empty; 537 int i, j; 538 539 DOMAINSET_ZERO(&empty); 540 for (i = 0; i < vm_ndomains; i++) 541 if (VM_DOMAIN_EMPTY(i)) 542 DOMAINSET_SET(i, &empty); 543 if (DOMAINSET_SUBSET(&empty, &domain->ds_mask)) 544 return (true); 545 546 /* Remove empty domains from the set and recompute. 
*/ 547 DOMAINSET_ANDNOT(&domain->ds_mask, &empty); 548 domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask); 549 for (i = j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++) 550 if (DOMAINSET_ISSET(i, &domain->ds_mask)) 551 domain->ds_order[j++] = i; 552 553 /* Convert a PREFER policy referencing an empty domain to RR. */ 554 if (domain->ds_policy == DOMAINSET_POLICY_PREFER && 555 DOMAINSET_ISSET(domain->ds_prefer, &empty)) { 556 domain->ds_policy = DOMAINSET_POLICY_ROUNDROBIN; 557 domain->ds_prefer = -1; 558 } 559 560 return (false); 561 } 562 563 /* 564 * Create or lookup a domainset based on the key held in 'domain'. 565 */ 566 struct domainset * 567 domainset_create(const struct domainset *domain) 568 { 569 struct domainset *ndomain; 570 571 /* 572 * Validate the policy. It must specify a useable policy number with 573 * only valid domains. Preferred must include the preferred domain 574 * in the mask. 575 */ 576 if (domain->ds_policy <= DOMAINSET_POLICY_INVALID || 577 domain->ds_policy > DOMAINSET_POLICY_MAX) 578 return (NULL); 579 if (domain->ds_policy == DOMAINSET_POLICY_PREFER && 580 !DOMAINSET_ISSET(domain->ds_prefer, &domain->ds_mask)) 581 return (NULL); 582 if (!DOMAINSET_SUBSET(&domainset0->ds_mask, &domain->ds_mask)) 583 return (NULL); 584 ndomain = uma_zalloc(domainset_zone, M_WAITOK | M_ZERO); 585 domainset_copy(domain, ndomain); 586 return _domainset_create(ndomain, NULL); 587 } 588 589 /* 590 * Update thread domainset pointers. 591 */ 592 static void 593 domainset_notify(void) 594 { 595 struct thread *td; 596 struct proc *p; 597 598 sx_slock(&allproc_lock); 599 FOREACH_PROC_IN_SYSTEM(p) { 600 PROC_LOCK(p); 601 if (p->p_state == PRS_NEW) { 602 PROC_UNLOCK(p); 603 continue; 604 } 605 FOREACH_THREAD_IN_PROC(p, td) { 606 thread_lock(td); 607 td->td_domain.dr_policy = td->td_cpuset->cs_domain; 608 thread_unlock(td); 609 } 610 PROC_UNLOCK(p); 611 } 612 sx_sunlock(&allproc_lock); 613 kernel_object->domain.dr_policy = cpuset_kernel->cs_domain; 614 } 615 616 /* 617 * Create a new set that is a subset of a parent. 618 */ 619 static struct domainset * 620 domainset_shadow(const struct domainset *pdomain, 621 const struct domainset *domain, struct domainlist *freelist) 622 { 623 struct domainset *ndomain; 624 625 ndomain = LIST_FIRST(freelist); 626 LIST_REMOVE(ndomain, ds_link); 627 628 /* 629 * Initialize the key from the request. 630 */ 631 domainset_copy(domain, ndomain); 632 633 /* 634 * Restrict the key by the parent. 635 */ 636 DOMAINSET_AND(&ndomain->ds_mask, &pdomain->ds_mask); 637 638 return _domainset_create(ndomain, freelist); 639 } 640 641 /* 642 * Recursively check for errors that would occur from applying mask to 643 * the tree of sets starting at 'set'. Checks for sets that would become 644 * empty as well as RDONLY flags. 645 */ 646 static int 647 cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int augment_mask) 648 { 649 struct cpuset *nset; 650 cpuset_t newmask; 651 int error; 652 653 mtx_assert(&cpuset_lock, MA_OWNED); 654 if (set->cs_flags & CPU_SET_RDONLY) 655 return (EPERM); 656 if (augment_mask) { 657 CPU_AND(&newmask, &set->cs_mask, mask); 658 } else 659 CPU_COPY(mask, &newmask); 660 661 if (CPU_EMPTY(&newmask)) 662 return (EDEADLK); 663 error = 0; 664 LIST_FOREACH(nset, &set->cs_children, cs_siblings) 665 if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0) 666 break; 667 return (error); 668 } 669 670 /* 671 * Applies the mask 'mask' without checking for empty sets or permissions. 
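
/*
 * Illustrative sketch (not part of this file): callers of domainset_create()
 * build a key on the stack and get back the canonical shared instance, or
 * NULL if the policy/mask combination is rejected by the validation above.
 * The helper name is hypothetical.
 */
#if 0	/* illustrative example only */
static struct domainset *
example_prefer_domainset(int domain)
{
	struct domainset key;

	memset(&key, 0, sizeof(key));
	DOMAINSET_COPY(&all_domains, &key.ds_mask);
	key.ds_policy = DOMAINSET_POLICY_PREFER;
	key.ds_prefer = domain;		/* must be a member of ds_mask */
	return (domainset_create(&key));
}
#endif	/* illustrative example only */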
672 */ 673 static void 674 cpuset_update(struct cpuset *set, cpuset_t *mask) 675 { 676 struct cpuset *nset; 677 678 mtx_assert(&cpuset_lock, MA_OWNED); 679 CPU_AND(&set->cs_mask, &set->cs_mask, mask); 680 LIST_FOREACH(nset, &set->cs_children, cs_siblings) 681 cpuset_update(nset, &set->cs_mask); 682 683 return; 684 } 685 686 /* 687 * Modify the set 'set' to use a copy of the mask provided. Apply this new 688 * mask to restrict all children in the tree. Checks for validity before 689 * applying the changes. 690 */ 691 static int 692 cpuset_modify(struct cpuset *set, cpuset_t *mask) 693 { 694 struct cpuset *root; 695 int error; 696 697 error = priv_check(curthread, PRIV_SCHED_CPUSET); 698 if (error) 699 return (error); 700 /* 701 * In case we are called from within the jail, 702 * we do not allow modifying the dedicated root 703 * cpuset of the jail but may still allow to 704 * change child sets, including subordinate jails' 705 * roots. 706 */ 707 if ((set->cs_flags & CPU_SET_ROOT) != 0 && 708 jailed(curthread->td_ucred) && 709 set == curthread->td_ucred->cr_prison->pr_cpuset) 710 return (EPERM); 711 /* 712 * Verify that we have access to this set of 713 * cpus. 714 */ 715 if ((set->cs_flags & (CPU_SET_ROOT | CPU_SET_RDONLY)) == CPU_SET_ROOT) { 716 KASSERT(set->cs_parent != NULL, 717 ("jail.cpuset=%d is not a proper child of parent jail's root.", 718 set->cs_id)); 719 720 /* 721 * cpuset_getroot() cannot work here due to how top-level jail 722 * roots are constructed. Top-level jails are parented to 723 * thread0's cpuset (i.e. cpuset 1) rather than the system root. 724 */ 725 root = set->cs_parent; 726 } else { 727 root = cpuset_getroot(set); 728 } 729 mtx_lock_spin(&cpuset_lock); 730 if (root && !CPU_SUBSET(&root->cs_mask, mask)) { 731 error = EINVAL; 732 goto out; 733 } 734 error = cpuset_testupdate(set, mask, 0); 735 if (error) 736 goto out; 737 CPU_COPY(mask, &set->cs_mask); 738 cpuset_update(set, mask); 739 out: 740 mtx_unlock_spin(&cpuset_lock); 741 742 return (error); 743 } 744 745 /* 746 * Recursively check for errors that would occur from applying mask to 747 * the tree of sets starting at 'set'. Checks for sets that would become 748 * empty as well as RDONLY flags. 749 */ 750 static int 751 cpuset_testupdate_domain(struct cpuset *set, struct domainset *dset, 752 struct domainset *orig, int *count, int augment_mask __unused) 753 { 754 struct cpuset *nset; 755 struct domainset *domain; 756 struct domainset newset; 757 int error; 758 759 mtx_assert(&cpuset_lock, MA_OWNED); 760 if (set->cs_flags & CPU_SET_RDONLY) 761 return (EPERM); 762 domain = set->cs_domain; 763 domainset_copy(domain, &newset); 764 if (!domainset_equal(domain, orig)) { 765 if (!domainset_restrict(domain, dset)) 766 return (EDEADLK); 767 DOMAINSET_AND(&newset.ds_mask, &dset->ds_mask); 768 /* Count the number of domains that are changing. */ 769 (*count)++; 770 } 771 error = 0; 772 LIST_FOREACH(nset, &set->cs_children, cs_siblings) 773 if ((error = cpuset_testupdate_domain(nset, &newset, domain, 774 count, 1)) != 0) 775 break; 776 return (error); 777 } 778 779 /* 780 * Applies the mask 'mask' without checking for empty sets or permissions. 781 */ 782 static void 783 cpuset_update_domain(struct cpuset *set, struct domainset *domain, 784 struct domainset *orig, struct domainlist *domains) 785 { 786 struct cpuset *nset; 787 788 mtx_assert(&cpuset_lock, MA_OWNED); 789 /* 790 * If this domainset has changed from the parent we must calculate 791 * a new set. Otherwise it simply inherits from the parent. 
When 792 * we inherit from the parent we get a new mask and policy. If the 793 * set is modified from the parent we keep the policy and only 794 * update the mask. 795 */ 796 if (set->cs_domain != orig) { 797 orig = set->cs_domain; 798 set->cs_domain = domainset_shadow(domain, orig, domains); 799 } else 800 set->cs_domain = domain; 801 LIST_FOREACH(nset, &set->cs_children, cs_siblings) 802 cpuset_update_domain(nset, set->cs_domain, orig, domains); 803 804 return; 805 } 806 807 /* 808 * Modify the set 'set' to use a copy the domainset provided. Apply this new 809 * mask to restrict all children in the tree. Checks for validity before 810 * applying the changes. 811 */ 812 static int 813 cpuset_modify_domain(struct cpuset *set, struct domainset *domain) 814 { 815 struct domainlist domains; 816 struct domainset temp; 817 struct domainset *dset; 818 struct cpuset *root; 819 int ndomains, needed; 820 int error; 821 822 error = priv_check(curthread, PRIV_SCHED_CPUSET); 823 if (error) 824 return (error); 825 /* 826 * In case we are called from within the jail 827 * we do not allow modifying the dedicated root 828 * cpuset of the jail but may still allow to 829 * change child sets. 830 */ 831 if (jailed(curthread->td_ucred) && 832 set->cs_flags & CPU_SET_ROOT) 833 return (EPERM); 834 domainset_freelist_init(&domains, 0); 835 domain = domainset_create(domain); 836 ndomains = 0; 837 838 mtx_lock_spin(&cpuset_lock); 839 for (;;) { 840 root = cpuset_getroot(set); 841 dset = root->cs_domain; 842 /* 843 * Verify that we have access to this set of domains. 844 */ 845 if (!domainset_valid(dset, domain)) { 846 error = EINVAL; 847 goto out; 848 } 849 /* 850 * If applying prefer we keep the current set as the fallback. 851 */ 852 if (domain->ds_policy == DOMAINSET_POLICY_PREFER) 853 DOMAINSET_COPY(&set->cs_domain->ds_mask, 854 &domain->ds_mask); 855 /* 856 * Determine whether we can apply this set of domains and 857 * how many new domain structures it will require. 858 */ 859 domainset_copy(domain, &temp); 860 needed = 0; 861 error = cpuset_testupdate_domain(set, &temp, set->cs_domain, 862 &needed, 0); 863 if (error) 864 goto out; 865 if (ndomains >= needed) 866 break; 867 868 /* Dropping the lock; we'll need to re-evaluate again. */ 869 mtx_unlock_spin(&cpuset_lock); 870 domainset_freelist_add(&domains, needed - ndomains); 871 ndomains = needed; 872 mtx_lock_spin(&cpuset_lock); 873 } 874 dset = set->cs_domain; 875 cpuset_update_domain(set, domain, dset, &domains); 876 out: 877 mtx_unlock_spin(&cpuset_lock); 878 domainset_freelist_free(&domains); 879 if (error == 0) 880 domainset_notify(); 881 882 return (error); 883 } 884 885 /* 886 * Resolve the 'which' parameter of several cpuset apis. 887 * 888 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid. Also 889 * checks for permission via p_cansched(). 890 * 891 * For WHICH_SET returns a valid set with a new reference. 892 * 893 * -1 may be supplied for any argument to mean the current proc/thread or 894 * the base set of the current thread. May fail with ESRCH/EPERM. 
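
/*
 * Illustrative sketch (not part of this file): a typical cpuset_which()
 * caller for the PID/TID cases, which hands back a locked process that the
 * caller must unlock. For CPU_WHICH_CPUSET/CPU_WHICH_JAIL a referenced set
 * is returned instead and must be dropped with cpuset_rel(). The helper
 * name is hypothetical.
 */
#if 0	/* illustrative example only */
static int
example_which_curproc(void)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	error = cpuset_which(CPU_WHICH_PID, -1, &p, &td, &set);
	if (error != 0)
		return (error);
	/* ... inspect 'p' and 'td' while the proc lock is held ... */
	PROC_UNLOCK(p);
	return (0);
}
#endif	/* illustrative example only */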
895 */ 896 int 897 cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp, 898 struct cpuset **setp) 899 { 900 struct cpuset *set; 901 struct thread *td; 902 struct proc *p; 903 int error; 904 905 *pp = p = NULL; 906 *tdp = td = NULL; 907 *setp = set = NULL; 908 switch (which) { 909 case CPU_WHICH_PID: 910 if (id == -1) { 911 PROC_LOCK(curproc); 912 p = curproc; 913 break; 914 } 915 if ((p = pfind(id)) == NULL) 916 return (ESRCH); 917 break; 918 case CPU_WHICH_TID: 919 if (id == -1) { 920 PROC_LOCK(curproc); 921 p = curproc; 922 td = curthread; 923 break; 924 } 925 td = tdfind(id, -1); 926 if (td == NULL) 927 return (ESRCH); 928 p = td->td_proc; 929 break; 930 case CPU_WHICH_TIDPID: 931 if (id == -1) { 932 PROC_LOCK(curproc); 933 td = curthread; 934 p = curproc; 935 } else if (id > PID_MAX) { 936 td = tdfind(id, -1); 937 if (td == NULL) 938 return (ESRCH); 939 p = td->td_proc; 940 } else { 941 p = pfind(id); 942 if (p == NULL) 943 return (ESRCH); 944 } 945 break; 946 case CPU_WHICH_CPUSET: 947 if (id == -1) { 948 thread_lock(curthread); 949 set = cpuset_refbase(curthread->td_cpuset); 950 thread_unlock(curthread); 951 } else 952 set = cpuset_lookup(id, curthread); 953 if (set) { 954 *setp = set; 955 return (0); 956 } 957 return (ESRCH); 958 case CPU_WHICH_JAIL: 959 { 960 /* Find `set' for prison with given id. */ 961 struct prison *pr; 962 963 sx_slock(&allprison_lock); 964 pr = prison_find_child(curthread->td_ucred->cr_prison, id); 965 sx_sunlock(&allprison_lock); 966 if (pr == NULL) 967 return (ESRCH); 968 cpuset_ref(pr->pr_cpuset); 969 *setp = pr->pr_cpuset; 970 mtx_unlock(&pr->pr_mtx); 971 return (0); 972 } 973 case CPU_WHICH_IRQ: 974 case CPU_WHICH_DOMAIN: 975 return (0); 976 default: 977 return (EINVAL); 978 } 979 error = p_cansched(curthread, p); 980 if (error) { 981 PROC_UNLOCK(p); 982 return (error); 983 } 984 if (td == NULL) 985 td = FIRST_THREAD_IN_PROC(p); 986 *pp = p; 987 *tdp = td; 988 return (0); 989 } 990 991 static int 992 cpuset_testshadow(struct cpuset *set, const cpuset_t *mask, 993 const struct domainset *domain) 994 { 995 struct cpuset *parent; 996 struct domainset *dset; 997 998 parent = cpuset_getbase(set); 999 /* 1000 * If we are restricting a cpu mask it must be a subset of the 1001 * parent or invalid CPUs have been specified. 1002 */ 1003 if (mask != NULL && !CPU_SUBSET(&parent->cs_mask, mask)) 1004 return (EINVAL); 1005 1006 /* 1007 * If we are restricting a domain mask it must be a subset of the 1008 * parent or invalid domains have been specified. 1009 */ 1010 dset = parent->cs_domain; 1011 if (domain != NULL && !domainset_valid(dset, domain)) 1012 return (EINVAL); 1013 1014 return (0); 1015 } 1016 1017 /* 1018 * Create an anonymous set with the provided mask in the space provided by 1019 * 'nset'. If the passed in set is anonymous we use its parent otherwise 1020 * the new set is a child of 'set'. 
1021 */ 1022 static int 1023 cpuset_shadow(struct cpuset *set, struct cpuset **nsetp, 1024 const cpuset_t *mask, const struct domainset *domain, 1025 struct setlist *cpusets, struct domainlist *domains) 1026 { 1027 struct cpuset *parent; 1028 struct cpuset *nset; 1029 struct domainset *dset; 1030 struct domainset *d; 1031 int error; 1032 1033 error = cpuset_testshadow(set, mask, domain); 1034 if (error) 1035 return (error); 1036 1037 parent = cpuset_getbase(set); 1038 dset = parent->cs_domain; 1039 if (mask == NULL) 1040 mask = &set->cs_mask; 1041 if (domain != NULL) 1042 d = domainset_shadow(dset, domain, domains); 1043 else 1044 d = set->cs_domain; 1045 nset = LIST_FIRST(cpusets); 1046 error = cpuset_init(nset, parent, mask, d, CPUSET_INVALID); 1047 if (error == 0) { 1048 LIST_REMOVE(nset, cs_link); 1049 *nsetp = nset; 1050 } 1051 return (error); 1052 } 1053 1054 static struct cpuset * 1055 cpuset_update_thread(struct thread *td, struct cpuset *nset) 1056 { 1057 struct cpuset *tdset; 1058 1059 tdset = td->td_cpuset; 1060 td->td_cpuset = nset; 1061 td->td_domain.dr_policy = nset->cs_domain; 1062 sched_affinity(td); 1063 1064 return (tdset); 1065 } 1066 1067 static int 1068 cpuset_setproc_test_maskthread(struct cpuset *tdset, cpuset_t *mask, 1069 struct domainset *domain) 1070 { 1071 struct cpuset *parent; 1072 1073 parent = cpuset_getbase(tdset); 1074 if (mask == NULL) 1075 mask = &tdset->cs_mask; 1076 if (domain == NULL) 1077 domain = tdset->cs_domain; 1078 return cpuset_testshadow(parent, mask, domain); 1079 } 1080 1081 static int 1082 cpuset_setproc_maskthread(struct cpuset *tdset, cpuset_t *mask, 1083 struct domainset *domain, struct cpuset **nsetp, 1084 struct setlist *freelist, struct domainlist *domainlist) 1085 { 1086 struct cpuset *parent; 1087 1088 parent = cpuset_getbase(tdset); 1089 if (mask == NULL) 1090 mask = &tdset->cs_mask; 1091 if (domain == NULL) 1092 domain = tdset->cs_domain; 1093 return cpuset_shadow(parent, nsetp, mask, domain, freelist, 1094 domainlist); 1095 } 1096 1097 static int 1098 cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set, 1099 cpuset_t *mask, struct domainset *domain) 1100 { 1101 struct cpuset *parent; 1102 1103 parent = cpuset_getbase(tdset); 1104 1105 /* 1106 * If the thread restricted its mask then apply that same 1107 * restriction to the new set, otherwise take it wholesale. 1108 */ 1109 if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) { 1110 CPU_AND(mask, &tdset->cs_mask, &set->cs_mask); 1111 } else 1112 CPU_COPY(&set->cs_mask, mask); 1113 1114 /* 1115 * If the thread restricted the domain then we apply the 1116 * restriction to the new set but retain the policy. 
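
/*
 * Illustrative sketch (not part of this file): the intersection rule applied
 * above. A thread previously restricted to CPUs 0 and 1 that is moved under
 * a set whose mask is {1, 2, 3} ends up with the anonymous mask {1}.
 */
#if 0	/* illustrative example only */
static void
example_mask_intersection(void)
{
	cpuset_t tdmask, setmask, newmask;

	CPU_ZERO(&tdmask);
	CPU_SET(0, &tdmask);
	CPU_SET(1, &tdmask);
	CPU_ZERO(&setmask);
	CPU_SET(1, &setmask);
	CPU_SET(2, &setmask);
	CPU_SET(3, &setmask);
	CPU_AND(&newmask, &tdmask, &setmask);
	KASSERT(CPU_ISSET(1, &newmask) && CPU_COUNT(&newmask) == 1,
	    ("unexpected intersection"));
}
#endif	/* illustrative example only */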
1117 */ 1118 if (tdset->cs_domain != parent->cs_domain) { 1119 domainset_copy(tdset->cs_domain, domain); 1120 DOMAINSET_AND(&domain->ds_mask, &set->cs_domain->ds_mask); 1121 } else 1122 domainset_copy(set->cs_domain, domain); 1123 1124 if (CPU_EMPTY(mask) || DOMAINSET_EMPTY(&domain->ds_mask)) 1125 return (EDEADLK); 1126 1127 return (0); 1128 } 1129 1130 static int 1131 cpuset_setproc_test_setthread(struct cpuset *tdset, struct cpuset *set) 1132 { 1133 struct domainset domain; 1134 cpuset_t mask; 1135 1136 if (tdset->cs_id != CPUSET_INVALID) 1137 return (0); 1138 return cpuset_setproc_setthread_mask(tdset, set, &mask, &domain); 1139 } 1140 1141 static int 1142 cpuset_setproc_setthread(struct cpuset *tdset, struct cpuset *set, 1143 struct cpuset **nsetp, struct setlist *freelist, 1144 struct domainlist *domainlist) 1145 { 1146 struct domainset domain; 1147 cpuset_t mask; 1148 int error; 1149 1150 /* 1151 * If we're replacing on a thread that has not constrained the 1152 * original set we can simply accept the new set. 1153 */ 1154 if (tdset->cs_id != CPUSET_INVALID) { 1155 *nsetp = cpuset_ref(set); 1156 return (0); 1157 } 1158 error = cpuset_setproc_setthread_mask(tdset, set, &mask, &domain); 1159 if (error) 1160 return (error); 1161 1162 return cpuset_shadow(set, nsetp, &mask, &domain, freelist, 1163 domainlist); 1164 } 1165 1166 static int 1167 cpuset_setproc_newbase(struct thread *td, struct cpuset *set, 1168 struct cpuset *nroot, struct cpuset **nsetp, 1169 struct setlist *cpusets, struct domainlist *domainlist) 1170 { 1171 struct domainset ndomain; 1172 cpuset_t nmask; 1173 struct cpuset *pbase; 1174 int error; 1175 1176 pbase = cpuset_getbase(td->td_cpuset); 1177 1178 /* Copy process mask, then further apply the new root mask. */ 1179 CPU_AND(&nmask, &pbase->cs_mask, &nroot->cs_mask); 1180 1181 domainset_copy(pbase->cs_domain, &ndomain); 1182 DOMAINSET_AND(&ndomain.ds_mask, &set->cs_domain->ds_mask); 1183 1184 /* Policy is too restrictive, will not work. */ 1185 if (CPU_EMPTY(&nmask) || DOMAINSET_EMPTY(&ndomain.ds_mask)) 1186 return (EDEADLK); 1187 1188 /* 1189 * Remove pbase from the freelist in advance, it'll be pushed to 1190 * cpuset_ids on success. We assume here that cpuset_create() will not 1191 * touch pbase on failure, and we just enqueue it back to the freelist 1192 * to remain in a consistent state. 1193 */ 1194 pbase = LIST_FIRST(cpusets); 1195 LIST_REMOVE(pbase, cs_link); 1196 error = cpuset_create(&pbase, set, &nmask); 1197 if (error != 0) { 1198 LIST_INSERT_HEAD(cpusets, pbase, cs_link); 1199 return (error); 1200 } 1201 1202 /* Duplicates some work from above... oh well. */ 1203 pbase->cs_domain = domainset_shadow(set->cs_domain, &ndomain, 1204 domainlist); 1205 *nsetp = pbase; 1206 return (0); 1207 } 1208 1209 /* 1210 * Handle four cases for updating an entire process. 1211 * 1212 * 1) Set is non-null and the process is not rebasing onto a new root. This 1213 * reparents all anonymous sets to the provided set and replaces all 1214 * non-anonymous td_cpusets with the provided set. 1215 * 2) Set is non-null and the process is rebasing onto a new root. This 1216 * creates a new base set if the process previously had its own base set, 1217 * then reparents all anonymous sets either to that set or the provided set 1218 * if one was not created. Non-anonymous sets are similarly replaced. 1219 * 3) Mask is non-null. This replaces or creates anonymous sets for every 1220 * thread with the existing base as a parent. 1221 * 4) domain is non-null. 
This creates anonymous sets for every thread 1222 * and replaces the domain set. 1223 * 1224 * This is overly complicated because we can't allocate while holding a 1225 * spinlock and spinlocks must be held while changing and examining thread 1226 * state. 1227 */ 1228 static int 1229 cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask, 1230 struct domainset *domain, bool rebase) 1231 { 1232 struct setlist freelist; 1233 struct setlist droplist; 1234 struct domainlist domainlist; 1235 struct cpuset *base, *nset, *nroot, *tdroot; 1236 struct thread *td; 1237 struct proc *p; 1238 int needed; 1239 int nfree; 1240 int error; 1241 1242 /* 1243 * The algorithm requires two passes due to locking considerations. 1244 * 1245 * 1) Lookup the process and acquire the locks in the required order. 1246 * 2) If enough cpusets have not been allocated release the locks and 1247 * allocate them. Loop. 1248 */ 1249 cpuset_freelist_init(&freelist, 1); 1250 domainset_freelist_init(&domainlist, 1); 1251 nfree = 1; 1252 LIST_INIT(&droplist); 1253 nfree = 0; 1254 base = set; 1255 nroot = NULL; 1256 if (set != NULL) 1257 nroot = cpuset_getroot(set); 1258 for (;;) { 1259 error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset); 1260 if (error) 1261 goto out; 1262 tdroot = cpuset_getroot(td->td_cpuset); 1263 needed = p->p_numthreads; 1264 if (set != NULL && rebase && tdroot != nroot) 1265 needed++; 1266 if (nfree >= needed) 1267 break; 1268 PROC_UNLOCK(p); 1269 if (nfree < needed) { 1270 cpuset_freelist_add(&freelist, needed - nfree); 1271 domainset_freelist_add(&domainlist, needed - nfree); 1272 nfree = needed; 1273 } 1274 } 1275 PROC_LOCK_ASSERT(p, MA_OWNED); 1276 1277 /* 1278 * If we're changing roots and the root set is what has been specified 1279 * as the parent, then we'll check if the process was previously using 1280 * the root set and, if it wasn't, create a new base with the process's 1281 * mask applied to it. 1282 * 1283 * If the new root is incompatible with the existing mask, then we allow 1284 * the process to take on the new root if and only if they have 1285 * privilege to widen their mask anyways. Unprivileged processes get 1286 * rejected with EDEADLK. 1287 */ 1288 if (set != NULL && rebase && nroot != tdroot) { 1289 cpusetid_t base_id, root_id; 1290 1291 root_id = td->td_ucred->cr_prison->pr_cpuset->cs_id; 1292 base_id = cpuset_getbase(td->td_cpuset)->cs_id; 1293 1294 if (base_id != root_id) { 1295 error = cpuset_setproc_newbase(td, set, nroot, &base, 1296 &freelist, &domainlist); 1297 if (error == EDEADLK && 1298 priv_check(td, PRIV_SCHED_CPUSET) == 0) 1299 error = 0; 1300 if (error != 0) 1301 goto unlock_out; 1302 } 1303 } 1304 1305 /* 1306 * Now that the appropriate locks are held and we have enough cpusets, 1307 * make sure the operation will succeed before applying changes. The 1308 * proc lock prevents td_cpuset from changing between calls. 1309 */ 1310 error = 0; 1311 FOREACH_THREAD_IN_PROC(p, td) { 1312 thread_lock(td); 1313 if (set != NULL) 1314 error = cpuset_setproc_test_setthread(td->td_cpuset, 1315 base); 1316 else 1317 error = cpuset_setproc_test_maskthread(td->td_cpuset, 1318 mask, domain); 1319 thread_unlock(td); 1320 if (error) 1321 goto unlock_out; 1322 } 1323 /* 1324 * Replace each thread's cpuset while using deferred release. We 1325 * must do this because the thread lock must be held while operating 1326 * on the thread and this limits the type of operations allowed. 
 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (set != NULL)
			error = cpuset_setproc_setthread(td->td_cpuset, base,
			    &nset, &freelist, &domainlist);
		else
			error = cpuset_setproc_maskthread(td->td_cpuset, mask,
			    domain, &nset, &freelist, &domainlist);
		if (error) {
			thread_unlock(td);
			break;
		}
		cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset));
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	if (base != NULL && base != set)
		cpuset_rel(base);
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	cpuset_freelist_free(&freelist);
	domainset_freelist_free(&domainlist);
	return (error);
}

static int
bitset_strprint(char *buf, size_t bufsiz, const struct bitset *set, int setlen)
{
	size_t bytes;
	int i, once;
	char *p;

	once = 0;
	p = buf;
	for (i = 0; i < __bitset_words(setlen); i++) {
		if (once != 0) {
			if (bufsiz < 1)
				return (0);
			*p = ',';
			p++;
			bufsiz--;
		} else
			once = 1;
		if (bufsiz < sizeof(__STRING(ULONG_MAX)))
			return (0);
		bytes = snprintf(p, bufsiz, "%lx", set->__bits[i]);
		p += bytes;
		bufsiz -= bytes;
	}
	return (p - buf);
}

static int
bitset_strscan(struct bitset *set, int setlen, const char *buf)
{
	int i, ret;
	const char *p;

	BIT_ZERO(setlen, set);
	p = buf;
	for (i = 0; i < __bitset_words(setlen); i++) {
		if (*p == ',') {
			p++;
			continue;
		}
		ret = sscanf(p, "%lx", &set->__bits[i]);
		if (ret == 0 || ret == -1)
			break;
		while (isxdigit(*p))
			p++;
	}
	return (p - buf);
}

/*
 * Return a string representing a valid layout for a cpuset_t object.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
char *
cpusetobj_strprint(char *buf, const cpuset_t *set)
{

	bitset_strprint(buf, CPUSETBUFSIZ, (const struct bitset *)set,
	    CPU_SETSIZE);
	return (buf);
}

/*
 * Build a valid cpuset_t object from a string representation.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
int
cpusetobj_strscan(cpuset_t *set, const char *buf)
{
	char p;

	if (strlen(buf) > CPUSETBUFSIZ - 1)
		return (-1);

	p = buf[bitset_strscan((struct bitset *)set, CPU_SETSIZE, buf)];
	if (p != '\0')
		return (-1);

	return (0);
}
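
/*
 * Illustrative sketch (not part of this file): round-trip a cpuset through
 * its string form. The format is one lower-case hex word per bitset word,
 * word 0 (CPUs 0-63) first, separated by commas; with a 256-bit cpuset_t and
 * 64-bit words, CPUs 0-3 print as "f,0,0,0". The helper name is
 * hypothetical.
 */
#if 0	/* illustrative example only */
static int
example_cpuset_roundtrip(const cpuset_t *set)
{
	char buf[CPUSETBUFSIZ];
	cpuset_t tmp;

	cpusetobj_strprint(buf, set);
	if (cpusetobj_strscan(&tmp, buf) != 0)
		return (EINVAL);
	return (CPU_CMP(&tmp, set) == 0 ? 0 : EINVAL);
}
#endif	/* illustrative example only */

/*
 * Handle a domainset specifier in the sysctl tree. A pointer to a pointer to
 * a domainset is in arg1. If the user specifies a valid domainset the
 * pointer is updated.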
1440 * 1441 * Format is: 1442 * hex mask word 0,hex mask word 1,...:decimal policy:decimal preferred 1443 */ 1444 int 1445 sysctl_handle_domainset(SYSCTL_HANDLER_ARGS) 1446 { 1447 char buf[DOMAINSETBUFSIZ]; 1448 struct domainset *dset; 1449 struct domainset key; 1450 int policy, prefer, error; 1451 char *p; 1452 1453 dset = *(struct domainset **)arg1; 1454 error = 0; 1455 1456 if (dset != NULL) { 1457 p = buf + bitset_strprint(buf, DOMAINSETBUFSIZ, 1458 (const struct bitset *)&dset->ds_mask, DOMAINSET_SETSIZE); 1459 sprintf(p, ":%d:%d", dset->ds_policy, dset->ds_prefer); 1460 } else 1461 sprintf(buf, "<NULL>"); 1462 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 1463 if (error != 0 || req->newptr == NULL) 1464 return (error); 1465 1466 /* 1467 * Read in and validate the string. 1468 */ 1469 memset(&key, 0, sizeof(key)); 1470 p = &buf[bitset_strscan((struct bitset *)&key.ds_mask, 1471 DOMAINSET_SETSIZE, buf)]; 1472 if (p == buf) 1473 return (EINVAL); 1474 if (sscanf(p, ":%d:%d", &policy, &prefer) != 2) 1475 return (EINVAL); 1476 key.ds_policy = policy; 1477 key.ds_prefer = prefer; 1478 1479 /* Domainset_create() validates the policy.*/ 1480 dset = domainset_create(&key); 1481 if (dset == NULL) 1482 return (EINVAL); 1483 *(struct domainset **)arg1 = dset; 1484 1485 return (error); 1486 } 1487 1488 /* 1489 * Apply an anonymous mask or a domain to a single thread. 1490 */ 1491 static int 1492 _cpuset_setthread(lwpid_t id, cpuset_t *mask, struct domainset *domain) 1493 { 1494 struct setlist cpusets; 1495 struct domainlist domainlist; 1496 struct cpuset *nset; 1497 struct cpuset *set; 1498 struct thread *td; 1499 struct proc *p; 1500 int error; 1501 1502 cpuset_freelist_init(&cpusets, 1); 1503 domainset_freelist_init(&domainlist, domain != NULL); 1504 error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set); 1505 if (error) 1506 goto out; 1507 set = NULL; 1508 thread_lock(td); 1509 error = cpuset_shadow(td->td_cpuset, &nset, mask, domain, 1510 &cpusets, &domainlist); 1511 if (error == 0) 1512 set = cpuset_update_thread(td, nset); 1513 thread_unlock(td); 1514 PROC_UNLOCK(p); 1515 if (set) 1516 cpuset_rel(set); 1517 out: 1518 cpuset_freelist_free(&cpusets); 1519 domainset_freelist_free(&domainlist); 1520 return (error); 1521 } 1522 1523 /* 1524 * Apply an anonymous mask to a single thread. 1525 */ 1526 int 1527 cpuset_setthread(lwpid_t id, cpuset_t *mask) 1528 { 1529 1530 return _cpuset_setthread(id, mask, NULL); 1531 } 1532 1533 /* 1534 * Apply new cpumask to the ithread. 1535 */ 1536 int 1537 cpuset_setithread(lwpid_t id, int cpu) 1538 { 1539 cpuset_t mask; 1540 1541 CPU_ZERO(&mask); 1542 if (cpu == NOCPU) 1543 CPU_COPY(cpuset_root, &mask); 1544 else 1545 CPU_SET(cpu, &mask); 1546 return _cpuset_setthread(id, &mask, NULL); 1547 } 1548 1549 /* 1550 * Initialize static domainsets after NUMA information is available. This is 1551 * called before memory allocators are initialized. 
1552 */ 1553 void 1554 domainset_init(void) 1555 { 1556 struct domainset *dset; 1557 int i; 1558 1559 dset = &domainset_firsttouch; 1560 DOMAINSET_COPY(&all_domains, &dset->ds_mask); 1561 dset->ds_policy = DOMAINSET_POLICY_FIRSTTOUCH; 1562 dset->ds_prefer = -1; 1563 _domainset_create(dset, NULL); 1564 1565 dset = &domainset_interleave; 1566 DOMAINSET_COPY(&all_domains, &dset->ds_mask); 1567 dset->ds_policy = DOMAINSET_POLICY_INTERLEAVE; 1568 dset->ds_prefer = -1; 1569 _domainset_create(dset, NULL); 1570 1571 dset = &domainset_roundrobin; 1572 DOMAINSET_COPY(&all_domains, &dset->ds_mask); 1573 dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN; 1574 dset->ds_prefer = -1; 1575 _domainset_create(dset, NULL); 1576 1577 for (i = 0; i < vm_ndomains; i++) { 1578 dset = &domainset_fixed[i]; 1579 DOMAINSET_ZERO(&dset->ds_mask); 1580 DOMAINSET_SET(i, &dset->ds_mask); 1581 dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN; 1582 _domainset_create(dset, NULL); 1583 1584 dset = &domainset_prefer[i]; 1585 DOMAINSET_COPY(&all_domains, &dset->ds_mask); 1586 dset->ds_policy = DOMAINSET_POLICY_PREFER; 1587 dset->ds_prefer = i; 1588 _domainset_create(dset, NULL); 1589 } 1590 } 1591 1592 /* 1593 * Define the domainsets for cpuset 0, 1 and cpuset 2. 1594 */ 1595 void 1596 domainset_zero(void) 1597 { 1598 struct domainset *dset, *tmp; 1599 1600 mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE); 1601 1602 domainset0 = &domainset_firsttouch; 1603 curthread->td_domain.dr_policy = domainset0; 1604 1605 domainset2 = &domainset_interleave; 1606 kernel_object->domain.dr_policy = domainset2; 1607 1608 /* Remove empty domains from the global policies. */ 1609 LIST_FOREACH_SAFE(dset, &cpuset_domains, ds_link, tmp) 1610 if (domainset_empty_vm(dset)) 1611 LIST_REMOVE(dset, ds_link); 1612 } 1613 1614 /* 1615 * Creates system-wide cpusets and the cpuset for thread0 including three 1616 * sets: 1617 * 1618 * 0 - The root set which should represent all valid processors in the 1619 * system. This set is immutable. 1620 * 1 - The default set which all processes are a member of until changed. 1621 * This allows an administrator to move all threads off of given cpus to 1622 * dedicate them to high priority tasks or save power etc. 1623 * 2 - The kernel set which allows restriction and policy to be applied only 1624 * to kernel threads and the kernel_object. 1625 */ 1626 struct cpuset * 1627 cpuset_thread0(void) 1628 { 1629 struct cpuset *set; 1630 int i; 1631 int error __unused; 1632 1633 cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL, 1634 NULL, NULL, UMA_ALIGN_CACHE, 0); 1635 domainset_zone = uma_zcreate("domainset", sizeof(struct domainset), 1636 NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 1637 1638 /* 1639 * Create the root system set (0) for the whole machine. Doesn't use 1640 * cpuset_create() due to NULL parent. 1641 */ 1642 set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); 1643 CPU_COPY(&all_cpus, &set->cs_mask); 1644 LIST_INIT(&set->cs_children); 1645 LIST_INSERT_HEAD(&cpuset_ids, set, cs_link); 1646 refcount_init(&set->cs_ref, 1); 1647 set->cs_flags = CPU_SET_ROOT | CPU_SET_RDONLY; 1648 set->cs_domain = domainset0; 1649 cpuset_zero = set; 1650 cpuset_root = &set->cs_mask; 1651 1652 /* 1653 * Now derive a default (1), modifiable set from that to give out. 
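
/*
 * Note (illustrative, not part of this file): the static policies built by
 * domainset_init() above back convenience macros such as DOMAINSET_FIXED()
 * and DOMAINSET_PREF() from sys/domainset.h, so other kernel code can ask
 * for NUMA-aware allocations without creating a domainset of its own, e.g.:
 *
 *	buf = malloc_domainset(size, M_DEVBUF, DOMAINSET_PREF(domain),
 *	    M_WAITOK | M_ZERO);
 */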
1654 */ 1655 set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); 1656 error = cpuset_init(set, cpuset_zero, NULL, NULL, 1); 1657 KASSERT(error == 0, ("Error creating default set: %d\n", error)); 1658 cpuset_default = set; 1659 /* 1660 * Create the kernel set (2). 1661 */ 1662 set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); 1663 error = cpuset_init(set, cpuset_zero, NULL, NULL, 2); 1664 KASSERT(error == 0, ("Error creating kernel set: %d\n", error)); 1665 set->cs_domain = domainset2; 1666 cpuset_kernel = set; 1667 1668 /* 1669 * Initialize the unit allocator. 0 and 1 are allocated above. 1670 */ 1671 cpuset_unr = new_unrhdr(3, INT_MAX, NULL); 1672 1673 /* 1674 * If MD code has not initialized per-domain cpusets, place all 1675 * CPUs in domain 0. 1676 */ 1677 for (i = 0; i < MAXMEMDOM; i++) 1678 if (!CPU_EMPTY(&cpuset_domain[i])) 1679 goto domains_set; 1680 CPU_COPY(&all_cpus, &cpuset_domain[0]); 1681 domains_set: 1682 1683 return (cpuset_default); 1684 } 1685 1686 void 1687 cpuset_kernthread(struct thread *td) 1688 { 1689 struct cpuset *set; 1690 1691 thread_lock(td); 1692 set = td->td_cpuset; 1693 td->td_cpuset = cpuset_ref(cpuset_kernel); 1694 thread_unlock(td); 1695 cpuset_rel(set); 1696 } 1697 1698 /* 1699 * Create a cpuset, which would be cpuset_create() but 1700 * mark the new 'set' as root. 1701 * 1702 * We are not going to reparent the td to it. Use cpuset_setproc_update_set() 1703 * for that. 1704 * 1705 * In case of no error, returns the set in *setp locked with a reference. 1706 */ 1707 int 1708 cpuset_create_root(struct prison *pr, struct cpuset **setp) 1709 { 1710 struct cpuset *set; 1711 int error; 1712 1713 KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__)); 1714 KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__)); 1715 1716 set = NULL; 1717 error = cpuset_create(&set, pr->pr_cpuset, &pr->pr_cpuset->cs_mask); 1718 if (error) 1719 return (error); 1720 1721 KASSERT(set != NULL, ("[%s:%d] cpuset_create returned invalid data", 1722 __func__, __LINE__)); 1723 1724 /* Mark the set as root. */ 1725 set->cs_flags |= CPU_SET_ROOT; 1726 *setp = set; 1727 1728 return (0); 1729 } 1730 1731 int 1732 cpuset_setproc_update_set(struct proc *p, struct cpuset *set) 1733 { 1734 int error; 1735 1736 KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__)); 1737 KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__)); 1738 1739 cpuset_ref(set); 1740 error = cpuset_setproc(p->p_pid, set, NULL, NULL, true); 1741 if (error) 1742 return (error); 1743 cpuset_rel(set); 1744 return (0); 1745 } 1746 1747 /* 1748 * In Capability mode, the only accesses that are permitted are to the current 1749 * thread and process' CPU and domain sets. 
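
/*
 * Illustrative sketch (not part of this file): the call sequence suggested by
 * the comments above for giving a new jail its own root set and rebasing a
 * process onto it; cpuset_create_root() creates the set and
 * cpuset_setproc_update_set() does the reparenting. The helper name and the
 * exact wiring into jail creation are hypothetical.
 */
#if 0	/* illustrative example only */
static int
example_jail_root(struct prison *parent, struct prison *child, struct proc *p)
{
	int error;

	error = cpuset_create_root(parent, &child->pr_cpuset);
	if (error != 0)
		return (error);
	return (cpuset_setproc_update_set(p, child->pr_cpuset));
}
#endif	/* illustrative example only */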
1750 */ 1751 static int 1752 cpuset_check_capabilities(struct thread *td, cpulevel_t level, cpuwhich_t which, 1753 id_t id) 1754 { 1755 if (IN_CAPABILITY_MODE(td)) { 1756 if (level != CPU_LEVEL_WHICH) 1757 return (ECAPMODE); 1758 if (which != CPU_WHICH_TID && which != CPU_WHICH_PID && 1759 which != CPU_WHICH_TIDPID) 1760 return (ECAPMODE); 1761 if (id != -1 && which == CPU_WHICH_TIDPID && 1762 id != td->td_tid && id != td->td_proc->p_pid) 1763 return (ECAPMODE); 1764 if (id != -1 && 1765 !(which == CPU_WHICH_TID && id == td->td_tid) && 1766 !(which == CPU_WHICH_PID && id == td->td_proc->p_pid)) 1767 return (ECAPMODE); 1768 } 1769 return (0); 1770 } 1771 1772 #if defined(__powerpc__) 1773 /* 1774 * TODO: At least powerpc64 and powerpc64le kernels panic with 1775 * exception 0x480 (instruction segment exception) when copyin/copyout, 1776 * are set as a function pointer in cpuset_copy_cb struct and called by 1777 * an external module (like pfsync). Tip: copyin/copyout have an ifunc 1778 * resolver function. 1779 * 1780 * Bisect of LLVM shows that the behavior changed on LLVM 10.0 with 1781 * https://reviews.llvm.org/rGdc06b0bc9ad055d06535462d91bfc2a744b2f589 1782 * 1783 * This is a hack/workaround while problem is being discussed with LLVM 1784 * community 1785 */ 1786 static int 1787 cpuset_copyin(const void *uaddr, void *kaddr, size_t len) 1788 { 1789 return(copyin(uaddr, kaddr, len)); 1790 } 1791 1792 static int 1793 cpuset_copyout(const void *kaddr, void *uaddr, size_t len) 1794 { 1795 return(copyout(kaddr, uaddr, len)); 1796 } 1797 1798 static const struct cpuset_copy_cb copy_set = { 1799 .cpuset_copyin = cpuset_copyin, 1800 .cpuset_copyout = cpuset_copyout 1801 }; 1802 #else 1803 static const struct cpuset_copy_cb copy_set = { 1804 .cpuset_copyin = copyin, 1805 .cpuset_copyout = copyout 1806 }; 1807 #endif 1808 1809 #ifndef _SYS_SYSPROTO_H_ 1810 struct cpuset_args { 1811 cpusetid_t *setid; 1812 }; 1813 #endif 1814 int 1815 sys_cpuset(struct thread *td, struct cpuset_args *uap) 1816 { 1817 struct cpuset *root; 1818 struct cpuset *set; 1819 int error; 1820 1821 thread_lock(td); 1822 root = cpuset_refroot(td->td_cpuset); 1823 thread_unlock(td); 1824 set = NULL; 1825 error = cpuset_create(&set, root, &root->cs_mask); 1826 cpuset_rel(root); 1827 if (error) 1828 return (error); 1829 error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id)); 1830 if (error == 0) 1831 error = cpuset_setproc(-1, set, NULL, NULL, false); 1832 cpuset_rel(set); 1833 return (error); 1834 } 1835 1836 #ifndef _SYS_SYSPROTO_H_ 1837 struct cpuset_setid_args { 1838 cpuwhich_t which; 1839 id_t id; 1840 cpusetid_t setid; 1841 }; 1842 #endif 1843 int 1844 sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap) 1845 { 1846 1847 return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid)); 1848 } 1849 1850 int 1851 kern_cpuset_setid(struct thread *td, cpuwhich_t which, 1852 id_t id, cpusetid_t setid) 1853 { 1854 struct cpuset *set; 1855 int error; 1856 1857 /* 1858 * Presently we only support per-process sets. 
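
/*
 * Illustrative userland sketch (not part of this file): create a new named
 * set with cpuset(2) and move a process into it with cpuset_setid(2), the
 * per-process case handled below. The helper name is hypothetical.
 */
#if 0	/* illustrative example only */
#include <sys/param.h>
#include <sys/cpuset.h>

static int
example_new_set_for_pid(pid_t pid)
{
	cpusetid_t setid;

	if (cpuset(&setid) != 0)
		return (-1);
	return (cpuset_setid(CPU_WHICH_PID, pid, setid));
}
#endif	/* illustrative example only */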
1859 */ 1860 if (which != CPU_WHICH_PID) 1861 return (EINVAL); 1862 set = cpuset_lookup(setid, td); 1863 if (set == NULL) 1864 return (ESRCH); 1865 error = cpuset_setproc(id, set, NULL, NULL, false); 1866 cpuset_rel(set); 1867 return (error); 1868 } 1869 1870 #ifndef _SYS_SYSPROTO_H_ 1871 struct cpuset_getid_args { 1872 cpulevel_t level; 1873 cpuwhich_t which; 1874 id_t id; 1875 cpusetid_t *setid; 1876 }; 1877 #endif 1878 int 1879 sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap) 1880 { 1881 1882 return (kern_cpuset_getid(td, uap->level, uap->which, uap->id, 1883 uap->setid)); 1884 } 1885 1886 int 1887 kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which, 1888 id_t id, cpusetid_t *setid) 1889 { 1890 struct cpuset *nset; 1891 struct cpuset *set; 1892 struct thread *ttd; 1893 struct proc *p; 1894 cpusetid_t tmpid; 1895 int error; 1896 1897 if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET) 1898 return (EINVAL); 1899 error = cpuset_which(which, id, &p, &ttd, &set); 1900 if (error) 1901 return (error); 1902 switch (which) { 1903 case CPU_WHICH_TID: 1904 case CPU_WHICH_PID: 1905 thread_lock(ttd); 1906 set = cpuset_refbase(ttd->td_cpuset); 1907 thread_unlock(ttd); 1908 PROC_UNLOCK(p); 1909 break; 1910 case CPU_WHICH_CPUSET: 1911 case CPU_WHICH_JAIL: 1912 break; 1913 case CPU_WHICH_IRQ: 1914 case CPU_WHICH_DOMAIN: 1915 return (EINVAL); 1916 } 1917 switch (level) { 1918 case CPU_LEVEL_ROOT: 1919 nset = cpuset_refroot(set); 1920 cpuset_rel(set); 1921 set = nset; 1922 break; 1923 case CPU_LEVEL_CPUSET: 1924 break; 1925 case CPU_LEVEL_WHICH: 1926 break; 1927 } 1928 tmpid = set->cs_id; 1929 cpuset_rel(set); 1930 if (error == 0) 1931 error = copyout(&tmpid, setid, sizeof(tmpid)); 1932 1933 return (error); 1934 } 1935 1936 #ifndef _SYS_SYSPROTO_H_ 1937 struct cpuset_getaffinity_args { 1938 cpulevel_t level; 1939 cpuwhich_t which; 1940 id_t id; 1941 size_t cpusetsize; 1942 cpuset_t *mask; 1943 }; 1944 #endif 1945 int 1946 sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap) 1947 { 1948 1949 return (user_cpuset_getaffinity(td, uap->level, uap->which, 1950 uap->id, uap->cpusetsize, uap->mask, ©_set)); 1951 } 1952 1953 int 1954 kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, 1955 id_t id, size_t cpusetsize, cpuset_t *mask) 1956 { 1957 struct thread *ttd; 1958 struct cpuset *nset; 1959 struct cpuset *set; 1960 struct proc *p; 1961 int error; 1962 1963 error = cpuset_check_capabilities(td, level, which, id); 1964 if (error != 0) 1965 return (error); 1966 error = cpuset_which(which, id, &p, &ttd, &set); 1967 if (error != 0) 1968 return (error); 1969 switch (level) { 1970 case CPU_LEVEL_ROOT: 1971 case CPU_LEVEL_CPUSET: 1972 switch (which) { 1973 case CPU_WHICH_TID: 1974 case CPU_WHICH_PID: 1975 thread_lock(ttd); 1976 set = cpuset_ref(ttd->td_cpuset); 1977 thread_unlock(ttd); 1978 break; 1979 case CPU_WHICH_CPUSET: 1980 case CPU_WHICH_JAIL: 1981 break; 1982 case CPU_WHICH_IRQ: 1983 case CPU_WHICH_INTRHANDLER: 1984 case CPU_WHICH_ITHREAD: 1985 case CPU_WHICH_DOMAIN: 1986 return (EINVAL); 1987 } 1988 if (level == CPU_LEVEL_ROOT) 1989 nset = cpuset_refroot(set); 1990 else 1991 nset = cpuset_refbase(set); 1992 CPU_COPY(&nset->cs_mask, mask); 1993 cpuset_rel(nset); 1994 break; 1995 case CPU_LEVEL_WHICH: 1996 switch (which) { 1997 case CPU_WHICH_TID: 1998 thread_lock(ttd); 1999 CPU_COPY(&ttd->td_cpuset->cs_mask, mask); 2000 thread_unlock(ttd); 2001 break; 2002 case CPU_WHICH_PID: 2003 FOREACH_THREAD_IN_PROC(p, ttd) { 
int
kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, cpuset_t *mask)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	int error;

	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error != 0)
		return (error);
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			return (EINVAL);
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_TIDPID:
			if (id > PID_MAX || id == -1) {
				thread_lock(ttd);
				CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
				thread_unlock(ttd);
				break;
			}
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_getaffinity(id, which, mask);
			break;
		case CPU_WHICH_DOMAIN:
			if (id < 0 || id >= MAXMEMDOM)
				error = ESRCH;
			else
				CPU_COPY(&cpuset_domain[id], mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0) {
		if (cpusetsize < howmany(CPU_FLS(mask), NBBY))
			return (ERANGE);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_STRUCT))
			ktrcpuset(mask, cpusetsize);
#endif
	}
	return (error);
}

int
user_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, cpuset_t *maskp, const struct cpuset_copy_cb *cb)
{
	cpuset_t *mask;
	size_t size;
	int error;

	mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
	size = min(cpusetsize, sizeof(cpuset_t));
	error = kern_cpuset_getaffinity(td, level, which, id, size, mask);
	if (error == 0) {
		error = cb->cpuset_copyout(mask, maskp, size);
		if (error != 0)
			goto out;
		if (cpusetsize > size) {
			char *end;
			char *cp;
			int rv;

			end = cp = (char *)&maskp->__bits;
			end += cpusetsize;
			cp += size;
			while (cp != end) {
				rv = subyte(cp, 0);
				if (rv == -1) {
					error = EFAULT;
					goto out;
				}
				cp++;
			}
		}
	}
out:
	free(mask, M_TEMP);
	return (error);
}
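
/*
 * The cpuset_setaffinity(2) entry points follow.  Illustrative userland
 * sketch (not part of this file): pin the calling thread to CPU 0:
 *
 *	cpuset_t m;
 *
 *	CPU_ZERO(&m);
 *	CPU_SET(0, &m);
 *	(void)cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(m), &m);
 */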
#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{

	return (user_cpuset_setaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask, &copy_set));
}

int
kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	int error;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktrcpuset(mask, sizeof(cpuset_t));
#endif
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	if (CPU_EMPTY(mask))
		return (EDEADLK);
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			return (EINVAL);
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, mask, NULL, false);
			break;
		case CPU_WHICH_TIDPID:
			if (id > PID_MAX || id == -1)
				error = cpuset_setthread(id, mask);
			else
				error = cpuset_setproc(id, NULL, mask, NULL,
				    false);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_setaffinity(id, which, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
user_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, const cpuset_t *maskp, const struct cpuset_copy_cb *cb)
{
	cpuset_t *mask;
	int error;
	size_t size;

	size = min(cpusetsize, sizeof(cpuset_t));
	mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
	error = cb->cpuset_copyin(maskp, mask, size);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (cpusetsize > sizeof(cpuset_t)) {
		const char *end, *cp;
		int val;
		end = cp = (const char *)&maskp->__bits;
		end += cpusetsize;
		cp += sizeof(cpuset_t);

		while (cp != end) {
			val = fubyte(cp);
			if (val == -1) {
				error = EFAULT;
				goto out;
			}
			if (val != 0) {
				error = EINVAL;
				goto out;
			}
			cp++;
		}
	}
	error = kern_cpuset_setaffinity(td, level, which, id, mask);

out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getdomain_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		domainsetsize;
	domainset_t	*mask;
	int		*policy;
};
#endif
int
sys_cpuset_getdomain(struct thread *td, struct cpuset_getdomain_args *uap)
{

	return (kern_cpuset_getdomain(td, uap->level, uap->which,
	    uap->id, uap->domainsetsize, uap->mask, uap->policy, &copy_set));
}
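
/*
 * Resolve the level/which pair to a memory domain set and policy and copy
 * them out to user space.  For CPU_LEVEL_WHICH with a pid the reported
 * mask is the union of the domains of all threads in the process and the
 * last thread's policy wins.
 */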
int
kern_cpuset_getdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t domainsetsize, domainset_t *maskp, int *policyp,
    const struct cpuset_copy_cb *cb)
{
	struct domainset outset;
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct domainset *dset;
	struct proc *p;
	domainset_t *mask;
	int error;

	if (domainsetsize < sizeof(domainset_t) ||
	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
		return (ERANGE);
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
	bzero(&outset, sizeof(outset));
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		domainset_copy(nset->cs_domain, &outset);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			domainset_copy(ttd->td_cpuset->cs_domain, &outset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				dset = ttd->td_cpuset->cs_domain;
				/* Show all domains in the proc. */
				DOMAINSET_OR(&outset.ds_mask, &dset->ds_mask);
				/* Last policy wins. */
				outset.ds_policy = dset->ds_policy;
				outset.ds_prefer = dset->ds_prefer;
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			domainset_copy(set->cs_domain, &outset);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	/*
	 * Translate prefer into a set containing only the preferred domain,
	 * not the entire fallback set.
	 */
	if (outset.ds_policy == DOMAINSET_POLICY_PREFER) {
		DOMAINSET_ZERO(&outset.ds_mask);
		DOMAINSET_SET(outset.ds_prefer, &outset.ds_mask);
	}
	DOMAINSET_COPY(&outset.ds_mask, mask);
	if (error == 0)
		error = cb->cpuset_copyout(mask, maskp, domainsetsize);
	if (error == 0)
		if (suword32(policyp, outset.ds_policy) != 0)
			error = EFAULT;
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setdomain_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		domainsetsize;
	domainset_t	*mask;
	int		policy;
};
#endif
int
sys_cpuset_setdomain(struct thread *td, struct cpuset_setdomain_args *uap)
{

	return (kern_cpuset_setdomain(td, uap->level, uap->which,
	    uap->id, uap->domainsetsize, uap->mask, uap->policy, &copy_set));
}
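
/*
 * Validate a user supplied domain mask and policy and apply them at the
 * requested level.  A PREFER policy is reduced to a single preferred
 * domain with an all-domains fallback mask.
 *
 * Illustrative userland sketch (not part of this file): prefer domain 0
 * for the calling thread via cpuset_setdomain(2):
 *
 *	domainset_t d;
 *
 *	DOMAINSET_ZERO(&d);
 *	DOMAINSET_SET(0, &d);
 *	(void)cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(d), &d, DOMAINSET_POLICY_PREFER);
 */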
int
kern_cpuset_setdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t domainsetsize, const domainset_t *maskp, int policy,
    const struct cpuset_copy_cb *cb)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	struct domainset domain;
	domainset_t *mask;
	int error;

	if (domainsetsize < sizeof(domainset_t) ||
	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
		return (ERANGE);
	if (policy <= DOMAINSET_POLICY_INVALID ||
	    policy > DOMAINSET_POLICY_MAX)
		return (EINVAL);
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	memset(&domain, 0, sizeof(domain));
	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = cb->cpuset_copyin(maskp, mask, domainsetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (domainsetsize > sizeof(domainset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += domainsetsize;
		cp += sizeof(domainset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	if (DOMAINSET_EMPTY(mask)) {
		error = EDEADLK;
		goto out;
	}
	DOMAINSET_COPY(mask, &domain.ds_mask);
	domain.ds_policy = policy;

	/*
	 * Sanitize the provided mask.
	 */
	if (!DOMAINSET_SUBSET(&all_domains, &domain.ds_mask)) {
		error = EINVAL;
		goto out;
	}

	/* Translate preferred policy into a mask and fallback. */
	if (policy == DOMAINSET_POLICY_PREFER) {
		/* Only support a single preferred domain. */
		if (DOMAINSET_COUNT(&domain.ds_mask) != 1) {
			error = EINVAL;
			goto out;
		}
		domain.ds_prefer = DOMAINSET_FFS(&domain.ds_mask) - 1;
		/* This will be constrained by domainset_shadow(). */
		DOMAINSET_COPY(&all_domains, &domain.ds_mask);
	}

	/*
	 * When given an impossible policy, fall back to interleaving
	 * across all domains.
	 */
	if (domainset_empty_vm(&domain))
		domainset_copy(domainset2, &domain);

	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify_domain(nset, &domain);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = _cpuset_setthread(id, NULL, &domain);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, NULL, &domain, false);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify_domain(set, &domain);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}
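
/*
 * Kernel debugger support.  The 'show cpusets' and 'show domainsets'
 * commands below walk the global cpuset and domainset lists; they are
 * compiled in only when the kernel is built with DDB.
 */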
#ifdef DDB

static void
ddb_display_bitset(const struct bitset *set, int size)
{
	int bit, once;

	for (once = 0, bit = 0; bit < size; bit++) {
		if (CPU_ISSET(bit, set)) {
			if (once == 0) {
				db_printf("%d", bit);
				once = 1;
			} else
				db_printf(",%d", bit);
		}
	}
	if (once == 0)
		db_printf("<none>");
}

void
ddb_display_cpuset(const cpuset_t *set)
{
	ddb_display_bitset((const struct bitset *)set, CPU_SETSIZE);
}

static void
ddb_display_domainset(const domainset_t *set)
{
	ddb_display_bitset((const struct bitset *)set, DOMAINSET_SETSIZE);
}

DB_SHOW_COMMAND_FLAGS(cpusets, db_show_cpusets, DB_CMD_MEMSAFE)
{
	struct cpuset *set;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, refcount_load(&set->cs_ref), set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  cpu mask=");
		ddb_display_cpuset(&set->cs_mask);
		db_printf("\n");
		db_printf("  domain policy %d prefer %d mask=",
		    set->cs_domain->ds_policy, set->cs_domain->ds_prefer);
		ddb_display_domainset(&set->cs_domain->ds_mask);
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}

DB_SHOW_COMMAND_FLAGS(domainsets, db_show_domainsets, DB_CMD_MEMSAFE)
{
	struct domainset *set;

	LIST_FOREACH(set, &cpuset_domains, ds_link) {
		db_printf("set=%p policy %d prefer %d cnt %d\n",
		    set, set->ds_policy, set->ds_prefer, set->ds_cnt);
		db_printf("  mask =");
		ddb_display_domainset(&set->ds_mask);
		db_printf("\n");
	}
}
#endif /* DDB */