xref: /freebsd/sys/kern/kern_cpuset.c (revision f2d48b5e2c3b45850585e4d7aee324fe148afbf2)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2008,  Jeffrey Roberson <jeff@freebsd.org>
5  * All rights reserved.
6  *
7  * Copyright (c) 2008 Nokia Corporation
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice unmodified, this list of conditions, and the following
15  *    disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_ddb.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysctl.h>
41 #include <sys/ctype.h>
42 #include <sys/sysproto.h>
43 #include <sys/jail.h>
44 #include <sys/kernel.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/mutex.h>
48 #include <sys/priv.h>
49 #include <sys/proc.h>
50 #include <sys/refcount.h>
51 #include <sys/sched.h>
52 #include <sys/smp.h>
53 #include <sys/syscallsubr.h>
54 #include <sys/capsicum.h>
55 #include <sys/cpuset.h>
56 #include <sys/domainset.h>
57 #include <sys/sx.h>
58 #include <sys/queue.h>
59 #include <sys/libkern.h>
60 #include <sys/limits.h>
61 #include <sys/bus.h>
62 #include <sys/interrupt.h>
63 #include <sys/vmmeter.h>
64 
65 #include <vm/uma.h>
66 #include <vm/vm.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_page.h>
69 #include <vm/vm_pageout.h>
70 #include <vm/vm_extern.h>
71 #include <vm/vm_param.h>
72 #include <vm/vm_phys.h>
73 #include <vm/vm_pagequeue.h>
74 
75 #ifdef DDB
76 #include <ddb/ddb.h>
77 #endif /* DDB */
78 
79 /*
80  * cpusets provide a mechanism for creating and manipulating sets of
81  * processors for the purpose of constraining the scheduling of threads to
82  * specific processors.
83  *
84  * Each process belongs to an identified set; by default this is set 1.  Each
85  * thread may further restrict the cpus it may run on to a subset of this
86  * named set.  This creates an anonymous set which other threads and processes
87  * may not join by number.
88  *
89  * The named set is referred to herein as the 'base' set to avoid ambiguity.
90  * This set is usually a child of a 'root' set while the anonymous set may
91  * simply be referred to as a mask.  In the syscall api these are referred to
92  * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
93  *
94  * Threads inherit their set from their creator whether it be anonymous or
95  * not.  This means that anonymous sets are immutable because they may be
96  * shared.  To modify an anonymous set a new set is created with the desired
97  * mask and the same parent as the existing anonymous set.  This gives the
98  * illusion of each thread having a private mask.
99  *
100  * Via the syscall apis a user may ask to retrieve or modify the root, base,
101  * or mask that is discovered via a pid, tid, or setid.  Modifying a set
102  * modifies all numbered and anonymous child sets to comply with the new mask.
103  * Modifying a pid or tid's mask applies only to that process or thread, and
104  * the new mask must still fall within the assigned parent set.
105  *
106  * A thread may not be assigned to a group separate from other threads in
107  * the process.  This is to remove ambiguity when the setid is queried with
108  * a pid argument.  There is no other technical limitation.
109  *
110  * This somewhat complex arrangement is intended to make it easy for
111  * applications to query available processors and bind their threads to
112  * specific processors while also allowing administrators to dynamically
113  * reprovision by changing sets which apply to groups of processes.
114  *
115  * A simple application should not concern itself with sets at all and
116  * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
117  * meaning 'curthread'.  It may query available cpus for that tid with a
118  * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
119  */
120 
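/*
 * Purely illustrative sketch (not part of the original source): the simple
 * per-thread usage described above, as seen from userspace via the
 * cpuset_setaffinity(2) syscall declared in <sys/cpuset.h>.  It restricts
 * the calling thread (CPU_WHICH_TID with an id of -1) to CPU 0:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_setaffinity");
 */
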
121 LIST_HEAD(domainlist, domainset);
122 struct domainset __read_mostly domainset_fixed[MAXMEMDOM];
123 struct domainset __read_mostly domainset_prefer[MAXMEMDOM];
124 struct domainset __read_mostly domainset_roundrobin;
125 
126 static uma_zone_t cpuset_zone;
127 static uma_zone_t domainset_zone;
128 static struct mtx cpuset_lock;
129 static struct setlist cpuset_ids;
130 static struct domainlist cpuset_domains;
131 static struct unrhdr *cpuset_unr;
132 static struct cpuset *cpuset_zero, *cpuset_default, *cpuset_kernel;
133 static struct domainset domainset0, domainset2;
134 
135 /* Return the size of cpuset_t at the kernel level */
136 SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD,
137     SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");
138 
139 cpuset_t *cpuset_root;
140 cpuset_t cpuset_domain[MAXMEMDOM];
141 
142 static int domainset_valid(const struct domainset *, const struct domainset *);
143 
144 /*
145  * Find the first non-anonymous set starting from 'set'.
146  */
147 static struct cpuset *
148 cpuset_getbase(struct cpuset *set)
149 {
150 
151 	if (set->cs_id == CPUSET_INVALID)
152 		set = set->cs_parent;
153 	return (set);
154 }
155 
156 /*
157  * Walks up the tree from 'set' to find the root.
158  */
159 static struct cpuset *
160 cpuset_getroot(struct cpuset *set)
161 {
162 
163 	while ((set->cs_flags & CPU_SET_ROOT) == 0 && set->cs_parent != NULL)
164 		set = set->cs_parent;
165 	return (set);
166 }
167 
168 /*
169  * Acquire a reference to a cpuset, all pointers must be tracked with refs.
170  */
171 struct cpuset *
172 cpuset_ref(struct cpuset *set)
173 {
174 
175 	refcount_acquire(&set->cs_ref);
176 	return (set);
177 }
178 
179 /*
180  * Walks up the tree from 'set' to find the root.  Returns the root
181  * referenced.
182  */
183 static struct cpuset *
184 cpuset_refroot(struct cpuset *set)
185 {
186 
187 	return (cpuset_ref(cpuset_getroot(set)));
188 }
189 
190 /*
191  * Find the first non-anonymous set starting from 'set'.  Returns this set
192  * referenced.  May return the passed in set with an extra ref if it is
193  * not anonymous.
194  */
195 static struct cpuset *
196 cpuset_refbase(struct cpuset *set)
197 {
198 
199 	return (cpuset_ref(cpuset_getbase(set)));
200 }
201 
202 /*
203  * Release a reference in a context where it is safe to allocate.
204  */
205 void
206 cpuset_rel(struct cpuset *set)
207 {
208 	cpusetid_t id;
209 
210 	if (refcount_release_if_not_last(&set->cs_ref))
211 		return;
212 	mtx_lock_spin(&cpuset_lock);
213 	if (!refcount_release(&set->cs_ref)) {
214 		mtx_unlock_spin(&cpuset_lock);
215 		return;
216 	}
217 	LIST_REMOVE(set, cs_siblings);
218 	id = set->cs_id;
219 	if (id != CPUSET_INVALID)
220 		LIST_REMOVE(set, cs_link);
221 	mtx_unlock_spin(&cpuset_lock);
222 	cpuset_rel(set->cs_parent);
223 	uma_zfree(cpuset_zone, set);
224 	if (id != CPUSET_INVALID)
225 		free_unr(cpuset_unr, id);
226 }
227 
228 /*
229  * Deferred release must be used when in a context that is not safe to
230  * allocate/free.  This places any unreferenced sets on the list 'head'.
231  */
232 static void
233 cpuset_rel_defer(struct setlist *head, struct cpuset *set)
234 {
235 
236 	if (refcount_release_if_not_last(&set->cs_ref))
237 		return;
238 	mtx_lock_spin(&cpuset_lock);
239 	if (!refcount_release(&set->cs_ref)) {
240 		mtx_unlock_spin(&cpuset_lock);
241 		return;
242 	}
243 	LIST_REMOVE(set, cs_siblings);
244 	if (set->cs_id != CPUSET_INVALID)
245 		LIST_REMOVE(set, cs_link);
246 	LIST_INSERT_HEAD(head, set, cs_link);
247 	mtx_unlock_spin(&cpuset_lock);
248 }
249 
250 /*
251  * Complete a deferred release.  Removes the set from the list provided to
252  * cpuset_rel_defer.
253  */
254 static void
255 cpuset_rel_complete(struct cpuset *set)
256 {
257 	cpusetid_t id;
258 
259 	id = set->cs_id;
260 	LIST_REMOVE(set, cs_link);
261 	cpuset_rel(set->cs_parent);
262 	uma_zfree(cpuset_zone, set);
263 	if (id != CPUSET_INVALID)
264 		free_unr(cpuset_unr, id);
265 }
266 
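/*
 * Illustrative sketch (not part of the original source) of how the deferred
 * release pair above is used later in this file (see cpuset_setproc()):
 * releases are queued while a thread lock is held and completed once it is
 * safe to free memory again.
 *
 *	LIST_INIT(&droplist);
 *	thread_lock(td);
 *	cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset));
 *	thread_unlock(td);
 *	while ((nset = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(nset);
 */
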
267 /*
268  * Find a set based on an id.  Returns it with a ref.
269  */
270 static struct cpuset *
271 cpuset_lookup(cpusetid_t setid, struct thread *td)
272 {
273 	struct cpuset *set;
274 
275 	if (setid == CPUSET_INVALID)
276 		return (NULL);
277 	mtx_lock_spin(&cpuset_lock);
278 	LIST_FOREACH(set, &cpuset_ids, cs_link)
279 		if (set->cs_id == setid)
280 			break;
281 	if (set)
282 		cpuset_ref(set);
283 	mtx_unlock_spin(&cpuset_lock);
284 
285 	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
286 	if (set != NULL && jailed(td->td_ucred)) {
287 		struct cpuset *jset, *tset;
288 
289 		jset = td->td_ucred->cr_prison->pr_cpuset;
290 		for (tset = set; tset != NULL; tset = tset->cs_parent)
291 			if (tset == jset)
292 				break;
293 		if (tset == NULL) {
294 			cpuset_rel(set);
295 			set = NULL;
296 		}
297 	}
298 
299 	return (set);
300 }
301 
302 /*
303  * Initialize a set in the space provided in 'set' with the provided parameters.
304  * The set is returned with a single ref.  May return EDEADLK if the set
305  * will have no valid cpu based on restrictions from the parent.
306  */
307 static int
308 cpuset_init(struct cpuset *set, struct cpuset *parent,
309     const cpuset_t *mask, struct domainset *domain, cpusetid_t id)
310 {
311 
312 	if (domain == NULL)
313 		domain = parent->cs_domain;
314 	if (mask == NULL)
315 		mask = &parent->cs_mask;
316 	if (!CPU_OVERLAP(&parent->cs_mask, mask))
317 		return (EDEADLK);
318 	/* The domain must be prepared ahead of time. */
319 	if (!domainset_valid(parent->cs_domain, domain))
320 		return (EDEADLK);
321 	CPU_COPY(mask, &set->cs_mask);
322 	LIST_INIT(&set->cs_children);
323 	refcount_init(&set->cs_ref, 1);
324 	set->cs_flags = 0;
325 	mtx_lock_spin(&cpuset_lock);
326 	set->cs_domain = domain;
327 	CPU_AND(&set->cs_mask, &parent->cs_mask);
328 	set->cs_id = id;
329 	set->cs_parent = cpuset_ref(parent);
330 	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
331 	if (set->cs_id != CPUSET_INVALID)
332 		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
333 	mtx_unlock_spin(&cpuset_lock);
334 
335 	return (0);
336 }
337 
338 /*
339  * Create a new non-anonymous set with the requested parent and mask.  May
340  * return failures if the mask is invalid or a new number can not be
341  * allocated.
342  *
343  * If *setp is not NULL, then it will be used as-is.  The caller must take
344  * into account that *setp will be inserted at the head of cpuset_ids and
345  * plan any potentially conflicting cs_link usage accordingly.
346  */
347 static int
348 cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
349 {
350 	struct cpuset *set;
351 	cpusetid_t id;
352 	int error;
353 	bool dofree;
354 
355 	id = alloc_unr(cpuset_unr);
356 	if (id == -1)
357 		return (ENFILE);
358 	dofree = (*setp == NULL);
359 	if (*setp != NULL)
360 		set = *setp;
361 	else
362 		*setp = set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
363 	error = cpuset_init(set, parent, mask, NULL, id);
364 	if (error == 0)
365 		return (0);
366 	free_unr(cpuset_unr, id);
367 	if (dofree)
368 		uma_zfree(cpuset_zone, set);
369 
370 	return (error);
371 }
372 
373 static void
374 cpuset_freelist_add(struct setlist *list, int count)
375 {
376 	struct cpuset *set;
377 	int i;
378 
379 	for (i = 0; i < count; i++) {
380 		set = uma_zalloc(cpuset_zone, M_ZERO | M_WAITOK);
381 		LIST_INSERT_HEAD(list, set, cs_link);
382 	}
383 }
384 
385 static void
386 cpuset_freelist_init(struct setlist *list, int count)
387 {
388 
389 	LIST_INIT(list);
390 	cpuset_freelist_add(list, count);
391 }
392 
393 static void
394 cpuset_freelist_free(struct setlist *list)
395 {
396 	struct cpuset *set;
397 
398 	while ((set = LIST_FIRST(list)) != NULL) {
399 		LIST_REMOVE(set, cs_link);
400 		uma_zfree(cpuset_zone, set);
401 	}
402 }
403 
404 static void
405 domainset_freelist_add(struct domainlist *list, int count)
406 {
407 	struct domainset *set;
408 	int i;
409 
410 	for (i = 0; i < count; i++) {
411 		set = uma_zalloc(domainset_zone, M_ZERO | M_WAITOK);
412 		LIST_INSERT_HEAD(list, set, ds_link);
413 	}
414 }
415 
416 static void
417 domainset_freelist_init(struct domainlist *list, int count)
418 {
419 
420 	LIST_INIT(list);
421 	domainset_freelist_add(list, count);
422 }
423 
424 static void
425 domainset_freelist_free(struct domainlist *list)
426 {
427 	struct domainset *set;
428 
429 	while ((set = LIST_FIRST(list)) != NULL) {
430 		LIST_REMOVE(set, ds_link);
431 		uma_zfree(domainset_zone, set);
432 	}
433 }
434 
435 /* Copy a domainset preserving mask and policy. */
436 static void
437 domainset_copy(const struct domainset *from, struct domainset *to)
438 {
439 
440 	DOMAINSET_COPY(&from->ds_mask, &to->ds_mask);
441 	to->ds_policy = from->ds_policy;
442 	to->ds_prefer = from->ds_prefer;
443 }
444 
445 /* Return 1 if mask and policy are equal, otherwise 0. */
446 static int
447 domainset_equal(const struct domainset *one, const struct domainset *two)
448 {
449 
450 	return (DOMAINSET_CMP(&one->ds_mask, &two->ds_mask) == 0 &&
451 	    one->ds_policy == two->ds_policy &&
452 	    one->ds_prefer == two->ds_prefer);
453 }
454 
455 /* Return 1 if child is a valid subset of parent. */
456 static int
457 domainset_valid(const struct domainset *parent, const struct domainset *child)
458 {
459 	if (child->ds_policy != DOMAINSET_POLICY_PREFER)
460 		return (DOMAINSET_SUBSET(&parent->ds_mask, &child->ds_mask));
461 	return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask));
462 }
463 
464 static int
465 domainset_restrict(const struct domainset *parent,
466     const struct domainset *child)
467 {
468 	if (child->ds_policy != DOMAINSET_POLICY_PREFER)
469 		return (DOMAINSET_OVERLAP(&parent->ds_mask, &child->ds_mask));
470 	return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask));
471 }
472 
473 /*
474  * Lookup or create a domainset.  The key is provided in ds_mask and
475  * ds_policy.  If the domainset does not yet exist the storage in
476  * 'domain' is used to insert.  Otherwise this storage is freed to the
477  * domainset_zone and the existing domainset is returned.
478  */
479 static struct domainset *
480 _domainset_create(struct domainset *domain, struct domainlist *freelist)
481 {
482 	struct domainset *ndomain;
483 	int i, j;
484 
485 	KASSERT(domain->ds_cnt <= vm_ndomains,
486 	    ("invalid domain count in domainset %p", domain));
487 	KASSERT(domain->ds_policy != DOMAINSET_POLICY_PREFER ||
488 	    domain->ds_prefer < vm_ndomains,
489 	    ("invalid preferred domain in domains %p", domain));
490 
491 	mtx_lock_spin(&cpuset_lock);
492 	LIST_FOREACH(ndomain, &cpuset_domains, ds_link)
493 		if (domainset_equal(ndomain, domain))
494 			break;
495 	/*
496 	 * If the domain does not yet exist we insert it and initialize
497 	 * various iteration helpers which are not part of the key.
498 	 */
499 	if (ndomain == NULL) {
500 		LIST_INSERT_HEAD(&cpuset_domains, domain, ds_link);
501 		domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask);
502 		for (i = 0, j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++)
503 			if (DOMAINSET_ISSET(i, &domain->ds_mask))
504 				domain->ds_order[j++] = i;
505 	}
506 	mtx_unlock_spin(&cpuset_lock);
507 	if (ndomain == NULL)
508 		return (domain);
509 	if (freelist != NULL)
510 		LIST_INSERT_HEAD(freelist, domain, ds_link);
511 	else
512 		uma_zfree(domainset_zone, domain);
513 	return (ndomain);
514 
515 }
516 
517 /*
518  * Are any of the domains in the mask empty?  If so, silently
519  * remove them and update the domainset accordingly.  If only empty
520  * domains are present, we must return failure.
521  */
522 static bool
523 domainset_empty_vm(struct domainset *domain)
524 {
525 	domainset_t empty;
526 	int i, j;
527 
528 	DOMAINSET_ZERO(&empty);
529 	for (i = 0; i < vm_ndomains; i++)
530 		if (VM_DOMAIN_EMPTY(i))
531 			DOMAINSET_SET(i, &empty);
532 	if (DOMAINSET_SUBSET(&empty, &domain->ds_mask))
533 		return (true);
534 
535 	/* Remove empty domains from the set and recompute. */
536 	DOMAINSET_ANDNOT(&domain->ds_mask, &empty);
537 	domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask);
538 	for (i = j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++)
539 		if (DOMAINSET_ISSET(i, &domain->ds_mask))
540 			domain->ds_order[j++] = i;
541 
542 	/* Convert a PREFER policy referencing an empty domain to RR. */
543 	if (domain->ds_policy == DOMAINSET_POLICY_PREFER &&
544 	    DOMAINSET_ISSET(domain->ds_prefer, &empty)) {
545 		domain->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
546 		domain->ds_prefer = -1;
547 	}
548 
549 	return (false);
550 }
551 
552 /*
553  * Create or lookup a domainset based on the key held in 'domain'.
554  */
555 struct domainset *
556 domainset_create(const struct domainset *domain)
557 {
558 	struct domainset *ndomain;
559 
560 	/*
561 	 * Validate the policy.  It must specify a useable policy number with
562 	 * only valid domains.  Preferred must include the preferred domain
563 	 * in the mask.
564 	 */
565 	if (domain->ds_policy <= DOMAINSET_POLICY_INVALID ||
566 	    domain->ds_policy > DOMAINSET_POLICY_MAX)
567 		return (NULL);
568 	if (domain->ds_policy == DOMAINSET_POLICY_PREFER &&
569 	    !DOMAINSET_ISSET(domain->ds_prefer, &domain->ds_mask))
570 		return (NULL);
571 	if (!DOMAINSET_SUBSET(&domainset0.ds_mask, &domain->ds_mask))
572 		return (NULL);
573 	ndomain = uma_zalloc(domainset_zone, M_WAITOK | M_ZERO);
574 	domainset_copy(domain, ndomain);
575 	return _domainset_create(ndomain, NULL);
576 }
577 
578 /*
579  * Update thread domainset pointers.
580  */
581 static void
582 domainset_notify(void)
583 {
584 	struct thread *td;
585 	struct proc *p;
586 
587 	sx_slock(&allproc_lock);
588 	FOREACH_PROC_IN_SYSTEM(p) {
589 		PROC_LOCK(p);
590 		if (p->p_state == PRS_NEW) {
591 			PROC_UNLOCK(p);
592 			continue;
593 		}
594 		FOREACH_THREAD_IN_PROC(p, td) {
595 			thread_lock(td);
596 			td->td_domain.dr_policy = td->td_cpuset->cs_domain;
597 			thread_unlock(td);
598 		}
599 		PROC_UNLOCK(p);
600 	}
601 	sx_sunlock(&allproc_lock);
602 	kernel_object->domain.dr_policy = cpuset_kernel->cs_domain;
603 }
604 
605 /*
606  * Create a new set that is a subset of a parent.
607  */
608 static struct domainset *
609 domainset_shadow(const struct domainset *pdomain,
610     const struct domainset *domain, struct domainlist *freelist)
611 {
612 	struct domainset *ndomain;
613 
614 	ndomain = LIST_FIRST(freelist);
615 	LIST_REMOVE(ndomain, ds_link);
616 
617 	/*
618 	 * Initialize the key from the request.
619 	 */
620 	domainset_copy(domain, ndomain);
621 
622 	/*
623 	 * Restrict the key by the parent.
624 	 */
625 	DOMAINSET_AND(&ndomain->ds_mask, &pdomain->ds_mask);
626 
627 	return _domainset_create(ndomain, freelist);
628 }
629 
630 /*
631  * Recursively check for errors that would occur from applying mask to
632  * the tree of sets starting at 'set'.  Checks for sets that would become
633  * empty as well as RDONLY flags.
634  */
635 static int
636 cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int augment_mask)
637 {
638 	struct cpuset *nset;
639 	cpuset_t newmask;
640 	int error;
641 
642 	mtx_assert(&cpuset_lock, MA_OWNED);
643 	if (set->cs_flags & CPU_SET_RDONLY)
644 		return (EPERM);
645 	if (augment_mask) {
646 		CPU_COPY(&set->cs_mask, &newmask);
647 		CPU_AND(&newmask, mask);
648 	} else
649 		CPU_COPY(mask, &newmask);
650 
651 	if (CPU_EMPTY(&newmask))
652 		return (EDEADLK);
653 	error = 0;
654 	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
655 		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
656 			break;
657 	return (error);
658 }
659 
660 /*
661  * Applies the mask 'mask' without checking for empty sets or permissions.
662  */
663 static void
664 cpuset_update(struct cpuset *set, cpuset_t *mask)
665 {
666 	struct cpuset *nset;
667 
668 	mtx_assert(&cpuset_lock, MA_OWNED);
669 	CPU_AND(&set->cs_mask, mask);
670 	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
671 		cpuset_update(nset, &set->cs_mask);
672 
673 	return;
674 }
675 
676 /*
677  * Modify the set 'set' to use a copy of the mask provided.  Apply this new
678  * mask to restrict all children in the tree.  Checks for validity before
679  * applying the changes.
680  */
681 static int
682 cpuset_modify(struct cpuset *set, cpuset_t *mask)
683 {
684 	struct cpuset *root;
685 	int error;
686 
687 	error = priv_check(curthread, PRIV_SCHED_CPUSET);
688 	if (error)
689 		return (error);
690 	/*
691 	 * In case we are called from within the jail,
692 	 * we do not allow modifying the dedicated root
693  * cpuset of the jail but may still allow
694  * changing child sets, including subordinate
695  * jails' roots.
696 	 */
697 	if ((set->cs_flags & CPU_SET_ROOT) != 0 &&
698 	    jailed(curthread->td_ucred) &&
699 	    set == curthread->td_ucred->cr_prison->pr_cpuset)
700 		return (EPERM);
701 	/*
702 	 * Verify that we have access to this set of
703 	 * cpus.
704 	 */
705 	if ((set->cs_flags & (CPU_SET_ROOT | CPU_SET_RDONLY)) == CPU_SET_ROOT) {
706 		KASSERT(set->cs_parent != NULL,
707 		    ("jail.cpuset=%d is not a proper child of parent jail's root.",
708 		    set->cs_id));
709 
710 		/*
711 		 * cpuset_getroot() cannot work here due to how top-level jail
712 		 * roots are constructed.  Top-level jails are parented to
713 		 * thread0's cpuset (i.e. cpuset 1) rather than the system root.
714 		 */
715 		root = set->cs_parent;
716 	} else {
717 		root = cpuset_getroot(set);
718 	}
719 	mtx_lock_spin(&cpuset_lock);
720 	if (root && !CPU_SUBSET(&root->cs_mask, mask)) {
721 		error = EINVAL;
722 		goto out;
723 	}
724 	error = cpuset_testupdate(set, mask, 0);
725 	if (error)
726 		goto out;
727 	CPU_COPY(mask, &set->cs_mask);
728 	cpuset_update(set, mask);
729 out:
730 	mtx_unlock_spin(&cpuset_lock);
731 
732 	return (error);
733 }
734 
735 /*
736  * Recursively check for errors that would occur from applying mask to
737  * the tree of sets starting at 'set'.  Checks for sets that would become
738  * empty as well as RDONLY flags.
739  */
740 static int
741 cpuset_testupdate_domain(struct cpuset *set, struct domainset *dset,
742     struct domainset *orig, int *count, int augment_mask __unused)
743 {
744 	struct cpuset *nset;
745 	struct domainset *domain;
746 	struct domainset newset;
747 	int error;
748 
749 	mtx_assert(&cpuset_lock, MA_OWNED);
750 	if (set->cs_flags & CPU_SET_RDONLY)
751 		return (EPERM);
752 	domain = set->cs_domain;
753 	domainset_copy(domain, &newset);
754 	if (!domainset_equal(domain, orig)) {
755 		if (!domainset_restrict(domain, dset))
756 			return (EDEADLK);
757 		DOMAINSET_AND(&newset.ds_mask, &dset->ds_mask);
758 		/* Count the number of domains that are changing. */
759 		(*count)++;
760 	}
761 	error = 0;
762 	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
763 		if ((error = cpuset_testupdate_domain(nset, &newset, domain,
764 		    count, 1)) != 0)
765 			break;
766 	return (error);
767 }
768 
769 /*
770  * Applies the mask 'mask' without checking for empty sets or permissions.
771  */
772 static void
773 cpuset_update_domain(struct cpuset *set, struct domainset *domain,
774     struct domainset *orig, struct domainlist *domains)
775 {
776 	struct cpuset *nset;
777 
778 	mtx_assert(&cpuset_lock, MA_OWNED);
779 	/*
780 	 * If this domainset has changed from the parent we must calculate
781 	 * a new set.  Otherwise it simply inherits from the parent.  When
782 	 * we inherit from the parent we get a new mask and policy.  If the
783 	 * set is modified from the parent we keep the policy and only
784 	 * update the mask.
785 	 */
786 	if (set->cs_domain != orig) {
787 		orig = set->cs_domain;
788 		set->cs_domain = domainset_shadow(domain, orig, domains);
789 	} else
790 		set->cs_domain = domain;
791 	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
792 		cpuset_update_domain(nset, set->cs_domain, orig, domains);
793 
794 	return;
795 }
796 
797 /*
798  * Modify the set 'set' to use a copy of the domainset provided.  Apply this new
799  * mask to restrict all children in the tree.  Checks for validity before
800  * applying the changes.
801  */
802 static int
803 cpuset_modify_domain(struct cpuset *set, struct domainset *domain)
804 {
805 	struct domainlist domains;
806 	struct domainset temp;
807 	struct domainset *dset;
808 	struct cpuset *root;
809 	int ndomains, needed;
810 	int error;
811 
812 	error = priv_check(curthread, PRIV_SCHED_CPUSET);
813 	if (error)
814 		return (error);
815 	/*
816  * In case we are called from within the jail,
817  * we do not allow modifying the dedicated root
818  * cpuset of the jail but may still allow
819  * changing child sets.
820 	 */
821 	if (jailed(curthread->td_ucred) &&
822 	    set->cs_flags & CPU_SET_ROOT)
823 		return (EPERM);
824 	domainset_freelist_init(&domains, 0);
825 	domain = domainset_create(domain);
826 	ndomains = 0;
827 
828 	mtx_lock_spin(&cpuset_lock);
829 	for (;;) {
830 		root = cpuset_getroot(set);
831 		dset = root->cs_domain;
832 		/*
833 		 * Verify that we have access to this set of domains.
834 		 */
835 		if (!domainset_valid(dset, domain)) {
836 			error = EINVAL;
837 			goto out;
838 		}
839 		/*
840 		 * If applying prefer we keep the current set as the fallback.
841 		 */
842 		if (domain->ds_policy == DOMAINSET_POLICY_PREFER)
843 			DOMAINSET_COPY(&set->cs_domain->ds_mask,
844 			    &domain->ds_mask);
845 		/*
846 		 * Determine whether we can apply this set of domains and
847 		 * how many new domain structures it will require.
848 		 */
849 		domainset_copy(domain, &temp);
850 		needed = 0;
851 		error = cpuset_testupdate_domain(set, &temp, set->cs_domain,
852 		    &needed, 0);
853 		if (error)
854 			goto out;
855 		if (ndomains >= needed)
856 			break;
857 
858 		/* Dropping the lock; we'll need to re-evaluate again. */
859 		mtx_unlock_spin(&cpuset_lock);
860 		domainset_freelist_add(&domains, needed - ndomains);
861 		ndomains = needed;
862 		mtx_lock_spin(&cpuset_lock);
863 	}
864 	dset = set->cs_domain;
865 	cpuset_update_domain(set, domain, dset, &domains);
866 out:
867 	mtx_unlock_spin(&cpuset_lock);
868 	domainset_freelist_free(&domains);
869 	if (error == 0)
870 		domainset_notify();
871 
872 	return (error);
873 }
874 
875 /*
876  * Resolve the 'which' parameter of several cpuset apis.
877  *
878  * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
879  * checks for permission via p_cansched().
880  *
881  * For WHICH_SET returns a valid set with a new reference.
882  *
883  * -1 may be supplied for any argument to mean the current proc/thread or
884  * the base set of the current thread.  May fail with ESRCH/EPERM.
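 *
 * For example (illustrative, mirroring the calls made elsewhere in this
 * file), resolving the current process looks like:
 *
 *	error = cpuset_which(CPU_WHICH_PID, -1, &p, &td, &nset);
 *	if (error)
 *		return (error);
 *	... use p and td, then PROC_UNLOCK(p) ...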
885  */
886 int
887 cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
888     struct cpuset **setp)
889 {
890 	struct cpuset *set;
891 	struct thread *td;
892 	struct proc *p;
893 	int error;
894 
895 	*pp = p = NULL;
896 	*tdp = td = NULL;
897 	*setp = set = NULL;
898 	switch (which) {
899 	case CPU_WHICH_PID:
900 		if (id == -1) {
901 			PROC_LOCK(curproc);
902 			p = curproc;
903 			break;
904 		}
905 		if ((p = pfind(id)) == NULL)
906 			return (ESRCH);
907 		break;
908 	case CPU_WHICH_TID:
909 		if (id == -1) {
910 			PROC_LOCK(curproc);
911 			p = curproc;
912 			td = curthread;
913 			break;
914 		}
915 		td = tdfind(id, -1);
916 		if (td == NULL)
917 			return (ESRCH);
918 		p = td->td_proc;
919 		break;
920 	case CPU_WHICH_CPUSET:
921 		if (id == -1) {
922 			thread_lock(curthread);
923 			set = cpuset_refbase(curthread->td_cpuset);
924 			thread_unlock(curthread);
925 		} else
926 			set = cpuset_lookup(id, curthread);
927 		if (set) {
928 			*setp = set;
929 			return (0);
930 		}
931 		return (ESRCH);
932 	case CPU_WHICH_JAIL:
933 	{
934 		/* Find `set' for prison with given id. */
935 		struct prison *pr;
936 
937 		sx_slock(&allprison_lock);
938 		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
939 		sx_sunlock(&allprison_lock);
940 		if (pr == NULL)
941 			return (ESRCH);
942 		cpuset_ref(pr->pr_cpuset);
943 		*setp = pr->pr_cpuset;
944 		mtx_unlock(&pr->pr_mtx);
945 		return (0);
946 	}
947 	case CPU_WHICH_IRQ:
948 	case CPU_WHICH_DOMAIN:
949 		return (0);
950 	default:
951 		return (EINVAL);
952 	}
953 	error = p_cansched(curthread, p);
954 	if (error) {
955 		PROC_UNLOCK(p);
956 		return (error);
957 	}
958 	if (td == NULL)
959 		td = FIRST_THREAD_IN_PROC(p);
960 	*pp = p;
961 	*tdp = td;
962 	return (0);
963 }
964 
965 static int
966 cpuset_testshadow(struct cpuset *set, const cpuset_t *mask,
967     const struct domainset *domain)
968 {
969 	struct cpuset *parent;
970 	struct domainset *dset;
971 
972 	parent = cpuset_getbase(set);
973 	/*
974 	 * If we are restricting a cpu mask it must be a subset of the
975 	 * parent or invalid CPUs have been specified.
976 	 */
977 	if (mask != NULL && !CPU_SUBSET(&parent->cs_mask, mask))
978 		return (EINVAL);
979 
980 	/*
981 	 * If we are restricting a domain mask it must be a subset of the
982 	 * parent or invalid domains have been specified.
983 	 */
984 	dset = parent->cs_domain;
985 	if (domain != NULL && !domainset_valid(dset, domain))
986 		return (EINVAL);
987 
988 	return (0);
989 }
990 
991 /*
992  * Create an anonymous set with the provided mask in the space provided by
993  * 'nset'.  If the passed in set is anonymous we use its parent otherwise
994  * the new set is a child of 'set'.
995  */
996 static int
997 cpuset_shadow(struct cpuset *set, struct cpuset **nsetp,
998    const cpuset_t *mask, const struct domainset *domain,
999    struct setlist *cpusets, struct domainlist *domains)
1000 {
1001 	struct cpuset *parent;
1002 	struct cpuset *nset;
1003 	struct domainset *dset;
1004 	struct domainset *d;
1005 	int error;
1006 
1007 	error = cpuset_testshadow(set, mask, domain);
1008 	if (error)
1009 		return (error);
1010 
1011 	parent = cpuset_getbase(set);
1012 	dset = parent->cs_domain;
1013 	if (mask == NULL)
1014 		mask = &set->cs_mask;
1015 	if (domain != NULL)
1016 		d = domainset_shadow(dset, domain, domains);
1017 	else
1018 		d = set->cs_domain;
1019 	nset = LIST_FIRST(cpusets);
1020 	error = cpuset_init(nset, parent, mask, d, CPUSET_INVALID);
1021 	if (error == 0) {
1022 		LIST_REMOVE(nset, cs_link);
1023 		*nsetp = nset;
1024 	}
1025 	return (error);
1026 }
1027 
1028 static struct cpuset *
1029 cpuset_update_thread(struct thread *td, struct cpuset *nset)
1030 {
1031 	struct cpuset *tdset;
1032 
1033 	tdset = td->td_cpuset;
1034 	td->td_cpuset = nset;
1035 	td->td_domain.dr_policy = nset->cs_domain;
1036 	sched_affinity(td);
1037 
1038 	return (tdset);
1039 }
1040 
1041 static int
1042 cpuset_setproc_test_maskthread(struct cpuset *tdset, cpuset_t *mask,
1043     struct domainset *domain)
1044 {
1045 	struct cpuset *parent;
1046 
1047 	parent = cpuset_getbase(tdset);
1048 	if (mask == NULL)
1049 		mask = &tdset->cs_mask;
1050 	if (domain == NULL)
1051 		domain = tdset->cs_domain;
1052 	return cpuset_testshadow(parent, mask, domain);
1053 }
1054 
1055 static int
1056 cpuset_setproc_maskthread(struct cpuset *tdset, cpuset_t *mask,
1057     struct domainset *domain, struct cpuset **nsetp,
1058     struct setlist *freelist, struct domainlist *domainlist)
1059 {
1060 	struct cpuset *parent;
1061 
1062 	parent = cpuset_getbase(tdset);
1063 	if (mask == NULL)
1064 		mask = &tdset->cs_mask;
1065 	if (domain == NULL)
1066 		domain = tdset->cs_domain;
1067 	return cpuset_shadow(parent, nsetp, mask, domain, freelist,
1068 	    domainlist);
1069 }
1070 
1071 static int
1072 cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set,
1073     cpuset_t *mask, struct domainset *domain)
1074 {
1075 	struct cpuset *parent;
1076 
1077 	parent = cpuset_getbase(tdset);
1078 
1079 	/*
1080 	 * If the thread restricted its mask then apply that same
1081 	 * restriction to the new set, otherwise take it wholesale.
1082 	 */
1083 	if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) {
1084 		CPU_COPY(&tdset->cs_mask, mask);
1085 		CPU_AND(mask, &set->cs_mask);
1086 	} else
1087 		CPU_COPY(&set->cs_mask, mask);
1088 
1089 	/*
1090 	 * If the thread restricted the domain then we apply the
1091 	 * restriction to the new set but retain the policy.
1092 	 */
1093 	if (tdset->cs_domain != parent->cs_domain) {
1094 		domainset_copy(tdset->cs_domain, domain);
1095 		DOMAINSET_AND(&domain->ds_mask, &set->cs_domain->ds_mask);
1096 	} else
1097 		domainset_copy(set->cs_domain, domain);
1098 
1099 	if (CPU_EMPTY(mask) || DOMAINSET_EMPTY(&domain->ds_mask))
1100 		return (EDEADLK);
1101 
1102 	return (0);
1103 }
1104 
1105 static int
1106 cpuset_setproc_test_setthread(struct cpuset *tdset, struct cpuset *set)
1107 {
1108 	struct domainset domain;
1109 	cpuset_t mask;
1110 
1111 	if (tdset->cs_id != CPUSET_INVALID)
1112 		return (0);
1113 	return cpuset_setproc_setthread_mask(tdset, set, &mask, &domain);
1114 }
1115 
1116 static int
1117 cpuset_setproc_setthread(struct cpuset *tdset, struct cpuset *set,
1118     struct cpuset **nsetp, struct setlist *freelist,
1119     struct domainlist *domainlist)
1120 {
1121 	struct domainset domain;
1122 	cpuset_t mask;
1123 	int error;
1124 
1125 	/*
1126 	 * If we're replacing on a thread that has not constrained the
1127 	 * original set we can simply accept the new set.
1128 	 */
1129 	if (tdset->cs_id != CPUSET_INVALID) {
1130 		*nsetp = cpuset_ref(set);
1131 		return (0);
1132 	}
1133 	error = cpuset_setproc_setthread_mask(tdset, set, &mask, &domain);
1134 	if (error)
1135 		return (error);
1136 
1137 	return cpuset_shadow(set, nsetp, &mask, &domain, freelist,
1138 	    domainlist);
1139 }
1140 
1141 static int
1142 cpuset_setproc_newbase(struct thread *td, struct cpuset *set,
1143     struct cpuset *nroot, struct cpuset **nsetp,
1144     struct setlist *cpusets, struct domainlist *domainlist)
1145 {
1146 	struct domainset ndomain;
1147 	cpuset_t nmask;
1148 	struct cpuset *pbase;
1149 	int error;
1150 
1151 	pbase = cpuset_getbase(td->td_cpuset);
1152 
1153 	/* Copy process mask, then further apply the new root mask. */
1154 	CPU_COPY(&pbase->cs_mask, &nmask);
1155 	CPU_AND(&nmask, &nroot->cs_mask);
1156 
1157 	domainset_copy(pbase->cs_domain, &ndomain);
1158 	DOMAINSET_AND(&ndomain.ds_mask, &set->cs_domain->ds_mask);
1159 
1160 	/* Policy is too restrictive, will not work. */
1161 	if (CPU_EMPTY(&nmask) || DOMAINSET_EMPTY(&ndomain.ds_mask))
1162 		return (EDEADLK);
1163 
1164 	/*
1165 	 * Remove pbase from the freelist in advance, it'll be pushed to
1166 	 * cpuset_ids on success.  We assume here that cpuset_create() will not
1167 	 * touch pbase on failure, and we just enqueue it back to the freelist
1168 	 * to remain in a consistent state.
1169 	 */
1170 	pbase = LIST_FIRST(cpusets);
1171 	LIST_REMOVE(pbase, cs_link);
1172 	error = cpuset_create(&pbase, set, &nmask);
1173 	if (error != 0) {
1174 		LIST_INSERT_HEAD(cpusets, pbase, cs_link);
1175 		return (error);
1176 	}
1177 
1178 	/* Duplicates some work from above... oh well. */
1179 	pbase->cs_domain = domainset_shadow(set->cs_domain, &ndomain,
1180 	    domainlist);
1181 	*nsetp = pbase;
1182 	return (0);
1183 }
1184 
1185 /*
1186  * Handle four cases for updating an entire process.
1187  *
1188  * 1) Set is non-null and the process is not rebasing onto a new root.  This
1189  *    reparents all anonymous sets to the provided set and replaces all
1190  *    non-anonymous td_cpusets with the provided set.
1191  * 2) Set is non-null and the process is rebasing onto a new root.  This
1192  *    creates a new base set if the process previously had its own base set,
1193  *    then reparents all anonymous sets either to that set or the provided set
1194  *    if one was not created.  Non-anonymous sets are similarly replaced.
1195  * 3) Mask is non-null.  This replaces or creates anonymous sets for every
1196  *    thread with the existing base as a parent.
1197  * 4) domain is non-null.  This creates anonymous sets for every thread
1198  *    and replaces the domain set.
1199  *
1200  * This is overly complicated because we can't allocate while holding a
1201  * spinlock and spinlocks must be held while changing and examining thread
1202  * state.
1203  */
1204 static int
1205 cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask,
1206     struct domainset *domain, bool rebase)
1207 {
1208 	struct setlist freelist;
1209 	struct setlist droplist;
1210 	struct domainlist domainlist;
1211 	struct cpuset *base, *nset, *nroot, *tdroot;
1212 	struct thread *td;
1213 	struct proc *p;
1214 	int needed;
1215 	int nfree;
1216 	int error;
1217 
1218 	/*
1219 	 * The algorithm requires two passes due to locking considerations.
1220 	 *
1221 	 * 1) Lookup the process and acquire the locks in the required order.
1222 	 * 2) If enough cpusets have not been allocated release the locks and
1223 	 *    allocate them.  Loop.
1224 	 */
1225 	cpuset_freelist_init(&freelist, 1);
1226 	domainset_freelist_init(&domainlist, 1);
1227 	nfree = 1;
1228 	LIST_INIT(&droplist);
1229 	nfree = 0;
1230 	base = set;
1231 	nroot = NULL;
1232 	if (set != NULL)
1233 		nroot = cpuset_getroot(set);
1234 	for (;;) {
1235 		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
1236 		if (error)
1237 			goto out;
1238 		tdroot = cpuset_getroot(td->td_cpuset);
1239 		needed = p->p_numthreads;
1240 		if (set != NULL && rebase && tdroot != nroot)
1241 			needed++;
1242 		if (nfree >= needed)
1243 			break;
1244 		PROC_UNLOCK(p);
1245 		if (nfree < needed) {
1246 			cpuset_freelist_add(&freelist, needed - nfree);
1247 			domainset_freelist_add(&domainlist, needed - nfree);
1248 			nfree = needed;
1249 		}
1250 	}
1251 	PROC_LOCK_ASSERT(p, MA_OWNED);
1252 
1253 	/*
1254 	 * If we're changing roots and the root set is what has been specified
1255 	 * as the parent, then we'll check if the process was previously using
1256 	 * the root set and, if it wasn't, create a new base with the process's
1257 	 * mask applied to it.
1258 	 */
1259 	if (set != NULL && rebase && nroot != tdroot) {
1260 		cpusetid_t base_id, root_id;
1261 
1262 		root_id = td->td_ucred->cr_prison->pr_cpuset->cs_id;
1263 		base_id = cpuset_getbase(td->td_cpuset)->cs_id;
1264 
1265 		if (base_id != root_id) {
1266 			error = cpuset_setproc_newbase(td, set, nroot, &base,
1267 			    &freelist, &domainlist);
1268 			if (error != 0)
1269 				goto unlock_out;
1270 		}
1271 	}
1272 
1273 	/*
1274 	 * Now that the appropriate locks are held and we have enough cpusets,
1275 	 * make sure the operation will succeed before applying changes. The
1276 	 * proc lock prevents td_cpuset from changing between calls.
1277 	 */
1278 	error = 0;
1279 	FOREACH_THREAD_IN_PROC(p, td) {
1280 		thread_lock(td);
1281 		if (set != NULL)
1282 			error = cpuset_setproc_test_setthread(td->td_cpuset,
1283 			    base);
1284 		else
1285 			error = cpuset_setproc_test_maskthread(td->td_cpuset,
1286 			    mask, domain);
1287 		thread_unlock(td);
1288 		if (error)
1289 			goto unlock_out;
1290 	}
1291 	/*
1292 	 * Replace each thread's cpuset while using deferred release.  We
1293 	 * must do this because the thread lock must be held while operating
1294 	 * on the thread and this limits the type of operations allowed.
1295 	 */
1296 	FOREACH_THREAD_IN_PROC(p, td) {
1297 		thread_lock(td);
1298 		if (set != NULL)
1299 			error = cpuset_setproc_setthread(td->td_cpuset, base,
1300 			    &nset, &freelist, &domainlist);
1301 		else
1302 			error = cpuset_setproc_maskthread(td->td_cpuset, mask,
1303 			    domain, &nset, &freelist, &domainlist);
1304 		if (error) {
1305 			thread_unlock(td);
1306 			break;
1307 		}
1308 		cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset));
1309 		thread_unlock(td);
1310 	}
1311 unlock_out:
1312 	PROC_UNLOCK(p);
1313 out:
1314 	if (base != NULL && base != set)
1315 		cpuset_rel(base);
1316 	while ((nset = LIST_FIRST(&droplist)) != NULL)
1317 		cpuset_rel_complete(nset);
1318 	cpuset_freelist_free(&freelist);
1319 	domainset_freelist_free(&domainlist);
1320 	return (error);
1321 }
1322 
1323 static int
1324 bitset_strprint(char *buf, size_t bufsiz, const struct bitset *set, int setlen)
1325 {
1326 	size_t bytes;
1327 	int i, once;
1328 	char *p;
1329 
1330 	once = 0;
1331 	p = buf;
1332 	for (i = 0; i < __bitset_words(setlen); i++) {
1333 		if (once != 0) {
1334 			if (bufsiz < 1)
1335 				return (0);
1336 			*p = ',';
1337 			p++;
1338 			bufsiz--;
1339 		} else
1340 			once = 1;
1341 		if (bufsiz < sizeof(__STRING(ULONG_MAX)))
1342 			return (0);
1343 		bytes = snprintf(p, bufsiz, "%lx", set->__bits[i]);
1344 		p += bytes;
1345 		bufsiz -= bytes;
1346 	}
1347 	return (p - buf);
1348 }
1349 
1350 static int
1351 bitset_strscan(struct bitset *set, int setlen, const char *buf)
1352 {
1353 	int i, ret;
1354 	const char *p;
1355 
1356 	BIT_ZERO(setlen, set);
1357 	p = buf;
1358 	for (i = 0; i < __bitset_words(setlen); i++) {
1359 		if (*p == ',') {
1360 			p++;
1361 			continue;
1362 		}
1363 		ret = sscanf(p, "%lx", &set->__bits[i]);
1364 		if (ret == 0 || ret == -1)
1365 			break;
1366 		while (isxdigit(*p))
1367 			p++;
1368 	}
1369 	return (p - buf);
1370 }
1371 
1372 /*
1373  * Return a string representing a valid layout for a cpuset_t object.
1374  * It expects an incoming buffer of at least CPUSETBUFSIZ bytes.
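 * (E.g., on a kernel where cpuset_t is four 64-bit words, a set containing
 * only CPUs 0-3 prints as "f,0,0,0".)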
1375  */
1376 char *
1377 cpusetobj_strprint(char *buf, const cpuset_t *set)
1378 {
1379 
1380 	bitset_strprint(buf, CPUSETBUFSIZ, (const struct bitset *)set,
1381 	    CPU_SETSIZE);
1382 	return (buf);
1383 }
1384 
1385 /*
1386  * Build a valid cpuset_t object from a string representation.
1387  * It expects an incoming buffer of at least CPUSETBUFSIZ bytes.
1388  */
1389 int
1390 cpusetobj_strscan(cpuset_t *set, const char *buf)
1391 {
1392 	char p;
1393 
1394 	if (strlen(buf) > CPUSETBUFSIZ - 1)
1395 		return (-1);
1396 
1397 	p = buf[bitset_strscan((struct bitset *)set, CPU_SETSIZE, buf)];
1398 	if (p != '\0')
1399 		return (-1);
1400 
1401 	return (0);
1402 }
1403 
1404 /*
1405  * Handle a domainset specifier in the sysctl tree.  A pointer to a pointer to
1406  * a domainset is in arg1.  If the user specifies a valid domainset the
1407  * pointer is updated.
1408  *
1409  * Format is:
1410  * hex mask word 0,hex mask word 1,...:decimal policy:decimal preferred
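 * e.g. "f:3:1" (illustrative; assuming DOMAINSET_POLICY_PREFER is policy 3)
 * selects domains 0-3 with a PREFER policy whose preferred domain is 1.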
1411  */
1412 int
1413 sysctl_handle_domainset(SYSCTL_HANDLER_ARGS)
1414 {
1415 	char buf[DOMAINSETBUFSIZ];
1416 	struct domainset *dset;
1417 	struct domainset key;
1418 	int policy, prefer, error;
1419 	char *p;
1420 
1421 	dset = *(struct domainset **)arg1;
1422 	error = 0;
1423 
1424 	if (dset != NULL) {
1425 		p = buf + bitset_strprint(buf, DOMAINSETBUFSIZ,
1426 		    (const struct bitset *)&dset->ds_mask, DOMAINSET_SETSIZE);
1427 		sprintf(p, ":%d:%d", dset->ds_policy, dset->ds_prefer);
1428 	} else
1429 		sprintf(buf, "<NULL>");
1430 	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
1431 	if (error != 0 || req->newptr == NULL)
1432 		return (error);
1433 
1434 	/*
1435 	 * Read in and validate the string.
1436 	 */
1437 	memset(&key, 0, sizeof(key));
1438 	p = &buf[bitset_strscan((struct bitset *)&key.ds_mask,
1439 	    DOMAINSET_SETSIZE, buf)];
1440 	if (p == buf)
1441 		return (EINVAL);
1442 	if (sscanf(p, ":%d:%d", &policy, &prefer) != 2)
1443 		return (EINVAL);
1444 	key.ds_policy = policy;
1445 	key.ds_prefer = prefer;
1446 
1447 	/* domainset_create() validates the policy. */
1448 	dset = domainset_create(&key);
1449 	if (dset == NULL)
1450 		return (EINVAL);
1451 	*(struct domainset **)arg1 = dset;
1452 
1453 	return (error);
1454 }
1455 
1456 /*
1457  * Apply an anonymous mask or a domain to a single thread.
1458  */
1459 static int
1460 _cpuset_setthread(lwpid_t id, cpuset_t *mask, struct domainset *domain)
1461 {
1462 	struct setlist cpusets;
1463 	struct domainlist domainlist;
1464 	struct cpuset *nset;
1465 	struct cpuset *set;
1466 	struct thread *td;
1467 	struct proc *p;
1468 	int error;
1469 
1470 	cpuset_freelist_init(&cpusets, 1);
1471 	domainset_freelist_init(&domainlist, domain != NULL);
1472 	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
1473 	if (error)
1474 		goto out;
1475 	set = NULL;
1476 	thread_lock(td);
1477 	error = cpuset_shadow(td->td_cpuset, &nset, mask, domain,
1478 	    &cpusets, &domainlist);
1479 	if (error == 0)
1480 		set = cpuset_update_thread(td, nset);
1481 	thread_unlock(td);
1482 	PROC_UNLOCK(p);
1483 	if (set)
1484 		cpuset_rel(set);
1485 out:
1486 	cpuset_freelist_free(&cpusets);
1487 	domainset_freelist_free(&domainlist);
1488 	return (error);
1489 }
1490 
1491 /*
1492  * Apply an anonymous mask to a single thread.
1493  */
1494 int
1495 cpuset_setthread(lwpid_t id, cpuset_t *mask)
1496 {
1497 
1498 	return _cpuset_setthread(id, mask, NULL);
1499 }
1500 
1501 /*
1502  * Apply new cpumask to the ithread.
1503  */
1504 int
1505 cpuset_setithread(lwpid_t id, int cpu)
1506 {
1507 	cpuset_t mask;
1508 
1509 	CPU_ZERO(&mask);
1510 	if (cpu == NOCPU)
1511 		CPU_COPY(cpuset_root, &mask);
1512 	else
1513 		CPU_SET(cpu, &mask);
1514 	return _cpuset_setthread(id, &mask, NULL);
1515 }
1516 
1517 /*
1518  * Initialize static domainsets after NUMA information is available.  This is
1519  * called before memory allocators are initialized.
1520  */
1521 void
1522 domainset_init(void)
1523 {
1524 	struct domainset *dset;
1525 	int i;
1526 
1527 	dset = &domainset_roundrobin;
1528 	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
1529 	dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
1530 	dset->ds_prefer = -1;
1531 	_domainset_create(dset, NULL);
1532 
1533 	for (i = 0; i < vm_ndomains; i++) {
1534 		dset = &domainset_fixed[i];
1535 		DOMAINSET_ZERO(&dset->ds_mask);
1536 		DOMAINSET_SET(i, &dset->ds_mask);
1537 		dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
1538 		_domainset_create(dset, NULL);
1539 
1540 		dset = &domainset_prefer[i];
1541 		DOMAINSET_COPY(&all_domains, &dset->ds_mask);
1542 		dset->ds_policy = DOMAINSET_POLICY_PREFER;
1543 		dset->ds_prefer = i;
1544 		_domainset_create(dset, NULL);
1545 	}
1546 }
1547 
1548 /*
1549  * Create the domainsets for cpusets 0 and 1, and for cpuset 2.
1550  */
1551 void
1552 domainset_zero(void)
1553 {
1554 	struct domainset *dset, *tmp;
1555 
1556 	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
1557 
1558 	dset = &domainset0;
1559 	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
1560 	dset->ds_policy = DOMAINSET_POLICY_FIRSTTOUCH;
1561 	dset->ds_prefer = -1;
1562 	curthread->td_domain.dr_policy = _domainset_create(dset, NULL);
1563 
1564 	domainset_copy(dset, &domainset2);
1565 	domainset2.ds_policy = DOMAINSET_POLICY_INTERLEAVE;
1566 	kernel_object->domain.dr_policy = _domainset_create(&domainset2, NULL);
1567 
1568 	/* Remove empty domains from the global policies. */
1569 	LIST_FOREACH_SAFE(dset, &cpuset_domains, ds_link, tmp)
1570 		if (domainset_empty_vm(dset))
1571 			LIST_REMOVE(dset, ds_link);
1572 }
1573 
1574 /*
1575  * Creates system-wide cpusets and the cpuset for thread0, including three
1576  * sets:
1577  *
1578  * 0 - The root set which should represent all valid processors in the
1579  *     system.  This set is immutable.
1580  * 1 - The default set which all processes are a member of until changed.
1581  *     This allows an administrator to move all threads off of given cpus to
1582  *     dedicate them to high priority tasks or save power etc.
1583  * 2 - The kernel set which allows restriction and policy to be applied only
1584  *     to kernel threads and the kernel_object.
1585  */
1586 struct cpuset *
1587 cpuset_thread0(void)
1588 {
1589 	struct cpuset *set;
1590 	int i;
1591 	int error __unused;
1592 
1593 	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
1594 	    NULL, NULL, UMA_ALIGN_CACHE, 0);
1595 	domainset_zone = uma_zcreate("domainset", sizeof(struct domainset),
1596 	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
1597 
1598 	/*
1599 	 * Create the root system set (0) for the whole machine.  Doesn't use
1600 	 * cpuset_create() due to NULL parent.
1601 	 */
1602 	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
1603 	CPU_COPY(&all_cpus, &set->cs_mask);
1604 	LIST_INIT(&set->cs_children);
1605 	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
1606 	refcount_init(&set->cs_ref, 1);
1607 	set->cs_flags = CPU_SET_ROOT | CPU_SET_RDONLY;
1608 	set->cs_domain = &domainset0;
1609 	cpuset_zero = set;
1610 	cpuset_root = &set->cs_mask;
1611 
1612 	/*
1613 	 * Now derive a default (1), modifiable set from that to give out.
1614 	 */
1615 	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
1616 	error = cpuset_init(set, cpuset_zero, NULL, NULL, 1);
1617 	KASSERT(error == 0, ("Error creating default set: %d\n", error));
1618 	cpuset_default = set;
1619 	/*
1620 	 * Create the kernel set (2).
1621 	 */
1622 	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
1623 	error = cpuset_init(set, cpuset_zero, NULL, NULL, 2);
1624 	KASSERT(error == 0, ("Error creating kernel set: %d\n", error));
1625 	set->cs_domain = &domainset2;
1626 	cpuset_kernel = set;
1627 
1628 	/*
1629 	 * Initialize the unit allocator. 0 and 1 are allocated above.
1630 	 */
1631 	cpuset_unr = new_unrhdr(3, INT_MAX, NULL);
1632 
1633 	/*
1634 	 * If MD code has not initialized per-domain cpusets, place all
1635 	 * CPUs in domain 0.
1636 	 */
1637 	for (i = 0; i < MAXMEMDOM; i++)
1638 		if (!CPU_EMPTY(&cpuset_domain[i]))
1639 			goto domains_set;
1640 	CPU_COPY(&all_cpus, &cpuset_domain[0]);
1641 domains_set:
1642 
1643 	return (cpuset_default);
1644 }
1645 
1646 void
1647 cpuset_kernthread(struct thread *td)
1648 {
1649 	struct cpuset *set;
1650 
1651 	thread_lock(td);
1652 	set = td->td_cpuset;
1653 	td->td_cpuset = cpuset_ref(cpuset_kernel);
1654 	thread_unlock(td);
1655 	cpuset_rel(set);
1656 }
1657 
1658 /*
1659  * Create a cpuset as cpuset_create() would, but
1660  * mark the new 'set' as root.
1661  *
1662  * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
1663  * for that.
1664  *
1665  * In case of no error, returns the set in *setp locked with a reference.
1666  */
1667 int
1668 cpuset_create_root(struct prison *pr, struct cpuset **setp)
1669 {
1670 	struct cpuset *set;
1671 	int error;
1672 
1673 	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
1674 	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));
1675 
1676 	set = NULL;
1677 	error = cpuset_create(&set, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
1678 	if (error)
1679 		return (error);
1680 
1681 	KASSERT(set != NULL, ("[%s:%d] cpuset_create returned invalid data",
1682 	    __func__, __LINE__));
1683 
1684 	/* Mark the set as root. */
1685 	set->cs_flags |= CPU_SET_ROOT;
1686 	*setp = set;
1687 
1688 	return (0);
1689 }
1690 
1691 int
1692 cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
1693 {
1694 	int error;
1695 
1696 	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
1697 	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));
1698 
1699 	cpuset_ref(set);
1700 	error = cpuset_setproc(p->p_pid, set, NULL, NULL, true);
1701 	if (error)
1702 		return (error);
1703 	cpuset_rel(set);
1704 	return (0);
1705 }
1706 
1707 /*
1708  * In Capability mode, the only accesses that are permitted are to the current
1709  * thread and process' CPU and domain sets.
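 *
 * For example (illustrative only), after cap_enter() a process may still
 * issue cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, ...), while
 * a CPU_WHICH_CPUSET query or a request naming another process fails with
 * ECAPMODE.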
1710  */
1711 static int
1712 cpuset_check_capabilities(struct thread *td, cpulevel_t level, cpuwhich_t which,
1713     id_t id)
1714 {
1715 	if (IN_CAPABILITY_MODE(td)) {
1716 		if (level != CPU_LEVEL_WHICH)
1717 			return (ECAPMODE);
1718 		if (which != CPU_WHICH_TID && which != CPU_WHICH_PID)
1719 			return (ECAPMODE);
1720 		if (id != -1 &&
1721 		    !(which == CPU_WHICH_TID && id == td->td_tid) &&
1722 		    !(which == CPU_WHICH_PID && id == td->td_proc->p_pid))
1723 			return (ECAPMODE);
1724 	}
1725 	return (0);
1726 }
1727 
1728 #ifndef _SYS_SYSPROTO_H_
1729 struct cpuset_args {
1730 	cpusetid_t	*setid;
1731 };
1732 #endif
1733 int
1734 sys_cpuset(struct thread *td, struct cpuset_args *uap)
1735 {
1736 	struct cpuset *root;
1737 	struct cpuset *set;
1738 	int error;
1739 
1740 	thread_lock(td);
1741 	root = cpuset_refroot(td->td_cpuset);
1742 	thread_unlock(td);
1743 	set = NULL;
1744 	error = cpuset_create(&set, root, &root->cs_mask);
1745 	cpuset_rel(root);
1746 	if (error)
1747 		return (error);
1748 	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
1749 	if (error == 0)
1750 		error = cpuset_setproc(-1, set, NULL, NULL, false);
1751 	cpuset_rel(set);
1752 	return (error);
1753 }
1754 
1755 #ifndef _SYS_SYSPROTO_H_
1756 struct cpuset_setid_args {
1757 	cpuwhich_t	which;
1758 	id_t		id;
1759 	cpusetid_t	setid;
1760 };
1761 #endif
1762 int
1763 sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
1764 {
1765 
1766 	return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid));
1767 }
1768 
1769 int
1770 kern_cpuset_setid(struct thread *td, cpuwhich_t which,
1771     id_t id, cpusetid_t setid)
1772 {
1773 	struct cpuset *set;
1774 	int error;
1775 
1776 	/*
1777 	 * Presently we only support per-process sets.
1778 	 */
1779 	if (which != CPU_WHICH_PID)
1780 		return (EINVAL);
1781 	set = cpuset_lookup(setid, td);
1782 	if (set == NULL)
1783 		return (ESRCH);
1784 	error = cpuset_setproc(id, set, NULL, NULL, false);
1785 	cpuset_rel(set);
1786 	return (error);
1787 }
1788 
1789 #ifndef _SYS_SYSPROTO_H_
1790 struct cpuset_getid_args {
1791 	cpulevel_t	level;
1792 	cpuwhich_t	which;
1793 	id_t		id;
1794 	cpusetid_t	*setid;
1795 };
1796 #endif
1797 int
1798 sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
1799 {
1800 
1801 	return (kern_cpuset_getid(td, uap->level, uap->which, uap->id,
1802 	    uap->setid));
1803 }
1804 
1805 int
1806 kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which,
1807     id_t id, cpusetid_t *setid)
1808 {
1809 	struct cpuset *nset;
1810 	struct cpuset *set;
1811 	struct thread *ttd;
1812 	struct proc *p;
1813 	cpusetid_t tmpid;
1814 	int error;
1815 
1816 	if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET)
1817 		return (EINVAL);
1818 	error = cpuset_which(which, id, &p, &ttd, &set);
1819 	if (error)
1820 		return (error);
1821 	switch (which) {
1822 	case CPU_WHICH_TID:
1823 	case CPU_WHICH_PID:
1824 		thread_lock(ttd);
1825 		set = cpuset_refbase(ttd->td_cpuset);
1826 		thread_unlock(ttd);
1827 		PROC_UNLOCK(p);
1828 		break;
1829 	case CPU_WHICH_CPUSET:
1830 	case CPU_WHICH_JAIL:
1831 		break;
1832 	case CPU_WHICH_IRQ:
1833 	case CPU_WHICH_DOMAIN:
1834 		return (EINVAL);
1835 	}
1836 	switch (level) {
1837 	case CPU_LEVEL_ROOT:
1838 		nset = cpuset_refroot(set);
1839 		cpuset_rel(set);
1840 		set = nset;
1841 		break;
1842 	case CPU_LEVEL_CPUSET:
1843 		break;
1844 	case CPU_LEVEL_WHICH:
1845 		break;
1846 	}
1847 	tmpid = set->cs_id;
1848 	cpuset_rel(set);
1849 	if (error == 0)
1850 		error = copyout(&tmpid, setid, sizeof(tmpid));
1851 
1852 	return (error);
1853 }
1854 
1855 #ifndef _SYS_SYSPROTO_H_
1856 struct cpuset_getaffinity_args {
1857 	cpulevel_t	level;
1858 	cpuwhich_t	which;
1859 	id_t		id;
1860 	size_t		cpusetsize;
1861 	cpuset_t	*mask;
1862 };
1863 #endif
1864 int
1865 sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
1866 {
1867 
1868 	return (kern_cpuset_getaffinity(td, uap->level, uap->which,
1869 	    uap->id, uap->cpusetsize, uap->mask));
1870 }
1871 
1872 int
1873 kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
1874     id_t id, size_t cpusetsize, cpuset_t *maskp)
1875 {
1876 	struct thread *ttd;
1877 	struct cpuset *nset;
1878 	struct cpuset *set;
1879 	struct proc *p;
1880 	cpuset_t *mask;
1881 	int error;
1882 	size_t size;
1883 
1884 	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
1885 		return (ERANGE);
1886 	error = cpuset_check_capabilities(td, level, which, id);
1887 	if (error != 0)
1888 		return (error);
1889 	size = cpusetsize;
1890 	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
1891 	error = cpuset_which(which, id, &p, &ttd, &set);
1892 	if (error)
1893 		goto out;
1894 	switch (level) {
1895 	case CPU_LEVEL_ROOT:
1896 	case CPU_LEVEL_CPUSET:
1897 		switch (which) {
1898 		case CPU_WHICH_TID:
1899 		case CPU_WHICH_PID:
1900 			thread_lock(ttd);
1901 			set = cpuset_ref(ttd->td_cpuset);
1902 			thread_unlock(ttd);
1903 			break;
1904 		case CPU_WHICH_CPUSET:
1905 		case CPU_WHICH_JAIL:
1906 			break;
1907 		case CPU_WHICH_IRQ:
1908 		case CPU_WHICH_INTRHANDLER:
1909 		case CPU_WHICH_ITHREAD:
1910 		case CPU_WHICH_DOMAIN:
1911 			error = EINVAL;
1912 			goto out;
1913 		}
1914 		if (level == CPU_LEVEL_ROOT)
1915 			nset = cpuset_refroot(set);
1916 		else
1917 			nset = cpuset_refbase(set);
1918 		CPU_COPY(&nset->cs_mask, mask);
1919 		cpuset_rel(nset);
1920 		break;
1921 	case CPU_LEVEL_WHICH:
1922 		switch (which) {
1923 		case CPU_WHICH_TID:
1924 			thread_lock(ttd);
1925 			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
1926 			thread_unlock(ttd);
1927 			break;
1928 		case CPU_WHICH_PID:
1929 			FOREACH_THREAD_IN_PROC(p, ttd) {
1930 				thread_lock(ttd);
1931 				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
1932 				thread_unlock(ttd);
1933 			}
1934 			break;
1935 		case CPU_WHICH_CPUSET:
1936 		case CPU_WHICH_JAIL:
1937 			CPU_COPY(&set->cs_mask, mask);
1938 			break;
1939 		case CPU_WHICH_IRQ:
1940 		case CPU_WHICH_INTRHANDLER:
1941 		case CPU_WHICH_ITHREAD:
1942 			error = intr_getaffinity(id, which, mask);
1943 			break;
1944 		case CPU_WHICH_DOMAIN:
1945 			if (id < 0 || id >= MAXMEMDOM)
1946 				error = ESRCH;
1947 			else
1948 				CPU_COPY(&cpuset_domain[id], mask);
1949 			break;
1950 		}
1951 		break;
1952 	default:
1953 		error = EINVAL;
1954 		break;
1955 	}
1956 	if (set)
1957 		cpuset_rel(set);
1958 	if (p)
1959 		PROC_UNLOCK(p);
1960 	if (error == 0)
1961 		error = copyout(mask, maskp, size);
1962 out:
1963 	free(mask, M_TEMP);
1964 	return (error);
1965 }
1966 
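/*
 * Illustrative userland use (not part of this file): restrict the calling
 * process to CPUs 0 and 1.  The size passed must be at least the kernel's
 * sizeof(cpuset_t), and any bits beyond the kernel's set must be clear, as
 * enforced by the handler below.  Assumes <sys/param.h> and <sys/cpuset.h>.
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	CPU_SET(1, &mask);
 *	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);
 */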
1967 #ifndef _SYS_SYSPROTO_H_
1968 struct cpuset_setaffinity_args {
1969 	cpulevel_t	level;
1970 	cpuwhich_t	which;
1971 	id_t		id;
1972 	size_t		cpusetsize;
1973 	const cpuset_t	*mask;
1974 };
1975 #endif
1976 int
1977 sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
1978 {
1979 
1980 	return (kern_cpuset_setaffinity(td, uap->level, uap->which,
1981 	    uap->id, uap->cpusetsize, uap->mask));
1982 }
1983 
1984 int
1985 kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
1986     id_t id, size_t cpusetsize, const cpuset_t *maskp)
1987 {
1988 	struct cpuset *nset;
1989 	struct cpuset *set;
1990 	struct thread *ttd;
1991 	struct proc *p;
1992 	cpuset_t *mask;
1993 	int error;
1994 
1995 	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
1996 		return (ERANGE);
1997 	error = cpuset_check_capabilities(td, level, which, id);
1998 	if (error != 0)
1999 		return (error);
2000 	mask = malloc(cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
2001 	error = copyin(maskp, mask, cpusetsize);
2002 	if (error)
2003 		goto out;
2004 	/*
2005 	 * Verify that no high bits are set.
2006 	 */
2007 	if (cpusetsize > sizeof(cpuset_t)) {
2008 		char *end;
2009 		char *cp;
2010 
2011 		end = cp = (char *)&mask->__bits;
2012 		end += cpusetsize;
2013 		cp += sizeof(cpuset_t);
2014 		while (cp != end)
2015 			if (*cp++ != 0) {
2016 				error = EINVAL;
2017 				goto out;
2018 			}
2019 	}
2020 	if (CPU_EMPTY(mask)) {
2021 		error = EDEADLK;
2022 		goto out;
2023 	}
2024 	switch (level) {
2025 	case CPU_LEVEL_ROOT:
2026 	case CPU_LEVEL_CPUSET:
2027 		error = cpuset_which(which, id, &p, &ttd, &set);
2028 		if (error)
2029 			break;
2030 		switch (which) {
2031 		case CPU_WHICH_TID:
2032 		case CPU_WHICH_PID:
2033 			thread_lock(ttd);
2034 			set = cpuset_ref(ttd->td_cpuset);
2035 			thread_unlock(ttd);
2036 			PROC_UNLOCK(p);
2037 			break;
2038 		case CPU_WHICH_CPUSET:
2039 		case CPU_WHICH_JAIL:
2040 			break;
2041 		case CPU_WHICH_IRQ:
2042 		case CPU_WHICH_INTRHANDLER:
2043 		case CPU_WHICH_ITHREAD:
2044 		case CPU_WHICH_DOMAIN:
2045 			error = EINVAL;
2046 			goto out;
2047 		}
2048 		if (level == CPU_LEVEL_ROOT)
2049 			nset = cpuset_refroot(set);
2050 		else
2051 			nset = cpuset_refbase(set);
2052 		error = cpuset_modify(nset, mask);
2053 		cpuset_rel(nset);
2054 		cpuset_rel(set);
2055 		break;
2056 	case CPU_LEVEL_WHICH:
2057 		switch (which) {
2058 		case CPU_WHICH_TID:
2059 			error = cpuset_setthread(id, mask);
2060 			break;
2061 		case CPU_WHICH_PID:
2062 			error = cpuset_setproc(id, NULL, mask, NULL, false);
2063 			break;
2064 		case CPU_WHICH_CPUSET:
2065 		case CPU_WHICH_JAIL:
2066 			error = cpuset_which(which, id, &p, &ttd, &set);
2067 			if (error == 0) {
2068 				error = cpuset_modify(set, mask);
2069 				cpuset_rel(set);
2070 			}
2071 			break;
2072 		case CPU_WHICH_IRQ:
2073 		case CPU_WHICH_INTRHANDLER:
2074 		case CPU_WHICH_ITHREAD:
2075 			error = intr_setaffinity(id, which, mask);
2076 			break;
2077 		default:
2078 			error = EINVAL;
2079 			break;
2080 		}
2081 		break;
2082 	default:
2083 		error = EINVAL;
2084 		break;
2085 	}
2086 out:
2087 	free(mask, M_TEMP);
2088 	return (error);
2089 }
2090 
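/*
 * Userland sketch (illustrative only): read the memory-domain mask and
 * allocation policy of the calling process.  For a PREFER policy the
 * handler below reports only the preferred domain, not the fallback set.
 * Assumes <sys/param.h>, <sys/cpuset.h> and <sys/domainset.h>.
 *
 *	domainset_t mask;
 *	int policy;
 *
 *	cpuset_getdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask, &policy);
 */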
2091 #ifndef _SYS_SYSPROTO_H_
2092 struct cpuset_getdomain_args {
2093 	cpulevel_t	level;
2094 	cpuwhich_t	which;
2095 	id_t		id;
2096 	size_t		domainsetsize;
2097 	domainset_t	*mask;
2098 	int 		*policy;
2099 };
2100 #endif
2101 int
2102 sys_cpuset_getdomain(struct thread *td, struct cpuset_getdomain_args *uap)
2103 {
2104 
2105 	return (kern_cpuset_getdomain(td, uap->level, uap->which,
2106 	    uap->id, uap->domainsetsize, uap->mask, uap->policy));
2107 }
2108 
2109 int
2110 kern_cpuset_getdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
2111     id_t id, size_t domainsetsize, domainset_t *maskp, int *policyp)
2112 {
2113 	struct domainset outset;
2114 	struct thread *ttd;
2115 	struct cpuset *nset;
2116 	struct cpuset *set;
2117 	struct domainset *dset;
2118 	struct proc *p;
2119 	domainset_t *mask;
2120 	int error;
2121 
2122 	if (domainsetsize < sizeof(domainset_t) ||
2123 	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
2124 		return (ERANGE);
2125 	error = cpuset_check_capabilities(td, level, which, id);
2126 	if (error != 0)
2127 		return (error);
2128 	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
2129 	bzero(&outset, sizeof(outset));
2130 	error = cpuset_which(which, id, &p, &ttd, &set);
2131 	if (error)
2132 		goto out;
2133 	switch (level) {
2134 	case CPU_LEVEL_ROOT:
2135 	case CPU_LEVEL_CPUSET:
2136 		switch (which) {
2137 		case CPU_WHICH_TID:
2138 		case CPU_WHICH_PID:
2139 			thread_lock(ttd);
2140 			set = cpuset_ref(ttd->td_cpuset);
2141 			thread_unlock(ttd);
2142 			break;
2143 		case CPU_WHICH_CPUSET:
2144 		case CPU_WHICH_JAIL:
2145 			break;
2146 		case CPU_WHICH_IRQ:
2147 		case CPU_WHICH_INTRHANDLER:
2148 		case CPU_WHICH_ITHREAD:
2149 		case CPU_WHICH_DOMAIN:
2150 			error = EINVAL;
2151 			goto out;
2152 		}
2153 		if (level == CPU_LEVEL_ROOT)
2154 			nset = cpuset_refroot(set);
2155 		else
2156 			nset = cpuset_refbase(set);
2157 		domainset_copy(nset->cs_domain, &outset);
2158 		cpuset_rel(nset);
2159 		break;
2160 	case CPU_LEVEL_WHICH:
2161 		switch (which) {
2162 		case CPU_WHICH_TID:
2163 			thread_lock(ttd);
2164 			domainset_copy(ttd->td_cpuset->cs_domain, &outset);
2165 			thread_unlock(ttd);
2166 			break;
2167 		case CPU_WHICH_PID:
2168 			FOREACH_THREAD_IN_PROC(p, ttd) {
2169 				thread_lock(ttd);
2170 				dset = ttd->td_cpuset->cs_domain;
2171 				/* Show all domains in the proc. */
2172 				DOMAINSET_OR(&outset.ds_mask, &dset->ds_mask);
2173 				/* Last policy wins. */
2174 				outset.ds_policy = dset->ds_policy;
2175 				outset.ds_prefer = dset->ds_prefer;
2176 				thread_unlock(ttd);
2177 			}
2178 			break;
2179 		case CPU_WHICH_CPUSET:
2180 		case CPU_WHICH_JAIL:
2181 			domainset_copy(set->cs_domain, &outset);
2182 			break;
2183 		case CPU_WHICH_IRQ:
2184 		case CPU_WHICH_INTRHANDLER:
2185 		case CPU_WHICH_ITHREAD:
2186 		case CPU_WHICH_DOMAIN:
2187 			error = EINVAL;
2188 			break;
2189 		}
2190 		break;
2191 	default:
2192 		error = EINVAL;
2193 		break;
2194 	}
2195 	if (set)
2196 		cpuset_rel(set);
2197 	if (p)
2198 		PROC_UNLOCK(p);
2199 	/*
2200 	 * Translate prefer into a set containing only the preferred domain,
2201 	 * not the entire fallback set.
2202 	 */
2203 	if (outset.ds_policy == DOMAINSET_POLICY_PREFER) {
2204 		DOMAINSET_ZERO(&outset.ds_mask);
2205 		DOMAINSET_SET(outset.ds_prefer, &outset.ds_mask);
2206 	}
2207 	DOMAINSET_COPY(&outset.ds_mask, mask);
2208 	if (error == 0)
2209 		error = copyout(mask, maskp, domainsetsize);
2210 	if (error == 0)
2211 		if (suword32(policyp, outset.ds_policy) != 0)
2212 			error = EFAULT;
2213 out:
2214 	free(mask, M_TEMP);
2215 	return (error);
2216 }
2217 
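/*
 * Illustrative userland use (not part of this file): prefer allocations
 * from domain 0 for the calling process.  A PREFER policy must name exactly
 * one domain, as enforced by the handler below.  Assumes <sys/param.h>,
 * <sys/cpuset.h> and <sys/domainset.h>.
 *
 *	domainset_t mask;
 *
 *	DOMAINSET_ZERO(&mask);
 *	DOMAINSET_SET(0, &mask);
 *	cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask, DOMAINSET_POLICY_PREFER);
 */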
2218 #ifndef _SYS_SYSPROTO_H_
2219 struct cpuset_setdomain_args {
2220 	cpulevel_t	level;
2221 	cpuwhich_t	which;
2222 	id_t		id;
2223 	size_t		domainsetsize;
2224 	domainset_t	*mask;
2225 	int 		policy;
2226 };
2227 #endif
2228 int
2229 sys_cpuset_setdomain(struct thread *td, struct cpuset_setdomain_args *uap)
2230 {
2231 
2232 	return (kern_cpuset_setdomain(td, uap->level, uap->which,
2233 	    uap->id, uap->domainsetsize, uap->mask, uap->policy));
2234 }
2235 
2236 int
2237 kern_cpuset_setdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
2238     id_t id, size_t domainsetsize, const domainset_t *maskp, int policy)
2239 {
2240 	struct cpuset *nset;
2241 	struct cpuset *set;
2242 	struct thread *ttd;
2243 	struct proc *p;
2244 	struct domainset domain;
2245 	domainset_t *mask;
2246 	int error;
2247 
2248 	if (domainsetsize < sizeof(domainset_t) ||
2249 	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
2250 		return (ERANGE);
2251 	if (policy <= DOMAINSET_POLICY_INVALID ||
2252 	    policy > DOMAINSET_POLICY_MAX)
2253 		return (EINVAL);
2254 	error = cpuset_check_capabilities(td, level, which, id);
2255 	if (error != 0)
2256 		return (error);
2257 	memset(&domain, 0, sizeof(domain));
2258 	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
2259 	error = copyin(maskp, mask, domainsetsize);
2260 	if (error)
2261 		goto out;
2262 	/*
2263 	 * Verify that no high bits are set.
2264 	 */
2265 	if (domainsetsize > sizeof(domainset_t)) {
2266 		char *end;
2267 		char *cp;
2268 
2269 		end = cp = (char *)&mask->__bits;
2270 		end += domainsetsize;
2271 		cp += sizeof(domainset_t);
2272 		while (cp != end)
2273 			if (*cp++ != 0) {
2274 				error = EINVAL;
2275 				goto out;
2276 			}
2277 	}
2278 	if (DOMAINSET_EMPTY(mask)) {
2279 		error = EDEADLK;
2280 		goto out;
2281 	}
2282 	DOMAINSET_COPY(mask, &domain.ds_mask);
2283 	domain.ds_policy = policy;
2284 
2285 	/*
2286 	 * Sanitize the provided mask.
2287 	 */
2288 	if (!DOMAINSET_SUBSET(&all_domains, &domain.ds_mask)) {
2289 		error = EINVAL;
2290 		goto out;
2291 	}
2292 
2293 	/* Translate preferred policy into a mask and fallback. */
2294 	if (policy == DOMAINSET_POLICY_PREFER) {
2295 		/* Only support a single preferred domain. */
2296 		if (DOMAINSET_COUNT(&domain.ds_mask) != 1) {
2297 			error = EINVAL;
2298 			goto out;
2299 		}
2300 		domain.ds_prefer = DOMAINSET_FFS(&domain.ds_mask) - 1;
2301 		/* This will be constrained by domainset_shadow(). */
2302 		DOMAINSET_COPY(&all_domains, &domain.ds_mask);
2303 	}
2304 
2305 	/*
2306 	 * When given an impossible policy, fall back to interleaving
2307 	 * across all domains.
2308 	 */
2309 	if (domainset_empty_vm(&domain))
2310 		domainset_copy(&domainset2, &domain);
2311 
2312 	switch (level) {
2313 	case CPU_LEVEL_ROOT:
2314 	case CPU_LEVEL_CPUSET:
2315 		error = cpuset_which(which, id, &p, &ttd, &set);
2316 		if (error)
2317 			break;
2318 		switch (which) {
2319 		case CPU_WHICH_TID:
2320 		case CPU_WHICH_PID:
2321 			thread_lock(ttd);
2322 			set = cpuset_ref(ttd->td_cpuset);
2323 			thread_unlock(ttd);
2324 			PROC_UNLOCK(p);
2325 			break;
2326 		case CPU_WHICH_CPUSET:
2327 		case CPU_WHICH_JAIL:
2328 			break;
2329 		case CPU_WHICH_IRQ:
2330 		case CPU_WHICH_INTRHANDLER:
2331 		case CPU_WHICH_ITHREAD:
2332 		case CPU_WHICH_DOMAIN:
2333 			error = EINVAL;
2334 			goto out;
2335 		}
2336 		if (level == CPU_LEVEL_ROOT)
2337 			nset = cpuset_refroot(set);
2338 		else
2339 			nset = cpuset_refbase(set);
2340 		error = cpuset_modify_domain(nset, &domain);
2341 		cpuset_rel(nset);
2342 		cpuset_rel(set);
2343 		break;
2344 	case CPU_LEVEL_WHICH:
2345 		switch (which) {
2346 		case CPU_WHICH_TID:
2347 			error = _cpuset_setthread(id, NULL, &domain);
2348 			break;
2349 		case CPU_WHICH_PID:
2350 			error = cpuset_setproc(id, NULL, NULL, &domain, false);
2351 			break;
2352 		case CPU_WHICH_CPUSET:
2353 		case CPU_WHICH_JAIL:
2354 			error = cpuset_which(which, id, &p, &ttd, &set);
2355 			if (error == 0) {
2356 				error = cpuset_modify_domain(set, &domain);
2357 				cpuset_rel(set);
2358 			}
2359 			break;
2360 		case CPU_WHICH_IRQ:
2361 		case CPU_WHICH_INTRHANDLER:
2362 		case CPU_WHICH_ITHREAD:
2363 		default:
2364 			error = EINVAL;
2365 			break;
2366 		}
2367 		break;
2368 	default:
2369 		error = EINVAL;
2370 		break;
2371 	}
2372 out:
2373 	free(mask, M_TEMP);
2374 	return (error);
2375 }
2376 
2377 #ifdef DDB
2378 
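/*
 * The debugger commands defined below are invoked from the DDB prompt as
 * "show cpusets" and "show domainsets"; they walk cpuset_ids and
 * cpuset_domains and print each set's mask and policy.
 */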
2379 static void
2380 ddb_display_bitset(const struct bitset *set, int size)
2381 {
2382 	int bit, once;
2383 
2384 	for (once = 0, bit = 0; bit < size; bit++) {
2385 		if (CPU_ISSET(bit, set)) {
2386 			if (once == 0) {
2387 				db_printf("%d", bit);
2388 				once = 1;
2389 			} else
2390 				db_printf(",%d", bit);
2391 		}
2392 	}
2393 	if (once == 0)
2394 		db_printf("<none>");
2395 }
2396 
2397 void
2398 ddb_display_cpuset(const cpuset_t *set)
2399 {
2400 	ddb_display_bitset((const struct bitset *)set, CPU_SETSIZE);
2401 }
2402 
2403 static void
2404 ddb_display_domainset(const domainset_t *set)
2405 {
2406 	ddb_display_bitset((const struct bitset *)set, DOMAINSET_SETSIZE);
2407 }
2408 
2409 DB_SHOW_COMMAND(cpusets, db_show_cpusets)
2410 {
2411 	struct cpuset *set;
2412 
2413 	LIST_FOREACH(set, &cpuset_ids, cs_link) {
2414 		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
2415 		    set, set->cs_id, refcount_load(&set->cs_ref), set->cs_flags,
2416 		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
2417 		db_printf("  cpu mask=");
2418 		ddb_display_cpuset(&set->cs_mask);
2419 		db_printf("\n");
2420 		db_printf("  domain policy %d prefer %d mask=",
2421 		    set->cs_domain->ds_policy, set->cs_domain->ds_prefer);
2422 		ddb_display_domainset(&set->cs_domain->ds_mask);
2423 		db_printf("\n");
2424 		if (db_pager_quit)
2425 			break;
2426 	}
2427 }
2428 
2429 DB_SHOW_COMMAND(domainsets, db_show_domainsets)
2430 {
2431 	struct domainset *set;
2432 
2433 	LIST_FOREACH(set, &cpuset_domains, ds_link) {
2434 		db_printf("set=%p policy %d prefer %d cnt %d\n",
2435 		    set, set->ds_policy, set->ds_prefer, set->ds_cnt);
2436 		db_printf("  mask =");
2437 		ddb_display_domainset(&set->ds_mask);
2438 		db_printf("\n");
2439 	}
2440 }
2441 #endif /* DDB */
2442