xref: /freebsd/sys/kern/kern_cpuset.c (revision 25ecdc7d52770caf1c9b44b5ec11f468f6b636f3)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2008,  Jeffrey Roberson <jeff@freebsd.org>
5  * All rights reserved.
6  *
7  * Copyright (c) 2008 Nokia Corporation
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice unmodified, this list of conditions, and the following
15  *    disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_ddb.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysctl.h>
41 #include <sys/ctype.h>
42 #include <sys/sysproto.h>
43 #include <sys/jail.h>
44 #include <sys/kernel.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/mutex.h>
48 #include <sys/priv.h>
49 #include <sys/proc.h>
50 #include <sys/refcount.h>
51 #include <sys/sched.h>
52 #include <sys/smp.h>
53 #include <sys/syscallsubr.h>
54 #include <sys/capsicum.h>
55 #include <sys/cpuset.h>
56 #include <sys/domainset.h>
57 #include <sys/sx.h>
58 #include <sys/queue.h>
59 #include <sys/libkern.h>
60 #include <sys/limits.h>
61 #include <sys/bus.h>
62 #include <sys/interrupt.h>
63 #include <sys/vmmeter.h>
64 
65 #include <vm/uma.h>
66 #include <vm/vm.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_page.h>
69 #include <vm/vm_pageout.h>
70 #include <vm/vm_extern.h>
71 #include <vm/vm_param.h>
72 #include <vm/vm_phys.h>
73 #include <vm/vm_pagequeue.h>
74 
75 #ifdef DDB
76 #include <ddb/ddb.h>
77 #endif /* DDB */
78 
79 /*
80  * cpusets provide a mechanism for creating and manipulating sets of
81  * processors for the purpose of constraining the scheduling of threads to
82  * specific processors.
83  *
84  * Each process belongs to an identified set; by default this is set 1.  Each
85  * thread may further restrict the cpus it may run on to a subset of this
86  * named set.  This creates an anonymous set which other threads and processes
87  * may not join by number.
88  *
89  * The named set is referred to herein as the 'base' set to avoid ambiguity.
90  * This set is usually a child of a 'root' set while the anonymous set may
91  * simply be referred to as a mask.  In the syscall api these are referred to
92  * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
93  *
94  * Threads inherit their set from their creator whether it be anonymous or
95  * not.  This means that anonymous sets are immutable because they may be
96  * shared.  To modify an anonymous set a new set is created with the desired
97  * mask and the same parent as the existing anonymous set.  This gives the
98  * illusion of each thread having a private mask.
99  *
100  * Via the syscall apis a user may ask to retrieve or modify the root, base,
101  * or mask that is discovered via a pid, tid, or setid.  Modifying a set
102  * modifies all numbered and anonymous child sets to comply with the new mask.
103  * Modifying a pid or tid's mask applies only to that tid but must still
104  * exist within the assigned parent set.
105  *
106  * A thread may not be assigned to a group separate from other threads in
107  * the process.  This is to remove ambiguity when the setid is queried with
108  * a pid argument.  There is no other technical limitation.
109  *
110  * This somewhat complex arrangement is intended to make it easy for
111  * applications to query available processors and bind their threads to
112  * specific processors while also allowing administrators to dynamically
113  * reprovision by changing sets which apply to groups of processes.
114  *
115  * A simple application should not concern itself with sets at all and
116  * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
117  * meaning 'curthread'.  It may query available cpus for that tid with a
118  * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
119  */
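
/*
 * A minimal userland sketch of the "simple application" pattern described
 * above.  Illustrative only and not compiled as part of this file; the
 * helper name is made up, but cpuset_getaffinity(2)/cpuset_setaffinity(2)
 * and the CPU_* macros are the public interface.
 */
#if 0
#include <sys/param.h>
#include <sys/cpuset.h>

static int
pin_self_to_cpu(int cpu)
{
	cpuset_t mask;

	/* Query the cpus permitted by the calling process's base set. */
	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
	    sizeof(mask), &mask) != 0)
		return (-1);
	if (!CPU_ISSET(cpu, &mask))
		return (-1);

	/* Restrict only the calling thread; this creates an anonymous set. */
	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	return (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask));
}
#endif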
120 
121 LIST_HEAD(domainlist, domainset);
122 struct domainset __read_mostly domainset_firsttouch;
123 struct domainset __read_mostly domainset_fixed[MAXMEMDOM];
124 struct domainset __read_mostly domainset_interleave;
125 struct domainset __read_mostly domainset_prefer[MAXMEMDOM];
126 struct domainset __read_mostly domainset_roundrobin;
127 
128 static uma_zone_t cpuset_zone;
129 static uma_zone_t domainset_zone;
130 static struct mtx cpuset_lock;
131 static struct setlist cpuset_ids;
132 static struct domainlist cpuset_domains;
133 static struct unrhdr *cpuset_unr;
134 static struct cpuset *cpuset_zero, *cpuset_default, *cpuset_kernel;
135 static struct domainset *domainset0, *domainset2;
136 
137 /* Return the size of cpuset_t at the kernel level */
138 SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD,
139     SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");
140 
141 cpuset_t *cpuset_root;
142 cpuset_t cpuset_domain[MAXMEMDOM];
143 
144 static int domainset_valid(const struct domainset *, const struct domainset *);
145 
146 /*
147  * Find the first non-anonymous set starting from 'set'.
148  */
149 static struct cpuset *
150 cpuset_getbase(struct cpuset *set)
151 {
152 
153 	if (set->cs_id == CPUSET_INVALID)
154 		set = set->cs_parent;
155 	return (set);
156 }
157 
158 /*
159  * Walks up the tree from 'set' to find the root.
160  */
161 static struct cpuset *
162 cpuset_getroot(struct cpuset *set)
163 {
164 
165 	while ((set->cs_flags & CPU_SET_ROOT) == 0 && set->cs_parent != NULL)
166 		set = set->cs_parent;
167 	return (set);
168 }
169 
170 /*
171  * Acquire a reference to a cpuset; all pointers must be tracked with refs.
172  */
173 struct cpuset *
174 cpuset_ref(struct cpuset *set)
175 {
176 
177 	refcount_acquire(&set->cs_ref);
178 	return (set);
179 }
180 
181 /*
182  * Walks up the tree from 'set' to find the root.  Returns the root
183  * referenced.
184  */
185 static struct cpuset *
186 cpuset_refroot(struct cpuset *set)
187 {
188 
189 	return (cpuset_ref(cpuset_getroot(set)));
190 }
191 
192 /*
193  * Find the first non-anonymous set starting from 'set'.  Returns this set
194  * referenced.  May return the passed in set with an extra ref if it is
195  * not anonymous.
196  */
197 static struct cpuset *
198 cpuset_refbase(struct cpuset *set)
199 {
200 
201 	return (cpuset_ref(cpuset_getbase(set)));
202 }
203 
204 /*
205  * Release a reference in a context where it is safe to allocate.
206  */
207 void
208 cpuset_rel(struct cpuset *set)
209 {
210 	cpusetid_t id;
211 
212 	if (refcount_release_if_not_last(&set->cs_ref))
213 		return;
214 	mtx_lock_spin(&cpuset_lock);
215 	if (!refcount_release(&set->cs_ref)) {
216 		mtx_unlock_spin(&cpuset_lock);
217 		return;
218 	}
219 	LIST_REMOVE(set, cs_siblings);
220 	id = set->cs_id;
221 	if (id != CPUSET_INVALID)
222 		LIST_REMOVE(set, cs_link);
223 	mtx_unlock_spin(&cpuset_lock);
224 	cpuset_rel(set->cs_parent);
225 	uma_zfree(cpuset_zone, set);
226 	if (id != CPUSET_INVALID)
227 		free_unr(cpuset_unr, id);
228 }
229 
230 /*
231  * Deferred release must be used when in a context that is not safe to
232  * allocate/free.  This places any unreferenced sets on the list 'head'.
233  */
234 static void
235 cpuset_rel_defer(struct setlist *head, struct cpuset *set)
236 {
237 
238 	if (refcount_release_if_not_last(&set->cs_ref))
239 		return;
240 	mtx_lock_spin(&cpuset_lock);
241 	if (!refcount_release(&set->cs_ref)) {
242 		mtx_unlock_spin(&cpuset_lock);
243 		return;
244 	}
245 	LIST_REMOVE(set, cs_siblings);
246 	if (set->cs_id != CPUSET_INVALID)
247 		LIST_REMOVE(set, cs_link);
248 	LIST_INSERT_HEAD(head, set, cs_link);
249 	mtx_unlock_spin(&cpuset_lock);
250 }
251 
252 /*
253  * Complete a deferred release.  Removes the set from the list provided to
254  * cpuset_rel_defer.
255  */
256 static void
257 cpuset_rel_complete(struct cpuset *set)
258 {
259 	cpusetid_t id;
260 
261 	id = set->cs_id;
262 	LIST_REMOVE(set, cs_link);
263 	cpuset_rel(set->cs_parent);
264 	uma_zfree(cpuset_zone, set);
265 	if (id != CPUSET_INVALID)
266 		free_unr(cpuset_unr, id);
267 }
268 
269 /*
270  * Find a set based on an id.  Returns it with a ref.
271  */
272 static struct cpuset *
273 cpuset_lookup(cpusetid_t setid, struct thread *td)
274 {
275 	struct cpuset *set;
276 
277 	if (setid == CPUSET_INVALID)
278 		return (NULL);
279 	mtx_lock_spin(&cpuset_lock);
280 	LIST_FOREACH(set, &cpuset_ids, cs_link)
281 		if (set->cs_id == setid)
282 			break;
283 	if (set)
284 		cpuset_ref(set);
285 	mtx_unlock_spin(&cpuset_lock);
286 
287 	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
288 	if (set != NULL && jailed(td->td_ucred)) {
289 		struct cpuset *jset, *tset;
290 
291 		jset = td->td_ucred->cr_prison->pr_cpuset;
292 		for (tset = set; tset != NULL; tset = tset->cs_parent)
293 			if (tset == jset)
294 				break;
295 		if (tset == NULL) {
296 			cpuset_rel(set);
297 			set = NULL;
298 		}
299 	}
300 
301 	return (set);
302 }
303 
304 /*
305  * Initialize a set in the space provided in 'set' with the provided parameters.
306  * The set is returned with a single ref.  May return EDEADLK if the set
307  * will have no valid cpu based on restrictions from the parent.
308  */
309 static int
310 cpuset_init(struct cpuset *set, struct cpuset *parent,
311     const cpuset_t *mask, struct domainset *domain, cpusetid_t id)
312 {
313 
314 	if (domain == NULL)
315 		domain = parent->cs_domain;
316 	if (mask == NULL)
317 		mask = &parent->cs_mask;
318 	if (!CPU_OVERLAP(&parent->cs_mask, mask))
319 		return (EDEADLK);
320 	/* The domain must be prepared ahead of time. */
321 	if (!domainset_valid(parent->cs_domain, domain))
322 		return (EDEADLK);
323 	CPU_COPY(mask, &set->cs_mask);
324 	LIST_INIT(&set->cs_children);
325 	refcount_init(&set->cs_ref, 1);
326 	set->cs_flags = 0;
327 	mtx_lock_spin(&cpuset_lock);
328 	set->cs_domain = domain;
329 	CPU_AND(&set->cs_mask, &parent->cs_mask);
330 	set->cs_id = id;
331 	set->cs_parent = cpuset_ref(parent);
332 	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
333 	if (set->cs_id != CPUSET_INVALID)
334 		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
335 	mtx_unlock_spin(&cpuset_lock);
336 
337 	return (0);
338 }
339 
340 /*
341  * Create a new non-anonymous set with the requested parent and mask.  May
342  * return failures if the mask is invalid or a new number can not be
343  * allocated.
344  *
345  * If *setp is not NULL, then it will be used as-is.  The caller must take
346  * into account that *setp will be inserted at the head of cpuset_ids and
347  * plan any potentially conflicting cs_link usage accordingly.
348  */
349 static int
350 cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
351 {
352 	struct cpuset *set;
353 	cpusetid_t id;
354 	int error;
355 	bool dofree;
356 
357 	id = alloc_unr(cpuset_unr);
358 	if (id == -1)
359 		return (ENFILE);
360 	dofree = (*setp == NULL);
361 	if (*setp != NULL)
362 		set = *setp;
363 	else
364 		*setp = set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
365 	error = cpuset_init(set, parent, mask, NULL, id);
366 	if (error == 0)
367 		return (0);
368 	free_unr(cpuset_unr, id);
369 	if (dofree)
370 		uma_zfree(cpuset_zone, set);
371 
372 	return (error);
373 }
374 
375 static void
376 cpuset_freelist_add(struct setlist *list, int count)
377 {
378 	struct cpuset *set;
379 	int i;
380 
381 	for (i = 0; i < count; i++) {
382 		set = uma_zalloc(cpuset_zone, M_ZERO | M_WAITOK);
383 		LIST_INSERT_HEAD(list, set, cs_link);
384 	}
385 }
386 
387 static void
388 cpuset_freelist_init(struct setlist *list, int count)
389 {
390 
391 	LIST_INIT(list);
392 	cpuset_freelist_add(list, count);
393 }
394 
395 static void
396 cpuset_freelist_free(struct setlist *list)
397 {
398 	struct cpuset *set;
399 
400 	while ((set = LIST_FIRST(list)) != NULL) {
401 		LIST_REMOVE(set, cs_link);
402 		uma_zfree(cpuset_zone, set);
403 	}
404 }
405 
406 static void
407 domainset_freelist_add(struct domainlist *list, int count)
408 {
409 	struct domainset *set;
410 	int i;
411 
412 	for (i = 0; i < count; i++) {
413 		set = uma_zalloc(domainset_zone, M_ZERO | M_WAITOK);
414 		LIST_INSERT_HEAD(list, set, ds_link);
415 	}
416 }
417 
418 static void
419 domainset_freelist_init(struct domainlist *list, int count)
420 {
421 
422 	LIST_INIT(list);
423 	domainset_freelist_add(list, count);
424 }
425 
426 static void
427 domainset_freelist_free(struct domainlist *list)
428 {
429 	struct domainset *set;
430 
431 	while ((set = LIST_FIRST(list)) != NULL) {
432 		LIST_REMOVE(set, ds_link);
433 		uma_zfree(domainset_zone, set);
434 	}
435 }
436 
437 /* Copy a domainset preserving mask and policy. */
438 static void
439 domainset_copy(const struct domainset *from, struct domainset *to)
440 {
441 
442 	DOMAINSET_COPY(&from->ds_mask, &to->ds_mask);
443 	to->ds_policy = from->ds_policy;
444 	to->ds_prefer = from->ds_prefer;
445 }
446 
447 /* Return 1 if mask and policy are equal, otherwise 0. */
448 static int
449 domainset_equal(const struct domainset *one, const struct domainset *two)
450 {
451 
452 	return (DOMAINSET_CMP(&one->ds_mask, &two->ds_mask) == 0 &&
453 	    one->ds_policy == two->ds_policy &&
454 	    one->ds_prefer == two->ds_prefer);
455 }
456 
457 /* Return 1 if child is a valid subset of parent. */
458 static int
459 domainset_valid(const struct domainset *parent, const struct domainset *child)
460 {
461 	if (child->ds_policy != DOMAINSET_POLICY_PREFER)
462 		return (DOMAINSET_SUBSET(&parent->ds_mask, &child->ds_mask));
463 	return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask));
464 }
465 
466 static int
467 domainset_restrict(const struct domainset *parent,
468     const struct domainset *child)
469 {
470 	if (child->ds_policy != DOMAINSET_POLICY_PREFER)
471 		return (DOMAINSET_OVERLAP(&parent->ds_mask, &child->ds_mask));
472 	return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask));
473 }
474 
475 /*
476  * Lookup or create a domainset.  The key is provided in ds_mask and
477  * ds_policy.  If the domainset does not yet exist the storage in
478  * 'domain' is used to insert.  Otherwise this storage is freed to the
479  * domainset_zone and the existing domainset is returned.
480  */
481 static struct domainset *
482 _domainset_create(struct domainset *domain, struct domainlist *freelist)
483 {
484 	struct domainset *ndomain;
485 	int i, j;
486 
487 	KASSERT(domain->ds_cnt <= vm_ndomains,
488 	    ("invalid domain count in domainset %p", domain));
489 	KASSERT(domain->ds_policy != DOMAINSET_POLICY_PREFER ||
490 	    domain->ds_prefer < vm_ndomains,
491 	    ("invalid preferred domain in domains %p", domain));
492 
493 	mtx_lock_spin(&cpuset_lock);
494 	LIST_FOREACH(ndomain, &cpuset_domains, ds_link)
495 		if (domainset_equal(ndomain, domain))
496 			break;
497 	/*
498 	 * If the domain does not yet exist we insert it and initialize
499 	 * various iteration helpers which are not part of the key.
500 	 */
501 	if (ndomain == NULL) {
502 		LIST_INSERT_HEAD(&cpuset_domains, domain, ds_link);
503 		domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask);
504 		for (i = 0, j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++)
505 			if (DOMAINSET_ISSET(i, &domain->ds_mask))
506 				domain->ds_order[j++] = i;
507 	}
508 	mtx_unlock_spin(&cpuset_lock);
509 	if (ndomain == NULL)
510 		return (domain);
511 	if (freelist != NULL)
512 		LIST_INSERT_HEAD(freelist, domain, ds_link);
513 	else
514 		uma_zfree(domainset_zone, domain);
515 	return (ndomain);
516 
517 }
518 
519 /*
520  * Are any of the domains in the mask empty?  If so, silently
521  * remove them and update the domainset accordingly.  If only empty
522  * domains are present, we must return failure.
523  */
524 static bool
525 domainset_empty_vm(struct domainset *domain)
526 {
527 	domainset_t empty;
528 	int i, j;
529 
530 	DOMAINSET_ZERO(&empty);
531 	for (i = 0; i < vm_ndomains; i++)
532 		if (VM_DOMAIN_EMPTY(i))
533 			DOMAINSET_SET(i, &empty);
534 	if (DOMAINSET_SUBSET(&empty, &domain->ds_mask))
535 		return (true);
536 
537 	/* Remove empty domains from the set and recompute. */
538 	DOMAINSET_ANDNOT(&domain->ds_mask, &empty);
539 	domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask);
540 	for (i = j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++)
541 		if (DOMAINSET_ISSET(i, &domain->ds_mask))
542 			domain->ds_order[j++] = i;
543 
544 	/* Convert a PREFER policy referencing an empty domain to RR. */
545 	if (domain->ds_policy == DOMAINSET_POLICY_PREFER &&
546 	    DOMAINSET_ISSET(domain->ds_prefer, &empty)) {
547 		domain->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
548 		domain->ds_prefer = -1;
549 	}
550 
551 	return (false);
552 }
553 
554 /*
555  * Create or lookup a domainset based on the key held in 'domain'.
556  */
557 struct domainset *
558 domainset_create(const struct domainset *domain)
559 {
560 	struct domainset *ndomain;
561 
562 	/*
563 	 * Validate the policy.  It must specify a useable policy number with
564 	 * only valid domains.  Preferred must include the preferred domain
565 	 * in the mask.
566 	 */
567 	if (domain->ds_policy <= DOMAINSET_POLICY_INVALID ||
568 	    domain->ds_policy > DOMAINSET_POLICY_MAX)
569 		return (NULL);
570 	if (domain->ds_policy == DOMAINSET_POLICY_PREFER &&
571 	    !DOMAINSET_ISSET(domain->ds_prefer, &domain->ds_mask))
572 		return (NULL);
573 	if (!DOMAINSET_SUBSET(&domainset0->ds_mask, &domain->ds_mask))
574 		return (NULL);
575 	ndomain = uma_zalloc(domainset_zone, M_WAITOK | M_ZERO);
576 	domainset_copy(domain, ndomain);
577 	return _domainset_create(ndomain, NULL);
578 }
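
/*
 * Illustrative only (not compiled): building a lookup key for
 * domainset_create().  The caller fills in ds_mask, ds_policy and, for
 * PREFER, ds_prefer; the function returns an interned domainset (creating
 * one if needed) or NULL when the key fails the validation above.
 */
#if 0
	struct domainset key, *dset;

	memset(&key, 0, sizeof(key));
	DOMAINSET_SET(0, &key.ds_mask);
	key.ds_policy = DOMAINSET_POLICY_PREFER;
	key.ds_prefer = 0;
	dset = domainset_create(&key);
	if (dset == NULL)
		printf("invalid domainset key\n");
#endif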
579 
580 /*
581  * Update thread domainset pointers.
582  */
583 static void
584 domainset_notify(void)
585 {
586 	struct thread *td;
587 	struct proc *p;
588 
589 	sx_slock(&allproc_lock);
590 	FOREACH_PROC_IN_SYSTEM(p) {
591 		PROC_LOCK(p);
592 		if (p->p_state == PRS_NEW) {
593 			PROC_UNLOCK(p);
594 			continue;
595 		}
596 		FOREACH_THREAD_IN_PROC(p, td) {
597 			thread_lock(td);
598 			td->td_domain.dr_policy = td->td_cpuset->cs_domain;
599 			thread_unlock(td);
600 		}
601 		PROC_UNLOCK(p);
602 	}
603 	sx_sunlock(&allproc_lock);
604 	kernel_object->domain.dr_policy = cpuset_kernel->cs_domain;
605 }
606 
607 /*
608  * Create a new set that is a subset of a parent.
609  */
610 static struct domainset *
611 domainset_shadow(const struct domainset *pdomain,
612     const struct domainset *domain, struct domainlist *freelist)
613 {
614 	struct domainset *ndomain;
615 
616 	ndomain = LIST_FIRST(freelist);
617 	LIST_REMOVE(ndomain, ds_link);
618 
619 	/*
620 	 * Initialize the key from the request.
621 	 */
622 	domainset_copy(domain, ndomain);
623 
624 	/*
625 	 * Restrict the key by the parent.
626 	 */
627 	DOMAINSET_AND(&ndomain->ds_mask, &pdomain->ds_mask);
628 
629 	return _domainset_create(ndomain, freelist);
630 }
631 
632 /*
633  * Recursively check for errors that would occur from applying mask to
634  * the tree of sets starting at 'set'.  Checks for sets that would become
635  * empty as well as RDONLY flags.
636  */
637 static int
638 cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int augment_mask)
639 {
640 	struct cpuset *nset;
641 	cpuset_t newmask;
642 	int error;
643 
644 	mtx_assert(&cpuset_lock, MA_OWNED);
645 	if (set->cs_flags & CPU_SET_RDONLY)
646 		return (EPERM);
647 	if (augment_mask) {
648 		CPU_COPY(&set->cs_mask, &newmask);
649 		CPU_AND(&newmask, mask);
650 	} else
651 		CPU_COPY(mask, &newmask);
652 
653 	if (CPU_EMPTY(&newmask))
654 		return (EDEADLK);
655 	error = 0;
656 	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
657 		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
658 			break;
659 	return (error);
660 }
661 
662 /*
663  * Applies the mask 'mask' without checking for empty sets or permissions.
664  */
665 static void
666 cpuset_update(struct cpuset *set, cpuset_t *mask)
667 {
668 	struct cpuset *nset;
669 
670 	mtx_assert(&cpuset_lock, MA_OWNED);
671 	CPU_AND(&set->cs_mask, mask);
672 	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
673 		cpuset_update(nset, &set->cs_mask);
674 
675 	return;
676 }
677 
678 /*
679  * Modify the set 'set' to use a copy of the mask provided.  Apply this new
680  * mask to restrict all children in the tree.  Checks for validity before
681  * applying the changes.
682  */
683 static int
684 cpuset_modify(struct cpuset *set, cpuset_t *mask)
685 {
686 	struct cpuset *root;
687 	int error;
688 
689 	error = priv_check(curthread, PRIV_SCHED_CPUSET);
690 	if (error)
691 		return (error);
692 	/*
693 	 * In case we are called from within the jail,
694 	 * we do not allow modifying the dedicated root
695 	 * cpuset of the jail but may still allow
696 	 * changing child sets, including subordinate jails'
697 	 * roots.
698 	 */
699 	if ((set->cs_flags & CPU_SET_ROOT) != 0 &&
700 	    jailed(curthread->td_ucred) &&
701 	    set == curthread->td_ucred->cr_prison->pr_cpuset)
702 		return (EPERM);
703 	/*
704 	 * Verify that we have access to this set of
705 	 * cpus.
706 	 */
707 	if ((set->cs_flags & (CPU_SET_ROOT | CPU_SET_RDONLY)) == CPU_SET_ROOT) {
708 		KASSERT(set->cs_parent != NULL,
709 		    ("jail.cpuset=%d is not a proper child of parent jail's root.",
710 		    set->cs_id));
711 
712 		/*
713 		 * cpuset_getroot() cannot work here due to how top-level jail
714 		 * roots are constructed.  Top-level jails are parented to
715 		 * thread0's cpuset (i.e. cpuset 1) rather than the system root.
716 		 */
717 		root = set->cs_parent;
718 	} else {
719 		root = cpuset_getroot(set);
720 	}
721 	mtx_lock_spin(&cpuset_lock);
722 	if (root && !CPU_SUBSET(&root->cs_mask, mask)) {
723 		error = EINVAL;
724 		goto out;
725 	}
726 	error = cpuset_testupdate(set, mask, 0);
727 	if (error)
728 		goto out;
729 	CPU_COPY(mask, &set->cs_mask);
730 	cpuset_update(set, mask);
731 out:
732 	mtx_unlock_spin(&cpuset_lock);
733 
734 	return (error);
735 }
736 
737 /*
738  * Recursively check for errors that would occur from applying mask to
739  * the tree of sets starting at 'set'.  Checks for sets that would become
740  * empty as well as RDONLY flags.
741  */
742 static int
743 cpuset_testupdate_domain(struct cpuset *set, struct domainset *dset,
744     struct domainset *orig, int *count, int augment_mask __unused)
745 {
746 	struct cpuset *nset;
747 	struct domainset *domain;
748 	struct domainset newset;
749 	int error;
750 
751 	mtx_assert(&cpuset_lock, MA_OWNED);
752 	if (set->cs_flags & CPU_SET_RDONLY)
753 		return (EPERM);
754 	domain = set->cs_domain;
755 	domainset_copy(domain, &newset);
756 	if (!domainset_equal(domain, orig)) {
757 		if (!domainset_restrict(domain, dset))
758 			return (EDEADLK);
759 		DOMAINSET_AND(&newset.ds_mask, &dset->ds_mask);
760 		/* Count the number of domains that are changing. */
761 		(*count)++;
762 	}
763 	error = 0;
764 	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
765 		if ((error = cpuset_testupdate_domain(nset, &newset, domain,
766 		    count, 1)) != 0)
767 			break;
768 	return (error);
769 }
770 
771 /*
772  * Applies the domainset 'domain' without checking for empty sets or permissions.
773  */
774 static void
775 cpuset_update_domain(struct cpuset *set, struct domainset *domain,
776     struct domainset *orig, struct domainlist *domains)
777 {
778 	struct cpuset *nset;
779 
780 	mtx_assert(&cpuset_lock, MA_OWNED);
781 	/*
782 	 * If this domainset has changed from the parent we must calculate
783 	 * a new set.  Otherwise it simply inherits from the parent.  When
784 	 * we inherit from the parent we get a new mask and policy.  If the
785 	 * set is modified from the parent we keep the policy and only
786 	 * update the mask.
787 	 */
788 	if (set->cs_domain != orig) {
789 		orig = set->cs_domain;
790 		set->cs_domain = domainset_shadow(domain, orig, domains);
791 	} else
792 		set->cs_domain = domain;
793 	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
794 		cpuset_update_domain(nset, set->cs_domain, orig, domains);
795 
796 	return;
797 }
798 
799 /*
800  * Modify the set 'set' to use a copy of the domainset provided.  Apply this new
801  * mask to restrict all children in the tree.  Checks for validity before
802  * applying the changes.
803  */
804 static int
805 cpuset_modify_domain(struct cpuset *set, struct domainset *domain)
806 {
807 	struct domainlist domains;
808 	struct domainset temp;
809 	struct domainset *dset;
810 	struct cpuset *root;
811 	int ndomains, needed;
812 	int error;
813 
814 	error = priv_check(curthread, PRIV_SCHED_CPUSET);
815 	if (error)
816 		return (error);
817 	/*
818 	 * In case we are called from within the jail,
819 	 * we do not allow modifying the dedicated root
820 	 * cpuset of the jail but may still allow
821 	 * changing child sets.
822 	 */
823 	if (jailed(curthread->td_ucred) &&
824 	    set->cs_flags & CPU_SET_ROOT)
825 		return (EPERM);
826 	domainset_freelist_init(&domains, 0);
827 	domain = domainset_create(domain);
828 	ndomains = 0;
829 
830 	mtx_lock_spin(&cpuset_lock);
831 	for (;;) {
832 		root = cpuset_getroot(set);
833 		dset = root->cs_domain;
834 		/*
835 		 * Verify that we have access to this set of domains.
836 		 */
837 		if (!domainset_valid(dset, domain)) {
838 			error = EINVAL;
839 			goto out;
840 		}
841 		/*
842 		 * If applying prefer we keep the current set as the fallback.
843 		 */
844 		if (domain->ds_policy == DOMAINSET_POLICY_PREFER)
845 			DOMAINSET_COPY(&set->cs_domain->ds_mask,
846 			    &domain->ds_mask);
847 		/*
848 		 * Determine whether we can apply this set of domains and
849 		 * how many new domain structures it will require.
850 		 */
851 		domainset_copy(domain, &temp);
852 		needed = 0;
853 		error = cpuset_testupdate_domain(set, &temp, set->cs_domain,
854 		    &needed, 0);
855 		if (error)
856 			goto out;
857 		if (ndomains >= needed)
858 			break;
859 
860 		/* Dropping the lock; we'll need to re-evaluate again. */
861 		mtx_unlock_spin(&cpuset_lock);
862 		domainset_freelist_add(&domains, needed - ndomains);
863 		ndomains = needed;
864 		mtx_lock_spin(&cpuset_lock);
865 	}
866 	dset = set->cs_domain;
867 	cpuset_update_domain(set, domain, dset, &domains);
868 out:
869 	mtx_unlock_spin(&cpuset_lock);
870 	domainset_freelist_free(&domains);
871 	if (error == 0)
872 		domainset_notify();
873 
874 	return (error);
875 }
876 
877 /*
878  * Resolve the 'which' parameter of several cpuset apis.
879  *
880  * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
881  * checks for permission via p_cansched().
882  *
883  * For WHICH_SET returns a valid set with a new reference.
884  *
885  * -1 may be supplied for any argument to mean the current proc/thread or
886  * the base set of the current thread.  May fail with ESRCH/EPERM.
887  */
888 int
889 cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
890     struct cpuset **setp)
891 {
892 	struct cpuset *set;
893 	struct thread *td;
894 	struct proc *p;
895 	int error;
896 
897 	*pp = p = NULL;
898 	*tdp = td = NULL;
899 	*setp = set = NULL;
900 	switch (which) {
901 	case CPU_WHICH_PID:
902 		if (id == -1) {
903 			PROC_LOCK(curproc);
904 			p = curproc;
905 			break;
906 		}
907 		if ((p = pfind(id)) == NULL)
908 			return (ESRCH);
909 		break;
910 	case CPU_WHICH_TID:
911 		if (id == -1) {
912 			PROC_LOCK(curproc);
913 			p = curproc;
914 			td = curthread;
915 			break;
916 		}
917 		td = tdfind(id, -1);
918 		if (td == NULL)
919 			return (ESRCH);
920 		p = td->td_proc;
921 		break;
922 	case CPU_WHICH_CPUSET:
923 		if (id == -1) {
924 			thread_lock(curthread);
925 			set = cpuset_refbase(curthread->td_cpuset);
926 			thread_unlock(curthread);
927 		} else
928 			set = cpuset_lookup(id, curthread);
929 		if (set) {
930 			*setp = set;
931 			return (0);
932 		}
933 		return (ESRCH);
934 	case CPU_WHICH_JAIL:
935 	{
936 		/* Find `set' for prison with given id. */
937 		struct prison *pr;
938 
939 		sx_slock(&allprison_lock);
940 		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
941 		sx_sunlock(&allprison_lock);
942 		if (pr == NULL)
943 			return (ESRCH);
944 		cpuset_ref(pr->pr_cpuset);
945 		*setp = pr->pr_cpuset;
946 		mtx_unlock(&pr->pr_mtx);
947 		return (0);
948 	}
949 	case CPU_WHICH_IRQ:
950 	case CPU_WHICH_DOMAIN:
951 		return (0);
952 	default:
953 		return (EINVAL);
954 	}
955 	error = p_cansched(curthread, p);
956 	if (error) {
957 		PROC_UNLOCK(p);
958 		return (error);
959 	}
960 	if (td == NULL)
961 		td = FIRST_THREAD_IN_PROC(p);
962 	*pp = p;
963 	*tdp = td;
964 	return (0);
965 }
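
/*
 * Illustrative only (not compiled): a typical cpuset_which() call for the
 * PID/TID cases.  Per the contract above, the proc is returned locked and
 * must be unlocked by the caller; for CPU_WHICH_CPUSET/CPU_WHICH_JAIL a
 * referenced set is returned in *setp instead.
 */
#if 0
	struct proc *p;
	struct thread *td;
	struct cpuset *set;
	int error;

	error = cpuset_which(CPU_WHICH_PID, -1, &p, &td, &set);
	if (error == 0) {
		/* ... examine p and td while the proc lock is held ... */
		PROC_UNLOCK(p);
	}
#endif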
966 
967 static int
968 cpuset_testshadow(struct cpuset *set, const cpuset_t *mask,
969     const struct domainset *domain)
970 {
971 	struct cpuset *parent;
972 	struct domainset *dset;
973 
974 	parent = cpuset_getbase(set);
975 	/*
976 	 * If we are restricting a cpu mask it must be a subset of the
977 	 * parent or invalid CPUs have been specified.
978 	 */
979 	if (mask != NULL && !CPU_SUBSET(&parent->cs_mask, mask))
980 		return (EINVAL);
981 
982 	/*
983 	 * If we are restricting a domain mask it must be a subset of the
984 	 * parent or invalid domains have been specified.
985 	 */
986 	dset = parent->cs_domain;
987 	if (domain != NULL && !domainset_valid(dset, domain))
988 		return (EINVAL);
989 
990 	return (0);
991 }
992 
993 /*
994  * Create an anonymous set with the provided mask in the space provided by
995  * 'nset'.  If the passed in set is anonymous we use its parent otherwise
996  * the new set is a child of 'set'.
997  */
998 static int
999 cpuset_shadow(struct cpuset *set, struct cpuset **nsetp,
1000    const cpuset_t *mask, const struct domainset *domain,
1001    struct setlist *cpusets, struct domainlist *domains)
1002 {
1003 	struct cpuset *parent;
1004 	struct cpuset *nset;
1005 	struct domainset *dset;
1006 	struct domainset *d;
1007 	int error;
1008 
1009 	error = cpuset_testshadow(set, mask, domain);
1010 	if (error)
1011 		return (error);
1012 
1013 	parent = cpuset_getbase(set);
1014 	dset = parent->cs_domain;
1015 	if (mask == NULL)
1016 		mask = &set->cs_mask;
1017 	if (domain != NULL)
1018 		d = domainset_shadow(dset, domain, domains);
1019 	else
1020 		d = set->cs_domain;
1021 	nset = LIST_FIRST(cpusets);
1022 	error = cpuset_init(nset, parent, mask, d, CPUSET_INVALID);
1023 	if (error == 0) {
1024 		LIST_REMOVE(nset, cs_link);
1025 		*nsetp = nset;
1026 	}
1027 	return (error);
1028 }
1029 
1030 static struct cpuset *
1031 cpuset_update_thread(struct thread *td, struct cpuset *nset)
1032 {
1033 	struct cpuset *tdset;
1034 
1035 	tdset = td->td_cpuset;
1036 	td->td_cpuset = nset;
1037 	td->td_domain.dr_policy = nset->cs_domain;
1038 	sched_affinity(td);
1039 
1040 	return (tdset);
1041 }
1042 
1043 static int
1044 cpuset_setproc_test_maskthread(struct cpuset *tdset, cpuset_t *mask,
1045     struct domainset *domain)
1046 {
1047 	struct cpuset *parent;
1048 
1049 	parent = cpuset_getbase(tdset);
1050 	if (mask == NULL)
1051 		mask = &tdset->cs_mask;
1052 	if (domain == NULL)
1053 		domain = tdset->cs_domain;
1054 	return cpuset_testshadow(parent, mask, domain);
1055 }
1056 
1057 static int
1058 cpuset_setproc_maskthread(struct cpuset *tdset, cpuset_t *mask,
1059     struct domainset *domain, struct cpuset **nsetp,
1060     struct setlist *freelist, struct domainlist *domainlist)
1061 {
1062 	struct cpuset *parent;
1063 
1064 	parent = cpuset_getbase(tdset);
1065 	if (mask == NULL)
1066 		mask = &tdset->cs_mask;
1067 	if (domain == NULL)
1068 		domain = tdset->cs_domain;
1069 	return cpuset_shadow(parent, nsetp, mask, domain, freelist,
1070 	    domainlist);
1071 }
1072 
1073 static int
1074 cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set,
1075     cpuset_t *mask, struct domainset *domain)
1076 {
1077 	struct cpuset *parent;
1078 
1079 	parent = cpuset_getbase(tdset);
1080 
1081 	/*
1082 	 * If the thread restricted its mask then apply that same
1083 	 * restriction to the new set, otherwise take it wholesale.
1084 	 */
1085 	if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) {
1086 		CPU_COPY(&tdset->cs_mask, mask);
1087 		CPU_AND(mask, &set->cs_mask);
1088 	} else
1089 		CPU_COPY(&set->cs_mask, mask);
1090 
1091 	/*
1092 	 * If the thread restricted the domain then we apply the
1093 	 * restriction to the new set but retain the policy.
1094 	 */
1095 	if (tdset->cs_domain != parent->cs_domain) {
1096 		domainset_copy(tdset->cs_domain, domain);
1097 		DOMAINSET_AND(&domain->ds_mask, &set->cs_domain->ds_mask);
1098 	} else
1099 		domainset_copy(set->cs_domain, domain);
1100 
1101 	if (CPU_EMPTY(mask) || DOMAINSET_EMPTY(&domain->ds_mask))
1102 		return (EDEADLK);
1103 
1104 	return (0);
1105 }
1106 
1107 static int
1108 cpuset_setproc_test_setthread(struct cpuset *tdset, struct cpuset *set)
1109 {
1110 	struct domainset domain;
1111 	cpuset_t mask;
1112 
1113 	if (tdset->cs_id != CPUSET_INVALID)
1114 		return (0);
1115 	return cpuset_setproc_setthread_mask(tdset, set, &mask, &domain);
1116 }
1117 
1118 static int
1119 cpuset_setproc_setthread(struct cpuset *tdset, struct cpuset *set,
1120     struct cpuset **nsetp, struct setlist *freelist,
1121     struct domainlist *domainlist)
1122 {
1123 	struct domainset domain;
1124 	cpuset_t mask;
1125 	int error;
1126 
1127 	/*
1128 	 * If we're replacing on a thread that has not constrained the
1129 	 * original set we can simply accept the new set.
1130 	 */
1131 	if (tdset->cs_id != CPUSET_INVALID) {
1132 		*nsetp = cpuset_ref(set);
1133 		return (0);
1134 	}
1135 	error = cpuset_setproc_setthread_mask(tdset, set, &mask, &domain);
1136 	if (error)
1137 		return (error);
1138 
1139 	return cpuset_shadow(set, nsetp, &mask, &domain, freelist,
1140 	    domainlist);
1141 }
1142 
1143 static int
1144 cpuset_setproc_newbase(struct thread *td, struct cpuset *set,
1145     struct cpuset *nroot, struct cpuset **nsetp,
1146     struct setlist *cpusets, struct domainlist *domainlist)
1147 {
1148 	struct domainset ndomain;
1149 	cpuset_t nmask;
1150 	struct cpuset *pbase;
1151 	int error;
1152 
1153 	pbase = cpuset_getbase(td->td_cpuset);
1154 
1155 	/* Copy process mask, then further apply the new root mask. */
1156 	CPU_COPY(&pbase->cs_mask, &nmask);
1157 	CPU_AND(&nmask, &nroot->cs_mask);
1158 
1159 	domainset_copy(pbase->cs_domain, &ndomain);
1160 	DOMAINSET_AND(&ndomain.ds_mask, &set->cs_domain->ds_mask);
1161 
1162 	/* Policy is too restrictive, will not work. */
1163 	if (CPU_EMPTY(&nmask) || DOMAINSET_EMPTY(&ndomain.ds_mask))
1164 		return (EDEADLK);
1165 
1166 	/*
1167 	 * Remove pbase from the freelist in advance, it'll be pushed to
1168 	 * cpuset_ids on success.  We assume here that cpuset_create() will not
1169 	 * touch pbase on failure, and we just enqueue it back to the freelist
1170 	 * to remain in a consistent state.
1171 	 */
1172 	pbase = LIST_FIRST(cpusets);
1173 	LIST_REMOVE(pbase, cs_link);
1174 	error = cpuset_create(&pbase, set, &nmask);
1175 	if (error != 0) {
1176 		LIST_INSERT_HEAD(cpusets, pbase, cs_link);
1177 		return (error);
1178 	}
1179 
1180 	/* Duplicates some work from above... oh well. */
1181 	pbase->cs_domain = domainset_shadow(set->cs_domain, &ndomain,
1182 	    domainlist);
1183 	*nsetp = pbase;
1184 	return (0);
1185 }
1186 
1187 /*
1188  * Handle four cases for updating an entire process.
1189  *
1190  * 1) Set is non-null and the process is not rebasing onto a new root.  This
1191  *    reparents all anonymous sets to the provided set and replaces all
1192  *    non-anonymous td_cpusets with the provided set.
1193  * 2) Set is non-null and the process is rebasing onto a new root.  This
1194  *    creates a new base set if the process previously had its own base set,
1195  *    then reparents all anonymous sets either to that set or the provided set
1196  *    if one was not created.  Non-anonymous sets are similarly replaced.
1197  * 3) Mask is non-null.  This replaces or creates anonymous sets for every
1198  *    thread with the existing base as a parent.
1199  * 4) domain is non-null.  This creates anonymous sets for every thread
1200  *    and replaces the domain set.
1201  *
1202  * This is overly complicated because we can't allocate while holding a
1203  * spinlock and spinlocks must be held while changing and examining thread
1204  * state.
1205  */
1206 static int
1207 cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask,
1208     struct domainset *domain, bool rebase)
1209 {
1210 	struct setlist freelist;
1211 	struct setlist droplist;
1212 	struct domainlist domainlist;
1213 	struct cpuset *base, *nset, *nroot, *tdroot;
1214 	struct thread *td;
1215 	struct proc *p;
1216 	int needed;
1217 	int nfree;
1218 	int error;
1219 
1220 	/*
1221 	 * The algorithm requires two passes due to locking considerations.
1222 	 *
1223 	 * 1) Lookup the process and acquire the locks in the required order.
1224 	 * 2) If enough cpusets have not been allocated release the locks and
1225 	 *    allocate them.  Loop.
1226 	 */
1227 	cpuset_freelist_init(&freelist, 1);
1228 	domainset_freelist_init(&domainlist, 1);
1229 	nfree = 1;
1230 	LIST_INIT(&droplist);
1231 	nfree = 0;
1232 	base = set;
1233 	nroot = NULL;
1234 	if (set != NULL)
1235 		nroot = cpuset_getroot(set);
1236 	for (;;) {
1237 		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
1238 		if (error)
1239 			goto out;
1240 		tdroot = cpuset_getroot(td->td_cpuset);
1241 		needed = p->p_numthreads;
1242 		if (set != NULL && rebase && tdroot != nroot)
1243 			needed++;
1244 		if (nfree >= needed)
1245 			break;
1246 		PROC_UNLOCK(p);
1247 		if (nfree < needed) {
1248 			cpuset_freelist_add(&freelist, needed - nfree);
1249 			domainset_freelist_add(&domainlist, needed - nfree);
1250 			nfree = needed;
1251 		}
1252 	}
1253 	PROC_LOCK_ASSERT(p, MA_OWNED);
1254 
1255 	/*
1256 	 * If we're changing roots and the root set is what has been specified
1257 	 * as the parent, then we'll check if the process was previously using
1258 	 * the root set and, if it wasn't, create a new base with the process's
1259 	 * mask applied to it.
1260 	 *
1261 	 * If the new root is incompatible with the existing mask, then we allow
1262 	 * the process to take on the new root if and only if they have
1263 	 * privilege to widen their mask anyways.  Unprivileged processes get
1264 	 * rejected with EDEADLK.
1265 	 */
1266 	if (set != NULL && rebase && nroot != tdroot) {
1267 		cpusetid_t base_id, root_id;
1268 
1269 		root_id = td->td_ucred->cr_prison->pr_cpuset->cs_id;
1270 		base_id = cpuset_getbase(td->td_cpuset)->cs_id;
1271 
1272 		if (base_id != root_id) {
1273 			error = cpuset_setproc_newbase(td, set, nroot, &base,
1274 			    &freelist, &domainlist);
1275 			if (error == EDEADLK &&
1276 			    priv_check(td, PRIV_SCHED_CPUSET) == 0)
1277 				error = 0;
1278 			if (error != 0)
1279 				goto unlock_out;
1280 		}
1281 	}
1282 
1283 	/*
1284 	 * Now that the appropriate locks are held and we have enough cpusets,
1285 	 * make sure the operation will succeed before applying changes. The
1286 	 * proc lock prevents td_cpuset from changing between calls.
1287 	 */
1288 	error = 0;
1289 	FOREACH_THREAD_IN_PROC(p, td) {
1290 		thread_lock(td);
1291 		if (set != NULL)
1292 			error = cpuset_setproc_test_setthread(td->td_cpuset,
1293 			    base);
1294 		else
1295 			error = cpuset_setproc_test_maskthread(td->td_cpuset,
1296 			    mask, domain);
1297 		thread_unlock(td);
1298 		if (error)
1299 			goto unlock_out;
1300 	}
1301 	/*
1302 	 * Replace each thread's cpuset while using deferred release.  We
1303 	 * must do this because the thread lock must be held while operating
1304 	 * on the thread and this limits the type of operations allowed.
1305 	 */
1306 	FOREACH_THREAD_IN_PROC(p, td) {
1307 		thread_lock(td);
1308 		if (set != NULL)
1309 			error = cpuset_setproc_setthread(td->td_cpuset, base,
1310 			    &nset, &freelist, &domainlist);
1311 		else
1312 			error = cpuset_setproc_maskthread(td->td_cpuset, mask,
1313 			    domain, &nset, &freelist, &domainlist);
1314 		if (error) {
1315 			thread_unlock(td);
1316 			break;
1317 		}
1318 		cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset));
1319 		thread_unlock(td);
1320 	}
1321 unlock_out:
1322 	PROC_UNLOCK(p);
1323 out:
1324 	if (base != NULL && base != set)
1325 		cpuset_rel(base);
1326 	while ((nset = LIST_FIRST(&droplist)) != NULL)
1327 		cpuset_rel_complete(nset);
1328 	cpuset_freelist_free(&freelist);
1329 	domainset_freelist_free(&domainlist);
1330 	return (error);
1331 }
1332 
1333 static int
1334 bitset_strprint(char *buf, size_t bufsiz, const struct bitset *set, int setlen)
1335 {
1336 	size_t bytes;
1337 	int i, once;
1338 	char *p;
1339 
1340 	once = 0;
1341 	p = buf;
1342 	for (i = 0; i < __bitset_words(setlen); i++) {
1343 		if (once != 0) {
1344 			if (bufsiz < 1)
1345 				return (0);
1346 			*p = ',';
1347 			p++;
1348 			bufsiz--;
1349 		} else
1350 			once = 1;
1351 		if (bufsiz < sizeof(__STRING(ULONG_MAX)))
1352 			return (0);
1353 		bytes = snprintf(p, bufsiz, "%lx", set->__bits[i]);
1354 		p += bytes;
1355 		bufsiz -= bytes;
1356 	}
1357 	return (p - buf);
1358 }
1359 
1360 static int
1361 bitset_strscan(struct bitset *set, int setlen, const char *buf)
1362 {
1363 	int i, ret;
1364 	const char *p;
1365 
1366 	BIT_ZERO(setlen, set);
1367 	p = buf;
1368 	for (i = 0; i < __bitset_words(setlen); i++) {
1369 		if (*p == ',') {
1370 			p++;
1371 			continue;
1372 		}
1373 		ret = sscanf(p, "%lx", &set->__bits[i]);
1374 		if (ret == 0 || ret == -1)
1375 			break;
1376 		while (isxdigit(*p))
1377 			p++;
1378 	}
1379 	return (p - buf);
1380 }
1381 
1382 /*
1383  * Return a string representing a valid layout for a cpuset_t object.
1384  * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
1385  */
1386 char *
1387 cpusetobj_strprint(char *buf, const cpuset_t *set)
1388 {
1389 
1390 	bitset_strprint(buf, CPUSETBUFSIZ, (const struct bitset *)set,
1391 	    CPU_SETSIZE);
1392 	return (buf);
1393 }
1394 
1395 /*
1396  * Build a valid cpuset_t object from a string representation.
1397  * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
1398  */
1399 int
1400 cpusetobj_strscan(cpuset_t *set, const char *buf)
1401 {
1402 	char p;
1403 
1404 	if (strlen(buf) > CPUSETBUFSIZ - 1)
1405 		return (-1);
1406 
1407 	p = buf[bitset_strscan((struct bitset *)set, CPU_SETSIZE, buf)];
1408 	if (p != '\0')
1409 		return (-1);
1410 
1411 	return (0);
1412 }
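
/*
 * Illustrative only (not compiled): the string layout handled above is one
 * "%lx" value per mask word, least significant word first, separated by
 * commas.  Assuming a cpuset_t of four 64-bit words, a mask holding cpus 0
 * and 3 round-trips as "9,0,0,0".
 */
#if 0
	cpuset_t set;
	char buf[CPUSETBUFSIZ];

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	CPU_SET(3, &set);
	cpusetobj_strprint(buf, &set);
	if (cpusetobj_strscan(&set, buf) != 0)
		printf("malformed cpuset string\n");
#endif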
1413 
1414 /*
1415  * Handle a domainset specifier in the sysctl tree.  A pointer to a pointer to
1416  * a domainset is in arg1.  If the user specifies a valid domainset the
1417  * pointer is updated.
1418  *
1419  * Format is:
1420  * hex mask word 0,hex mask word 1,...:decimal policy:decimal preferred
1421  */
1422 int
1423 sysctl_handle_domainset(SYSCTL_HANDLER_ARGS)
1424 {
1425 	char buf[DOMAINSETBUFSIZ];
1426 	struct domainset *dset;
1427 	struct domainset key;
1428 	int policy, prefer, error;
1429 	char *p;
1430 
1431 	dset = *(struct domainset **)arg1;
1432 	error = 0;
1433 
1434 	if (dset != NULL) {
1435 		p = buf + bitset_strprint(buf, DOMAINSETBUFSIZ,
1436 		    (const struct bitset *)&dset->ds_mask, DOMAINSET_SETSIZE);
1437 		sprintf(p, ":%d:%d", dset->ds_policy, dset->ds_prefer);
1438 	} else
1439 		sprintf(buf, "<NULL>");
1440 	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
1441 	if (error != 0 || req->newptr == NULL)
1442 		return (error);
1443 
1444 	/*
1445 	 * Read in and validate the string.
1446 	 */
1447 	memset(&key, 0, sizeof(key));
1448 	p = &buf[bitset_strscan((struct bitset *)&key.ds_mask,
1449 	    DOMAINSET_SETSIZE, buf)];
1450 	if (p == buf)
1451 		return (EINVAL);
1452 	if (sscanf(p, ":%d:%d", &policy, &prefer) != 2)
1453 		return (EINVAL);
1454 	key.ds_policy = policy;
1455 	key.ds_prefer = prefer;
1456 
1457 	/* Domainset_create() validates the policy. */
1458 	dset = domainset_create(&key);
1459 	if (dset == NULL)
1460 		return (EINVAL);
1461 	*(struct domainset **)arg1 = dset;
1462 
1463 	return (error);
1464 }
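
/*
 * Illustrative only (not compiled): composing a value in the format the
 * handler above accepts (hex mask words, decimal policy, decimal preferred
 * domain), assuming the domain mask fits in a single word.  For domains 0
 * and 1 with a round-robin policy this yields "3:<policy>:-1".
 */
#if 0
	char val[DOMAINSETBUFSIZ];

	snprintf(val, sizeof(val), "%lx:%d:%d",
	    (1UL << 0) | (1UL << 1),		/* hex mask word 0 */
	    DOMAINSET_POLICY_ROUNDROBIN,	/* decimal policy */
	    -1);				/* decimal preferred domain */
#endif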
1465 
1466 /*
1467  * Apply an anonymous mask or a domain to a single thread.
1468  */
1469 static int
1470 _cpuset_setthread(lwpid_t id, cpuset_t *mask, struct domainset *domain)
1471 {
1472 	struct setlist cpusets;
1473 	struct domainlist domainlist;
1474 	struct cpuset *nset;
1475 	struct cpuset *set;
1476 	struct thread *td;
1477 	struct proc *p;
1478 	int error;
1479 
1480 	cpuset_freelist_init(&cpusets, 1);
1481 	domainset_freelist_init(&domainlist, domain != NULL);
1482 	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
1483 	if (error)
1484 		goto out;
1485 	set = NULL;
1486 	thread_lock(td);
1487 	error = cpuset_shadow(td->td_cpuset, &nset, mask, domain,
1488 	    &cpusets, &domainlist);
1489 	if (error == 0)
1490 		set = cpuset_update_thread(td, nset);
1491 	thread_unlock(td);
1492 	PROC_UNLOCK(p);
1493 	if (set)
1494 		cpuset_rel(set);
1495 out:
1496 	cpuset_freelist_free(&cpusets);
1497 	domainset_freelist_free(&domainlist);
1498 	return (error);
1499 }
1500 
1501 /*
1502  * Apply an anonymous mask to a single thread.
1503  */
1504 int
1505 cpuset_setthread(lwpid_t id, cpuset_t *mask)
1506 {
1507 
1508 	return _cpuset_setthread(id, mask, NULL);
1509 }
1510 
1511 /*
1512  * Apply new cpumask to the ithread.
1513  */
1514 int
1515 cpuset_setithread(lwpid_t id, int cpu)
1516 {
1517 	cpuset_t mask;
1518 
1519 	CPU_ZERO(&mask);
1520 	if (cpu == NOCPU)
1521 		CPU_COPY(cpuset_root, &mask);
1522 	else
1523 		CPU_SET(cpu, &mask);
1524 	return _cpuset_setthread(id, &mask, NULL);
1525 }
1526 
1527 /*
1528  * Initialize static domainsets after NUMA information is available.  This is
1529  * called before memory allocators are initialized.
1530  */
1531 void
1532 domainset_init(void)
1533 {
1534 	struct domainset *dset;
1535 	int i;
1536 
1537 	dset = &domainset_firsttouch;
1538 	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
1539 	dset->ds_policy = DOMAINSET_POLICY_FIRSTTOUCH;
1540 	dset->ds_prefer = -1;
1541 	_domainset_create(dset, NULL);
1542 
1543 	dset = &domainset_interleave;
1544 	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
1545 	dset->ds_policy = DOMAINSET_POLICY_INTERLEAVE;
1546 	dset->ds_prefer = -1;
1547 	_domainset_create(dset, NULL);
1548 
1549 	dset = &domainset_roundrobin;
1550 	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
1551 	dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
1552 	dset->ds_prefer = -1;
1553 	_domainset_create(dset, NULL);
1554 
1555 	for (i = 0; i < vm_ndomains; i++) {
1556 		dset = &domainset_fixed[i];
1557 		DOMAINSET_ZERO(&dset->ds_mask);
1558 		DOMAINSET_SET(i, &dset->ds_mask);
1559 		dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
1560 		_domainset_create(dset, NULL);
1561 
1562 		dset = &domainset_prefer[i];
1563 		DOMAINSET_COPY(&all_domains, &dset->ds_mask);
1564 		dset->ds_policy = DOMAINSET_POLICY_PREFER;
1565 		dset->ds_prefer = i;
1566 		_domainset_create(dset, NULL);
1567 	}
1568 }
1569 
1570 /*
1571  * Define the domainsets for cpuset 0, 1 and cpuset 2.
1572  */
1573 void
1574 domainset_zero(void)
1575 {
1576 	struct domainset *dset, *tmp;
1577 
1578 	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
1579 
1580 	domainset0 = &domainset_firsttouch;
1581 	curthread->td_domain.dr_policy = domainset0;
1582 
1583 	domainset2 = &domainset_interleave;
1584 	kernel_object->domain.dr_policy = domainset2;
1585 
1586 	/* Remove empty domains from the global policies. */
1587 	LIST_FOREACH_SAFE(dset, &cpuset_domains, ds_link, tmp)
1588 		if (domainset_empty_vm(dset))
1589 			LIST_REMOVE(dset, ds_link);
1590 }
1591 
1592 /*
1593  * Creates system-wide cpusets and the cpuset for thread0 including three
1594  * sets:
1595  *
1596  * 0 - The root set which should represent all valid processors in the
1597  *     system.  This set is immutable.
1598  * 1 - The default set which all processes are a member of until changed.
1599  *     This allows an administrator to move all threads off of given cpus to
1600  *     dedicate them to high priority tasks or save power etc.
1601  * 2 - The kernel set which allows restriction and policy to be applied only
1602  *     to kernel threads and the kernel_object.
1603  */
1604 struct cpuset *
1605 cpuset_thread0(void)
1606 {
1607 	struct cpuset *set;
1608 	int i;
1609 	int error __unused;
1610 
1611 	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
1612 	    NULL, NULL, UMA_ALIGN_CACHE, 0);
1613 	domainset_zone = uma_zcreate("domainset", sizeof(struct domainset),
1614 	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
1615 
1616 	/*
1617 	 * Create the root system set (0) for the whole machine.  Doesn't use
1618 	 * cpuset_create() due to NULL parent.
1619 	 */
1620 	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
1621 	CPU_COPY(&all_cpus, &set->cs_mask);
1622 	LIST_INIT(&set->cs_children);
1623 	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
1624 	refcount_init(&set->cs_ref, 1);
1625 	set->cs_flags = CPU_SET_ROOT | CPU_SET_RDONLY;
1626 	set->cs_domain = domainset0;
1627 	cpuset_zero = set;
1628 	cpuset_root = &set->cs_mask;
1629 
1630 	/*
1631 	 * Now derive a default (1), modifiable set from that to give out.
1632 	 */
1633 	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
1634 	error = cpuset_init(set, cpuset_zero, NULL, NULL, 1);
1635 	KASSERT(error == 0, ("Error creating default set: %d\n", error));
1636 	cpuset_default = set;
1637 	/*
1638 	 * Create the kernel set (2).
1639 	 */
1640 	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
1641 	error = cpuset_init(set, cpuset_zero, NULL, NULL, 2);
1642 	KASSERT(error == 0, ("Error creating kernel set: %d\n", error));
1643 	set->cs_domain = domainset2;
1644 	cpuset_kernel = set;
1645 
1646 	/*
1647 	 * Initialize the unit allocator.  Ids 0, 1 and 2 are allocated above.
1648 	 */
1649 	cpuset_unr = new_unrhdr(3, INT_MAX, NULL);
1650 
1651 	/*
1652 	 * If MD code has not initialized per-domain cpusets, place all
1653 	 * CPUs in domain 0.
1654 	 */
1655 	for (i = 0; i < MAXMEMDOM; i++)
1656 		if (!CPU_EMPTY(&cpuset_domain[i]))
1657 			goto domains_set;
1658 	CPU_COPY(&all_cpus, &cpuset_domain[0]);
1659 domains_set:
1660 
1661 	return (cpuset_default);
1662 }
1663 
1664 void
1665 cpuset_kernthread(struct thread *td)
1666 {
1667 	struct cpuset *set;
1668 
1669 	thread_lock(td);
1670 	set = td->td_cpuset;
1671 	td->td_cpuset = cpuset_ref(cpuset_kernel);
1672 	thread_unlock(td);
1673 	cpuset_rel(set);
1674 }
1675 
1676 /*
1677  * Create a cpuset, which would be cpuset_create() but
1678  * mark the new 'set' as root.
1679  *
1680  * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
1681  * for that.
1682  *
1683  * In case of no error, returns the set in *setp locked with a reference.
1684  */
1685 int
1686 cpuset_create_root(struct prison *pr, struct cpuset **setp)
1687 {
1688 	struct cpuset *set;
1689 	int error;
1690 
1691 	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
1692 	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));
1693 
1694 	set = NULL;
1695 	error = cpuset_create(&set, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
1696 	if (error)
1697 		return (error);
1698 
1699 	KASSERT(set != NULL, ("[%s:%d] cpuset_create returned invalid data",
1700 	    __func__, __LINE__));
1701 
1702 	/* Mark the set as root. */
1703 	set->cs_flags |= CPU_SET_ROOT;
1704 	*setp = set;
1705 
1706 	return (0);
1707 }
1708 
1709 int
1710 cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
1711 {
1712 	int error;
1713 
1714 	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
1715 	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));
1716 
1717 	cpuset_ref(set);
1718 	error = cpuset_setproc(p->p_pid, set, NULL, NULL, true);
1719 	if (error)
1720 		return (error);
1721 	cpuset_rel(set);
1722 	return (0);
1723 }
1724 
1725 /*
1726  * In Capability mode, the only accesses that are permitted are to the current
1727  * thread and process' CPU and domain sets.
1728  */
1729 static int
1730 cpuset_check_capabilities(struct thread *td, cpulevel_t level, cpuwhich_t which,
1731     id_t id)
1732 {
1733 	if (IN_CAPABILITY_MODE(td)) {
1734 		if (level != CPU_LEVEL_WHICH)
1735 			return (ECAPMODE);
1736 		if (which != CPU_WHICH_TID && which != CPU_WHICH_PID)
1737 			return (ECAPMODE);
1738 		if (id != -1 &&
1739 		    !(which == CPU_WHICH_TID && id == td->td_tid) &&
1740 		    !(which == CPU_WHICH_PID && id == td->td_proc->p_pid))
1741 			return (ECAPMODE);
1742 	}
1743 	return (0);
1744 }
1745 
1746 #ifndef _SYS_SYSPROTO_H_
1747 struct cpuset_args {
1748 	cpusetid_t	*setid;
1749 };
1750 #endif
1751 int
1752 sys_cpuset(struct thread *td, struct cpuset_args *uap)
1753 {
1754 	struct cpuset *root;
1755 	struct cpuset *set;
1756 	int error;
1757 
1758 	thread_lock(td);
1759 	root = cpuset_refroot(td->td_cpuset);
1760 	thread_unlock(td);
1761 	set = NULL;
1762 	error = cpuset_create(&set, root, &root->cs_mask);
1763 	cpuset_rel(root);
1764 	if (error)
1765 		return (error);
1766 	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
1767 	if (error == 0)
1768 		error = cpuset_setproc(-1, set, NULL, NULL, false);
1769 	cpuset_rel(set);
1770 	return (error);
1771 }
1772 
1773 #ifndef _SYS_SYSPROTO_H_
1774 struct cpuset_setid_args {
1775 	cpuwhich_t	which;
1776 	id_t		id;
1777 	cpusetid_t	setid;
1778 };
1779 #endif
1780 int
1781 sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
1782 {
1783 
1784 	return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid));
1785 }
1786 
1787 int
1788 kern_cpuset_setid(struct thread *td, cpuwhich_t which,
1789     id_t id, cpusetid_t setid)
1790 {
1791 	struct cpuset *set;
1792 	int error;
1793 
1794 	/*
1795 	 * Presently we only support per-process sets.
1796 	 */
1797 	if (which != CPU_WHICH_PID)
1798 		return (EINVAL);
1799 	set = cpuset_lookup(setid, td);
1800 	if (set == NULL)
1801 		return (ESRCH);
1802 	error = cpuset_setproc(id, set, NULL, NULL, false);
1803 	cpuset_rel(set);
1804 	return (error);
1805 }
1806 
1807 #ifndef _SYS_SYSPROTO_H_
1808 struct cpuset_getid_args {
1809 	cpulevel_t	level;
1810 	cpuwhich_t	which;
1811 	id_t		id;
1812 	cpusetid_t	*setid;
1813 };
1814 #endif
1815 int
1816 sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
1817 {
1818 
1819 	return (kern_cpuset_getid(td, uap->level, uap->which, uap->id,
1820 	    uap->setid));
1821 }
1822 
1823 int
1824 kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which,
1825     id_t id, cpusetid_t *setid)
1826 {
1827 	struct cpuset *nset;
1828 	struct cpuset *set;
1829 	struct thread *ttd;
1830 	struct proc *p;
1831 	cpusetid_t tmpid;
1832 	int error;
1833 
1834 	if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET)
1835 		return (EINVAL);
1836 	error = cpuset_which(which, id, &p, &ttd, &set);
1837 	if (error)
1838 		return (error);
1839 	switch (which) {
1840 	case CPU_WHICH_TID:
1841 	case CPU_WHICH_PID:
1842 		thread_lock(ttd);
1843 		set = cpuset_refbase(ttd->td_cpuset);
1844 		thread_unlock(ttd);
1845 		PROC_UNLOCK(p);
1846 		break;
1847 	case CPU_WHICH_CPUSET:
1848 	case CPU_WHICH_JAIL:
1849 		break;
1850 	case CPU_WHICH_IRQ:
1851 	case CPU_WHICH_DOMAIN:
1852 		return (EINVAL);
1853 	}
1854 	switch (level) {
1855 	case CPU_LEVEL_ROOT:
1856 		nset = cpuset_refroot(set);
1857 		cpuset_rel(set);
1858 		set = nset;
1859 		break;
1860 	case CPU_LEVEL_CPUSET:
1861 		break;
1862 	case CPU_LEVEL_WHICH:
1863 		break;
1864 	}
1865 	tmpid = set->cs_id;
1866 	cpuset_rel(set);
1867 	if (error == 0)
1868 		error = copyout(&tmpid, setid, sizeof(tmpid));
1869 
1870 	return (error);
1871 }
1872 
1873 #ifndef _SYS_SYSPROTO_H_
1874 struct cpuset_getaffinity_args {
1875 	cpulevel_t	level;
1876 	cpuwhich_t	which;
1877 	id_t		id;
1878 	size_t		cpusetsize;
1879 	cpuset_t	*mask;
1880 };
1881 #endif
1882 int
1883 sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
1884 {
1885 
1886 	return (kern_cpuset_getaffinity(td, uap->level, uap->which,
1887 	    uap->id, uap->cpusetsize, uap->mask));
1888 }
1889 
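/*
 * Back end for cpuset_getaffinity(2): copy out the CPU mask of the target
 * at the requested level.  'cpusetsize' must be at least the kernel's
 * sizeof(cpuset_t) and no larger than CPU_MAXSIZE / NBBY.  For
 * CPU_LEVEL_WHICH on a process the result is the union of the masks of
 * all threads in that process.
 *
 * Illustrative userland usage (not part of this file):
 *	cpuset_t mask;
 *	cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);
 */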
1890 int
1891 kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
1892     id_t id, size_t cpusetsize, cpuset_t *maskp)
1893 {
1894 	struct thread *ttd;
1895 	struct cpuset *nset;
1896 	struct cpuset *set;
1897 	struct proc *p;
1898 	cpuset_t *mask;
1899 	int error;
1900 	size_t size;
1901 
1902 	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
1903 		return (ERANGE);
1904 	error = cpuset_check_capabilities(td, level, which, id);
1905 	if (error != 0)
1906 		return (error);
1907 	size = cpusetsize;
1908 	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
1909 	error = cpuset_which(which, id, &p, &ttd, &set);
1910 	if (error)
1911 		goto out;
1912 	switch (level) {
1913 	case CPU_LEVEL_ROOT:
1914 	case CPU_LEVEL_CPUSET:
1915 		switch (which) {
1916 		case CPU_WHICH_TID:
1917 		case CPU_WHICH_PID:
1918 			thread_lock(ttd);
1919 			set = cpuset_ref(ttd->td_cpuset);
1920 			thread_unlock(ttd);
1921 			break;
1922 		case CPU_WHICH_CPUSET:
1923 		case CPU_WHICH_JAIL:
1924 			break;
1925 		case CPU_WHICH_IRQ:
1926 		case CPU_WHICH_INTRHANDLER:
1927 		case CPU_WHICH_ITHREAD:
1928 		case CPU_WHICH_DOMAIN:
1929 			error = EINVAL;
1930 			goto out;
1931 		}
1932 		if (level == CPU_LEVEL_ROOT)
1933 			nset = cpuset_refroot(set);
1934 		else
1935 			nset = cpuset_refbase(set);
1936 		CPU_COPY(&nset->cs_mask, mask);
1937 		cpuset_rel(nset);
1938 		break;
1939 	case CPU_LEVEL_WHICH:
1940 		switch (which) {
1941 		case CPU_WHICH_TID:
1942 			thread_lock(ttd);
1943 			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
1944 			thread_unlock(ttd);
1945 			break;
1946 		case CPU_WHICH_PID:
1947 			FOREACH_THREAD_IN_PROC(p, ttd) {
1948 				thread_lock(ttd);
1949 				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
1950 				thread_unlock(ttd);
1951 			}
1952 			break;
1953 		case CPU_WHICH_CPUSET:
1954 		case CPU_WHICH_JAIL:
1955 			CPU_COPY(&set->cs_mask, mask);
1956 			break;
1957 		case CPU_WHICH_IRQ:
1958 		case CPU_WHICH_INTRHANDLER:
1959 		case CPU_WHICH_ITHREAD:
1960 			error = intr_getaffinity(id, which, mask);
1961 			break;
1962 		case CPU_WHICH_DOMAIN:
1963 			if (id < 0 || id >= MAXMEMDOM)
1964 				error = ESRCH;
1965 			else
1966 				CPU_COPY(&cpuset_domain[id], mask);
1967 			break;
1968 		}
1969 		break;
1970 	default:
1971 		error = EINVAL;
1972 		break;
1973 	}
1974 	if (set)
1975 		cpuset_rel(set);
1976 	if (p)
1977 		PROC_UNLOCK(p);
1978 	if (error == 0)
1979 		error = copyout(mask, maskp, size);
1980 out:
1981 	free(mask, M_TEMP);
1982 	return (error);
1983 }
1984 
1985 #ifndef _SYS_SYSPROTO_H_
1986 struct cpuset_setaffinity_args {
1987 	cpulevel_t	level;
1988 	cpuwhich_t	which;
1989 	id_t		id;
1990 	size_t		cpusetsize;
1991 	const cpuset_t	*mask;
1992 };
1993 #endif
1994 int
1995 sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
1996 {
1997 
1998 	return (kern_cpuset_setaffinity(td, uap->level, uap->which,
1999 	    uap->id, uap->cpusetsize, uap->mask));
2000 }
2001 
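/*
 * Back end for cpuset_setaffinity(2): validate and copy in the user mask,
 * then apply it.  A mask larger than the kernel's cpuset_t must have all
 * extra bytes clear, and an empty mask is rejected with EDEADLK.  At the
 * ROOT/CPUSET levels the named set itself is modified; at CPU_LEVEL_WHICH
 * the mask is applied directly to the thread, process, set, jail or
 * interrupt named by 'which'.
 *
 * Illustrative userland usage (not part of this file), pinning the calling
 * process to CPU 0:
 *	cpuset_t mask;
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);
 */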
2002 int
2003 kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
2004     id_t id, size_t cpusetsize, const cpuset_t *maskp)
2005 {
2006 	struct cpuset *nset;
2007 	struct cpuset *set;
2008 	struct thread *ttd;
2009 	struct proc *p;
2010 	cpuset_t *mask;
2011 	int error;
2012 
2013 	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
2014 		return (ERANGE);
2015 	error = cpuset_check_capabilities(td, level, which, id);
2016 	if (error != 0)
2017 		return (error);
2018 	mask = malloc(cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
2019 	error = copyin(maskp, mask, cpusetsize);
2020 	if (error)
2021 		goto out;
 2022 	/*
 2023 	 * Reject masks with bits set beyond the kernel's native cpuset_t.
 2024 	 */
2025 	if (cpusetsize > sizeof(cpuset_t)) {
2026 		char *end;
2027 		char *cp;
2028 
2029 		end = cp = (char *)&mask->__bits;
2030 		end += cpusetsize;
2031 		cp += sizeof(cpuset_t);
2032 		while (cp != end)
2033 			if (*cp++ != 0) {
2034 				error = EINVAL;
2035 				goto out;
2036 			}
2037 	}
2038 	if (CPU_EMPTY(mask)) {
2039 		error = EDEADLK;
2040 		goto out;
2041 	}
2042 	switch (level) {
2043 	case CPU_LEVEL_ROOT:
2044 	case CPU_LEVEL_CPUSET:
2045 		error = cpuset_which(which, id, &p, &ttd, &set);
2046 		if (error)
2047 			break;
2048 		switch (which) {
2049 		case CPU_WHICH_TID:
2050 		case CPU_WHICH_PID:
2051 			thread_lock(ttd);
2052 			set = cpuset_ref(ttd->td_cpuset);
2053 			thread_unlock(ttd);
2054 			PROC_UNLOCK(p);
2055 			break;
2056 		case CPU_WHICH_CPUSET:
2057 		case CPU_WHICH_JAIL:
2058 			break;
2059 		case CPU_WHICH_IRQ:
2060 		case CPU_WHICH_INTRHANDLER:
2061 		case CPU_WHICH_ITHREAD:
2062 		case CPU_WHICH_DOMAIN:
2063 			error = EINVAL;
2064 			goto out;
2065 		}
2066 		if (level == CPU_LEVEL_ROOT)
2067 			nset = cpuset_refroot(set);
2068 		else
2069 			nset = cpuset_refbase(set);
2070 		error = cpuset_modify(nset, mask);
2071 		cpuset_rel(nset);
2072 		cpuset_rel(set);
2073 		break;
2074 	case CPU_LEVEL_WHICH:
2075 		switch (which) {
2076 		case CPU_WHICH_TID:
2077 			error = cpuset_setthread(id, mask);
2078 			break;
2079 		case CPU_WHICH_PID:
2080 			error = cpuset_setproc(id, NULL, mask, NULL, false);
2081 			break;
2082 		case CPU_WHICH_CPUSET:
2083 		case CPU_WHICH_JAIL:
2084 			error = cpuset_which(which, id, &p, &ttd, &set);
2085 			if (error == 0) {
2086 				error = cpuset_modify(set, mask);
2087 				cpuset_rel(set);
2088 			}
2089 			break;
2090 		case CPU_WHICH_IRQ:
2091 		case CPU_WHICH_INTRHANDLER:
2092 		case CPU_WHICH_ITHREAD:
2093 			error = intr_setaffinity(id, which, mask);
2094 			break;
2095 		default:
2096 			error = EINVAL;
2097 			break;
2098 		}
2099 		break;
2100 	default:
2101 		error = EINVAL;
2102 		break;
2103 	}
2104 out:
2105 	free(mask, M_TEMP);
2106 	return (error);
2107 }
2108 
2109 #ifndef _SYS_SYSPROTO_H_
2110 struct cpuset_getdomain_args {
2111 	cpulevel_t	level;
2112 	cpuwhich_t	which;
2113 	id_t		id;
2114 	size_t		domainsetsize;
2115 	domainset_t	*mask;
2116 	int 		*policy;
2117 };
2118 #endif
2119 int
2120 sys_cpuset_getdomain(struct thread *td, struct cpuset_getdomain_args *uap)
2121 {
2122 
2123 	return (kern_cpuset_getdomain(td, uap->level, uap->which,
2124 	    uap->id, uap->domainsetsize, uap->mask, uap->policy));
2125 }
2126 
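/*
 * Back end for cpuset_getdomain(2): copy out the memory domain mask and
 * allocation policy of the target.  For a process the mask is the union
 * of all thread domain sets and the policy of the last thread examined
 * wins.  When the policy is DOMAINSET_POLICY_PREFER only the preferred
 * domain is reported in the mask, not the fallback set.
 */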
2127 int
2128 kern_cpuset_getdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
2129     id_t id, size_t domainsetsize, domainset_t *maskp, int *policyp)
2130 {
2131 	struct domainset outset;
2132 	struct thread *ttd;
2133 	struct cpuset *nset;
2134 	struct cpuset *set;
2135 	struct domainset *dset;
2136 	struct proc *p;
2137 	domainset_t *mask;
2138 	int error;
2139 
2140 	if (domainsetsize < sizeof(domainset_t) ||
2141 	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
2142 		return (ERANGE);
2143 	error = cpuset_check_capabilities(td, level, which, id);
2144 	if (error != 0)
2145 		return (error);
2146 	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
2147 	bzero(&outset, sizeof(outset));
2148 	error = cpuset_which(which, id, &p, &ttd, &set);
2149 	if (error)
2150 		goto out;
2151 	switch (level) {
2152 	case CPU_LEVEL_ROOT:
2153 	case CPU_LEVEL_CPUSET:
2154 		switch (which) {
2155 		case CPU_WHICH_TID:
2156 		case CPU_WHICH_PID:
2157 			thread_lock(ttd);
2158 			set = cpuset_ref(ttd->td_cpuset);
2159 			thread_unlock(ttd);
2160 			break;
2161 		case CPU_WHICH_CPUSET:
2162 		case CPU_WHICH_JAIL:
2163 			break;
2164 		case CPU_WHICH_IRQ:
2165 		case CPU_WHICH_INTRHANDLER:
2166 		case CPU_WHICH_ITHREAD:
2167 		case CPU_WHICH_DOMAIN:
2168 			error = EINVAL;
2169 			goto out;
2170 		}
2171 		if (level == CPU_LEVEL_ROOT)
2172 			nset = cpuset_refroot(set);
2173 		else
2174 			nset = cpuset_refbase(set);
2175 		domainset_copy(nset->cs_domain, &outset);
2176 		cpuset_rel(nset);
2177 		break;
2178 	case CPU_LEVEL_WHICH:
2179 		switch (which) {
2180 		case CPU_WHICH_TID:
2181 			thread_lock(ttd);
2182 			domainset_copy(ttd->td_cpuset->cs_domain, &outset);
2183 			thread_unlock(ttd);
2184 			break;
2185 		case CPU_WHICH_PID:
2186 			FOREACH_THREAD_IN_PROC(p, ttd) {
2187 				thread_lock(ttd);
2188 				dset = ttd->td_cpuset->cs_domain;
2189 				/* Show all domains in the proc. */
2190 				DOMAINSET_OR(&outset.ds_mask, &dset->ds_mask);
2191 				/* Last policy wins. */
2192 				outset.ds_policy = dset->ds_policy;
2193 				outset.ds_prefer = dset->ds_prefer;
2194 				thread_unlock(ttd);
2195 			}
2196 			break;
2197 		case CPU_WHICH_CPUSET:
2198 		case CPU_WHICH_JAIL:
2199 			domainset_copy(set->cs_domain, &outset);
2200 			break;
2201 		case CPU_WHICH_IRQ:
2202 		case CPU_WHICH_INTRHANDLER:
2203 		case CPU_WHICH_ITHREAD:
2204 		case CPU_WHICH_DOMAIN:
2205 			error = EINVAL;
2206 			break;
2207 		}
2208 		break;
2209 	default:
2210 		error = EINVAL;
2211 		break;
2212 	}
2213 	if (set)
2214 		cpuset_rel(set);
2215 	if (p)
2216 		PROC_UNLOCK(p);
2217 	/*
2218 	 * Translate prefer into a set containing only the preferred domain,
2219 	 * not the entire fallback set.
2220 	 */
2221 	if (outset.ds_policy == DOMAINSET_POLICY_PREFER) {
2222 		DOMAINSET_ZERO(&outset.ds_mask);
2223 		DOMAINSET_SET(outset.ds_prefer, &outset.ds_mask);
2224 	}
2225 	DOMAINSET_COPY(&outset.ds_mask, mask);
2226 	if (error == 0)
2227 		error = copyout(mask, maskp, domainsetsize);
2228 	if (error == 0)
2229 		if (suword32(policyp, outset.ds_policy) != 0)
2230 			error = EFAULT;
2231 out:
2232 	free(mask, M_TEMP);
2233 	return (error);
2234 }
2235 
2236 #ifndef _SYS_SYSPROTO_H_
2237 struct cpuset_setdomain_args {
2238 	cpulevel_t	level;
2239 	cpuwhich_t	which;
2240 	id_t		id;
2241 	size_t		domainsetsize;
2242 	domainset_t	*mask;
2243 	int 		policy;
2244 };
2245 #endif
2246 int
2247 sys_cpuset_setdomain(struct thread *td, struct cpuset_setdomain_args *uap)
2248 {
2249 
2250 	return (kern_cpuset_setdomain(td, uap->level, uap->which,
2251 	    uap->id, uap->domainsetsize, uap->mask, uap->policy));
2252 }
2253 
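/*
 * Back end for cpuset_setdomain(2): validate the size, policy and mask,
 * then install the new domain policy.  DOMAINSET_POLICY_PREFER requires
 * exactly one domain in the mask; that domain is remembered in ds_prefer
 * and the mask is widened to all domains so a fallback remains available.
 * A request the VM system cannot satisfy falls back to the default
 * interleave policy.
 *
 * Illustrative userland usage (not part of this file), preferring domain 0
 * for the calling process:
 *	domainset_t mask;
 *	DOMAINSET_ZERO(&mask);
 *	DOMAINSET_SET(0, &mask);
 *	cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask, DOMAINSET_POLICY_PREFER);
 */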
2254 int
2255 kern_cpuset_setdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
2256     id_t id, size_t domainsetsize, const domainset_t *maskp, int policy)
2257 {
2258 	struct cpuset *nset;
2259 	struct cpuset *set;
2260 	struct thread *ttd;
2261 	struct proc *p;
2262 	struct domainset domain;
2263 	domainset_t *mask;
2264 	int error;
2265 
2266 	if (domainsetsize < sizeof(domainset_t) ||
2267 	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
2268 		return (ERANGE);
2269 	if (policy <= DOMAINSET_POLICY_INVALID ||
2270 	    policy > DOMAINSET_POLICY_MAX)
2271 		return (EINVAL);
2272 	error = cpuset_check_capabilities(td, level, which, id);
2273 	if (error != 0)
2274 		return (error);
2275 	memset(&domain, 0, sizeof(domain));
2276 	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
2277 	error = copyin(maskp, mask, domainsetsize);
2278 	if (error)
2279 		goto out;
 2280 	/*
 2281 	 * Reject masks with bits set beyond the kernel's native domainset_t.
 2282 	 */
2283 	if (domainsetsize > sizeof(domainset_t)) {
2284 		char *end;
2285 		char *cp;
2286 
2287 		end = cp = (char *)&mask->__bits;
2288 		end += domainsetsize;
2289 		cp += sizeof(domainset_t);
2290 		while (cp != end)
2291 			if (*cp++ != 0) {
2292 				error = EINVAL;
2293 				goto out;
2294 			}
2295 	}
2296 	if (DOMAINSET_EMPTY(mask)) {
2297 		error = EDEADLK;
2298 		goto out;
2299 	}
2300 	DOMAINSET_COPY(mask, &domain.ds_mask);
2301 	domain.ds_policy = policy;
2302 
 2303 	/*
 2304 	 * Sanitize the provided mask: every requested domain must exist.
 2305 	 */
2306 	if (!DOMAINSET_SUBSET(&all_domains, &domain.ds_mask)) {
2307 		error = EINVAL;
2308 		goto out;
2309 	}
2310 
2311 	/* Translate preferred policy into a mask and fallback. */
2312 	if (policy == DOMAINSET_POLICY_PREFER) {
2313 		/* Only support a single preferred domain. */
2314 		if (DOMAINSET_COUNT(&domain.ds_mask) != 1) {
2315 			error = EINVAL;
2316 			goto out;
2317 		}
2318 		domain.ds_prefer = DOMAINSET_FFS(&domain.ds_mask) - 1;
2319 		/* This will be constrained by domainset_shadow(). */
2320 		DOMAINSET_COPY(&all_domains, &domain.ds_mask);
2321 	}
2322 
2323 	/*
2324 	 * When given an impossible policy, fall back to interleaving
2325 	 * across all domains.
2326 	 */
2327 	if (domainset_empty_vm(&domain))
2328 		domainset_copy(domainset2, &domain);
2329 
2330 	switch (level) {
2331 	case CPU_LEVEL_ROOT:
2332 	case CPU_LEVEL_CPUSET:
2333 		error = cpuset_which(which, id, &p, &ttd, &set);
2334 		if (error)
2335 			break;
2336 		switch (which) {
2337 		case CPU_WHICH_TID:
2338 		case CPU_WHICH_PID:
2339 			thread_lock(ttd);
2340 			set = cpuset_ref(ttd->td_cpuset);
2341 			thread_unlock(ttd);
2342 			PROC_UNLOCK(p);
2343 			break;
2344 		case CPU_WHICH_CPUSET:
2345 		case CPU_WHICH_JAIL:
2346 			break;
2347 		case CPU_WHICH_IRQ:
2348 		case CPU_WHICH_INTRHANDLER:
2349 		case CPU_WHICH_ITHREAD:
2350 		case CPU_WHICH_DOMAIN:
2351 			error = EINVAL;
2352 			goto out;
2353 		}
2354 		if (level == CPU_LEVEL_ROOT)
2355 			nset = cpuset_refroot(set);
2356 		else
2357 			nset = cpuset_refbase(set);
2358 		error = cpuset_modify_domain(nset, &domain);
2359 		cpuset_rel(nset);
2360 		cpuset_rel(set);
2361 		break;
2362 	case CPU_LEVEL_WHICH:
2363 		switch (which) {
2364 		case CPU_WHICH_TID:
2365 			error = _cpuset_setthread(id, NULL, &domain);
2366 			break;
2367 		case CPU_WHICH_PID:
2368 			error = cpuset_setproc(id, NULL, NULL, &domain, false);
2369 			break;
2370 		case CPU_WHICH_CPUSET:
2371 		case CPU_WHICH_JAIL:
2372 			error = cpuset_which(which, id, &p, &ttd, &set);
2373 			if (error == 0) {
2374 				error = cpuset_modify_domain(set, &domain);
2375 				cpuset_rel(set);
2376 			}
2377 			break;
2378 		case CPU_WHICH_IRQ:
2379 		case CPU_WHICH_INTRHANDLER:
2380 		case CPU_WHICH_ITHREAD:
2381 		default:
2382 			error = EINVAL;
2383 			break;
2384 		}
2385 		break;
2386 	default:
2387 		error = EINVAL;
2388 		break;
2389 	}
2390 out:
2391 	free(mask, M_TEMP);
2392 	return (error);
2393 }
2394 
2395 #ifdef DDB
2396 
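/*
 * DDB helpers: print the members of a bit set as a comma-separated list
 * of indices, or "<none>" when the set is empty.
 */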
2397 static void
2398 ddb_display_bitset(const struct bitset *set, int size)
2399 {
2400 	int bit, once;
2401 
2402 	for (once = 0, bit = 0; bit < size; bit++) {
2403 		if (CPU_ISSET(bit, set)) {
2404 			if (once == 0) {
2405 				db_printf("%d", bit);
2406 				once = 1;
2407 			} else
2408 				db_printf(",%d", bit);
2409 		}
2410 	}
2411 	if (once == 0)
2412 		db_printf("<none>");
2413 }
2414 
2415 void
2416 ddb_display_cpuset(const cpuset_t *set)
2417 {
2418 	ddb_display_bitset((const struct bitset *)set, CPU_SETSIZE);
2419 }
2420 
2421 static void
2422 ddb_display_domainset(const domainset_t *set)
2423 {
2424 	ddb_display_bitset((const struct bitset *)set, DOMAINSET_SETSIZE);
2425 }
2426 
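/*
 * "show cpusets": walk the list of numbered sets and dump each set's id,
 * reference count, flags, parent, CPU mask and domain policy.
 */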
2427 DB_SHOW_COMMAND(cpusets, db_show_cpusets)
2428 {
2429 	struct cpuset *set;
2430 
2431 	LIST_FOREACH(set, &cpuset_ids, cs_link) {
2432 		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
2433 		    set, set->cs_id, refcount_load(&set->cs_ref), set->cs_flags,
2434 		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
2435 		db_printf("  cpu mask=");
2436 		ddb_display_cpuset(&set->cs_mask);
2437 		db_printf("\n");
2438 		db_printf("  domain policy %d prefer %d mask=",
2439 		    set->cs_domain->ds_policy, set->cs_domain->ds_prefer);
2440 		ddb_display_domainset(&set->cs_domain->ds_mask);
2441 		db_printf("\n");
2442 		if (db_pager_quit)
2443 			break;
2444 	}
2445 }
2446 
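/*
 * "show domainsets": dump each domain set on the global list with its
 * policy, preferred domain and mask.
 */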
2447 DB_SHOW_COMMAND(domainsets, db_show_domainsets)
2448 {
2449 	struct domainset *set;
2450 
2451 	LIST_FOREACH(set, &cpuset_domains, ds_link) {
2452 		db_printf("set=%p policy %d prefer %d cnt %d\n",
2453 		    set, set->ds_policy, set->ds_prefer, set->ds_cnt);
2454 		db_printf("  mask =");
2455 		ddb_display_domainset(&set->ds_mask);
2456 		db_printf("\n");
2457 	}
2458 }
2459 #endif /* DDB */
2460