xref: /freebsd/sys/kern/kern_cpuset.c (revision d5fc25e5d6c52b306312784663ccad85923a9c76)
1 /*-
2  * Copyright (c) 2008,  Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Copyright (c) 2008 Nokia Corporation
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_ddb.h"
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/sysproto.h>
39 #include <sys/jail.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/mutex.h>
44 #include <sys/priv.h>
45 #include <sys/proc.h>
46 #include <sys/refcount.h>
47 #include <sys/sched.h>
48 #include <sys/smp.h>
49 #include <sys/syscallsubr.h>
50 #include <sys/cpuset.h>
51 #include <sys/sx.h>
53 #include <sys/queue.h>
54 #include <sys/limits.h>
55 #include <sys/bus.h>
56 #include <sys/interrupt.h>
57 
58 #include <vm/uma.h>
59 
60 #ifdef DDB
61 #include <ddb/ddb.h>
62 #endif /* DDB */
63 
64 /*
65  * cpusets provide a mechanism for creating and manipulating sets of
66  * processors for the purpose of constraining the scheduling of threads to
67  * specific processors.
68  *
69  * Each process belongs to an identified set; by default this is set 1.  Each
70  * thread may further restrict the cpus it may run on to a subset of this
71  * named set.  This creates an anonymous set which other threads and processes
72  * may not join by number.
73  *
74  * The named set is referred to herein as the 'base' set to avoid ambiguity.
75  * This set is usually a child of a 'root' set while the anonymous set may
76  * simply be referred to as a mask.  In the syscall api these are referred to
77  * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
78  *
79  * Threads inherit their set from their creator whether it be anonymous or
80  * not.  This means that anonymous sets are immutable because they may be
81  * shared.  To modify an anonymous set a new set is created with the desired
82  * mask and the same parent as the existing anonymous set.  This gives the
83  * illusion of each thread having a private mask.
84  *
85  * Via the syscall apis a user may ask to retrieve or modify the root, base,
86  * or mask that is discovered via a pid, tid, or setid.  Modifying a set
87  * modifies all numbered and anonymous child sets to comply with the new mask.
88  * Modifying a pid or tid's mask applies only to that tid, and the new mask
89  * must still fall within the assigned parent set.
90  *
91  * A thread may not be assigned to a group separate from other threads in
92  * the process.  This is to remove ambiguity when the setid is queried with
93  * a pid argument.  There is no other technical limitation.
94  *
95  * This somewhat complex arrangement is intended to make it easy for
96  * applications to query available processors and bind their threads to
97  * specific processors while also allowing administrators to dynamically
98  * reprovision by changing sets which apply to groups of processes.
99  *
100  * A simple application should not concern itself with sets at all; rather,
101  * it should apply masks to its own threads via CPU_WHICH_TID and a -1 id
102  * meaning 'curthread'.  It may query available cpus for that tid with a
103  * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
104  */
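/*
 * For example, per the description above, a simple application could pin
 * its current thread to cpu 0 by installing an anonymous mask through the
 * cpuset_setaffinity(2) syscall.  A minimal userland sketch, with error
 * handling elided:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	(void)cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);
 */
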
105 static uma_zone_t cpuset_zone;
106 static struct mtx cpuset_lock;
107 static struct setlist cpuset_ids;
108 static struct unrhdr *cpuset_unr;
109 static struct cpuset *cpuset_zero;
110 
111 cpuset_t *cpuset_root;
112 
113 /*
114  * Acquire a reference to a cpuset; all pointers must be tracked with refs.
115  */
116 struct cpuset *
117 cpuset_ref(struct cpuset *set)
118 {
119 
120 	refcount_acquire(&set->cs_ref);
121 	return (set);
122 }
123 
124 /*
125  * Walks up the tree from 'set' to find the root.  Returns the root
126  * referenced.
127  */
128 static struct cpuset *
129 cpuset_refroot(struct cpuset *set)
130 {
131 
132 	for (; set->cs_parent != NULL; set = set->cs_parent)
133 		if (set->cs_flags & CPU_SET_ROOT)
134 			break;
135 	cpuset_ref(set);
136 
137 	return (set);
138 }
139 
140 /*
141  * Find the first non-anonymous set starting from 'set'.  Returns this set
142  * referenced.  May return the passed in set with an extra ref if it is
143  * not anonymous.
144  */
145 static struct cpuset *
146 cpuset_refbase(struct cpuset *set)
147 {
148 
149 	if (set->cs_id == CPUSET_INVALID)
150 		set = set->cs_parent;
151 	cpuset_ref(set);
152 
153 	return (set);
154 }
155 
156 /*
157  * Release a reference in a context where it is safe to allocate.
158  */
159 void
160 cpuset_rel(struct cpuset *set)
161 {
162 	cpusetid_t id;
163 
164 	if (refcount_release(&set->cs_ref) == 0)
165 		return;
166 	mtx_lock_spin(&cpuset_lock);
167 	LIST_REMOVE(set, cs_siblings);
168 	id = set->cs_id;
169 	if (id != CPUSET_INVALID)
170 		LIST_REMOVE(set, cs_link);
171 	mtx_unlock_spin(&cpuset_lock);
172 	cpuset_rel(set->cs_parent);
173 	uma_zfree(cpuset_zone, set);
174 	if (id != CPUSET_INVALID)
175 		free_unr(cpuset_unr, id);
176 }
177 
178 /*
179  * Deferred release must be used when in a context that is not safe to
180  * allocate/free.  This places any unreferenced sets on the list 'head'.
181  */
182 static void
183 cpuset_rel_defer(struct setlist *head, struct cpuset *set)
184 {
185 
186 	if (refcount_release(&set->cs_ref) == 0)
187 		return;
188 	mtx_lock_spin(&cpuset_lock);
189 	LIST_REMOVE(set, cs_siblings);
190 	if (set->cs_id != CPUSET_INVALID)
191 		LIST_REMOVE(set, cs_link);
192 	LIST_INSERT_HEAD(head, set, cs_link);
193 	mtx_unlock_spin(&cpuset_lock);
194 }
195 
196 /*
197  * Complete a deferred release.  Removes the set from the list provided to
198  * cpuset_rel_defer.
199  */
200 static void
201 cpuset_rel_complete(struct cpuset *set)
202 {
203 	LIST_REMOVE(set, cs_link);
204 	cpuset_rel(set->cs_parent);
205 	uma_zfree(cpuset_zone, set);
206 }
207 
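/*
 * Together the two functions above give the pattern used by
 * cpuset_setproc() below: while thread locks are held, old sets are
 * queued with cpuset_rel_defer(), and once the locks are dropped the
 * queue is drained with cpuset_rel_complete().  A sketch:
 *
 *	struct setlist droplist;
 *
 *	LIST_INIT(&droplist);
 *	thread_lock(td);
 *	cpuset_rel_defer(&droplist, td->td_cpuset);
 *	td->td_cpuset = nset;
 *	thread_unlock(td);
 *	while ((tdset = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(tdset);
 */
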
208 /*
209  * Find a set based on an id.  Returns it with a ref.
210  */
211 static struct cpuset *
212 cpuset_lookup(cpusetid_t setid, struct thread *td)
213 {
214 	struct cpuset *set;
215 
216 	if (setid == CPUSET_INVALID)
217 		return (NULL);
218 	mtx_lock_spin(&cpuset_lock);
219 	LIST_FOREACH(set, &cpuset_ids, cs_link)
220 		if (set->cs_id == setid)
221 			break;
222 	if (set)
223 		cpuset_ref(set);
224 	mtx_unlock_spin(&cpuset_lock);
225 
226 	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
227 	if (set != NULL && jailed(td->td_ucred)) {
228 		struct cpuset *jset, *tset;
229 
230 		jset = td->td_ucred->cr_prison->pr_cpuset;
231 		for (tset = set; tset != NULL; tset = tset->cs_parent)
232 			if (tset == jset)
233 				break;
234 		if (tset == NULL) {
235 			cpuset_rel(set);
236 			set = NULL;
237 		}
238 	}
239 
240 	return (set);
241 }
242 
243 /*
244  * Create a set in the space provided in 'set' with the provided parameters.
245  * The set is returned with a single ref.  May return EDEADLK if the set
246  * will have no valid cpu based on restrictions from the parent.
247  */
248 static int
249 _cpuset_create(struct cpuset *set, struct cpuset *parent, cpuset_t *mask,
250     cpusetid_t id)
251 {
252 
253 	if (!CPU_OVERLAP(&parent->cs_mask, mask))
254 		return (EDEADLK);
255 	CPU_COPY(mask, &set->cs_mask);
256 	LIST_INIT(&set->cs_children);
257 	refcount_init(&set->cs_ref, 1);
258 	set->cs_flags = 0;
259 	mtx_lock_spin(&cpuset_lock);
260 	CPU_AND(mask, &parent->cs_mask);
261 	set->cs_id = id;
262 	set->cs_parent = cpuset_ref(parent);
263 	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
264 	if (set->cs_id != CPUSET_INVALID)
265 		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
266 	mtx_unlock_spin(&cpuset_lock);
267 
268 	return (0);
269 }
270 
271 /*
272  * Create a new non-anonymous set with the requested parent and mask.  May
273  * fail if the mask is invalid or a new set number cannot be
274  * allocated.
275  */
276 static int
277 cpuset_create(struct cpuset **setp, struct cpuset *parent, cpuset_t *mask)
278 {
279 	struct cpuset *set;
280 	cpusetid_t id;
281 	int error;
282 
283 	id = alloc_unr(cpuset_unr);
284 	if (id == -1)
285 		return (ENFILE);
286 	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
287 	error = _cpuset_create(set, parent, mask, id);
288 	if (error == 0)
289 		return (0);
290 	free_unr(cpuset_unr, id);
291 	uma_zfree(cpuset_zone, set);
292 
293 	return (error);
294 }
295 
296 /*
297  * Recursively check for errors that would occur from applying mask to
298  * the tree of sets starting at 'set'.  Checks for sets that would become
299  * empty as well as RDONLY flags.
300  */
301 static int
302 cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
303 {
304 	struct cpuset *nset;
305 	cpuset_t newmask;
306 	int error;
307 
308 	mtx_assert(&cpuset_lock, MA_OWNED);
309 	if (set->cs_flags & CPU_SET_RDONLY)
310 		return (EPERM);
311 	if (!CPU_OVERLAP(&set->cs_mask, mask))
312 		return (EDEADLK);
313 	CPU_COPY(&set->cs_mask, &newmask);
314 	CPU_AND(&newmask, mask);
315 	error = 0;
316 	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
317 		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
318 			break;
319 	return (error);
320 }
321 
322 /*
323  * Applies the mask 'mask' without checking for empty sets or permissions.
324  */
325 static void
326 cpuset_update(struct cpuset *set, cpuset_t *mask)
327 {
328 	struct cpuset *nset;
329 
330 	mtx_assert(&cpuset_lock, MA_OWNED);
331 	CPU_AND(&set->cs_mask, mask);
332 	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
333 		cpuset_update(nset, &set->cs_mask);
334 
335 	return;
336 }
337 
338 /*
339  * Modify the set 'set' to use a copy of the mask provided.  Apply this new
340  * mask to restrict all children in the tree.  Checks for validity before
341  * applying the changes.
342  */
343 static int
344 cpuset_modify(struct cpuset *set, cpuset_t *mask)
345 {
346 	struct cpuset *root;
347 	int error;
348 
349 	error = priv_check(curthread, PRIV_SCHED_CPUSET);
350 	if (error)
351 		return (error);
352 	/*
353 	 * If we are called from within a jail, we do not
354 	 * allow modifying the dedicated root cpuset of
355 	 * that jail, but changing its child sets is still
356 	 * permitted.
357 	 */
358 	if (jailed(curthread->td_ucred) &&
359 	    set->cs_flags & CPU_SET_ROOT)
360 		return (EPERM);
361 	/*
362 	 * Verify that we have access to this set of
363 	 * cpus.
364 	 */
365 	root = set->cs_parent;
366 	if (root && !CPU_SUBSET(&root->cs_mask, mask))
367 		return (EINVAL);
368 	mtx_lock_spin(&cpuset_lock);
369 	error = cpuset_testupdate(set, mask);
370 	if (error)
371 		goto out;
372 	cpuset_update(set, mask);
373 	CPU_COPY(mask, &set->cs_mask);
374 out:
375 	mtx_unlock_spin(&cpuset_lock);
376 
377 	return (error);
378 }
379 
380 /*
381  * Resolve the 'which' parameter of several cpuset apis.
382  *
383  * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
384  * checks for permission via p_cansched().
385  *
386  * For WHICH_SET returns a valid set with a new reference.
387  *
388  * An id of -1 may be supplied with any 'which' to mean the current
389  * proc/thread or the base set of the current thread.  May fail with ESRCH/EPERM.
390  */
391 static int
392 cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
393     struct cpuset **setp)
394 {
395 	struct cpuset *set;
396 	struct thread *td;
397 	struct proc *p;
398 	int error;
399 
400 	*pp = p = NULL;
401 	*tdp = td = NULL;
402 	*setp = set = NULL;
403 	switch (which) {
404 	case CPU_WHICH_PID:
405 		if (id == -1) {
406 			PROC_LOCK(curproc);
407 			p = curproc;
408 			break;
409 		}
410 		if ((p = pfind(id)) == NULL)
411 			return (ESRCH);
412 		break;
413 	case CPU_WHICH_TID:
414 		if (id == -1) {
415 			PROC_LOCK(curproc);
416 			p = curproc;
417 			td = curthread;
418 			break;
419 		}
420 		sx_slock(&allproc_lock);
421 		FOREACH_PROC_IN_SYSTEM(p) {
422 			PROC_LOCK(p);
423 			FOREACH_THREAD_IN_PROC(p, td)
424 				if (td->td_tid == id)
425 					break;
426 			if (td != NULL)
427 				break;
428 			PROC_UNLOCK(p);
429 		}
430 		sx_sunlock(&allproc_lock);
431 		if (td == NULL)
432 			return (ESRCH);
433 		break;
434 	case CPU_WHICH_CPUSET:
435 		if (id == -1) {
436 			thread_lock(curthread);
437 			set = cpuset_refbase(curthread->td_cpuset);
438 			thread_unlock(curthread);
439 		} else
440 			set = cpuset_lookup(id, curthread);
441 		if (set) {
442 			*setp = set;
443 			return (0);
444 		}
445 		return (ESRCH);
446 	case CPU_WHICH_JAIL:
447 	{
448 		/* Find `set' for prison with given id. */
449 		struct prison *pr;
450 
451 		sx_slock(&allprison_lock);
452 		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
453 		sx_sunlock(&allprison_lock);
454 		if (pr == NULL)
455 			return (ESRCH);
456 		cpuset_ref(pr->pr_cpuset);
457 		*setp = pr->pr_cpuset;
458 		mtx_unlock(&pr->pr_mtx);
459 		return (0);
460 	}
461 	case CPU_WHICH_IRQ:
462 		return (0);
463 	default:
464 		return (EINVAL);
465 	}
466 	error = p_cansched(curthread, p);
467 	if (error) {
468 		PROC_UNLOCK(p);
469 		return (error);
470 	}
471 	if (td == NULL)
472 		td = FIRST_THREAD_IN_PROC(p);
473 	*pp = p;
474 	*tdp = td;
475 	return (0);
476 }
477 
478 /*
479  * Create an anonymous set with the provided mask in the space provided by
480  * 'fset'.  If the passed-in set is anonymous we use its parent; otherwise
481  * the new set is a child of 'set'.
482  */
483 static int
484 cpuset_shadow(struct cpuset *set, struct cpuset *fset, cpuset_t *mask)
485 {
486 	struct cpuset *parent;
487 
488 	if (set->cs_id == CPUSET_INVALID)
489 		parent = set->cs_parent;
490 	else
491 		parent = set;
492 	if (!CPU_SUBSET(&parent->cs_mask, mask))
493 		return (EDEADLK);
494 	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
495 }
496 
497 /*
498  * Handle two cases for replacing the base set or mask of an entire process.
499  *
500  * 1) Set is non-null and mask is null.  This reparents all anonymous sets
501  *    to the provided set and replaces all non-anonymous td_cpusets with the
502  *    provided set.
503  * 2) Mask is non-null and set is null.  This replaces or creates anonymous
504  *    sets for every thread with the existing base as a parent.
505  *
506  * This is overly complicated because we can't allocate while holding a
507  * spinlock and spinlocks must be held while changing and examining thread
508  * state.
509  */
510 static int
511 cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
512 {
513 	struct setlist freelist;
514 	struct setlist droplist;
515 	struct cpuset *tdset;
516 	struct cpuset *nset;
517 	struct thread *td;
518 	struct proc *p;
519 	int threads;
520 	int nfree;
521 	int error;
522 	/*
523 	 * The algorithm requires two passes due to locking considerations.
524 	 *
525 	 * 1) Lookup the process and acquire the locks in the required order.
526 	 * 2) If enough cpusets have not been allocated release the locks and
527 	 *    allocate them.  Loop.
528 	 */
529 	LIST_INIT(&freelist);
530 	LIST_INIT(&droplist);
531 	nfree = 0;
532 	for (;;) {
533 		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
534 		if (error)
535 			goto out;
536 		if (nfree >= p->p_numthreads)
537 			break;
538 		threads = p->p_numthreads;
539 		PROC_UNLOCK(p);
540 		for (; nfree < threads; nfree++) {
541 			nset = uma_zalloc(cpuset_zone, M_WAITOK);
542 			LIST_INSERT_HEAD(&freelist, nset, cs_link);
543 		}
544 	}
545 	PROC_LOCK_ASSERT(p, MA_OWNED);
546 	/*
547 	 * Now that the appropriate locks are held and we have enough cpusets,
548 	 * make sure the operation will succeed before applying changes.  The
549 	 * proc lock prevents td_cpuset from changing between calls.
550 	 */
551 	error = 0;
552 	FOREACH_THREAD_IN_PROC(p, td) {
553 		thread_lock(td);
554 		tdset = td->td_cpuset;
555 		/*
556 		 * Verify that a new mask doesn't specify cpus outside of
557 		 * the set the thread is a member of.
558 		 */
559 		if (mask) {
560 			if (tdset->cs_id == CPUSET_INVALID)
561 				tdset = tdset->cs_parent;
562 			if (!CPU_SUBSET(&tdset->cs_mask, mask))
563 				error = EDEADLK;
564 		/*
565 		 * Verify that a new set won't leave an existing thread
566 		 * mask without a cpu to run on.  It can, however, restrict
567 		 * the set.
568 		 */
569 		} else if (tdset->cs_id == CPUSET_INVALID) {
570 			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
571 				error = EDEADLK;
572 		}
573 		thread_unlock(td);
574 		if (error)
575 			goto unlock_out;
576 	}
577 	/*
578 	 * Replace each thread's cpuset while using deferred release.  We
579 	 * must do this because the thread lock must be held while operating
580 	 * on the thread and this limits the type of operations allowed.
581 	 */
582 	FOREACH_THREAD_IN_PROC(p, td) {
583 		thread_lock(td);
584 		/*
585 		 * If we presently have an anonymous set or are applying a
586 		 * mask we must create an anonymous shadow set.  That is
587 		 * either parented to our existing base or the supplied set.
588 		 *
589 		 * If we have a base set with no anonymous shadow we simply
590 		 * replace it outright.
591 		 */
592 		tdset = td->td_cpuset;
593 		if (tdset->cs_id == CPUSET_INVALID || mask) {
594 			nset = LIST_FIRST(&freelist);
595 			LIST_REMOVE(nset, cs_link);
596 			if (mask)
597 				error = cpuset_shadow(tdset, nset, mask);
598 			else
599 				error = _cpuset_create(nset, set,
600 				    &tdset->cs_mask, CPUSET_INVALID);
601 			if (error) {
602 				LIST_INSERT_HEAD(&freelist, nset, cs_link);
603 				thread_unlock(td);
604 				break;
605 			}
606 		} else
607 			nset = cpuset_ref(set);
608 		cpuset_rel_defer(&droplist, tdset);
609 		td->td_cpuset = nset;
610 		sched_affinity(td);
611 		thread_unlock(td);
612 	}
613 unlock_out:
614 	PROC_UNLOCK(p);
615 out:
616 	while ((nset = LIST_FIRST(&droplist)) != NULL)
617 		cpuset_rel_complete(nset);
618 	while ((nset = LIST_FIRST(&freelist)) != NULL) {
619 		LIST_REMOVE(nset, cs_link);
620 		uma_zfree(cpuset_zone, nset);
621 	}
622 	return (error);
623 }
624 
625 /*
626  * Apply an anonymous mask to a single thread.
627  */
628 int
629 cpuset_setthread(lwpid_t id, cpuset_t *mask)
630 {
631 	struct cpuset *nset;
632 	struct cpuset *set;
633 	struct thread *td;
634 	struct proc *p;
635 	int error;
636 
637 	nset = uma_zalloc(cpuset_zone, M_WAITOK);
638 	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
639 	if (error)
640 		goto out;
641 	set = NULL;
642 	thread_lock(td);
643 	error = cpuset_shadow(td->td_cpuset, nset, mask);
644 	if (error == 0) {
645 		set = td->td_cpuset;
646 		td->td_cpuset = nset;
647 		sched_affinity(td);
648 		nset = NULL;
649 	}
650 	thread_unlock(td);
651 	PROC_UNLOCK(p);
652 	if (set)
653 		cpuset_rel(set);
654 out:
655 	if (nset)
656 		uma_zfree(cpuset_zone, nset);
657 	return (error);
658 }
659 
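/*
 * For instance, kernel code could use the function above to bind the
 * current thread to a single cpu; a sketch, where 'cpu' is a valid cpu
 * number and error handling is elided:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(cpu, &mask);
 *	error = cpuset_setthread(curthread->td_tid, &mask);
 */
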
660 /*
661  * Creates the cpuset for thread0.  We make two sets:
662  *
663  * 0 - The root set which should represent all valid processors in the
664  *     system.  It is initially created with a mask of all processors
665  *     because we don't know what processors are valid until cpuset_init()
666  *     runs.  This set is immutable.
667  * 1 - The default set which all processes are a member of until changed.
668  *     This allows an administrator to move all threads off of given cpus to
669  *     dedicate them to high-priority tasks or to save power, etc.
670  */
671 struct cpuset *
672 cpuset_thread0(void)
673 {
674 	struct cpuset *set;
675 	int error;
676 
677 	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
678 	    NULL, NULL, UMA_ALIGN_PTR, 0);
679 	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
680 	/*
681 	 * Create the root system set for the whole machine.  Doesn't use
682 	 * cpuset_create() due to NULL parent.
683 	 */
684 	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
685 	set->cs_mask.__bits[0] = -1;
686 	LIST_INIT(&set->cs_children);
687 	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
688 	set->cs_ref = 1;
689 	set->cs_flags = CPU_SET_ROOT;
690 	cpuset_zero = set;
691 	cpuset_root = &set->cs_mask;
692 	/*
693 	 * Now derive a default, modifiable set from that to give out.
694 	 */
695 	set = uma_zalloc(cpuset_zone, M_WAITOK);
696 	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
697 	KASSERT(error == 0, ("Error creating default set: %d\n", error));
698 	/*
699 	 * Initialize the unit allocator. 0 and 1 are allocated above.
700 	 */
701 	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);
702 
703 	return (set);
704 }
705 
706 /*
707  * Create a cpuset as cpuset_create() would, but additionally
708  * mark the new 'set' as root.
709  *
710  * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
711  * for that.
712  *
713  * On success, returns the set in *setp with a reference held.
714  */
715 int
716 cpuset_create_root(struct prison *pr, struct cpuset **setp)
717 {
718 	struct cpuset *set;
719 	int error;
720 
721 	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
722 	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));
723 
724 	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
725 	if (error)
726 		return (error);
727 
728 	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
729 	    __func__, __LINE__));
730 
731 	/* Mark the set as root. */
732 	set = *setp;
733 	set->cs_flags |= CPU_SET_ROOT;
734 
735 	return (0);
736 }
737 
738 int
739 cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
740 {
741 	int error;
742 
743 	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
744 	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));
745 
746 	cpuset_ref(set);
747 	error = cpuset_setproc(p->p_pid, set, NULL);
748 	cpuset_rel(set);
749 	if (error)
750 		return (error);
751 	return (0);
752 }
753 
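/*
 * The two functions above are meant to be paired; e.g. the jail code can
 * create a dedicated root set for a prison and then rebase a process onto
 * it.  A sketch:
 *
 *	error = cpuset_create_root(pr, &set);
 *	if (error == 0)
 *		error = cpuset_setproc_update_set(p, set);
 */
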
754 /*
755  * This is called once the final set of system cpus is known.  Modifies
756  * the root set and all children and marks the root read-only.
757  */
758 static void
759 cpuset_init(void *arg)
760 {
761 	cpuset_t mask;
762 
763 	CPU_ZERO(&mask);
764 #ifdef SMP
765 	mask.__bits[0] = all_cpus;
766 #else
767 	mask.__bits[0] = 1;
768 #endif
769 	if (cpuset_modify(cpuset_zero, &mask))
770 		panic("Can't set initial cpuset mask.\n");
771 	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
772 }
773 SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);
774 
775 #ifndef _SYS_SYSPROTO_H_
776 struct cpuset_args {
777 	cpusetid_t	*setid;
778 };
779 #endif
780 int
781 cpuset(struct thread *td, struct cpuset_args *uap)
782 {
783 	struct cpuset *root;
784 	struct cpuset *set;
785 	int error;
786 
787 	thread_lock(td);
788 	root = cpuset_refroot(td->td_cpuset);
789 	thread_unlock(td);
790 	error = cpuset_create(&set, root, &root->cs_mask);
791 	cpuset_rel(root);
792 	if (error)
793 		return (error);
794 	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
795 	if (error == 0)
796 		error = cpuset_setproc(-1, set, NULL);
797 	cpuset_rel(set);
798 	return (error);
799 }
800 
801 #ifndef _SYS_SYSPROTO_H_
802 struct cpuset_setid_args {
803 	cpuwhich_t	which;
804 	id_t		id;
805 	cpusetid_t	setid;
806 };
807 #endif
808 int
809 cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
810 {
811 	struct cpuset *set;
812 	int error;
813 
814 	/*
815 	 * Presently we only support per-process sets.
816 	 */
817 	if (uap->which != CPU_WHICH_PID)
818 		return (EINVAL);
819 	set = cpuset_lookup(uap->setid, td);
820 	if (set == NULL)
821 		return (ESRCH);
822 	error = cpuset_setproc(uap->id, set, NULL);
823 	cpuset_rel(set);
824 	return (error);
825 }
826 
827 #ifndef _SYS_SYSPROTO_H_
828 struct cpuset_getid_args {
829 	cpulevel_t	level;
830 	cpuwhich_t	which;
831 	id_t		id;
832 	cpusetid_t	*setid;
};
833 #endif
834 int
835 cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
836 {
837 	struct cpuset *nset;
838 	struct cpuset *set;
839 	struct thread *ttd;
840 	struct proc *p;
841 	cpusetid_t id;
842 	int error;
843 
844 	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
845 		return (EINVAL);
846 	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
847 	if (error)
848 		return (error);
849 	switch (uap->which) {
850 	case CPU_WHICH_TID:
851 	case CPU_WHICH_PID:
852 		thread_lock(ttd);
853 		set = cpuset_refbase(ttd->td_cpuset);
854 		thread_unlock(ttd);
855 		PROC_UNLOCK(p);
856 		break;
857 	case CPU_WHICH_CPUSET:
858 	case CPU_WHICH_JAIL:
859 		break;
860 	case CPU_WHICH_IRQ:
861 		return (EINVAL);
862 	}
863 	switch (uap->level) {
864 	case CPU_LEVEL_ROOT:
865 		nset = cpuset_refroot(set);
866 		cpuset_rel(set);
867 		set = nset;
868 		break;
869 	case CPU_LEVEL_CPUSET:
870 		break;
871 	case CPU_LEVEL_WHICH:
872 		break;
873 	}
874 	id = set->cs_id;
875 	cpuset_rel(set);
876 	if (error == 0)
877 		error = copyout(&id, uap->setid, sizeof(id));
878 
879 	return (error);
880 }
881 
882 #ifndef _SYS_SYSPROTO_H_
883 struct cpuset_getaffinity_args {
884 	cpulevel_t	level;
885 	cpuwhich_t	which;
886 	id_t		id;
887 	size_t		cpusetsize;
888 	cpuset_t	*mask;
889 };
890 #endif
891 int
892 cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
893 {
894 	struct thread *ttd;
895 	struct cpuset *nset;
896 	struct cpuset *set;
897 	struct proc *p;
898 	cpuset_t *mask;
899 	int error;
900 	size_t size;
901 
902 	if (uap->cpusetsize < sizeof(cpuset_t) ||
903 	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
904 		return (ERANGE);
905 	size = uap->cpusetsize;
906 	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
907 	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
908 	if (error)
909 		goto out;
910 	switch (uap->level) {
911 	case CPU_LEVEL_ROOT:
912 	case CPU_LEVEL_CPUSET:
913 		switch (uap->which) {
914 		case CPU_WHICH_TID:
915 		case CPU_WHICH_PID:
916 			thread_lock(ttd);
917 			set = cpuset_ref(ttd->td_cpuset);
918 			thread_unlock(ttd);
919 			break;
920 		case CPU_WHICH_CPUSET:
921 		case CPU_WHICH_JAIL:
922 			break;
923 		case CPU_WHICH_IRQ:
924 			error = EINVAL;
925 			goto out;
926 		}
927 		if (uap->level == CPU_LEVEL_ROOT)
928 			nset = cpuset_refroot(set);
929 		else
930 			nset = cpuset_refbase(set);
931 		CPU_COPY(&nset->cs_mask, mask);
932 		cpuset_rel(nset);
933 		break;
934 	case CPU_LEVEL_WHICH:
935 		switch (uap->which) {
936 		case CPU_WHICH_TID:
937 			thread_lock(ttd);
938 			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
939 			thread_unlock(ttd);
940 			break;
941 		case CPU_WHICH_PID:
942 			FOREACH_THREAD_IN_PROC(p, ttd) {
943 				thread_lock(ttd);
944 				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
945 				thread_unlock(ttd);
946 			}
947 			break;
948 		case CPU_WHICH_CPUSET:
949 		case CPU_WHICH_JAIL:
950 			CPU_COPY(&set->cs_mask, mask);
951 			break;
952 		case CPU_WHICH_IRQ:
953 			error = intr_getaffinity(uap->id, mask);
954 			break;
955 		}
956 		break;
957 	default:
958 		error = EINVAL;
959 		break;
960 	}
961 	if (set)
962 		cpuset_rel(set);
963 	if (p)
964 		PROC_UNLOCK(p);
965 	if (error == 0)
966 		error = copyout(mask, uap->mask, size);
967 out:
968 	free(mask, M_TEMP);
969 	return (error);
970 }
971 
972 #ifndef _SYS_SYSPROTO_H_
973 struct cpuset_setaffinity_args {
974 	cpulevel_t	level;
975 	cpuwhich_t	which;
976 	id_t		id;
977 	size_t		cpusetsize;
978 	const cpuset_t	*mask;
979 };
980 #endif
981 int
982 cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
983 {
984 	struct cpuset *nset;
985 	struct cpuset *set;
986 	struct thread *ttd;
987 	struct proc *p;
988 	cpuset_t *mask;
989 	int error;
990 
991 	if (uap->cpusetsize < sizeof(cpuset_t) ||
992 	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
993 		return (ERANGE);
994 	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
995 	error = copyin(uap->mask, mask, uap->cpusetsize);
996 	if (error)
997 		goto out;
998 	/*
999 	 * Verify that no high bits are set.
1000 	 */
1001 	if (uap->cpusetsize > sizeof(cpuset_t)) {
1002 		char *end;
1003 		char *cp;
1004 
1005 		end = cp = (char *)&mask->__bits;
1006 		end += uap->cpusetsize;
1007 		cp += sizeof(cpuset_t);
1008 		while (cp != end)
1009 			if (*cp++ != 0) {
1010 				error = EINVAL;
1011 				goto out;
1012 			}
1013 
1014 	}
1015 	switch (uap->level) {
1016 	case CPU_LEVEL_ROOT:
1017 	case CPU_LEVEL_CPUSET:
1018 		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
1019 		if (error)
1020 			break;
1021 		switch (uap->which) {
1022 		case CPU_WHICH_TID:
1023 		case CPU_WHICH_PID:
1024 			thread_lock(ttd);
1025 			set = cpuset_ref(ttd->td_cpuset);
1026 			thread_unlock(ttd);
1027 			PROC_UNLOCK(p);
1028 			break;
1029 		case CPU_WHICH_CPUSET:
1030 		case CPU_WHICH_JAIL:
1031 			break;
1032 		case CPU_WHICH_IRQ:
1033 			error = EINVAL;
1034 			goto out;
1035 		}
1036 		if (uap->level == CPU_LEVEL_ROOT)
1037 			nset = cpuset_refroot(set);
1038 		else
1039 			nset = cpuset_refbase(set);
1040 		error = cpuset_modify(nset, mask);
1041 		cpuset_rel(nset);
1042 		cpuset_rel(set);
1043 		break;
1044 	case CPU_LEVEL_WHICH:
1045 		switch (uap->which) {
1046 		case CPU_WHICH_TID:
1047 			error = cpuset_setthread(uap->id, mask);
1048 			break;
1049 		case CPU_WHICH_PID:
1050 			error = cpuset_setproc(uap->id, NULL, mask);
1051 			break;
1052 		case CPU_WHICH_CPUSET:
1053 		case CPU_WHICH_JAIL:
1054 			error = cpuset_which(uap->which, uap->id, &p,
1055 			    &ttd, &set);
1056 			if (error == 0) {
1057 				error = cpuset_modify(set, mask);
1058 				cpuset_rel(set);
1059 			}
1060 			break;
1061 		case CPU_WHICH_IRQ:
1062 			error = intr_setaffinity(uap->id, mask);
1063 			break;
1064 		default:
1065 			error = EINVAL;
1066 			break;
1067 		}
1068 		break;
1069 	default:
1070 		error = EINVAL;
1071 		break;
1072 	}
1073 out:
1074 	free(mask, M_TEMP);
1075 	return (error);
1076 }
1077 
1078 #ifdef DDB
1079 DB_SHOW_COMMAND(cpusets, db_show_cpusets)
1080 {
1081 	struct cpuset *set;
1082 	int cpu, once;
1083 
1084 	LIST_FOREACH(set, &cpuset_ids, cs_link) {
1085 		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
1086 		    set, set->cs_id, set->cs_ref, set->cs_flags,
1087 		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
1088 		db_printf("  mask=");
1089 		for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
1090 			if (CPU_ISSET(cpu, &set->cs_mask)) {
1091 				if (once == 0) {
1092 					db_printf("%d", cpu);
1093 					once = 1;
1094 				} else
1095 					db_printf(",%d", cpu);
1096 			}
1097 		}
1098 		db_printf("\n");
1099 		if (db_pager_quit)
1100 			break;
1101 	}
1102 }
1103 #endif /* DDB */
1104