/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/disp.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/modctl.h>
#include <sys/syscall.h>
#include <sys/task.h>
#include <sys/loadavg.h>
#include <sys/fss.h>
#include <sys/pool.h>
#include <sys/pool_pset.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/contract/process_impl.h>

static int	pset(int, long, long, long, long);

static struct sysent pset_sysent = {
	5,
	SE_ARGC | SE_NOUNLOAD,
	(int (*)())pset,
};

static struct modlsys modlsys = {
	&mod_syscallops, "processor sets", &pset_sysent
};

#ifdef _SYSCALL32_IMPL
static struct modlsys modlsys32 = {
	&mod_syscallops32, "32-bit pset(2) syscall", &pset_sysent
};
#endif

static struct modlinkage modlinkage = {
	MODREV_1,
	&modlsys,
#ifdef _SYSCALL32_IMPL
	&modlsys32,
#endif
	NULL
};

#define	PSET_BADATTR(attr)	((~PSET_NOESCAPE) & (attr))

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

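/*
 * Create an empty processor set and copy its id out to the caller.
 * Requires pset privilege; not supported while the pools framework is
 * enabled, since processor sets are then administered through pools.
 */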
static int
pset_create(psetid_t *psetp)
{
	psetid_t newpset;
	int error;

	if (secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}
	error = cpupart_create(&newpset);
	if (error) {
		pool_unlock();
		return (set_errno(error));
	}
	if (copyout(&newpset, psetp, sizeof (psetid_t)) != 0) {
		(void) cpupart_destroy(newpset);
		pool_unlock();
		return (set_errno(EFAULT));
	}
	pool_unlock();
	return (error);
}

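/*
 * Destroy the given processor set.  Like pset_create(), this requires
 * pset privilege and is not supported while pools are enabled.
 */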
static int
pset_destroy(psetid_t pset)
{
	int error;

	if (secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}
	error = cpupart_destroy(pset);
	pool_unlock();
	if (error)
		return (set_errno(error));
	else
		return (0);
}

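/*
 * Assign a CPU to a processor set, or with PS_QUERY merely look up the
 * set it currently belongs to.  The CPU's previous set is copied out
 * through opset when a buffer is supplied; "forced" distinguishes
 * PSET_ASSIGN_FORCED from PSET_ASSIGN.
 */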
static int
pset_assign(psetid_t pset, processorid_t cpuid, psetid_t *opset, int forced)
{
	psetid_t oldpset;
	int	error = 0;
	cpu_t	*cp;

	if (pset != PS_QUERY && secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pset != PS_QUERY && pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}

	mutex_enter(&cpu_lock);
	if ((cp = cpu_get(cpuid)) == NULL) {
		mutex_exit(&cpu_lock);
		pool_unlock();
		return (set_errno(EINVAL));
	}

	oldpset = cpupart_query_cpu(cp);

	if (pset != PS_QUERY)
		error = cpupart_attach_cpu(pset, cp, forced);
	mutex_exit(&cpu_lock);
	pool_unlock();

	if (error)
		return (set_errno(error));

	if (opset != NULL)
		if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
			return (set_errno(EFAULT));

	return (0);
}

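/*
 * Report the type of a processor set along with the number and list of
 * CPUs it contains, copying back no more CPU ids than the caller's
 * buffer can hold.
 */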
static int
pset_info(psetid_t pset, int *typep, uint_t *numcpusp,
    processorid_t *cpulistp)
{
	int pset_type;
	uint_t user_ncpus = 0, real_ncpus, copy_ncpus;
	processorid_t *pset_cpus = NULL;
	int error = 0;

	if (numcpusp != NULL) {
		if (copyin(numcpusp, &user_ncpus, sizeof (uint_t)) != 0)
			return (set_errno(EFAULT));
	}

	if (user_ncpus > max_ncpus)	/* sanity check */
		user_ncpus = max_ncpus;
	if (user_ncpus != 0 && cpulistp != NULL)
		pset_cpus = kmem_alloc(sizeof (processorid_t) * user_ncpus,
		    KM_SLEEP);

	real_ncpus = user_ncpus;
	if ((error = cpupart_get_cpus(&pset, pset_cpus, &real_ncpus)) != 0)
		goto out;

	/*
	 * Now copyout the information about this processor set.
	 */

	/*
	 * Get number of cpus to copy back.  If the user didn't pass in
	 * a big enough buffer, only copy back as many cpus as fit in
	 * the buffer but copy back the real number of cpus.
	 */

	if (user_ncpus != 0 && cpulistp != NULL) {
		copy_ncpus = MIN(real_ncpus, user_ncpus);
		if (copyout(pset_cpus, cpulistp,
		    sizeof (processorid_t) * copy_ncpus) != 0) {
			error = EFAULT;
			goto out;
		}
	}
	if (pset_cpus != NULL)
		kmem_free(pset_cpus, sizeof (processorid_t) * user_ncpus);
	if (typep != NULL) {
		if (pset == PS_NONE)
			pset_type = PS_NONE;
		else
			pset_type = PS_PRIVATE;
		if (copyout(&pset_type, typep, sizeof (int)) != 0)
			return (set_errno(EFAULT));
	}
	if (numcpusp != NULL)
		if (copyout(&real_ncpus, numcpusp, sizeof (uint_t)) != 0)
			return (set_errno(EFAULT));
	return (0);

out:
	if (pset_cpus != NULL)
		kmem_free(pset_cpus, sizeof (processorid_t) * user_ncpus);
	return (set_errno(error));
}

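/*
 * Bind a single thread to the given processor set, or just report its
 * current binding when pset is PS_QUERY.  The caller must hold
 * pool_lock(), cpu_lock, and the target process's p_lock, as the
 * ASSERTs below require.
 */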
static int
pset_bind_thread(kthread_t *tp, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;

	ASSERT(pool_lock_held());
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));

	*oldpset = tp->t_bind_pset;
	if (pset != PS_QUERY) {
		/*
		 * Must have the same UID as the target process or
		 * have PRIV_PROC_OWNER privilege.
		 */
		if (!hasprocperm(tp->t_cred, CRED()))
			return (EPERM);
		/*
		 * Unbinding of an unbound thread should always succeed.
		 */
		if (*oldpset == PS_NONE && pset == PS_NONE)
			return (0);
		/*
		 * Only privileged processes can move threads from psets with
		 * PSET_NOESCAPE attribute.
		 */
		if ((tp->t_cpupart->cp_attr & PSET_NOESCAPE) &&
		    secpolicy_pset(CRED()) != 0)
			return (EPERM);
		if ((error = cpupart_bind_thread(tp, pset, 0,
		    projbuf, zonebuf)) == 0)
			tp->t_bind_pset = pset;
	}
	return (error);
}

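/*
 * Bind every thread in a process, skipping kernel (SSYS) processes.
 * The first error encountered is preserved, but the remaining threads
 * are still processed.
 */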
static int
pset_bind_process(proc_t *pp, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;
	kthread_t *tp;

	/* skip kernel processes */
	if (pset != PS_QUERY && pp->p_flag & SSYS) {
		*oldpset = PS_NONE;
		return (0);
	}

	mutex_enter(&pp->p_lock);
	tp = pp->p_tlist;
	if (tp != NULL) {
		do {
			int rval;

			rval = pset_bind_thread(tp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		} while ((tp = tp->t_forw) != pp->p_tlist);
	} else
		error = ESRCH;
	mutex_exit(&pp->p_lock);

	return (error);
}

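/*
 * Bind every process that is a member of the given task.
 */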
static int
pset_bind_task(task_t *tk, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	if ((pp = tk->tk_memb_list) == NULL) {
		return (ESRCH);
	}

	do {
		int rval;

		rval = pset_bind_process(pp, pset, oldpset, projbuf, zonebuf);
		if (error == 0)
			error = rval;
	} while ((pp = pp->p_tasknext) != tk->tk_memb_list);

	return (error);
}

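/*
 * Bind every process belonging to the given project, skipping
 * processes that have no threads.
 */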
static int
pset_bind_project(kproject_t *kpj, psetid_t pset, psetid_t *oldpset,
    void *projbuf, void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	for (pp = practive; pp != NULL; pp = pp->p_next) {
		if (pp->p_tlist == NULL)
			continue;
		if (pp->p_task->tk_proj == kpj) {
			int rval;

			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	}

	return (error);
}

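/*
 * Bind every process running in the given zone.
 */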
static int
pset_bind_zone(zone_t *zptr, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	for (pp = practive; pp != NULL; pp = pp->p_next) {
		if (pp->p_zone == zptr) {
			int rval;

			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	}

	return (error);
}

/*
 * Unbind all threads from the specified processor set, or from all
 * processor sets.
 */
static int
pset_unbind(psetid_t pset, void *projbuf, void *zonebuf, idtype_t idtype)
{
	psetid_t olbind;
	kthread_t *tp;
	int error = 0;
	int rval;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (idtype == P_PSETID && cpupart_find(pset) == NULL)
		return (EINVAL);

	mutex_enter(&pidlock);
	for (pp = practive; pp != NULL; pp = pp->p_next) {
		mutex_enter(&pp->p_lock);
		tp = pp->p_tlist;
		/*
		 * Skip zombies and kernel processes, and processes in
		 * other zones, if called from a non-global zone.
		 */
		if (tp == NULL || (pp->p_flag & SSYS) ||
		    !HASZONEACCESS(curproc, pp->p_zone->zone_id)) {
			mutex_exit(&pp->p_lock);
			continue;
		}
		do {
			if ((idtype == P_PSETID && tp->t_bind_pset != pset) ||
			    (idtype == P_ALL && tp->t_bind_pset == PS_NONE))
				continue;
			rval = pset_bind_thread(tp, PS_NONE, &olbind,
			    projbuf, zonebuf);
			if (error == 0)
				error = rval;
		} while ((tp = tp->t_forw) != pp->p_tlist);
		mutex_exit(&pp->p_lock);
	}
	mutex_exit(&pidlock);
	return (error);
}

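/*
 * Bind every process that is a member of the given process contract.
 */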
static int
pset_bind_contract(cont_process_t *ctp, psetid_t pset, psetid_t *oldpset,
    void *projbuf, void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	for (pp = practive; pp != NULL; pp = pp->p_next) {
		if (pp->p_ct_process == ctp) {
			int rval;

			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	}

	return (error);
}

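/*
 * Handler for PSET_BIND: bind the threads selected by the (idtype, id)
 * pair to the given processor set, or report the existing binding when
 * pset is PS_QUERY.  Supports LWPs, processes, tasks, projects, zones,
 * and process contracts, plus unbinding an entire processor set
 * (P_PSETID) or every bound thread in the system (P_ALL).
 */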
static int
pset_bind(psetid_t pset, idtype_t idtype, id_t id, psetid_t *opset)
{
	kthread_t	*tp;
	proc_t		*pp;
	task_t		*tk;
	kproject_t	*kpj;
	contract_t	*ct;
	zone_t		*zptr;
	psetid_t	oldpset;
	int		error = 0;
	void		*projbuf, *zonebuf;

	pool_lock();
	if (pset != PS_QUERY) {
		/*
		 * Check if the set actually exists before checking
		 * permissions.  This is the historical error
		 * precedence.  Note that if pset was PS_MYID, the
		 * cpupart_get_cpus call will change it to the
		 * processor set id of the caller (or PS_NONE if the
		 * caller is not bound to a processor set).
		 */
		if (pool_state == POOL_ENABLED) {
			pool_unlock();
			return (set_errno(ENOTSUP));
		}
		if (cpupart_get_cpus(&pset, NULL, NULL) != 0) {
			pool_unlock();
			return (set_errno(EINVAL));
		} else if (pset != PS_NONE && secpolicy_pset(CRED()) != 0) {
			pool_unlock();
			return (set_errno(EPERM));
		}
	}

	/*
	 * Pre-allocate enough buffers for FSS for all active projects
	 * and for all active zones on the system.  Unused buffers will
	 * be freed later by fss_freebuf().
	 */
	mutex_enter(&cpu_lock);
	projbuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_PROJ);
	zonebuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_ZONE);

	switch (idtype) {
	case P_LWPID:
		pp = curproc;
		mutex_enter(&pidlock);
		mutex_enter(&pp->p_lock);
		if (id == P_MYID) {
			tp = curthread;
		} else {
			if ((tp = idtot(pp, id)) == NULL) {
				mutex_exit(&pp->p_lock);
				mutex_exit(&pidlock);
				error = ESRCH;
				break;
			}
		}
		error = pset_bind_thread(tp, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pp->p_lock);
		mutex_exit(&pidlock);
		break;

	case P_PID:
		mutex_enter(&pidlock);
		if (id == P_MYID) {
			pp = curproc;
		} else if ((pp = prfind(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}
		error = pset_bind_process(pp, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		break;

	case P_TASKID:
		mutex_enter(&pidlock);
		if (id == P_MYID)
			id = curproc->p_task->tk_tkid;
		if ((tk = task_hold_by_id(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}
		error = pset_bind_task(tk, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		task_rele(tk);
		break;

	case P_PROJID:
		if (id == P_MYID)
			id = curprojid();
		if ((kpj = project_hold_by_id(id, getzoneid(),
		    PROJECT_HOLD_FIND)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_project(kpj, pset, &oldpset, projbuf,
		    zonebuf);
		mutex_exit(&pidlock);
		project_rele(kpj);
		break;

	case P_ZONEID:
		if (id == P_MYID)
			id = getzoneid();
		if ((zptr = zone_find_by_id(id)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_zone(zptr, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		zone_rele(zptr);
		break;

	case P_CTID:
		if (id == P_MYID)
			id = PRCTID(curproc);
		if ((ct = contract_type_ptr(process_type, id,
		    curproc->p_zone->zone_uniqid)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_contract(ct->ct_data, pset, &oldpset, projbuf,
		    zonebuf);
		mutex_exit(&pidlock);
		contract_rele(ct);
		break;

	case P_PSETID:
		if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
			error = EINVAL;
			break;
		}
		error = pset_unbind(id, projbuf, zonebuf, idtype);
		break;

	case P_ALL:
		if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
			error = EINVAL;
			break;
		}
		error = pset_unbind(PS_NONE, projbuf, zonebuf, idtype);
		break;

	default:
		error = EINVAL;
		break;
	}

	fss_freebuf(projbuf, FSS_ALLOC_PROJ);
	fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
	mutex_exit(&cpu_lock);
	pool_unlock();

	if (error != 0)
		return (set_errno(error));
	if (opset != NULL) {
		if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
			return (set_errno(EFAULT));
	}
	return (0);
}

/*
 * Report load average statistics for the specified processor set.
 */
static int
pset_getloadavg(psetid_t pset, int *buf, int nelem)
{
	int *loadbuf;
	int error = 0;

	if (nelem < 0)
		return (set_errno(EINVAL));

	/*
	 * We keep the same number of load average statistics for processor
	 * sets as we do for the system as a whole.
	 */
	if (nelem > LOADAVG_NSTATS)
		nelem = LOADAVG_NSTATS;

	loadbuf = kmem_alloc(nelem * sizeof (int), KM_SLEEP);

	mutex_enter(&cpu_lock);
	error = cpupart_get_loadavg(pset, loadbuf, nelem);
	mutex_exit(&cpu_lock);
	if (!error && nelem && copyout(loadbuf, buf, nelem * sizeof (int)) != 0)
		error = EFAULT;

	kmem_free(loadbuf, nelem * sizeof (int));

	if (error)
		return (set_errno(error));
	else
		return (0);
}


/*
 * Return list of active processor sets, up to a maximum indicated by
 * numpsets.  The total number of processor sets is stored in the
 * location pointed to by numpsets.
 */
static int
pset_list(psetid_t *psetlist, uint_t *numpsets)
{
	uint_t user_npsets = 0;
	uint_t real_npsets;
	psetid_t *psets = NULL;
	int error = 0;

	if (numpsets != NULL) {
		if (copyin(numpsets, &user_npsets, sizeof (uint_t)) != 0)
			return (set_errno(EFAULT));
	}

	/*
	 * Get the list of all processor sets.  First we need to find
	 * out how many there are, so we can allocate a large enough
	 * buffer.
	 */
	mutex_enter(&cpu_lock);
	if (!INGLOBALZONE(curproc) && pool_pset_enabled()) {
		psetid_t psetid = zone_pset_get(curproc->p_zone);

		if (psetid == PS_NONE) {
			real_npsets = 0;
		} else {
			real_npsets = 1;
			psets = kmem_alloc(real_npsets * sizeof (psetid_t),
			    KM_SLEEP);
			psets[0] = psetid;
		}
	} else {
		real_npsets = cpupart_list(0, NULL, CP_ALL);
		if (real_npsets) {
			psets = kmem_alloc(real_npsets * sizeof (psetid_t),
			    KM_SLEEP);
			(void) cpupart_list(psets, real_npsets, CP_ALL);
		}
	}
	mutex_exit(&cpu_lock);

	if (user_npsets > real_npsets)
		user_npsets = real_npsets;

	if (numpsets != NULL) {
		if (copyout(&real_npsets, numpsets, sizeof (uint_t)) != 0)
			error = EFAULT;
		else if (psetlist != NULL && user_npsets != 0) {
			if (copyout(psets, psetlist,
			    user_npsets * sizeof (psetid_t)) != 0)
				error = EFAULT;
		}
	}

	if (real_npsets)
		kmem_free(psets, real_npsets * sizeof (psetid_t));

	if (error)
		return (set_errno(error));
	else
		return (0);
}

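/*
 * Set the attributes of a processor set.  PSET_NOESCAPE is the only
 * attribute accepted here; anything else is rejected by PSET_BADATTR().
 */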
static int
pset_setattr(psetid_t pset, uint_t attr)
{
	int error;

	if (secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));
	pool_lock();
	if (pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}
	if (pset == PS_QUERY || PSET_BADATTR(attr)) {
		pool_unlock();
		return (set_errno(EINVAL));
	}
	if ((error = cpupart_setattr(pset, attr)) != 0) {
		pool_unlock();
		return (set_errno(error));
	}
	pool_unlock();
	return (0);
}

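/*
 * Retrieve the attributes of a processor set and copy them out to the
 * caller.
 */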
static int
pset_getattr(psetid_t pset, uint_t *attrp)
{
	int error = 0;
	uint_t attr;

	if (pset == PS_QUERY)
		return (set_errno(EINVAL));
	if ((error = cpupart_getattr(pset, &attr)) != 0)
		return (set_errno(error));
	if (copyout(&attr, attrp, sizeof (uint_t)) != 0)
		return (set_errno(EFAULT));
	return (0);
}

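/*
 * System call entry point: dispatch on the subcode to the handlers
 * above.  The libc wrappers such as pset_create(3C), pset_assign(3C),
 * pset_bind(3C), and pset_info(3C) presumably funnel into this single
 * syscall with the matching PSET_* subcode (an assumption; the mapping
 * is not established by this file).
 */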
static int
pset(int subcode, long arg1, long arg2, long arg3, long arg4)
{
	switch (subcode) {
	case PSET_CREATE:
		return (pset_create((psetid_t *)arg1));
	case PSET_DESTROY:
		return (pset_destroy((psetid_t)arg1));
	case PSET_ASSIGN:
		return (pset_assign((psetid_t)arg1,
		    (processorid_t)arg2, (psetid_t *)arg3, 0));
	case PSET_INFO:
		return (pset_info((psetid_t)arg1, (int *)arg2,
		    (uint_t *)arg3, (processorid_t *)arg4));
	case PSET_BIND:
		return (pset_bind((psetid_t)arg1, (idtype_t)arg2,
		    (id_t)arg3, (psetid_t *)arg4));
	case PSET_GETLOADAVG:
		return (pset_getloadavg((psetid_t)arg1, (int *)arg2,
		    (int)arg3));
	case PSET_LIST:
		return (pset_list((psetid_t *)arg1, (uint_t *)arg2));
	case PSET_SETATTR:
		return (pset_setattr((psetid_t)arg1, (uint_t)arg2));
	case PSET_GETATTR:
		return (pset_getattr((psetid_t)arg1, (uint_t *)arg2));
	case PSET_ASSIGN_FORCED:
		return (pset_assign((psetid_t)arg1,
		    (processorid_t)arg2, (psetid_t *)arg3, 1));
	default:
		return (set_errno(EINVAL));
	}
}