/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/disp.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/modctl.h>
#include <sys/syscall.h>
#include <sys/task.h>
#include <sys/loadavg.h>
#include <sys/fss.h>
#include <sys/pool.h>
#include <sys/pool_pset.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/contract/process_impl.h>

static int	pset(int, long, long, long, long);

static struct sysent pset_sysent = {
	5,
	SE_ARGC | SE_NOUNLOAD,
	(int (*)())pset,
};

static struct modlsys modlsys = {
	&mod_syscallops, "processor sets", &pset_sysent
};

#ifdef _SYSCALL32_IMPL
static struct modlsys modlsys32 = {
	&mod_syscallops32, "32-bit pset(2) syscall", &pset_sysent
};
#endif

static struct modlinkage modlinkage = {
	MODREV_1,
	&modlsys,
#ifdef _SYSCALL32_IMPL
	&modlsys32,
#endif
	NULL
};

#define	PSET_BADATTR(attr)	((~PSET_NOESCAPE) & (attr))

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

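/*
 * Create a new, empty processor set and copy its id out to *psetp.
 * Requires pset management privilege and fails with ENOTSUP while
 * resource pools are enabled.
 */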
static int
pset_create(psetid_t *psetp)
{
	psetid_t newpset;
	int error;

	if (secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}
	error = cpupart_create(&newpset);
	if (error) {
		pool_unlock();
		return (set_errno(error));
	}
	if (copyout(&newpset, psetp, sizeof (psetid_t)) != 0) {
		(void) cpupart_destroy(newpset);
		pool_unlock();
		return (set_errno(EFAULT));
	}
	pool_unlock();
	return (error);
}

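/*
 * Destroy the processor set identified by pset.  Requires pset
 * management privilege and is disallowed while resource pools are
 * enabled.
 */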
static int
pset_destroy(psetid_t pset)
{
	int error;

	if (secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}
	error = cpupart_destroy(pset);
	pool_unlock();
	if (error)
		return (set_errno(error));
	else
		return (0);
}

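/*
 * Assign the CPU identified by cpuid to the processor set pset, or
 * just look up the CPU's current set when pset is PS_QUERY.  The
 * previous set id is copied out through opset when it is non-NULL;
 * forced is passed through to cpupart_attach_cpu() on behalf of
 * PSET_ASSIGN_FORCED requests.
 */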
static int
pset_assign(psetid_t pset, processorid_t cpuid, psetid_t *opset, int forced)
{
	psetid_t oldpset;
	int	error = 0;
	cpu_t	*cp;

	if (pset != PS_QUERY && secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pset != PS_QUERY && pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}

	mutex_enter(&cpu_lock);
	if ((cp = cpu_get(cpuid)) == NULL) {
		mutex_exit(&cpu_lock);
		pool_unlock();
		return (set_errno(EINVAL));
	}

	oldpset = cpupart_query_cpu(cp);

	if (pset != PS_QUERY)
		error = cpupart_attach_cpu(pset, cp, forced);
	mutex_exit(&cpu_lock);
	pool_unlock();

	if (error)
		return (set_errno(error));

	if (opset != NULL)
		if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
			return (set_errno(EFAULT));

	return (0);
}

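/*
 * Return information about the processor set pset: its type (PS_NONE
 * or PS_PRIVATE), the number of CPUs it contains, and as much of its
 * CPU list as fits in the caller's buffer.  On return *numcpusp holds
 * the real CPU count even if the buffer was too small.
 */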
static int
pset_info(psetid_t pset, int *typep, uint_t *numcpusp,
    processorid_t *cpulistp)
{
	int pset_type;
	uint_t user_ncpus = 0, real_ncpus, copy_ncpus;
	processorid_t *pset_cpus = NULL;
	int error = 0;

	if (numcpusp != NULL) {
		if (copyin(numcpusp, &user_ncpus, sizeof (uint_t)) != 0)
			return (set_errno(EFAULT));
	}

	if (user_ncpus > max_ncpus)	/* sanity check */
		user_ncpus = max_ncpus;
	if (user_ncpus != 0 && cpulistp != NULL)
		pset_cpus = kmem_alloc(sizeof (processorid_t) * user_ncpus,
		    KM_SLEEP);

	real_ncpus = user_ncpus;
	if ((error = cpupart_get_cpus(&pset, pset_cpus, &real_ncpus)) != 0)
		goto out;

	/*
	 * Now copyout the information about this processor set.
	 */

	/*
	 * Get number of cpus to copy back.  If the user didn't pass in
	 * a big enough buffer, only copy back as many cpus as fits in
	 * the buffer but copy back the real number of cpus.
	 */

	if (user_ncpus != 0 && cpulistp != NULL) {
		copy_ncpus = MIN(real_ncpus, user_ncpus);
		if (copyout(pset_cpus, cpulistp,
		    sizeof (processorid_t) * copy_ncpus) != 0) {
			error = EFAULT;
			goto out;
		}
	}
	if (pset_cpus != NULL)
		kmem_free(pset_cpus, sizeof (processorid_t) * user_ncpus);
	if (typep != NULL) {
		if (pset == PS_NONE)
			pset_type = PS_NONE;
		else
			pset_type = PS_PRIVATE;
		if (copyout(&pset_type, typep, sizeof (int)) != 0)
			return (set_errno(EFAULT));
	}
	if (numcpusp != NULL)
		if (copyout(&real_ncpus, numcpusp, sizeof (uint_t)) != 0)
			return (set_errno(EFAULT));
	return (0);

out:
	if (pset_cpus != NULL)
		kmem_free(pset_cpus, sizeof (processorid_t) * user_ncpus);
	return (set_errno(error));
}

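/*
 * Bind a single thread to the processor set pset, or report its
 * current binding in *oldpset when pset is PS_QUERY.  The caller must
 * hold pool_lock, cpu_lock, and the thread's p_lock.  Binding requires
 * permission on the target process, and escaping a PSET_NOESCAPE set
 * additionally requires pset management privilege.
 */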
static int
pset_bind_thread(kthread_t *tp, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;

	ASSERT(pool_lock_held());
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));

	*oldpset = tp->t_bind_pset;
	if (pset != PS_QUERY) {
		/*
		 * Must have the same UID as the target process or
		 * have PRIV_PROC_OWNER privilege.
		 */
		if (!hasprocperm(tp->t_cred, CRED()))
			return (EPERM);
		/*
		 * Unbinding of an unbound thread should always succeed.
		 */
		if (*oldpset == PS_NONE && pset == PS_NONE)
			return (0);
		/*
		 * Only privileged processes can move threads from psets with
		 * PSET_NOESCAPE attribute.
		 */
		if ((tp->t_cpupart->cp_attr & PSET_NOESCAPE) &&
		    secpolicy_pset(CRED()) != 0)
			return (EPERM);
		if ((error = cpupart_bind_thread(tp, pset, 0,
		    projbuf, zonebuf)) == 0)
			tp->t_bind_pset = pset;
	}
	return (error);
}

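/*
 * Bind every thread in process pp to the processor set pset.  Kernel
 * (SSYS) processes are skipped for anything other than a query.  The
 * first error encountered is returned, but the remaining threads are
 * still processed.
 */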
static int
pset_bind_process(proc_t *pp, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;
	kthread_t *tp;

	/* skip kernel processes */
	if (pset != PS_QUERY && pp->p_flag & SSYS) {
		*oldpset = PS_NONE;
		return (0);
	}

	mutex_enter(&pp->p_lock);
	tp = pp->p_tlist;
	if (tp != NULL) {
		do {
			int rval;

			rval = pset_bind_thread(tp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		} while ((tp = tp->t_forw) != pp->p_tlist);
	} else
		error = ESRCH;
	mutex_exit(&pp->p_lock);

	return (error);
}

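/*
 * Bind every process in task tk to the processor set pset.  The caller
 * must hold pidlock; the first error encountered is returned after all
 * member processes have been visited.
 */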
static int
pset_bind_task(task_t *tk, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	if ((pp = tk->tk_memb_list) == NULL) {
		return (ESRCH);
	}

	do {
		int rval;

		rval = pset_bind_process(pp, pset, oldpset, projbuf, zonebuf);
		if (error == 0)
			error = rval;
	} while ((pp = pp->p_tasknext) != tk->tk_memb_list);

	return (error);
}

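/*
 * Bind every active process belonging to project kpj to the processor
 * set pset.  The caller must hold pidlock; processes with empty thread
 * lists are skipped.
 */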
static int
pset_bind_project(kproject_t *kpj, psetid_t pset, psetid_t *oldpset,
    void *projbuf, void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	for (pp = practive; pp != NULL; pp = pp->p_next) {
		if (pp->p_tlist == NULL)
			continue;
		if (pp->p_task->tk_proj == kpj) {
			int rval;

			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	}

	return (error);
}

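/*
 * Bind every process in zone zptr to the processor set pset.  The
 * caller must hold pidlock.
 */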
static int
pset_bind_zone(zone_t *zptr, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	for (pp = practive; pp != NULL; pp = pp->p_next) {
		if (pp->p_zone == zptr) {
			int rval;

			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	}

	return (error);
}

/*
 * Unbind all threads from the specified processor set, or from all
 * processor sets.
 */
static int
pset_unbind(psetid_t pset, void *projbuf, void *zonebuf, idtype_t idtype)
{
	psetid_t olbind;
	kthread_t *tp;
	int error = 0;
	int rval;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (idtype == P_PSETID && cpupart_find(pset) == NULL)
		return (EINVAL);

	mutex_enter(&pidlock);
	for (pp = practive; pp != NULL; pp = pp->p_next) {
		mutex_enter(&pp->p_lock);
		tp = pp->p_tlist;
		/*
		 * Skip zombies and kernel processes, and processes in
		 * other zones, if called from a non-global zone.
		 */
		if (tp == NULL || (pp->p_flag & SSYS) ||
		    !HASZONEACCESS(curproc, pp->p_zone->zone_id)) {
			mutex_exit(&pp->p_lock);
			continue;
		}
		do {
			if ((idtype == P_PSETID && tp->t_bind_pset != pset) ||
			    (idtype == P_ALL && tp->t_bind_pset == PS_NONE))
				continue;
			rval = pset_bind_thread(tp, PS_NONE, &olbind,
			    projbuf, zonebuf);
			if (error == 0)
				error = rval;
		} while ((tp = tp->t_forw) != pp->p_tlist);
		mutex_exit(&pp->p_lock);
	}
	mutex_exit(&pidlock);
	return (error);
}

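/*
 * Bind every process that is a member of process contract ctp to the
 * processor set pset.  The caller must hold pidlock.
 */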
static int
pset_bind_contract(cont_process_t *ctp, psetid_t pset, psetid_t *oldpset,
    void *projbuf, void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	for (pp = practive; pp != NULL; pp = pp->p_next) {
		if (pp->p_ct_process == ctp) {
			int rval;

			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	}

	return (error);
}

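/*
 * Worker for PSET_BIND.  Bind the LWP, process, task, project, zone,
 * or contract identified by (idtype, id) to processor set pset, or
 * unbind from a set (P_PSETID) or from all sets (P_ALL).  The previous
 * binding is copied out through opset when it is non-NULL.
 */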
static int
pset_bind(psetid_t pset, idtype_t idtype, id_t id, psetid_t *opset)
{
	kthread_t	*tp;
	proc_t		*pp;
	task_t		*tk;
	kproject_t	*kpj;
	contract_t	*ct;
	zone_t		*zptr;
	psetid_t	oldpset;
	int		error = 0;
	void		*projbuf, *zonebuf;

	pool_lock();
	if (pset != PS_QUERY) {
		/*
		 * Check if the set actually exists before checking
		 * permissions.  This is the historical error
		 * precedence.  Note that if pset was PS_MYID, the
		 * cpupart_get_cpus call will change it to the
		 * processor set id of the caller (or PS_NONE if the
		 * caller is not bound to a processor set).
		 */
		if (pool_state == POOL_ENABLED) {
			pool_unlock();
			return (set_errno(ENOTSUP));
		}
		if (cpupart_get_cpus(&pset, NULL, NULL) != 0) {
			pool_unlock();
			return (set_errno(EINVAL));
		} else if (pset != PS_NONE && secpolicy_pset(CRED()) != 0) {
			pool_unlock();
			return (set_errno(EPERM));
		}
	}

	/*
	 * Pre-allocate enough buffers for FSS for all active projects
	 * and for all active zones on the system.  Unused buffers will
	 * be freed later by fss_freebuf().
	 */
	mutex_enter(&cpu_lock);
	projbuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_PROJ);
	zonebuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_ZONE);

	switch (idtype) {
	case P_LWPID:
		pp = curproc;
		mutex_enter(&pidlock);
		mutex_enter(&pp->p_lock);
		if (id == P_MYID) {
			tp = curthread;
		} else {
			if ((tp = idtot(pp, id)) == NULL) {
				mutex_exit(&pp->p_lock);
				mutex_exit(&pidlock);
				error = ESRCH;
				break;
			}
		}
		error = pset_bind_thread(tp, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pp->p_lock);
		mutex_exit(&pidlock);
		break;

	case P_PID:
		mutex_enter(&pidlock);
		if (id == P_MYID) {
			pp = curproc;
		} else if ((pp = prfind(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}
		error = pset_bind_process(pp, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		break;

	case P_TASKID:
		mutex_enter(&pidlock);
		if (id == P_MYID)
			id = curproc->p_task->tk_tkid;
		if ((tk = task_hold_by_id(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}
		error = pset_bind_task(tk, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		task_rele(tk);
		break;

	case P_PROJID:
		pp = curproc;
		if (id == P_MYID)
			id = curprojid();
		if ((kpj = project_hold_by_id(id, pp->p_zone,
		    PROJECT_HOLD_FIND)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_project(kpj, pset, &oldpset, projbuf,
		    zonebuf);
		mutex_exit(&pidlock);
		project_rele(kpj);
		break;

	case P_ZONEID:
		if (id == P_MYID)
			id = getzoneid();
		if ((zptr = zone_find_by_id(id)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_zone(zptr, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		zone_rele(zptr);
		break;

	case P_CTID:
		if (id == P_MYID)
			id = PRCTID(curproc);
		if ((ct = contract_type_ptr(process_type, id,
		    curproc->p_zone->zone_uniqid)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_contract(ct->ct_data, pset, &oldpset, projbuf,
		    zonebuf);
		mutex_exit(&pidlock);
		contract_rele(ct);
		break;

	case P_PSETID:
		if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
			error = EINVAL;
			break;
		}
		error = pset_unbind(id, projbuf, zonebuf, idtype);
		break;

	case P_ALL:
		if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
			error = EINVAL;
			break;
		}
		error = pset_unbind(PS_NONE, projbuf, zonebuf, idtype);
		break;

	default:
		error = EINVAL;
		break;
	}

	fss_freebuf(projbuf, FSS_ALLOC_PROJ);
	fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
	mutex_exit(&cpu_lock);
	pool_unlock();

	if (error != 0)
		return (set_errno(error));
	if (opset != NULL) {
		if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
			return (set_errno(EFAULT));
	}
	return (0);
}

/*
 * Report load average statistics for the specified processor set.
 */
static int
pset_getloadavg(psetid_t pset, int *buf, int nelem)
{
	int loadbuf[LOADAVG_NSTATS];
	int error = 0;

	if (nelem < 0)
		return (set_errno(EINVAL));

	/*
	 * We keep the same number of load average statistics for processor
	 * sets as we do for the system as a whole.
	 */
	if (nelem > LOADAVG_NSTATS)
		nelem = LOADAVG_NSTATS;

	mutex_enter(&cpu_lock);
	error = cpupart_get_loadavg(pset, loadbuf, nelem);
	mutex_exit(&cpu_lock);
	if (!error && nelem && copyout(loadbuf, buf, nelem * sizeof (int)) != 0)
		error = EFAULT;

	if (error)
		return (set_errno(error));
	else
		return (0);
}


/*
 * Return list of active processor sets, up to a maximum indicated by
 * numpsets.  The total number of processor sets is stored in the
 * location pointed to by numpsets.
 */
static int
pset_list(psetid_t *psetlist, uint_t *numpsets)
{
	uint_t user_npsets = 0;
	uint_t real_npsets;
	psetid_t *psets = NULL;
	int error = 0;

	if (numpsets != NULL) {
		if (copyin(numpsets, &user_npsets, sizeof (uint_t)) != 0)
			return (set_errno(EFAULT));
	}

	/*
	 * Get the list of all processor sets.  First we need to find
	 * out how many there are, so we can allocate a large enough
	 * buffer.
	 */
	mutex_enter(&cpu_lock);
	if (!INGLOBALZONE(curproc) && pool_pset_enabled()) {
		psetid_t psetid = zone_pset_get(curproc->p_zone);

		if (psetid == PS_NONE) {
			real_npsets = 0;
		} else {
			real_npsets = 1;
			psets = kmem_alloc(real_npsets * sizeof (psetid_t),
			    KM_SLEEP);
			psets[0] = psetid;
		}
	} else {
		real_npsets = cpupart_list(0, NULL, CP_ALL);
		if (real_npsets) {
			psets = kmem_alloc(real_npsets * sizeof (psetid_t),
			    KM_SLEEP);
			(void) cpupart_list(psets, real_npsets, CP_ALL);
		}
	}
	mutex_exit(&cpu_lock);

	if (user_npsets > real_npsets)
		user_npsets = real_npsets;

	if (numpsets != NULL) {
		if (copyout(&real_npsets, numpsets, sizeof (uint_t)) != 0)
			error = EFAULT;
		else if (psetlist != NULL && user_npsets != 0) {
			if (copyout(psets, psetlist,
			    user_npsets * sizeof (psetid_t)) != 0)
				error = EFAULT;
		}
	}

	if (real_npsets)
		kmem_free(psets, real_npsets * sizeof (psetid_t));

	if (error)
		return (set_errno(error));
	else
		return (0);
}

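/*
 * Set the attributes of the processor set pset; PSET_BADATTR rejects
 * anything other than PSET_NOESCAPE.  Requires pset management
 * privilege and is disallowed while resource pools are enabled.
 */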
static int
pset_setattr(psetid_t pset, uint_t attr)
{
	int error;

	if (secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));
	pool_lock();
	if (pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}
	if (pset == PS_QUERY || PSET_BADATTR(attr)) {
		pool_unlock();
		return (set_errno(EINVAL));
	}
	if ((error = cpupart_setattr(pset, attr)) != 0) {
		pool_unlock();
		return (set_errno(error));
	}
	pool_unlock();
	return (0);
}

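/*
 * Copy the attributes of the processor set pset out to *attrp.
 */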
static int
pset_getattr(psetid_t pset, uint_t *attrp)
{
	int error = 0;
	uint_t attr;

	if (pset == PS_QUERY)
		return (set_errno(EINVAL));
	if ((error = cpupart_getattr(pset, &attr)) != 0)
		return (set_errno(error));
	if (copyout(&attr, attrp, sizeof (uint_t)) != 0)
		return (set_errno(EFAULT));
	return (0);
}

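/*
 * System call handler for pset(2).  Dispatch on the subcode to the
 * appropriate worker routine, casting the generic long arguments to
 * the types each operation expects.
 */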
static int
pset(int subcode, long arg1, long arg2, long arg3, long arg4)
{
	switch (subcode) {
	case PSET_CREATE:
		return (pset_create((psetid_t *)arg1));
	case PSET_DESTROY:
		return (pset_destroy((psetid_t)arg1));
	case PSET_ASSIGN:
		return (pset_assign((psetid_t)arg1,
		    (processorid_t)arg2, (psetid_t *)arg3, 0));
	case PSET_INFO:
		return (pset_info((psetid_t)arg1, (int *)arg2,
		    (uint_t *)arg3, (processorid_t *)arg4));
	case PSET_BIND:
		return (pset_bind((psetid_t)arg1, (idtype_t)arg2,
		    (id_t)arg3, (psetid_t *)arg4));
	case PSET_GETLOADAVG:
		return (pset_getloadavg((psetid_t)arg1, (int *)arg2,
		    (int)arg3));
	case PSET_LIST:
		return (pset_list((psetid_t *)arg1, (uint_t *)arg2));
	case PSET_SETATTR:
		return (pset_setattr((psetid_t)arg1, (uint_t)arg2));
	case PSET_GETATTR:
		return (pset_getattr((psetid_t)arg1, (uint_t *)arg2));
	case PSET_ASSIGN_FORCED:
		return (pset_assign((psetid_t)arg1,
		    (processorid_t)arg2, (psetid_t *)arg3, 1));
	default:
		return (set_errno(EINVAL));
	}
}