/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/disp.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/modctl.h>
#include <sys/syscall.h>
#include <sys/task.h>
#include <sys/loadavg.h>
#include <sys/fss.h>
#include <sys/pool.h>
#include <sys/pool_pset.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/contract/process_impl.h>

static int	pset(int, long, long, long, long);

static struct sysent pset_sysent = {
	5,
	SE_ARGC | SE_NOUNLOAD,
	(int (*)())pset,
};

static struct modlsys modlsys = {
	&mod_syscallops, "processor sets", &pset_sysent
};

#ifdef _SYSCALL32_IMPL
static struct modlsys modlsys32 = {
	&mod_syscallops32, "32-bit pset(2) syscall", &pset_sysent
};
#endif

static struct modlinkage modlinkage = {
	MODREV_1,
	&modlsys,
#ifdef _SYSCALL32_IMPL
	&modlsys32,
#endif
	NULL
};

#define	PSET_BADATTR(attr)	((~PSET_NOESCAPE) & (attr))

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

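/*
 * Create a new, empty processor set and copy its id out to *psetp.
 * Requires the processor-set privilege (secpolicy_pset) and is not
 * supported while pools are enabled, since processor sets are then
 * administered through the pools framework.
 */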
static int
pset_create(psetid_t *psetp)
{
	psetid_t newpset;
	int error;

	if (secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}
	error = cpupart_create(&newpset);
	if (error) {
		pool_unlock();
		return (set_errno(error));
	}
	if (copyout(&newpset, psetp, sizeof (psetid_t)) != 0) {
		(void) cpupart_destroy(newpset);
		pool_unlock();
		return (set_errno(EFAULT));
	}
	pool_unlock();
	return (error);
}

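/*
 * Destroy the given processor set, subject to the same privilege and
 * pools-enabled checks as pset_create().  The actual teardown is done
 * by cpupart_destroy().
 */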
static int
pset_destroy(psetid_t pset)
{
	int error;

	if (secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}
	error = cpupart_destroy(pset);
	pool_unlock();
	if (error)
		return (set_errno(error));
	else
		return (0);
}

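/*
 * Assign a CPU to a processor set, or just report its current
 * assignment when pset is PS_QUERY.  The previous assignment is
 * optionally copied out through 'opset'.  'forced' is non-zero for
 * the PSET_ASSIGN_FORCED subcode and is passed straight through to
 * cpupart_attach_cpu().
 */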
static int
pset_assign(psetid_t pset, processorid_t cpuid, psetid_t *opset, int forced)
{
	psetid_t oldpset;
	int	error = 0;
	cpu_t	*cp;

	if (pset != PS_QUERY && secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pset != PS_QUERY && pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}

	mutex_enter(&cpu_lock);
	if ((cp = cpu_get(cpuid)) == NULL) {
		mutex_exit(&cpu_lock);
		pool_unlock();
		return (set_errno(EINVAL));
	}

	oldpset = cpupart_query_cpu(cp);

	if (pset != PS_QUERY)
		error = cpupart_attach_cpu(pset, cp, forced);
	mutex_exit(&cpu_lock);
	pool_unlock();

	if (error)
		return (set_errno(error));

	if (opset != NULL)
		if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
			return (set_errno(EFAULT));

	return (0);
}

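/*
 * Return the type, CPU count, and CPU list for a processor set.  The
 * caller passes the size of its buffer in through *numcpusp; if the
 * buffer is too small only that many CPU ids are copied out, but the
 * real count is still returned.
 */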
static int
pset_info(psetid_t pset, int *typep, uint_t *numcpusp,
    processorid_t *cpulistp)
{
	int pset_type;
	uint_t user_ncpus = 0, real_ncpus, copy_ncpus;
	processorid_t *pset_cpus = NULL;
	int error = 0;

	if (numcpusp != NULL) {
		if (copyin(numcpusp, &user_ncpus, sizeof (uint_t)) != 0)
			return (set_errno(EFAULT));
	}

	if (user_ncpus > max_ncpus)	/* sanity check */
		user_ncpus = max_ncpus;
	if (user_ncpus != 0 && cpulistp != NULL)
		pset_cpus = kmem_alloc(sizeof (processorid_t) * user_ncpus,
		    KM_SLEEP);

	real_ncpus = user_ncpus;
	if ((error = cpupart_get_cpus(&pset, pset_cpus, &real_ncpus)) != 0)
		goto out;

	/*
	 * Now copyout the information about this processor set.
	 */

	/*
	 * Get number of cpus to copy back.  If the user didn't pass in
	 * a big enough buffer, only copy back as many cpus as fits in
	 * the buffer but copy back the real number of cpus.
	 */

	if (user_ncpus != 0 && cpulistp != NULL) {
		copy_ncpus = MIN(real_ncpus, user_ncpus);
		if (copyout(pset_cpus, cpulistp,
		    sizeof (processorid_t) * copy_ncpus) != 0) {
			error = EFAULT;
			goto out;
		}
	}
	if (pset_cpus != NULL)
		kmem_free(pset_cpus, sizeof (processorid_t) * user_ncpus);
	if (typep != NULL) {
		if (pset == PS_NONE)
			pset_type = PS_NONE;
		else
			pset_type = PS_PRIVATE;
		if (copyout(&pset_type, typep, sizeof (int)) != 0)
			return (set_errno(EFAULT));
	}
	if (numcpusp != NULL)
		if (copyout(&real_ncpus, numcpusp, sizeof (uint_t)) != 0)
			return (set_errno(EFAULT));
	return (0);

out:
	if (pset_cpus != NULL)
		kmem_free(pset_cpus, sizeof (processorid_t) * user_ncpus);
	return (set_errno(error));
}

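/*
 * Bind a single thread to a processor set, or query or adjust its
 * existing binding.  PS_SOFT and PS_HARD only change the binding
 * flavor; PS_QUERY and PS_QUERY_TYPE report the current binding and
 * its flavor without changing anything.  The caller must hold
 * pool_lock, cpu_lock, and the target process's p_lock.
 */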
static int
pset_bind_thread(kthread_t *tp, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;

	ASSERT(pool_lock_held());
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));

	*oldpset = tp->t_bind_pset;

	switch (pset) {
	case PS_SOFT:
		TB_PSET_SOFT_SET(tp);
		break;

	case PS_HARD:
		TB_PSET_HARD_SET(tp);
		break;

	case PS_QUERY:
		break;

	case PS_QUERY_TYPE:
		*oldpset = TB_PSET_IS_SOFT(tp) ? PS_SOFT : PS_HARD;
		break;

	default:
		/*
		 * Must have the same UID as the target process or
		 * have PRIV_PROC_OWNER privilege.
		 */
		if (!hasprocperm(tp->t_cred, CRED()))
			return (EPERM);
		/*
		 * Unbinding of an unbound thread should always succeed.
		 */
		if (*oldpset == PS_NONE && pset == PS_NONE)
			return (0);
		/*
		 * Only privileged processes can move threads from psets with
		 * PSET_NOESCAPE attribute.
		 */
		if ((tp->t_cpupart->cp_attr & PSET_NOESCAPE) &&
		    secpolicy_pset(CRED()) != 0)
			return (EPERM);
		if ((error = cpupart_bind_thread(tp, pset, 0,
		    projbuf, zonebuf)) == 0)
			tp->t_bind_pset = pset;

		break;
	}

	return (error);
}

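/*
 * Bind every thread in a process by applying pset_bind_thread() to
 * each one.  Kernel (SSYS) processes are rejected except for pure
 * queries.  The first error encountered is preserved, but the walk
 * continues so the remaining threads are still handled.
 */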
static int
pset_bind_process(proc_t *pp, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;
	kthread_t *tp;

	/* skip kernel processes */
	if ((pset != PS_QUERY) && pp->p_flag & SSYS) {
		*oldpset = PS_NONE;
		return (ENOTSUP);
	}

	mutex_enter(&pp->p_lock);
	tp = pp->p_tlist;
	if (tp != NULL) {
		do {
			int rval;

			rval = pset_bind_thread(tp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		} while ((tp = tp->t_forw) != pp->p_tlist);
	} else
		error = ESRCH;
	mutex_exit(&pp->p_lock);

	return (error);
}

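/*
 * Bind every non-system process in a task.  Called with pidlock held;
 * the first error encountered is preserved.
 */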
static int
pset_bind_task(task_t *tk, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	if ((pp = tk->tk_memb_list) == NULL) {
		return (ESRCH);
	}

	do {
		int rval;

		if (!(pp->p_flag & SSYS)) {
			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	} while ((pp = pp->p_tasknext) != tk->tk_memb_list);

	return (error);
}

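/*
 * Walk the active process list and bind every non-system process
 * whose task belongs to the given project.  Called with pidlock held.
 */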
static int
pset_bind_project(kproject_t *kpj, psetid_t pset, psetid_t *oldpset,
    void *projbuf, void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	for (pp = practive; pp != NULL; pp = pp->p_next) {
		if (pp->p_tlist == NULL)
			continue;
		if (pp->p_task->tk_proj == kpj && !(pp->p_flag & SSYS)) {
			int rval;

			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	}

	return (error);
}

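/*
 * Walk the active process list and bind every non-system process that
 * belongs to the given zone.  Called with pidlock held.
 */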
static int
pset_bind_zone(zone_t *zptr, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	for (pp = practive; pp != NULL; pp = pp->p_next) {
		if (pp->p_zone == zptr && !(pp->p_flag & SSYS)) {
			int rval;

			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	}

	return (error);
}

/*
 * Unbind all threads from the specified processor set, or from all
 * processor sets.
 */
static int
pset_unbind(psetid_t pset, void *projbuf, void *zonebuf, idtype_t idtype)
{
	psetid_t olbind;
	kthread_t *tp;
	int error = 0;
	int rval;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (idtype == P_PSETID && cpupart_find(pset) == NULL)
		return (EINVAL);

	mutex_enter(&pidlock);
	for (pp = practive; pp != NULL; pp = pp->p_next) {
		mutex_enter(&pp->p_lock);
		tp = pp->p_tlist;
		/*
		 * Skip zombies and kernel processes, and processes in
		 * other zones, if called from a non-global zone.
		 */
		if (tp == NULL || (pp->p_flag & SSYS) ||
		    !HASZONEACCESS(curproc, pp->p_zone->zone_id)) {
			mutex_exit(&pp->p_lock);
			continue;
		}
		do {
			if ((idtype == P_PSETID && tp->t_bind_pset != pset) ||
			    (idtype == P_ALL && tp->t_bind_pset == PS_NONE))
				continue;
			rval = pset_bind_thread(tp, PS_NONE, &olbind,
			    projbuf, zonebuf);
			if (error == 0)
				error = rval;
		} while ((tp = tp->t_forw) != pp->p_tlist);
		mutex_exit(&pp->p_lock);
	}
	mutex_exit(&pidlock);
	return (error);
}

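/*
 * Walk the active process list and bind every process that is a
 * member of the given process contract.  Called with pidlock held.
 */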
static int
pset_bind_contract(cont_process_t *ctp, psetid_t pset, psetid_t *oldpset,
    void *projbuf, void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	for (pp = practive; pp != NULL; pp = pp->p_next) {
		if (pp->p_ct_process == ctp) {
			int rval;

			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	}

	return (error);
}

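/*
 * Top-level handler for PSET_BIND.  Validates the target processor
 * set, pre-allocates FSS buffers, and then dispatches on idtype to
 * bind an LWP, a process, a task, a project, a zone, or a process
 * contract, or to unbind by processor set id or unbind everything
 * (P_PSETID and P_ALL, global zone only).  The previous binding is
 * copied out through 'opset' on success.
 */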
static int
pset_bind(psetid_t pset, idtype_t idtype, id_t id, psetid_t *opset)
{
	kthread_t	*tp;
	proc_t		*pp;
	task_t		*tk;
	kproject_t	*kpj;
	contract_t	*ct;
	zone_t		*zptr;
	psetid_t	oldpset;
	int		error = 0;
	void		*projbuf, *zonebuf;

	pool_lock();
	if ((pset != PS_QUERY) && (pset != PS_SOFT) &&
	    (pset != PS_HARD) && (pset != PS_QUERY_TYPE)) {
		/*
		 * Check if the set actually exists before checking
		 * permissions.  This is the historical error
		 * precedence.  Note that if pset was PS_MYID, the
		 * cpupart_get_cpus call will change it to the
		 * processor set id of the caller (or PS_NONE if the
		 * caller is not bound to a processor set).
		 */
		if (pool_state == POOL_ENABLED) {
			pool_unlock();
			return (set_errno(ENOTSUP));
		}
		if (cpupart_get_cpus(&pset, NULL, NULL) != 0) {
			pool_unlock();
			return (set_errno(EINVAL));
		} else if (pset != PS_NONE && secpolicy_pset(CRED()) != 0) {
			pool_unlock();
			return (set_errno(EPERM));
		}
	}

	/*
	 * Pre-allocate enough buffers for FSS for all active projects
	 * and for all active zones on the system.  Unused buffers will
	 * be freed later by fss_freebuf().
	 */
	mutex_enter(&cpu_lock);
	projbuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_PROJ);
	zonebuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_ZONE);

	switch (idtype) {
	case P_LWPID:
		pp = curproc;
		mutex_enter(&pidlock);
		mutex_enter(&pp->p_lock);
		if (id == P_MYID) {
			tp = curthread;
		} else {
			if ((tp = idtot(pp, id)) == NULL) {
				mutex_exit(&pp->p_lock);
				mutex_exit(&pidlock);
				error = ESRCH;
				break;
			}
		}
		error = pset_bind_thread(tp, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pp->p_lock);
		mutex_exit(&pidlock);
		break;

	case P_PID:
		mutex_enter(&pidlock);
		if (id == P_MYID) {
			pp = curproc;
		} else if ((pp = prfind(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}
		error = pset_bind_process(pp, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		break;

	case P_TASKID:
		mutex_enter(&pidlock);
		if (id == P_MYID)
			id = curproc->p_task->tk_tkid;
		if ((tk = task_hold_by_id(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}
		error = pset_bind_task(tk, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		task_rele(tk);
		break;

	case P_PROJID:
		pp = curproc;
		if (id == P_MYID)
			id = curprojid();
		if ((kpj = project_hold_by_id(id, pp->p_zone,
		    PROJECT_HOLD_FIND)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_project(kpj, pset, &oldpset, projbuf,
		    zonebuf);
		mutex_exit(&pidlock);
		project_rele(kpj);
		break;

	case P_ZONEID:
		if (id == P_MYID)
			id = getzoneid();
		if ((zptr = zone_find_by_id(id)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_zone(zptr, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		zone_rele(zptr);
		break;

	case P_CTID:
		if (id == P_MYID)
			id = PRCTID(curproc);
		if ((ct = contract_type_ptr(process_type, id,
		    curproc->p_zone->zone_uniqid)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_contract(ct->ct_data, pset, &oldpset, projbuf,
		    zonebuf);
		mutex_exit(&pidlock);
		contract_rele(ct);
		break;

	case P_PSETID:
		if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
			error = EINVAL;
			break;
		}
		error = pset_unbind(id, projbuf, zonebuf, idtype);
		break;

	case P_ALL:
		if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
			error = EINVAL;
			break;
		}
		error = pset_unbind(PS_NONE, projbuf, zonebuf, idtype);
		break;

	default:
		error = EINVAL;
		break;
	}

	fss_freebuf(projbuf, FSS_ALLOC_PROJ);
	fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
	mutex_exit(&cpu_lock);
	pool_unlock();

	if (error != 0)
		return (set_errno(error));
	if (opset != NULL) {
		if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
			return (set_errno(EFAULT));
	}
	return (0);
}

/*
 * Report load average statistics for the specified processor set.
 */
static int
pset_getloadavg(psetid_t pset, int *buf, int nelem)
{
	int loadbuf[LOADAVG_NSTATS];
	int error = 0;

	if (nelem < 0)
		return (set_errno(EINVAL));

	/*
	 * We keep the same number of load average statistics for processor
	 * sets as we do for the system as a whole.
	 */
	if (nelem > LOADAVG_NSTATS)
		nelem = LOADAVG_NSTATS;

	mutex_enter(&cpu_lock);
	error = cpupart_get_loadavg(pset, loadbuf, nelem);
	mutex_exit(&cpu_lock);
	if (!error && nelem && copyout(loadbuf, buf, nelem * sizeof (int)) != 0)
		error = EFAULT;

	if (error)
		return (set_errno(error));
	else
		return (0);
}


/*
 * Return list of active processor sets, up to a maximum indicated by
 * numpsets.  The total number of processor sets is stored in the
 * location pointed to by numpsets.
 */
static int
pset_list(psetid_t *psetlist, uint_t *numpsets)
{
	uint_t user_npsets = 0;
	uint_t real_npsets;
	psetid_t *psets = NULL;
	int error = 0;

	if (numpsets != NULL) {
		if (copyin(numpsets, &user_npsets, sizeof (uint_t)) != 0)
			return (set_errno(EFAULT));
	}

	/*
	 * Get the list of all processor sets.  First we need to find
	 * out how many there are, so we can allocate a large enough
	 * buffer.
	 */
	mutex_enter(&cpu_lock);
	if (!INGLOBALZONE(curproc) && pool_pset_enabled()) {
		psetid_t psetid = zone_pset_get(curproc->p_zone);

		if (psetid == PS_NONE) {
			real_npsets = 0;
		} else {
			real_npsets = 1;
			psets = kmem_alloc(real_npsets * sizeof (psetid_t),
			    KM_SLEEP);
			psets[0] = psetid;
		}
	} else {
		real_npsets = cpupart_list(0, NULL, CP_ALL);
		if (real_npsets) {
			psets = kmem_alloc(real_npsets * sizeof (psetid_t),
			    KM_SLEEP);
			(void) cpupart_list(psets, real_npsets, CP_ALL);
		}
	}
	mutex_exit(&cpu_lock);

	if (user_npsets > real_npsets)
		user_npsets = real_npsets;

	if (numpsets != NULL) {
		if (copyout(&real_npsets, numpsets, sizeof (uint_t)) != 0)
			error = EFAULT;
		else if (psetlist != NULL && user_npsets != 0) {
			if (copyout(psets, psetlist,
			    user_npsets * sizeof (psetid_t)) != 0)
				error = EFAULT;
		}
	}

	if (real_npsets)
		kmem_free(psets, real_npsets * sizeof (psetid_t));

	if (error)
		return (set_errno(error));
	else
		return (0);
}

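/*
 * Set the attributes of a processor set; PSET_NOESCAPE is the only
 * attribute currently accepted (see PSET_BADATTR above).  Subject to
 * the usual privilege and pools-enabled checks.
 */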
static int
pset_setattr(psetid_t pset, uint_t attr)
{
	int error;

	if (secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));
	pool_lock();
	if (pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}
	if (pset == PS_QUERY || PSET_BADATTR(attr)) {
		pool_unlock();
		return (set_errno(EINVAL));
	}
	if ((error = cpupart_setattr(pset, attr)) != 0) {
		pool_unlock();
		return (set_errno(error));
	}
	pool_unlock();
	return (0);
}

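/*
 * Retrieve the attributes of a processor set and copy them out to
 * the caller.
 */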
static int
pset_getattr(psetid_t pset, uint_t *attrp)
{
	int error = 0;
	uint_t attr;

	if (pset == PS_QUERY)
		return (set_errno(EINVAL));
	if ((error = cpupart_getattr(pset, &attr)) != 0)
		return (set_errno(error));
	if (copyout(&attr, attrp, sizeof (uint_t)) != 0)
		return (set_errno(EFAULT));
	return (0);
}

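/*
 * pset(2) system call entry point: dispatch on the subcode to the
 * appropriate handler above.
 */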
static int
pset(int subcode, long arg1, long arg2, long arg3, long arg4)
{
	switch (subcode) {
	case PSET_CREATE:
		return (pset_create((psetid_t *)arg1));
	case PSET_DESTROY:
		return (pset_destroy((psetid_t)arg1));
	case PSET_ASSIGN:
		return (pset_assign((psetid_t)arg1,
		    (processorid_t)arg2, (psetid_t *)arg3, 0));
	case PSET_INFO:
		return (pset_info((psetid_t)arg1, (int *)arg2,
		    (uint_t *)arg3, (processorid_t *)arg4));
	case PSET_BIND:
		return (pset_bind((psetid_t)arg1, (idtype_t)arg2,
		    (id_t)arg3, (psetid_t *)arg4));
	case PSET_GETLOADAVG:
		return (pset_getloadavg((psetid_t)arg1, (int *)arg2,
		    (int)arg3));
	case PSET_LIST:
		return (pset_list((psetid_t *)arg1, (uint_t *)arg2));
	case PSET_SETATTR:
		return (pset_setattr((psetid_t)arg1, (uint_t)arg2));
	case PSET_GETATTR:
		return (pset_getattr((psetid_t)arg1, (uint_t *)arg2));
	case PSET_ASSIGN_FORCED:
		return (pset_assign((psetid_t)arg1,
		    (processorid_t)arg2, (psetid_t *)arg3, 1));
	default:
		return (set_errno(EINVAL));
	}
}