/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/disp.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/modctl.h>
#include <sys/syscall.h>
#include <sys/task.h>
#include <sys/loadavg.h>
#include <sys/fss.h>
#include <sys/pool.h>
#include <sys/pool_pset.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/contract/process_impl.h>

static int	pset(int, long, long, long, long);

static struct sysent pset_sysent = {
	5,
	SE_ARGC | SE_NOUNLOAD,
	(int (*)())pset,
};

static struct modlsys modlsys = {
	&mod_syscallops, "processor sets", &pset_sysent
};

#ifdef _SYSCALL32_IMPL
static struct modlsys modlsys32 = {
	&mod_syscallops32, "32-bit pset(2) syscall", &pset_sysent
};
#endif

static struct modlinkage modlinkage = {
	MODREV_1,
	&modlsys,
#ifdef _SYSCALL32_IMPL
	&modlsys32,
#endif
	NULL
};

#define	PSET_BADATTR(attr)	((~PSET_NOESCAPE) & (attr))

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Create a new, empty processor set and copy its id out to *psetp.
 */
static int
pset_create(psetid_t *psetp)
{
	psetid_t newpset;
	int error;

	if (secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}
	error = cpupart_create(&newpset);
	if (error) {
		pool_unlock();
		return (set_errno(error));
	}
	if (copyout(&newpset, psetp, sizeof (psetid_t)) != 0) {
		(void) cpupart_destroy(newpset);
		pool_unlock();
		return (set_errno(EFAULT));
	}
	pool_unlock();
	return (error);
}

/*
 * Destroy the processor set with the given id.
 */
static int
pset_destroy(psetid_t pset)
{
	int error;

	if (secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}
	error = cpupart_destroy(pset);
	pool_unlock();
	if (error)
		return (set_errno(error));
	else
		return (0);
}

/*
 * Assign CPU 'cpuid' to processor set 'pset' (or just look up its current
 * set when 'pset' is PS_QUERY), returning the previous set in *opset.
 */
static int
pset_assign(psetid_t pset, processorid_t cpuid, psetid_t *opset, int forced)
{
	psetid_t oldpset;
	int	error = 0;
	cpu_t	*cp;

	if (pset != PS_QUERY && secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pset != PS_QUERY && pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}

	mutex_enter(&cpu_lock);
	if ((cp = cpu_get(cpuid)) == NULL) {
		mutex_exit(&cpu_lock);
		pool_unlock();
		return (set_errno(EINVAL));
	}

	oldpset = cpupart_query_cpu(cp);

	if (pset != PS_QUERY)
		error = cpupart_attach_cpu(pset, cp, forced);
	mutex_exit(&cpu_lock);
	pool_unlock();

	if (error)
		return (set_errno(error));

	if (opset != NULL)
		if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
			return (set_errno(EFAULT));

	return (0);
}

/*
 * Return the type, the number of CPUs, and the CPU list of the given
 * processor set to the caller.
 */
static int
pset_info(psetid_t pset, int *typep, uint_t *numcpusp,
    processorid_t *cpulistp)
{
	int pset_type;
	uint_t user_ncpus = 0, real_ncpus, copy_ncpus;
	processorid_t *pset_cpus = NULL;
	int error = 0;

	if (numcpusp != NULL) {
		if (copyin(numcpusp, &user_ncpus, sizeof (uint_t)) != 0)
			return (set_errno(EFAULT));
	}

	if (user_ncpus > max_ncpus)	/* sanity check */
		user_ncpus = max_ncpus;
	if (user_ncpus != 0 && cpulistp != NULL)
		pset_cpus = kmem_alloc(sizeof (processorid_t) * user_ncpus,
		    KM_SLEEP);

	real_ncpus = user_ncpus;
	if ((error = cpupart_get_cpus(&pset, pset_cpus, &real_ncpus)) != 0)
		goto out;

	/*
	 * Now copyout the information about this processor set.
	 */

	/*
	 * Get number of cpus to copy back.  If the user didn't pass in
	 * a big enough buffer, only copy back as many cpus as fits in
	 * the buffer but copy back the real number of cpus.
	 */

	if (user_ncpus != 0 && cpulistp != NULL) {
		copy_ncpus = MIN(real_ncpus, user_ncpus);
		if (copyout(pset_cpus, cpulistp,
		    sizeof (processorid_t) * copy_ncpus) != 0) {
			error = EFAULT;
			goto out;
		}
	}
	if (pset_cpus != NULL)
		kmem_free(pset_cpus, sizeof (processorid_t) * user_ncpus);
	if (typep != NULL) {
		if (pset == PS_NONE)
			pset_type = PS_NONE;
		else
			pset_type = PS_PRIVATE;
		if (copyout(&pset_type, typep, sizeof (int)) != 0)
			return (set_errno(EFAULT));
	}
	if (numcpusp != NULL)
		if (copyout(&real_ncpus, numcpusp, sizeof (uint_t)) != 0)
			return (set_errno(EFAULT));
	return (0);

out:
	if (pset_cpus != NULL)
		kmem_free(pset_cpus, sizeof (processorid_t) * user_ncpus);
	return (set_errno(error));
}

/*
 * Change (or query) the processor set binding of a single thread.  The
 * caller must hold pool_lock, cpu_lock, and the target's p_lock.
 */
static int
pset_bind_thread(kthread_t *tp, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;

	ASSERT(pool_lock_held());
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));

	*oldpset = tp->t_bind_pset;

	switch (pset) {
	case PS_SOFT:
		TB_PSET_SOFT_SET(tp);
		break;

	case PS_HARD:
		TB_PSET_HARD_SET(tp);
		break;

	case PS_QUERY:
		break;

	case PS_QUERY_TYPE:
		*oldpset = TB_PSET_IS_SOFT(tp) ? PS_SOFT : PS_HARD;
		break;

	default:
		/*
		 * Must have the same UID as the target process or
		 * have PRIV_PROC_OWNER privilege.
		 */
		if (!hasprocperm(tp->t_cred, CRED()))
			return (EPERM);
		/*
		 * Unbinding of an unbound thread should always succeed.
		 */
		if (*oldpset == PS_NONE && pset == PS_NONE)
			return (0);
		/*
		 * Only privileged processes can move threads from psets with
		 * PSET_NOESCAPE attribute.
		 */
		if ((tp->t_cpupart->cp_attr & PSET_NOESCAPE) &&
		    secpolicy_pset(CRED()) != 0)
			return (EPERM);
		if ((error = cpupart_bind_thread(tp, pset, 0,
		    projbuf, zonebuf)) == 0)
			tp->t_bind_pset = pset;

		break;
	}

	return (error);
}

/*
 * Apply pset_bind_thread() to every thread of a process.
 */
static int
pset_bind_process(proc_t *pp, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;
	kthread_t *tp;

	/* skip kernel processes */
	if ((pset != PS_QUERY) && pp->p_flag & SSYS) {
		*oldpset = PS_NONE;
		return (0);
	}

	mutex_enter(&pp->p_lock);
	tp = pp->p_tlist;
	if (tp != NULL) {
		do {
			int rval;

			rval = pset_bind_thread(tp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		} while ((tp = tp->t_forw) != pp->p_tlist);
	} else
		error = ESRCH;
	mutex_exit(&pp->p_lock);

	return (error);
}

/*
 * Apply the binding to every process in a task.
 */
static int
pset_bind_task(task_t *tk, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	if ((pp = tk->tk_memb_list) == NULL) {
		return (ESRCH);
	}

	do {
		int rval;

		rval = pset_bind_process(pp, pset, oldpset, projbuf, zonebuf);
		if (error == 0)
			error = rval;
	} while ((pp = pp->p_tasknext) != tk->tk_memb_list);

	return (error);
}

/*
 * Apply the binding to every process in a project.
 */
static int
pset_bind_project(kproject_t *kpj, psetid_t pset, psetid_t *oldpset,
    void *projbuf, void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	for (pp = practive; pp != NULL; pp = pp->p_next) {
		if (pp->p_tlist == NULL)
			continue;
		if (pp->p_task->tk_proj == kpj) {
			int rval;

			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	}

	return (error);
}

/*
 * Apply the binding to every process in a zone.
 */
static int
pset_bind_zone(zone_t *zptr, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	for (pp = practive; pp != NULL; pp = pp->p_next) {
		if (pp->p_zone == zptr) {
			int rval;

			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	}

	return (error);
}

/*
 * Unbind all threads from the specified processor set, or from all
 * processor sets.
 */
static int
pset_unbind(psetid_t pset, void *projbuf, void *zonebuf, idtype_t idtype)
{
	psetid_t olbind;
	kthread_t *tp;
	int error = 0;
	int rval;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (idtype == P_PSETID && cpupart_find(pset) == NULL)
		return (EINVAL);

	mutex_enter(&pidlock);
	for (pp = practive; pp != NULL; pp = pp->p_next) {
		mutex_enter(&pp->p_lock);
		tp = pp->p_tlist;
		/*
		 * Skip zombies and kernel processes, and processes in
		 * other zones, if called from a non-global zone.
		 */
		if (tp == NULL || (pp->p_flag & SSYS) ||
		    !HASZONEACCESS(curproc, pp->p_zone->zone_id)) {
			mutex_exit(&pp->p_lock);
			continue;
		}
		do {
			if ((idtype == P_PSETID && tp->t_bind_pset != pset) ||
			    (idtype == P_ALL && tp->t_bind_pset == PS_NONE))
				continue;
			rval = pset_bind_thread(tp, PS_NONE, &olbind,
			    projbuf, zonebuf);
			if (error == 0)
				error = rval;
		} while ((tp = tp->t_forw) != pp->p_tlist);
		mutex_exit(&pp->p_lock);
	}
	mutex_exit(&pidlock);
	return (error);
}

/*
 * Apply the binding to every process in a process contract.
 */
static int
pset_bind_contract(cont_process_t *ctp, psetid_t pset, psetid_t *oldpset,
    void *projbuf, void *zonebuf)
{
	int error = 0;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&pidlock));

	for (pp = practive; pp != NULL; pp = pp->p_next) {
		if (pp->p_ct_process == ctp) {
			int rval;

			rval = pset_bind_process(pp, pset, oldpset, projbuf,
			    zonebuf);
			if (error == 0)
				error = rval;
		}
	}

	return (error);
}

/*
 * Top-level handler for PSET_BIND: bind (or query the binding of) the
 * threads identified by (idtype, id) to processor set 'pset'.
 */
static int
pset_bind(psetid_t pset, idtype_t idtype, id_t id, psetid_t *opset)
{
	kthread_t	*tp;
	proc_t		*pp;
	task_t		*tk;
	kproject_t	*kpj;
	contract_t	*ct;
	zone_t		*zptr;
	psetid_t	oldpset;
	int		error = 0;
	void		*projbuf, *zonebuf;

	pool_lock();
	if ((pset != PS_QUERY) && (pset != PS_SOFT) &&
	    (pset != PS_HARD) && (pset != PS_QUERY_TYPE)) {
		/*
		 * Check if the set actually exists before checking
		 * permissions.  This is the historical error
		 * precedence.  Note that if pset was PS_MYID, the
		 * cpupart_get_cpus call will change it to the
		 * processor set id of the caller (or PS_NONE if the
		 * caller is not bound to a processor set).
		 */
		if (pool_state == POOL_ENABLED) {
			pool_unlock();
			return (set_errno(ENOTSUP));
		}
		if (cpupart_get_cpus(&pset, NULL, NULL) != 0) {
			pool_unlock();
			return (set_errno(EINVAL));
		} else if (pset != PS_NONE && secpolicy_pset(CRED()) != 0) {
			pool_unlock();
			return (set_errno(EPERM));
		}
	}

	/*
	 * Pre-allocate enough buffers for FSS for all active projects
	 * and for all active zones on the system.  Unused buffers will
	 * be freed later by fss_freebuf().
	 */
	mutex_enter(&cpu_lock);
	projbuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_PROJ);
	zonebuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_ZONE);

	switch (idtype) {
	case P_LWPID:
		pp = curproc;
		mutex_enter(&pidlock);
		mutex_enter(&pp->p_lock);
		if (id == P_MYID) {
			tp = curthread;
		} else {
			if ((tp = idtot(pp, id)) == NULL) {
				mutex_exit(&pp->p_lock);
				mutex_exit(&pidlock);
				error = ESRCH;
				break;
			}
		}
		error = pset_bind_thread(tp, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pp->p_lock);
		mutex_exit(&pidlock);
		break;

	case P_PID:
		mutex_enter(&pidlock);
		if (id == P_MYID) {
			pp = curproc;
		} else if ((pp = prfind(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}
		error = pset_bind_process(pp, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		break;

	case P_TASKID:
		mutex_enter(&pidlock);
		if (id == P_MYID)
			id = curproc->p_task->tk_tkid;
		if ((tk = task_hold_by_id(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}
		error = pset_bind_task(tk, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		task_rele(tk);
		break;

	case P_PROJID:
		pp = curproc;
		if (id == P_MYID)
			id = curprojid();
		if ((kpj = project_hold_by_id(id, pp->p_zone,
		    PROJECT_HOLD_FIND)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_project(kpj, pset, &oldpset, projbuf,
		    zonebuf);
		mutex_exit(&pidlock);
		project_rele(kpj);
		break;

	case P_ZONEID:
		if (id == P_MYID)
			id = getzoneid();
		if ((zptr = zone_find_by_id(id)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_zone(zptr, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		zone_rele(zptr);
		break;

	case P_CTID:
		if (id == P_MYID)
			id = PRCTID(curproc);
		if ((ct = contract_type_ptr(process_type, id,
		    curproc->p_zone->zone_uniqid)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_contract(ct->ct_data, pset, &oldpset, projbuf,
		    zonebuf);
		mutex_exit(&pidlock);
		contract_rele(ct);
		break;

	case P_PSETID:
		if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
			error = EINVAL;
			break;
		}
		error = pset_unbind(id, projbuf, zonebuf, idtype);
		break;

	case P_ALL:
		if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
			error = EINVAL;
			break;
		}
		error = pset_unbind(PS_NONE, projbuf, zonebuf, idtype);
		break;

	default:
		error = EINVAL;
		break;
	}

	fss_freebuf(projbuf, FSS_ALLOC_PROJ);
	fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
	mutex_exit(&cpu_lock);
	pool_unlock();

	if (error != 0)
		return (set_errno(error));
	if (opset != NULL) {
		if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
			return (set_errno(EFAULT));
	}
	return (0);
}

/*
 * Report load average statistics for the specified processor set.
 */
static int
pset_getloadavg(psetid_t pset, int *buf, int nelem)
{
	int loadbuf[LOADAVG_NSTATS];
	int error = 0;

	if (nelem < 0)
		return (set_errno(EINVAL));

	/*
	 * We keep the same number of load average statistics for processor
	 * sets as we do for the system as a whole.
	 */
	if (nelem > LOADAVG_NSTATS)
		nelem = LOADAVG_NSTATS;

	mutex_enter(&cpu_lock);
	error = cpupart_get_loadavg(pset, loadbuf, nelem);
	mutex_exit(&cpu_lock);
	if (!error && nelem && copyout(loadbuf, buf, nelem * sizeof (int)) != 0)
		error = EFAULT;

	if (error)
		return (set_errno(error));
	else
		return (0);
}


/*
 * Return list of active processor sets, up to a maximum indicated by
 * numpsets.  The total number of processor sets is stored in the
 * location pointed to by numpsets.
 */
static int
pset_list(psetid_t *psetlist, uint_t *numpsets)
{
	uint_t user_npsets = 0;
	uint_t real_npsets;
	psetid_t *psets = NULL;
	int error = 0;

	if (numpsets != NULL) {
		if (copyin(numpsets, &user_npsets, sizeof (uint_t)) != 0)
			return (set_errno(EFAULT));
	}

	/*
	 * Get the list of all processor sets.  First we need to find
	 * out how many there are, so we can allocate a large enough
	 * buffer.
	 */
	mutex_enter(&cpu_lock);
	if (!INGLOBALZONE(curproc) && pool_pset_enabled()) {
		psetid_t psetid = zone_pset_get(curproc->p_zone);

		if (psetid == PS_NONE) {
			real_npsets = 0;
		} else {
			real_npsets = 1;
			psets = kmem_alloc(real_npsets * sizeof (psetid_t),
			    KM_SLEEP);
			psets[0] = psetid;
		}
	} else {
		real_npsets = cpupart_list(0, NULL, CP_ALL);
		if (real_npsets) {
			psets = kmem_alloc(real_npsets * sizeof (psetid_t),
			    KM_SLEEP);
			(void) cpupart_list(psets, real_npsets, CP_ALL);
		}
	}
	mutex_exit(&cpu_lock);

	if (user_npsets > real_npsets)
		user_npsets = real_npsets;

	if (numpsets != NULL) {
		if (copyout(&real_npsets, numpsets, sizeof (uint_t)) != 0)
			error = EFAULT;
		else if (psetlist != NULL && user_npsets != 0) {
			if (copyout(psets, psetlist,
			    user_npsets * sizeof (psetid_t)) != 0)
				error = EFAULT;
		}
	}

	if (real_npsets)
		kmem_free(psets, real_npsets * sizeof (psetid_t));

	if (error)
		return (set_errno(error));
	else
		return (0);
}

/*
 * Set the attributes (currently only PSET_NOESCAPE) of a processor set.
 */
static int
pset_setattr(psetid_t pset, uint_t attr)
{
	int error;

	if (secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));
	pool_lock();
	if (pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}
	if (pset == PS_QUERY || PSET_BADATTR(attr)) {
		pool_unlock();
		return (set_errno(EINVAL));
	}
	if ((error = cpupart_setattr(pset, attr)) != 0) {
		pool_unlock();
		return (set_errno(error));
	}
	pool_unlock();
	return (0);
}

/*
 * Return the attributes of a processor set to the caller.
 */
static int
pset_getattr(psetid_t pset, uint_t *attrp)
{
	int error = 0;
	uint_t attr;

	if (pset == PS_QUERY)
		return (set_errno(EINVAL));
	if ((error = cpupart_getattr(pset, &attr)) != 0)
		return (set_errno(error));
	if (copyout(&attr, attrp, sizeof (uint_t)) != 0)
		return (set_errno(EFAULT));
	return (0);
}

/*
 * System call entry point: dispatch on the pset(2) subcode.
 */
static int
pset(int subcode, long arg1, long arg2, long arg3, long arg4)
{
	switch (subcode) {
	case PSET_CREATE:
		return (pset_create((psetid_t *)arg1));
	case PSET_DESTROY:
		return (pset_destroy((psetid_t)arg1));
	case PSET_ASSIGN:
		return (pset_assign((psetid_t)arg1,
		    (processorid_t)arg2, (psetid_t *)arg3, 0));
	case PSET_INFO:
		return (pset_info((psetid_t)arg1, (int *)arg2,
		    (uint_t *)arg3, (processorid_t *)arg4));
	case PSET_BIND:
		return (pset_bind((psetid_t)arg1, (idtype_t)arg2,
		    (id_t)arg3, (psetid_t *)arg4));
	case PSET_GETLOADAVG:
		return (pset_getloadavg((psetid_t)arg1, (int *)arg2,
		    (int)arg3));
	case PSET_LIST:
		return (pset_list((psetid_t *)arg1, (uint_t *)arg2));
	case PSET_SETATTR:
		return (pset_setattr((psetid_t)arg1, (uint_t)arg2));
	case PSET_GETATTR:
		return (pset_getattr((psetid_t)arg1, (uint_t *)arg2));
	case PSET_ASSIGN_FORCED:
		return (pset_assign((psetid_t)arg1,
		    (processorid_t)arg2, (psetid_t *)arg3, 1));
	default:
		return (set_errno(EINVAL));
	}
}
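
/*
 * For orientation only, not part of this module: a minimal userland sketch
 * of how the subcodes handled above are normally reached.  The libc
 * wrappers (pset_create(2), pset_assign(2), pset_bind(2), ...) trap into
 * pset() with the matching PSET_* subcode; the wrapper signatures shown
 * here are taken from those manual pages and are assumptions of this note,
 * not anything defined in this file.  CPU id 1 is an arbitrary example.
 *
 *	#include <sys/pset.h>
 *	#include <sys/procset.h>
 *
 *	psetid_t id, old;
 *
 *	if (pset_create(&id) == 0 &&
 *	    pset_assign(id, 1, NULL) == 0)
 *		(void) pset_bind(id, P_PID, P_MYID, &old);
 */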