/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/disp.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/modctl.h>
#include <sys/syscall.h>
#include <sys/task.h>
#include <sys/loadavg.h>
#include <sys/fss.h>
#include <sys/pool.h>
#include <sys/pool_pset.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/contract/process_impl.h>

static int	pset(int, long, long, long, long);

static struct sysent pset_sysent = {
        5,
        SE_ARGC | SE_NOUNLOAD,
        (int (*)())pset,
};

static struct modlsys modlsys = {
        &mod_syscallops, "processor sets", &pset_sysent
};

#ifdef _SYSCALL32_IMPL
static struct modlsys modlsys32 = {
        &mod_syscallops32, "32-bit pset(2) syscall", &pset_sysent
};
#endif

static struct modlinkage modlinkage = {
        MODREV_1,
        &modlsys,
#ifdef _SYSCALL32_IMPL
        &modlsys32,
#endif
        NULL
};

#define	PSET_BADATTR(attr)	((~PSET_NOESCAPE) & (attr))

int
_init(void)
{
        return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}

/*
 * Create an empty processor set and return its id to the caller
 * through psetp.
 */
static int
pset_create(psetid_t *psetp)
{
        psetid_t newpset;
        int error;

        if (secpolicy_pset(CRED()) != 0)
                return (set_errno(EPERM));

        pool_lock();
        if (pool_state == POOL_ENABLED) {
                pool_unlock();
                return (set_errno(ENOTSUP));
        }
        error = cpupart_create(&newpset);
        if (error) {
                pool_unlock();
                return (set_errno(error));
        }
        if (copyout(&newpset, psetp, sizeof (psetid_t)) != 0) {
                (void) cpupart_destroy(newpset);
                pool_unlock();
                return (set_errno(EFAULT));
        }
        pool_unlock();
        return (error);
}

/*
 * Destroy the specified processor set.
 */
static int
pset_destroy(psetid_t pset)
{
        int error;

        if (secpolicy_pset(CRED()) != 0)
                return (set_errno(EPERM));

        pool_lock();
        if (pool_state == POOL_ENABLED) {
                pool_unlock();
                return (set_errno(ENOTSUP));
        }
        error = cpupart_destroy(pset);
        pool_unlock();
        if (error)
                return (set_errno(error));
        else
                return (0);
}

/*
 * Assign a CPU to the specified processor set, or query the CPU's current
 * set if pset is PS_QUERY.  The CPU's previous set is returned through
 * opset.
 */
static int
pset_assign(psetid_t pset, processorid_t cpuid, psetid_t *opset, int forced)
{
        psetid_t oldpset;
        int	error = 0;
        cpu_t	*cp;

        if (pset != PS_QUERY && secpolicy_pset(CRED()) != 0)
                return (set_errno(EPERM));

        pool_lock();
        if (pset != PS_QUERY && pool_state == POOL_ENABLED) {
                pool_unlock();
                return (set_errno(ENOTSUP));
        }

        mutex_enter(&cpu_lock);
        if ((cp = cpu_get(cpuid)) == NULL) {
                mutex_exit(&cpu_lock);
                pool_unlock();
                return (set_errno(EINVAL));
        }

        oldpset = cpupart_query_cpu(cp);

        if (pset != PS_QUERY)
                error = cpupart_attach_cpu(pset, cp, forced);
        mutex_exit(&cpu_lock);
        pool_unlock();

        if (error)
                return (set_errno(error));

        if (opset != NULL)
                if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
                        return (set_errno(EFAULT));

        return (0);
}

/*
 * Return the type of the specified processor set, the number of CPUs it
 * contains, and, if the caller supplied a large enough buffer, the ids of
 * those CPUs.
 */
static int
pset_info(psetid_t pset, int *typep, uint_t *numcpusp,
    processorid_t *cpulistp)
{
        int pset_type;
        uint_t user_ncpus = 0, real_ncpus, copy_ncpus;
        processorid_t *pset_cpus = NULL;
        int error = 0;

        if (numcpusp != NULL) {
                if (copyin(numcpusp, &user_ncpus, sizeof (uint_t)) != 0)
                        return (set_errno(EFAULT));
        }

        if (user_ncpus > max_ncpus)	/* sanity check */
                user_ncpus = max_ncpus;
        if (user_ncpus != 0 && cpulistp != NULL)
                pset_cpus = kmem_alloc(sizeof (processorid_t) * user_ncpus,
                    KM_SLEEP);

        real_ncpus = user_ncpus;
        if ((error = cpupart_get_cpus(&pset, pset_cpus, &real_ncpus)) != 0)
                goto out;

        /*
         * Now copyout the information about this processor set.
         */

        /*
         * Get number of cpus to copy back.  If the user didn't pass in
         * a big enough buffer, only copy back as many cpus as fits in
         * the buffer but copy back the real number of cpus.
         */

        if (user_ncpus != 0 && cpulistp != NULL) {
                copy_ncpus = MIN(real_ncpus, user_ncpus);
                if (copyout(pset_cpus, cpulistp,
                    sizeof (processorid_t) * copy_ncpus) != 0) {
                        error = EFAULT;
                        goto out;
                }
        }
        if (pset_cpus != NULL)
                kmem_free(pset_cpus, sizeof (processorid_t) * user_ncpus);
        if (typep != NULL) {
                if (pset == PS_NONE)
                        pset_type = PS_NONE;
                else
                        pset_type = PS_PRIVATE;
                if (copyout(&pset_type, typep, sizeof (int)) != 0)
                        return (set_errno(EFAULT));
        }
        if (numcpusp != NULL)
                if (copyout(&real_ncpus, numcpusp, sizeof (uint_t)) != 0)
                        return (set_errno(EFAULT));
        return (0);

out:
        if (pset_cpus != NULL)
                kmem_free(pset_cpus, sizeof (processorid_t) * user_ncpus);
        return (set_errno(error));
}

/*
 * Bind a single thread to the specified processor set, or query its
 * current binding if pset is PS_QUERY.  The previous binding is returned
 * through oldpset.  The caller must hold pool_lock, cpu_lock, and the
 * thread's p_lock.
 */
static int
pset_bind_thread(kthread_t *tp, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
        int error = 0;

        ASSERT(pool_lock_held());
        ASSERT(MUTEX_HELD(&cpu_lock));
        ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));

        *oldpset = tp->t_bind_pset;
        if (pset != PS_QUERY) {
                /*
                 * Must have the same UID as the target process or
                 * have PRIV_PROC_OWNER privilege.
                 */
                if (!hasprocperm(tp->t_cred, CRED()))
                        return (EPERM);
                /*
                 * Unbinding of an unbound thread should always succeed.
                 */
                if (*oldpset == PS_NONE && pset == PS_NONE)
                        return (0);
                /*
                 * Only privileged processes can move threads from psets with
                 * PSET_NOESCAPE attribute.
                 */
                if ((tp->t_cpupart->cp_attr & PSET_NOESCAPE) &&
                    secpolicy_pset(CRED()) != 0)
                        return (EPERM);
                if ((error = cpupart_bind_thread(tp, pset, 0,
                    projbuf, zonebuf)) == 0)
                        tp->t_bind_pset = pset;
        }
        return (error);
}

static int
pset_bind_process(proc_t *pp, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
        int error = 0;
        kthread_t *tp;

        /* skip kernel processes */
        if (pset != PS_QUERY && pp->p_flag & SSYS) {
                *oldpset = PS_NONE;
                return (0);
        }

        mutex_enter(&pp->p_lock);
        tp = pp->p_tlist;
        if (tp != NULL) {
                do {
                        int rval;

                        rval = pset_bind_thread(tp, pset, oldpset, projbuf,
                            zonebuf);
                        if (error == 0)
                                error = rval;
                } while ((tp = tp->t_forw) != pp->p_tlist);
        } else
                error = ESRCH;
        mutex_exit(&pp->p_lock);

        return (error);
}

static int
pset_bind_task(task_t *tk, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
        int error = 0;
        proc_t *pp;

        ASSERT(MUTEX_HELD(&pidlock));

        if ((pp = tk->tk_memb_list) == NULL) {
                return (ESRCH);
        }

        do {
                int rval;

                rval = pset_bind_process(pp, pset, oldpset, projbuf, zonebuf);
                if (error == 0)
                        error = rval;
        } while ((pp = pp->p_tasknext) != tk->tk_memb_list);

        return (error);
}

static int
pset_bind_project(kproject_t *kpj, psetid_t pset, psetid_t *oldpset,
    void *projbuf, void *zonebuf)
{
        int error = 0;
        proc_t *pp;

        ASSERT(MUTEX_HELD(&pidlock));

        for (pp = practive; pp != NULL; pp = pp->p_next) {
                if (pp->p_tlist == NULL)
                        continue;
                if (pp->p_task->tk_proj == kpj) {
                        int rval;

                        rval = pset_bind_process(pp, pset, oldpset, projbuf,
                            zonebuf);
                        if (error == 0)
                                error = rval;
                }
        }

        return (error);
}

static int
pset_bind_zone(zone_t *zptr, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
        int error = 0;
        proc_t *pp;

        ASSERT(MUTEX_HELD(&pidlock));

        for (pp = practive; pp != NULL; pp = pp->p_next) {
                if (pp->p_zone == zptr) {
                        int rval;

                        rval = pset_bind_process(pp, pset, oldpset, projbuf,
                            zonebuf);
                        if (error == 0)
                                error = rval;
                }
        }

        return (error);
}

/*
 * Unbind all threads from the specified processor set, or from all
 * processor sets.
 */
static int
pset_unbind(psetid_t pset, void *projbuf, void *zonebuf, idtype_t idtype)
{
        psetid_t olbind;
        kthread_t *tp;
        int error = 0;
        int rval;
        proc_t *pp;

        ASSERT(MUTEX_HELD(&cpu_lock));

        if (idtype == P_PSETID && cpupart_find(pset) == NULL)
                return (EINVAL);

        mutex_enter(&pidlock);
        for (pp = practive; pp != NULL; pp = pp->p_next) {
                mutex_enter(&pp->p_lock);
                tp = pp->p_tlist;
                /*
                 * Skip zombies and kernel processes, and processes in
                 * other zones, if called from a non-global zone.
                 */
                if (tp == NULL || (pp->p_flag & SSYS) ||
                    !HASZONEACCESS(curproc, pp->p_zone->zone_id)) {
                        mutex_exit(&pp->p_lock);
                        continue;
                }
                do {
                        if ((idtype == P_PSETID && tp->t_bind_pset != pset) ||
                            (idtype == P_ALL && tp->t_bind_pset == PS_NONE))
                                continue;
                        rval = pset_bind_thread(tp, PS_NONE, &olbind,
                            projbuf, zonebuf);
                        if (error == 0)
                                error = rval;
                } while ((tp = tp->t_forw) != pp->p_tlist);
                mutex_exit(&pp->p_lock);
        }
        mutex_exit(&pidlock);
        return (error);
}

static int
pset_bind_contract(cont_process_t *ctp, psetid_t pset, psetid_t *oldpset,
    void *projbuf, void *zonebuf)
{
        int error = 0;
        proc_t *pp;

        ASSERT(MUTEX_HELD(&pidlock));

        for (pp = practive; pp != NULL; pp = pp->p_next) {
                if (pp->p_ct_process == ctp) {
                        int rval;

                        rval = pset_bind_process(pp, pset, oldpset, projbuf,
                            zonebuf);
                        if (error == 0)
                                error = rval;
                }
        }

        return (error);
}

/*
 * Bind the threads selected by (idtype, id) to the specified processor
 * set, or query their binding if pset is PS_QUERY.  The old binding is
 * returned through opset.
 */
static int
pset_bind(psetid_t pset, idtype_t idtype, id_t id, psetid_t *opset)
{
        kthread_t	*tp;
        proc_t		*pp;
        task_t		*tk;
        kproject_t	*kpj;
        contract_t	*ct;
        zone_t		*zptr;
        psetid_t	oldpset;
        int		error = 0;
        void		*projbuf, *zonebuf;

        pool_lock();
        if (pset != PS_QUERY) {
                /*
                 * Check if the set actually exists before checking
                 * permissions.  This is the historical error
                 * precedence.  Note that if pset was PS_MYID, the
                 * cpupart_get_cpus call will change it to the
                 * processor set id of the caller (or PS_NONE if the
                 * caller is not bound to a processor set).
                 */
                if (pool_state == POOL_ENABLED) {
                        pool_unlock();
                        return (set_errno(ENOTSUP));
                }
                if (cpupart_get_cpus(&pset, NULL, NULL) != 0) {
                        pool_unlock();
                        return (set_errno(EINVAL));
                } else if (pset != PS_NONE && secpolicy_pset(CRED()) != 0) {
                        pool_unlock();
                        return (set_errno(EPERM));
                }
        }

        /*
         * Pre-allocate enough buffers for FSS for all active projects
         * and for all active zones on the system.  Unused buffers will
         * be freed later by fss_freebuf().
         */
        mutex_enter(&cpu_lock);
        projbuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_PROJ);
        zonebuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_ZONE);

        switch (idtype) {
        case P_LWPID:
                pp = curproc;
                mutex_enter(&pidlock);
                mutex_enter(&pp->p_lock);
                if (id == P_MYID) {
                        tp = curthread;
                } else {
                        if ((tp = idtot(pp, id)) == NULL) {
                                mutex_exit(&pp->p_lock);
                                mutex_exit(&pidlock);
                                error = ESRCH;
                                break;
                        }
                }
                error = pset_bind_thread(tp, pset, &oldpset, projbuf, zonebuf);
                mutex_exit(&pp->p_lock);
                mutex_exit(&pidlock);
                break;

        case P_PID:
                mutex_enter(&pidlock);
                if (id == P_MYID) {
                        pp = curproc;
                } else if ((pp = prfind(id)) == NULL) {
                        mutex_exit(&pidlock);
                        error = ESRCH;
                        break;
                }
                error = pset_bind_process(pp, pset, &oldpset, projbuf, zonebuf);
                mutex_exit(&pidlock);
                break;

        case P_TASKID:
                mutex_enter(&pidlock);
                if (id == P_MYID)
                        id = curproc->p_task->tk_tkid;
                if ((tk = task_hold_by_id(id)) == NULL) {
                        mutex_exit(&pidlock);
                        error = ESRCH;
                        break;
                }
                error = pset_bind_task(tk, pset, &oldpset, projbuf, zonebuf);
                mutex_exit(&pidlock);
                task_rele(tk);
                break;

        case P_PROJID:
                if (id == P_MYID)
                        id = curprojid();
                if ((kpj = project_hold_by_id(id, getzoneid(),
                    PROJECT_HOLD_FIND)) == NULL) {
                        error = ESRCH;
                        break;
                }
                mutex_enter(&pidlock);
                error = pset_bind_project(kpj, pset, &oldpset, projbuf,
                    zonebuf);
                mutex_exit(&pidlock);
                project_rele(kpj);
                break;

        case P_ZONEID:
                if (id == P_MYID)
                        id = getzoneid();
                if ((zptr = zone_find_by_id(id)) == NULL) {
                        error = ESRCH;
                        break;
                }
                mutex_enter(&pidlock);
                error = pset_bind_zone(zptr, pset, &oldpset, projbuf, zonebuf);
                mutex_exit(&pidlock);
                zone_rele(zptr);
                break;

        case P_CTID:
                if (id == P_MYID)
                        id = PRCTID(curproc);
                if ((ct = contract_type_ptr(process_type, id,
                    curproc->p_zone->zone_uniqid)) == NULL) {
                        error = ESRCH;
                        break;
                }
                mutex_enter(&pidlock);
                error = pset_bind_contract(ct->ct_data, pset, &oldpset, projbuf,
                    zonebuf);
                mutex_exit(&pidlock);
                contract_rele(ct);
                break;

        case P_PSETID:
                if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
                        error = EINVAL;
                        break;
                }
                error = pset_unbind(id, projbuf, zonebuf, idtype);
                break;

        case P_ALL:
                if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
                        error = EINVAL;
                        break;
                }
                error = pset_unbind(PS_NONE, projbuf, zonebuf, idtype);
                break;

        default:
                error = EINVAL;
                break;
        }

        fss_freebuf(projbuf, FSS_ALLOC_PROJ);
        fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
        mutex_exit(&cpu_lock);
        pool_unlock();

        if (error != 0)
                return (set_errno(error));
        if (opset != NULL) {
                if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
                        return (set_errno(EFAULT));
        }
        return (0);
}

/*
 * Report load average statistics for the specified processor set.
 */
static int
pset_getloadavg(psetid_t pset, int *buf, int nelem)
{
        int loadbuf[LOADAVG_NSTATS];
        int error = 0;

        if (nelem < 0)
                return (set_errno(EINVAL));

        /*
         * We keep the same number of load average statistics for processor
         * sets as we do for the system as a whole.
         */
        if (nelem > LOADAVG_NSTATS)
                nelem = LOADAVG_NSTATS;

        mutex_enter(&cpu_lock);
        error = cpupart_get_loadavg(pset, loadbuf, nelem);
        mutex_exit(&cpu_lock);
        if (!error && nelem && copyout(loadbuf, buf, nelem * sizeof (int)) != 0)
                error = EFAULT;

        if (error)
                return (set_errno(error));
        else
                return (0);
}


/*
 * Return list of active processor sets, up to a maximum indicated by
 * numpsets.  The total number of processor sets is stored in the
 * location pointed to by numpsets.
 */
static int
pset_list(psetid_t *psetlist, uint_t *numpsets)
{
        uint_t user_npsets = 0;
        uint_t real_npsets;
        psetid_t *psets = NULL;
        int error = 0;

        if (numpsets != NULL) {
                if (copyin(numpsets, &user_npsets, sizeof (uint_t)) != 0)
                        return (set_errno(EFAULT));
        }

        /*
         * Get the list of all processor sets.  First we need to find
         * out how many there are, so we can allocate a large enough
         * buffer.
         */
        mutex_enter(&cpu_lock);
        if (!INGLOBALZONE(curproc) && pool_pset_enabled()) {
                psetid_t psetid = zone_pset_get(curproc->p_zone);

                if (psetid == PS_NONE) {
                        real_npsets = 0;
                } else {
                        real_npsets = 1;
                        psets = kmem_alloc(real_npsets * sizeof (psetid_t),
                            KM_SLEEP);
                        psets[0] = psetid;
                }
        } else {
                real_npsets = cpupart_list(0, NULL, CP_ALL);
                if (real_npsets) {
                        psets = kmem_alloc(real_npsets * sizeof (psetid_t),
                            KM_SLEEP);
                        (void) cpupart_list(psets, real_npsets, CP_ALL);
                }
        }
        mutex_exit(&cpu_lock);

        if (user_npsets > real_npsets)
                user_npsets = real_npsets;

        if (numpsets != NULL) {
                if (copyout(&real_npsets, numpsets, sizeof (uint_t)) != 0)
                        error = EFAULT;
                else if (psetlist != NULL && user_npsets != 0) {
                        if (copyout(psets, psetlist,
                            user_npsets * sizeof (psetid_t)) != 0)
                                error = EFAULT;
                }
        }

        if (real_npsets)
                kmem_free(psets, real_npsets * sizeof (psetid_t));

        if (error)
                return (set_errno(error));
        else
                return (0);
}

/*
 * Set the attributes (currently only PSET_NOESCAPE) of the specified
 * processor set.
 */
static int
pset_setattr(psetid_t pset, uint_t attr)
{
        int error;

        if (secpolicy_pset(CRED()) != 0)
                return (set_errno(EPERM));
        pool_lock();
        if (pool_state == POOL_ENABLED) {
                pool_unlock();
                return (set_errno(ENOTSUP));
        }
        if (pset == PS_QUERY || PSET_BADATTR(attr)) {
                pool_unlock();
                return (set_errno(EINVAL));
        }
        if ((error = cpupart_setattr(pset, attr)) != 0) {
                pool_unlock();
                return (set_errno(error));
        }
        pool_unlock();
        return (0);
}

/*
 * Return the attributes of the specified processor set.
 */
static int
pset_getattr(psetid_t pset, uint_t *attrp)
{
        int error = 0;
        uint_t attr;

        if (pset == PS_QUERY)
                return (set_errno(EINVAL));
        if ((error = cpupart_getattr(pset, &attr)) != 0)
                return (set_errno(error));
        if (copyout(&attr, attrp, sizeof (uint_t)) != 0)
                return (set_errno(EFAULT));
        return (0);
}

/*
 * Entry point for the pset(2) system call; dispatch on the subcode.
 */
static int
pset(int subcode, long arg1, long arg2, long arg3, long arg4)
{
        switch (subcode) {
        case PSET_CREATE:
                return (pset_create((psetid_t *)arg1));
        case PSET_DESTROY:
                return (pset_destroy((psetid_t)arg1));
        case PSET_ASSIGN:
                return (pset_assign((psetid_t)arg1,
                    (processorid_t)arg2, (psetid_t *)arg3, 0));
        case PSET_INFO:
                return (pset_info((psetid_t)arg1, (int *)arg2,
                    (uint_t *)arg3, (processorid_t *)arg4));
        case PSET_BIND:
                return (pset_bind((psetid_t)arg1, (idtype_t)arg2,
                    (id_t)arg3, (psetid_t *)arg4));
        case PSET_GETLOADAVG:
                return (pset_getloadavg((psetid_t)arg1, (int *)arg2,
                    (int)arg3));
        case PSET_LIST:
                return (pset_list((psetid_t *)arg1, (uint_t *)arg2));
        case PSET_SETATTR:
                return (pset_setattr((psetid_t)arg1, (uint_t)arg2));
        case PSET_GETATTR:
                return (pset_getattr((psetid_t)arg1, (uint_t *)arg2));
        case PSET_ASSIGN_FORCED:
                return (pset_assign((psetid_t)arg1,
                    (processorid_t)arg2, (psetid_t *)arg3, 1));
        default:
                return (set_errno(EINVAL));
        }
}