/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/project.h>
#include <sys/modhash.h>
#include <sys/modctl.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/proc.h>
#include <sys/rctl.h>
#include <sys/sunddi.h>
#include <sys/fss.h>
#include <sys/systm.h>
#include <sys/ipc_impl.h>
#include <sys/port_kernel.h>
#include <sys/task.h>
#include <sys/zone.h>

int project_hash_size = 64;
static kmutex_t project_hash_lock;
static kmutex_t projects_list_lock;
static mod_hash_t *projects_hash;
static kproject_t *projects_list;

rctl_hndl_t rc_project_cpu_shares;
rctl_hndl_t rc_project_nlwps;
rctl_hndl_t rc_project_ntasks;
rctl_hndl_t rc_project_msgmni;
rctl_hndl_t rc_project_semmni;
rctl_hndl_t rc_project_shmmax;
rctl_hndl_t rc_project_shmmni;
rctl_hndl_t rc_project_portids;
rctl_hndl_t rc_project_devlockmem;
rctl_hndl_t rc_project_contract;
rctl_hndl_t rc_project_crypto_mem;

/*
 * Dummy structure used when comparing projects.  This structure must be kept
 * identical to the first two fields of kproject_t.
 */
struct project_zone {
	projid_t	kpj_id;
	zoneid_t	kpj_zoneid;
};

/*
 * Projects
 *
 *   A dictionary of all active projects is maintained by the kernel so that
 *   we may track project usage and limits.  (By an active project, we mean a
 *   project associated with one or more tasks, and therefore with one or more
 *   processes.)  We build the dictionary on top of the mod_hash facility,
 *   since project additions and deletions are relatively rare events.  An
 *   integer-to-pointer mapping is maintained within the hash, representing
 *   the map from project id to project structure.  All projects, including
 *   the primordial "project 0", are allocated via the project_hold_by_id()
 *   interface.
 *
 *   Currently, the project contains a reference count; the project ID, which
 *   is examined by the extended accounting subsystem as well as /proc; a
 *   resource control set, which contains the allowable values (and actions on
 *   exceeding those values) for controlled project-level resources on the
 *   system; and a number of CPU shares, which is used by the fair share
 *   scheduling class (FSS) to support its proportion-based scheduling
 *   algorithm.
 *
 * Reference counting convention
 *   The dictionary entry does not itself count as a reference--only
 *   references outside of the subsystem are tallied.  At the drop of the
 *   final external reference, the project entry is removed.  The reference
 *   counter keeps track of the number of threads *and* tasks within a
 *   project.
 *
 * Locking
 *   Walking the doubly-linked project list must be done while holding
 *   projects_list_lock.  Thus, any dereference of kpj_next or kpj_prev must
 *   be under projects_list_lock.
 *
 *   If both the hash lock, project_hash_lock, and the list lock are to be
 *   acquired, the hash lock is to be acquired first.
 */
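
/*
 * For illustration, a hypothetical caller that needed both locks would
 * acquire them in the order described above:
 *
 *	mutex_enter(&project_hash_lock);
 *	mutex_enter(&projects_list_lock);
 *	... examine the dictionary and walk the list ...
 *	mutex_exit(&projects_list_lock);
 *	mutex_exit(&project_hash_lock);
 */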

static void
project_data_init(kproject_data_t *data)
{
	/*
	 * Initialize subsystem-specific data
	 */
	data->kpd_shmmax = 0;
	data->kpd_ipc.ipcq_shmmni = 0;
	data->kpd_ipc.ipcq_semmni = 0;
	data->kpd_ipc.ipcq_msgmni = 0;
	data->kpd_devlockmem = 0;
	data->kpd_contract = 0;
	data->kpd_crypto_mem = 0;
}

/*ARGSUSED*/
static uint_t
project_hash_by_id(void *hash_data, mod_hash_key_t key)
{
	struct project_zone *pz = key;
	uint_t mykey;

	/*
	 * Merge the zoneid and projectid together to a 32-bit quantity, and
	 * then pass that in to the existing idhash.
	 */
	mykey = (pz->kpj_zoneid << 16) | pz->kpj_id;
	return (mod_hash_byid(hash_data, (mod_hash_key_t)(uintptr_t)mykey));
}
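
/*
 * For example, project ID 100 in zone ID 2 hashes with the composite key
 * (2 << 16) | 100 == 0x20064.  A project ID that does not fit in 16 bits
 * only affects hash distribution, not correctness, since
 * project_hash_key_cmp() below compares the full kpj_id and kpj_zoneid
 * fields.
 */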

static int
project_hash_key_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
{
	struct project_zone *pz1 = key1, *pz2 = key2;
	int retval;

	return ((int)((retval = pz1->kpj_id - pz2->kpj_id) != 0 ? retval :
	    pz1->kpj_zoneid - pz2->kpj_zoneid));
}

static void
project_hash_val_dtor(mod_hash_val_t val)
{
	kproject_t *kp = (kproject_t *)val;

	ASSERT(kp->kpj_count == 0);
	kmem_free(kp, sizeof (kproject_t));
}

/*
 * kproject_t *project_hold(kproject_t *)
 *
 * Overview
 *   Record that an additional reference on the indicated project has been
 *   taken.
 *
 * Return values
 *   A pointer to the indicated project.
 *
 * Caller's context
 *   project_hash_lock must not be held across the project_hold() call.
 */
kproject_t *
project_hold(kproject_t *p)
{
	mutex_enter(&project_hash_lock);
	ASSERT(p != NULL);
	p->kpj_count++;
	ASSERT(p->kpj_count != 0);
	mutex_exit(&project_hash_lock);
	return (p);
}

/*
 * kproject_t *project_hold_by_id(projid_t, zoneid_t, int)
 *
 * Overview
 *   project_hold_by_id() performs a look-up in the dictionary of projects
 *   active on the system by the specified project ID + zone ID and puts a
 *   hold on it.  The third argument defines the desired behavior in the case
 *   when a project with the given project ID cannot be found:
 *
 *   PROJECT_HOLD_INSERT	A new entry is made in the dictionary and the
 *				project is added to the global list.
 *
 *   PROJECT_HOLD_FIND		Return NULL.
 *
 *   The project is returned with its reference count incremented by one.
 *   A new project derives its resource controls from those of project 0.
 *
 * Return values
 *   A pointer to the held project.
 *
 * Caller's context
 *   Caller must be in a context suitable for KM_SLEEP allocations.
 */
kproject_t *
project_hold_by_id(projid_t id, zoneid_t zoneid, int flag)
{
	kproject_t *spare_p;
	kproject_t *p;
	mod_hash_hndl_t hndl;
	rctl_set_t *set;
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;
	struct project_zone pz;

	pz.kpj_id = id;
	pz.kpj_zoneid = zoneid;

	if (flag == PROJECT_HOLD_FIND) {
		mutex_enter(&project_hash_lock);

		if (mod_hash_find(projects_hash, (mod_hash_key_t)&pz,
		    (mod_hash_val_t)&p) == MH_ERR_NOTFOUND)
			p = NULL;
		else
			p->kpj_count++;

		mutex_exit(&project_hash_lock);
		return (p);
	}

	ASSERT(flag == PROJECT_HOLD_INSERT);

	spare_p = kmem_zalloc(sizeof (kproject_t), KM_SLEEP);
	set = rctl_set_create();

	gp = rctl_set_init_prealloc(RCENTITY_PROJECT);

	(void) mod_hash_reserve(projects_hash, &hndl);

	mutex_enter(&curproc->p_lock);
	mutex_enter(&project_hash_lock);
	if (mod_hash_find(projects_hash, (mod_hash_key_t)&pz,
	    (mod_hash_val_t *)&p) == MH_ERR_NOTFOUND) {
		p = spare_p;
		p->kpj_id = id;
		p->kpj_zoneid = zoneid;
		p->kpj_count = 0;
		p->kpj_shares = 1;
		p->kpj_nlwps = 0;
		p->kpj_ntasks = 0;
		p->kpj_nlwps_ctl = INT_MAX;
		p->kpj_ntasks_ctl = INT_MAX;
		project_data_init(&p->kpj_data);
		e.rcep_p.proj = p;
		e.rcep_t = RCENTITY_PROJECT;
		p->kpj_rctls = rctl_set_init(RCENTITY_PROJECT, curproc, &e,
		    set, gp);
		mutex_exit(&curproc->p_lock);

		if (mod_hash_insert_reserve(projects_hash, (mod_hash_key_t)p,
		    (mod_hash_val_t)p, hndl))
			panic("unable to insert project %d(%p)", id, (void *)p);

		/*
		 * Insert project into global project list.
		 */
		mutex_enter(&projects_list_lock);
		if (id != 0 || zoneid != GLOBAL_ZONEID) {
			p->kpj_next = projects_list;
			p->kpj_prev = projects_list->kpj_prev;
			p->kpj_prev->kpj_next = p;
			projects_list->kpj_prev = p;
		} else {
			/*
			 * Special case: primordial hold on project 0.
			 */
			p->kpj_next = p;
			p->kpj_prev = p;
			projects_list = p;
		}
		mutex_exit(&projects_list_lock);
	} else {
		mutex_exit(&curproc->p_lock);
		mod_hash_cancel(projects_hash, &hndl);
		kmem_free(spare_p, sizeof (kproject_t));
		rctl_set_free(set);
	}

	rctl_prealloc_destroy(gp);
	p->kpj_count++;
	mutex_exit(&project_hash_lock);

	return (p);
}


/*
 * void project_rele(kproject_t *)
 *
 * Overview
 *   Advertise that one external reference to this project is no longer
 *   needed.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   No restriction on context.
 */
void
project_rele(kproject_t *p)
{
	mutex_enter(&project_hash_lock);
	ASSERT(p->kpj_count != 0);
	p->kpj_count--;
	if (p->kpj_count == 0) {

		/*
		 * Remove project from global list.
		 */
		mutex_enter(&projects_list_lock);
		p->kpj_next->kpj_prev = p->kpj_prev;
		p->kpj_prev->kpj_next = p->kpj_next;
		if (projects_list == p)
			projects_list = p->kpj_next;
		mutex_exit(&projects_list_lock);

		rctl_set_free(p->kpj_rctls);

		if (mod_hash_destroy(projects_hash, (mod_hash_key_t)p))
			panic("unable to delete project %d zone %d", p->kpj_id,
			    p->kpj_zoneid);

	}
	mutex_exit(&project_hash_lock);
}
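
/*
 * Typical usage, as a hypothetical sketch: a consumer takes a hold when it
 * begins tracking a project and drops it when finished.
 *
 *	kproject_t *kpj;
 *
 *	kpj = project_hold_by_id(projid, zoneid, PROJECT_HOLD_INSERT);
 *	... reference kpj->kpj_data, kpj->kpj_rctls, etc. ...
 *	project_rele(kpj);
 *
 * With PROJECT_HOLD_FIND, the return value may be NULL and must be checked
 * before use; no new dictionary entry is created in that case.
 */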

/*
 * int project_walk_all(zoneid_t, int (*)(kproject_t *, void *), void *)
 *
 * Overview
 *   Walk the project list for the given zoneid with a callback.
 *
 * Return values
 *   -1 for an invalid walk, number of projects visited otherwise.
 *
 * Caller's context
 *   projects_list_lock must not be held, as it is acquired by
 *   project_walk_all().  Accordingly, callbacks may not perform KM_SLEEP
 *   allocations.
 */
int
project_walk_all(zoneid_t zoneid, int (*cb)(kproject_t *, void *),
    void *walk_data)
{
	int cnt = 0;
	kproject_t *kp = proj0p;

	mutex_enter(&projects_list_lock);
	do {
		if (zoneid != ALL_ZONES && kp->kpj_zoneid != zoneid)
			continue;
		if (cb(kp, walk_data) == -1) {
			cnt = -1;
			break;
		} else {
			cnt++;
		}
	} while ((kp = kp->kpj_next) != proj0p);
	mutex_exit(&projects_list_lock);
	return (cnt);
}
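
/*
 * A hypothetical callback for project_walk_all(); because the walk runs
 * under projects_list_lock, the callback must not block for memory (no
 * KM_SLEEP allocations).  Returning -1 aborts the walk.
 *
 *	static int
 *	project_count_cb(kproject_t *kpj, void *arg)
 *	{
 *		uint_t *np = arg;
 *
 *		(*np)++;
 *		return (0);
 *	}
 *
 *	uint_t nprojects = 0;
 *	(void) project_walk_all(zoneid, project_count_cb, &nprojects);
 */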

/*
 * projid_t curprojid(void)
 *
 * Overview
 *   Return the project ID of the current thread.
 *
 * Caller's context
 *   No restrictions.
 */
projid_t
curprojid()
{
	return (ttoproj(curthread)->kpj_id);
}

/*
 * project.cpu-shares resource control support.
 */
/*ARGSUSED*/
static rctl_qty_t
project_cpu_shares_usage(rctl_t *rctl, struct proc *p)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	return (p->p_task->tk_proj->kpj_shares);
}

/*ARGSUSED*/
static int
project_cpu_shares_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_qty_t nv)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	if (e->rcep_p.proj == NULL)
		return (0);

	e->rcep_p.proj->kpj_shares = nv;

	return (0);
}


static rctl_ops_t project_cpu_shares_ops = {
	rcop_no_action,
	project_cpu_shares_usage,
	project_cpu_shares_set,
	rcop_no_test
};

/*ARGSUSED*/
static rctl_qty_t
project_lwps_usage(rctl_t *r, proc_t *p)
{
	kproject_t *pj;
	rctl_qty_t nlwps;

	ASSERT(MUTEX_HELD(&p->p_lock));
	pj = p->p_task->tk_proj;
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	nlwps = pj->kpj_nlwps;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	return (nlwps);
}

/*ARGSUSED*/
static int
project_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
    rctl_qty_t incr, uint_t flags)
{
	rctl_qty_t nlwps;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	if (e->rcep_p.proj == NULL)
		return (0);

	nlwps = e->rcep_p.proj->kpj_nlwps;
	if (nlwps + incr > rcntl->rcv_value)
		return (1);

	return (0);
}

/*ARGSUSED*/
static int
project_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_qty_t nv) {

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	if (e->rcep_p.proj == NULL)
		return (0);

	e->rcep_p.proj->kpj_nlwps_ctl = nv;
	return (0);
}

static rctl_ops_t project_lwps_ops = {
	rcop_no_action,
	project_lwps_usage,
	project_lwps_set,
	project_lwps_test,
};

/*ARGSUSED*/
static rctl_qty_t
project_ntasks_usage(rctl_t *r, proc_t *p)
{
	kproject_t *pj;
	rctl_qty_t ntasks;

	ASSERT(MUTEX_HELD(&p->p_lock));
	pj = p->p_task->tk_proj;
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	ntasks = pj->kpj_ntasks;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	return (ntasks);
}

/*ARGSUSED*/
static int
project_ntasks_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
    rctl_qty_t incr, uint_t flags)
{
	rctl_qty_t ntasks;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	ntasks = e->rcep_p.proj->kpj_ntasks;
	if (ntasks + incr > rcntl->rcv_value)
		return (1);

	return (0);
}

/*ARGSUSED*/
static int
project_ntasks_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_qty_t nv) {

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	e->rcep_p.proj->kpj_ntasks_ctl = nv;
	return (0);
}

static rctl_ops_t project_tasks_ops = {
	rcop_no_action,
	project_ntasks_usage,
	project_ntasks_set,
	project_ntasks_test,
};

/*
 * project.max-shm-memory resource control support.
 */

/*ARGSUSED*/
static int
project_shmmax_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
{
	rctl_qty_t v;
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	v = e->rcep_p.proj->kpj_data.kpd_shmmax + inc;
	if (v > rval->rcv_value)
		return (1);

	return (0);
}

static rctl_ops_t project_shmmax_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_shmmax_test
};

/*
 * project.max-shm-ids resource control support.
 */

/*ARGSUSED*/
static int
project_shmmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
{
	rctl_qty_t v;
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_shmmni + inc;
	if (v > rval->rcv_value)
		return (1);

	return (0);
}

static rctl_ops_t project_shmmni_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_shmmni_test
};

/*
 * project.max-sem-ids resource control support.
 */

/*ARGSUSED*/
static int
project_semmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
{
	rctl_qty_t v;
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_semmni + inc;
	if (v > rval->rcv_value)
		return (1);

	return (0);
}

static rctl_ops_t project_semmni_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_semmni_test
};

/*
 * project.max-msg-ids resource control support.
 */

/*ARGSUSED*/
static int
project_msgmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
{
	rctl_qty_t v;
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_msgmni + inc;
	if (v > rval->rcv_value)
		return (1);

	return (0);
}

static rctl_ops_t project_msgmni_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_msgmni_test
};
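
/*
 * These test callbacks are invoked through the rctl framework when the
 * corresponding resource is allocated; a nonzero return indicates that the
 * proposed increment would exceed the control value.  A hypothetical caller
 * (details vary by subsystem, and the exact rctl_test() arguments shown here
 * are only an assumption) might check the limit like this:
 *
 *	mutex_enter(&p->p_lock);
 *	if (rctl_test(rc_project_msgmni, p->p_task->tk_proj->kpj_rctls, p,
 *	    1, RCA_SAFE) & RCT_DENY) {
 *		mutex_exit(&p->p_lock);
 *		return (ENOSPC);
 *	}
 *	mutex_exit(&p->p_lock);
 */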

/*
 * project.max-device-locked-memory resource control support.
 */

/*ARGSUSED*/
static int
project_devlockmem_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
{
	rctl_qty_t v;
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	v = e->rcep_p.proj->kpj_data.kpd_devlockmem + inc;
	if (v > rval->rcv_value)
		return (1);
	return (0);
}

static rctl_ops_t project_devlockmem_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_devlockmem_test
};

/*
 * project.max-contracts resource control support.
 */

/*ARGSUSED*/
static int
project_contract_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
{
	rctl_qty_t v;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);

	v = e->rcep_p.proj->kpj_data.kpd_contract + inc;

	if ((p->p_task != NULL) && (p->p_task->tk_proj) != NULL &&
	    (v > rval->rcv_value))
		return (1);

	return (0);
}

static rctl_ops_t project_contract_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_contract_test
};

/*ARGSUSED*/
static int
project_crypto_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t incr, uint_t flags)
{
	rctl_qty_t v;
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	v = e->rcep_p.proj->kpj_data.kpd_crypto_mem + incr;
	if (v > rval->rcv_value)
		return (1);
	return (0);
}

static rctl_ops_t project_crypto_mem_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_crypto_test
};

/*
 * void project_init(void)
 *
 * Overview
 *   Initialize the project subsystem, including the primordial project 0
 *   entry.  Register generic project resource controls, if any.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   Safe for KM_SLEEP allocations.
 */
void
project_init(void)
{
	rctl_qty_t shmmni, shmmax, qty;
	boolean_t check;

	projects_hash = mod_hash_create_extended("projects_hash",
	    project_hash_size, mod_hash_null_keydtor, project_hash_val_dtor,
	    project_hash_by_id,
	    (void *)(uintptr_t)mod_hash_iddata_gen(project_hash_size),
	    project_hash_key_cmp, KM_SLEEP);

	rc_project_cpu_shares = rctl_register("project.cpu-shares",
	    RCENTITY_PROJECT, RCTL_GLOBAL_SIGNAL_NEVER |
	    RCTL_GLOBAL_DENY_NEVER | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER,
	    FSS_MAXSHARES, FSS_MAXSHARES,
	    &project_cpu_shares_ops);
	rctl_add_default_limit("project.cpu-shares", 1, RCPRIV_PRIVILEGED,
	    RCTL_LOCAL_NOACTION);

	rc_project_nlwps = rctl_register("project.max-lwps", RCENTITY_PROJECT,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
	    INT_MAX, INT_MAX, &project_lwps_ops);

	rc_project_ntasks = rctl_register("project.max-tasks", RCENTITY_PROJECT,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
	    INT_MAX, INT_MAX, &project_tasks_ops);

	/*
	 * This rctl handle is used by /dev/crypto.  It is here rather than
	 * in misc/kcf or the drv/crypto module because resource controls
	 * currently don't allow modules to be unloaded, and the control
	 * must be registered before init starts.
	 */
	rc_project_crypto_mem = rctl_register("project.max-crypto-memory",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX,
	    &project_crypto_mem_ops);

	/*
	 * Default to a quarter of the machine's memory; availrmem_initial
	 * is in pages, so shifting by (PAGESHIFT - 2) converts pages to
	 * bytes and divides by four.
	 */
	qty = availrmem_initial << (PAGESHIFT - 2);
	rctl_add_default_limit("project.max-crypto-memory", qty,
	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);

	/*
	 * System V IPC resource controls
	 */
	rc_project_semmni = rctl_register("project.max-sem-ids",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_semmni_ops);
	rctl_add_legacy_limit("project.max-sem-ids", "semsys",
	    "seminfo_semmni", 128, IPC_IDS_MAX);

	rc_project_msgmni = rctl_register("project.max-msg-ids",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_msgmni_ops);
	rctl_add_legacy_limit("project.max-msg-ids", "msgsys",
	    "msginfo_msgmni", 128, IPC_IDS_MAX);

	rc_project_shmmni = rctl_register("project.max-shm-ids",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_shmmni_ops);
	rctl_add_legacy_limit("project.max-shm-ids", "shmsys",
	    "shminfo_shmmni", 128, IPC_IDS_MAX);

	rc_project_shmmax = rctl_register("project.max-shm-memory",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX, &project_shmmax_ops);

	check = B_FALSE;
	if (!mod_sysvar("shmsys", "shminfo_shmmni", &shmmni))
		shmmni = 100;
	else
		check = B_TRUE;
	if (!mod_sysvar("shmsys", "shminfo_shmmax", &shmmax))
		shmmax = 0x800000;
	else
		check = B_TRUE;

	/*
	 * Default to a quarter of the machine's memory
	 */
	qty = availrmem_initial << (PAGESHIFT - 2);
	if (check) {
		if ((shmmax > 0) && (UINT64_MAX / shmmax <= shmmni))
			qty = UINT64_MAX;
		else if (shmmni * shmmax > qty)
			qty = shmmni * shmmax;
	}
	rctl_add_default_limit("project.max-shm-memory", qty,
	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);

	/*
	 * Event Ports resource controls
	 */

	rc_project_portids = rctl_register("project.max-port-ids",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_COUNT, PORT_MAX_PORTS, PORT_MAX_PORTS,
	    &rctl_absolute_ops);
	rctl_add_default_limit("project.max-port-ids", PORT_DEFAULT_PORTS,
	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);

	/*
	 * Resource control for locked memory
	 */
	rc_project_devlockmem = rctl_register(
	    "project.max-device-locked-memory", RCENTITY_PROJECT,
	    RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES,
	    UINT64_MAX, UINT64_MAX, &project_devlockmem_ops);

	/*
	 * Defaults to 1/16th of the machine's memory (the (PAGESHIFT - 4)
	 * shift converts pages to bytes and divides by sixteen).
	 */
	qty = availrmem_initial << (PAGESHIFT - 4);

	rctl_add_default_limit("project.max-device-locked-memory", qty,
	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);

	/*
	 * Per project limit on contracts.
	 */
	rc_project_contract = rctl_register("project.max-contracts",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_COUNT,
	    INT_MAX, INT_MAX, &project_contract_ops);
	rctl_add_default_limit("project.max-contracts", 10000,
	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);

	t0.t_proj = proj0p = project_hold_by_id(0, GLOBAL_ZONEID,
	    PROJECT_HOLD_INSERT);

	mutex_enter(&p0.p_lock);
	proj0p->kpj_nlwps = p0.p_lwpcnt;
	mutex_exit(&p0.p_lock);
	proj0p->kpj_ntasks = 1;
}