xref: /illumos-gate/usr/src/uts/common/os/project.c (revision a192e900f6d2b0e1a822e3252c0dfd795ed49d76)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/project.h>
29 #include <sys/modhash.h>
30 #include <sys/modctl.h>
31 #include <sys/kmem.h>
32 #include <sys/atomic.h>
33 #include <sys/cmn_err.h>
34 #include <sys/proc.h>
35 #include <sys/rctl.h>
36 #include <sys/sunddi.h>
37 #include <sys/fss.h>
38 #include <sys/systm.h>
39 #include <sys/ipc_impl.h>
40 #include <sys/port_kernel.h>
41 #include <sys/task.h>
42 #include <sys/zone.h>
43 
44 int project_hash_size = 64;
45 static kmutex_t project_hash_lock;
46 static kmutex_t projects_list_lock;
47 static mod_hash_t *projects_hash;
48 static kproject_t *projects_list;
49 
50 rctl_hndl_t rc_project_cpu_shares;
51 rctl_hndl_t rc_project_nlwps;
52 rctl_hndl_t rc_project_ntasks;
53 rctl_hndl_t rc_project_msgmni;
54 rctl_hndl_t rc_project_semmni;
55 rctl_hndl_t rc_project_shmmax;
56 rctl_hndl_t rc_project_shmmni;
57 rctl_hndl_t rc_project_portids;
58 rctl_hndl_t rc_project_locked_mem;
59 rctl_hndl_t rc_project_contract;
60 rctl_hndl_t rc_project_crypto_mem;
61 
62 /*
63  * Dummy structure used when comparing projects.  Its layout must match the
64  * first two fields of kproject_t.
65  */
66 struct project_zone {
67 	projid_t	kpj_id;
68 	zoneid_t	kpj_zoneid;
69 };
70 
71 /*
72  * Projects
73  *
74  *   A dictionary of all active projects is maintained by the kernel so that we
75  *   may track project usage and limits.  (By an active project, we mean a
76  *   project associated with one or more tasks, and therefore with one or more
77  *   processes.) We build the dictionary on top of the mod_hash facility, since
78  *   project additions and deletions are relatively rare events.  An
79  *   integer-to-pointer mapping is maintained within the hash, representing the
80  *   map from project id to project structure.  All projects, including the
81  *   primordial "project 0", are allocated via the project_hold_by_id()
82  *   interface.
83  *
84  *   Currently, the project contains a reference count; the project ID, which is
85  *   examined by the extended accounting subsystem as well as /proc; a resource
86  *   control set, which contains the allowable values (and actions on exceeding
87  *   those values) for controlled project-level resources on the system; and a
88  *   number of CPU shares, which is used by the fair share scheduling class
89  *   (FSS) to support its proportion-based scheduling algorithm.
90  *
91  * Reference counting convention
92  *   The dictionary entry does not itself count as a reference--only references
93  *   outside of the subsystem are tallied.  At the drop of the final external
94  *   reference, the project entry is removed.  The reference counter keeps
95  *   track of the number of threads *and* tasks within a project.
96  *
97  * Locking
98  *   Walking the doubly-linked project list must be done while holding
99  *   projects_list_lock.  Thus, any dereference of kpj_next or kpj_prev must be
100  *   under projects_list_lock.
101  *
102  *   If both the hash lock, project_hash_lock, and the list lock are to be
103  *   acquired, the hash lock is to be acquired first.
104  */
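
/*
 * Illustrative usage sketch (hypothetical caller; projid and zoneid below are
 * placeholders): a consumer that needs a long-lived reference to a project
 * might do
 *
 *	kproject_t *kpj;
 *
 *	kpj = project_hold_by_id(projid, zoneid, PROJECT_HOLD_INSERT);
 *	... examine kpj->kpj_rctls, kpj->kpj_data, kpj->kpj_shares ...
 *	project_rele(kpj);
 *
 * The hold keeps the dictionary entry alive; dropping the last external
 * reference via project_rele() removes the entry from both the hash and the
 * global list.
 */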
105 
106 
107 static void
108 project_data_init(kproject_data_t *data)
109 {
110 	/*
111 	 * Initialize subsystem-specific data
112 	 */
113 	data->kpd_shmmax = 0;
114 	data->kpd_ipc.ipcq_shmmni = 0;
115 	data->kpd_ipc.ipcq_semmni = 0;
116 	data->kpd_ipc.ipcq_msgmni = 0;
117 	data->kpd_locked_mem = 0;
118 	data->kpd_locked_mem_ctl = UINT64_MAX;
119 	data->kpd_contract = 0;
120 	data->kpd_crypto_mem = 0;
121 }
122 
123 /*ARGSUSED*/
124 static uint_t
125 project_hash_by_id(void *hash_data, mod_hash_key_t key)
126 {
127 	struct project_zone *pz = key;
128 	uint_t mykey;
129 
130 	/*
131 	 * Merge the zoneid and projectid together to a 32-bit quantity, and
132 	 * then pass that in to the existing idhash.
133 	 */
134 	mykey = (pz->kpj_zoneid << 16) | pz->kpj_id;
135 	return (mod_hash_byid(hash_data, (mod_hash_key_t)(uintptr_t)mykey));
136 }
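
/*
 * For example, project ID 100 in zone ID 3 hashes under the 32-bit key
 * (3 << 16) | 100 == 0x30064.  Keys that land in the same bucket are told
 * apart by the full comparison in project_hash_key_cmp() below.
 */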
137 
138 static int
139 project_hash_key_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
140 {
141 	struct project_zone *pz1 = key1, *pz2 = key2;
142 	int retval;
143 
144 	return ((int)((retval = pz1->kpj_id - pz2->kpj_id) != 0 ? retval :
145 	    pz1->kpj_zoneid - pz2->kpj_zoneid));
146 }
147 
148 static void
149 project_hash_val_dtor(mod_hash_val_t val)
150 {
151 	kproject_t *kp = (kproject_t *)val;
152 
153 	ASSERT(kp->kpj_count == 0);
154 	kmem_free(kp, sizeof (kproject_t));
155 }
156 
157 /*
158  * kproject_t *project_hold(kproject_t *)
159  *
160  * Overview
161  *   Record that an additional reference on the indicated project has been
162  *   taken.
163  *
164  * Return values
165  *   A pointer to the indicated project.
166  *
167  * Caller's context
168  *   project_hash_lock must not be held across the project_hold() call.
169  */
170 kproject_t *
171 project_hold(kproject_t *p)
172 {
173 	mutex_enter(&project_hash_lock);
174 	ASSERT(p != NULL);
175 	p->kpj_count++;
176 	ASSERT(p->kpj_count != 0);
177 	mutex_exit(&project_hash_lock);
178 	return (p);
179 }
180 
181 /*
182  * kproject_t *project_hold_by_id(projid_t, zoneid_t, int)
183  *
184  * Overview
185  *   project_hold_by_id() performs a look-up in the dictionary of projects
186  *   active on the system by specified project ID + zone ID and puts a hold on
187  *   it.  The third argument defines the desired behavior in the case where a
188  *   project with the given project ID cannot be found:
189  *
190  *   PROJECT_HOLD_INSERT	A new entry is made in the dictionary and the
191  *   				project is added to the global list.
192  *
193  *   PROJECT_HOLD_FIND		Return NULL.
194  *
195  *   The project is returned with its reference count incremented by one.
196  *   A new project derives its resource controls from those of project 0.
197  *
198  * Return values
199  *   A pointer to the held project.
200  *
201  * Caller's context
202  *   Caller must be in a context suitable for KM_SLEEP allocations.
203  */
204 kproject_t *
205 project_hold_by_id(projid_t id, zoneid_t zoneid, int flag)
206 {
207 	kproject_t *spare_p;
208 	kproject_t *p;
209 	mod_hash_hndl_t hndl;
210 	rctl_set_t *set;
211 	rctl_alloc_gp_t *gp;
212 	rctl_entity_p_t e;
213 	struct project_zone pz;
214 
215 	pz.kpj_id = id;
216 	pz.kpj_zoneid = zoneid;
217 
218 	if (flag == PROJECT_HOLD_FIND) {
219 		mutex_enter(&project_hash_lock);
220 
221 		if (mod_hash_find(projects_hash, (mod_hash_key_t)&pz,
222 		    (mod_hash_val_t *)&p) == MH_ERR_NOTFOUND)
223 			p = NULL;
224 		else
225 			p->kpj_count++;
226 
227 		mutex_exit(&project_hash_lock);
228 		return (p);
229 	}
230 
231 	ASSERT(flag == PROJECT_HOLD_INSERT);
232 
233 	spare_p = kmem_zalloc(sizeof (kproject_t), KM_SLEEP);
234 	set = rctl_set_create();
235 
236 	gp = rctl_set_init_prealloc(RCENTITY_PROJECT);
237 
238 	(void) mod_hash_reserve(projects_hash, &hndl);
239 
240 	mutex_enter(&curproc->p_lock);
241 	mutex_enter(&project_hash_lock);
242 	if (mod_hash_find(projects_hash, (mod_hash_key_t)&pz,
243 	    (mod_hash_val_t *)&p) == MH_ERR_NOTFOUND) {
244 		p = spare_p;
245 		p->kpj_id = id;
246 		p->kpj_zoneid = zoneid;
247 		p->kpj_count = 0;
248 		p->kpj_shares = 1;
249 		p->kpj_nlwps = 0;
250 		p->kpj_ntasks = 0;
251 		p->kpj_nlwps_ctl = INT_MAX;
252 		p->kpj_ntasks_ctl = INT_MAX;
253 		project_data_init(&p->kpj_data);
254 		e.rcep_p.proj = p;
255 		e.rcep_t = RCENTITY_PROJECT;
256 		p->kpj_rctls = rctl_set_init(RCENTITY_PROJECT, curproc, &e,
257 		    set, gp);
258 		mutex_exit(&curproc->p_lock);
259 
260 		if (mod_hash_insert_reserve(projects_hash, (mod_hash_key_t)p,
261 		    (mod_hash_val_t)p, hndl))
262 			panic("unable to insert project %d(%p)", id, (void *)p);
263 
264 		/*
265 		 * Insert project into global project list.
266 		 */
267 		mutex_enter(&projects_list_lock);
268 		if (id != 0 || zoneid != GLOBAL_ZONEID) {
269 			p->kpj_next = projects_list;
270 			p->kpj_prev = projects_list->kpj_prev;
271 			p->kpj_prev->kpj_next = p;
272 			projects_list->kpj_prev = p;
273 		} else {
274 			/*
275 			 * Special case: primordial hold on project 0.
276 			 */
277 			p->kpj_next = p;
278 			p->kpj_prev = p;
279 			projects_list = p;
280 		}
281 		mutex_exit(&projects_list_lock);
282 	} else {
283 		mutex_exit(&curproc->p_lock);
284 		mod_hash_cancel(projects_hash, &hndl);
285 		kmem_free(spare_p, sizeof (kproject_t));
286 		rctl_set_free(set);
287 	}
288 
289 	rctl_prealloc_destroy(gp);
290 	p->kpj_count++;
291 	mutex_exit(&project_hash_lock);
292 
293 	return (p);
294 }
295 
296 
297 /*
298  * void project_rele(kproject_t *)
299  *
300  * Overview
301  *   Advertise that one external reference to this project is no longer needed.
302  *
303  * Return values
304  *   None.
305  *
306  * Caller's context
307  *   No restriction on context.
308  */
309 void
310 project_rele(kproject_t *p)
311 {
312 	mutex_enter(&project_hash_lock);
313 	ASSERT(p->kpj_count != 0);
314 	p->kpj_count--;
315 	if (p->kpj_count == 0) {
316 
317 		/*
318 		 * Remove project from global list.
319 		 */
320 		mutex_enter(&projects_list_lock);
321 		p->kpj_next->kpj_prev = p->kpj_prev;
322 		p->kpj_prev->kpj_next = p->kpj_next;
323 		if (projects_list == p)
324 			projects_list = p->kpj_next;
325 		mutex_exit(&projects_list_lock);
326 
327 		rctl_set_free(p->kpj_rctls);
328 
329 		if (mod_hash_destroy(projects_hash, (mod_hash_key_t)p))
330 			panic("unable to delete project %d zone %d", p->kpj_id,
331 			    p->kpj_zoneid);
332 
333 	}
334 	mutex_exit(&project_hash_lock);
335 }
336 
337 /*
338  * int project_walk_all(zoneid_t, int (*)(kproject_t *, void *), void *)
339  *
340  * Overview
341  *   Walk the project list for the given zoneid with a callback.
342  *
343  * Return values
344  *   -1 for an invalid walk; otherwise, the number of projects visited.
345  *
346  * Caller's context
347  *   projects_list_lock must not be held by the caller, as it is acquired by
348  *   project_walk_all() and held across the callbacks; accordingly, callbacks
349  *   may not perform KM_SLEEP allocations.
350  */
351 int
352 project_walk_all(zoneid_t zoneid, int (*cb)(kproject_t *, void *),
353     void *walk_data)
354 {
355 	int cnt = 0;
356 	kproject_t *kp = proj0p;
357 
358 	mutex_enter(&projects_list_lock);
359 	do {
360 		if (zoneid != ALL_ZONES && kp->kpj_zoneid != zoneid)
361 			continue;
362 		if (cb(kp, walk_data) == -1) {
363 			cnt = -1;
364 			break;
365 		} else {
366 			cnt++;
367 		}
368 	} while ((kp = kp->kpj_next) != proj0p);
369 	mutex_exit(&projects_list_lock);
370 	return (cnt);
371 }
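
/*
 * Illustrative walker callback (hypothetical, for exposition only): summing
 * kpj_nlwps across the projects of one zone.  Per the context rules above,
 * the callback must not perform KM_SLEEP allocations.
 *
 *	static int
 *	sum_lwps_cb(kproject_t *kpj, void *arg)
 *	{
 *		*(rctl_qty_t *)arg += kpj->kpj_nlwps;
 *		return (0);
 *	}
 *
 *	rctl_qty_t total = 0;
 *	(void) project_walk_all(zoneid, sum_lwps_cb, &total);
 */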
372 
373 /*
374  * projid_t curprojid(void)
375  *
376  * Overview
377  *   Return the project ID of the current thread.
378  *
379  * Caller's context
380  *   No restrictions.
381  */
382 projid_t
383 curprojid()
384 {
385 	return (ttoproj(curthread)->kpj_id);
386 }
387 
388 /*
389  * project.cpu-shares resource control support.
390  */
391 /*ARGSUSED*/
392 static rctl_qty_t
393 project_cpu_shares_usage(rctl_t *rctl, struct proc *p)
394 {
395 	ASSERT(MUTEX_HELD(&p->p_lock));
396 	return (p->p_task->tk_proj->kpj_shares);
397 }
398 
399 /*ARGSUSED*/
400 static int
401 project_cpu_shares_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
402     rctl_qty_t nv)
403 {
404 	ASSERT(MUTEX_HELD(&p->p_lock));
405 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
406 	if (e->rcep_p.proj == NULL)
407 		return (0);
408 
409 	e->rcep_p.proj->kpj_shares = nv;
410 
411 	return (0);
412 }
413 
414 
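/*
 * The four slots of an rctl_ops_t are, in order, the action, usage, set, and
 * test callbacks; the rcop_no_* stubs fill the slots a control does not
 * implement.
 */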
415 static rctl_ops_t project_cpu_shares_ops = {
416 	rcop_no_action,
417 	project_cpu_shares_usage,
418 	project_cpu_shares_set,
419 	rcop_no_test
420 };
421 
422 /*ARGSUSED*/
423 static rctl_qty_t
424 project_lwps_usage(rctl_t *r, proc_t *p)
425 {
426 	kproject_t *pj;
427 	rctl_qty_t nlwps;
428 
429 	ASSERT(MUTEX_HELD(&p->p_lock));
430 	pj = p->p_task->tk_proj;
431 	mutex_enter(&p->p_zone->zone_nlwps_lock);
432 	nlwps = pj->kpj_nlwps;
433 	mutex_exit(&p->p_zone->zone_nlwps_lock);
434 
435 	return (nlwps);
436 }
437 
438 /*ARGSUSED*/
439 static int
440 project_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
441     rctl_qty_t incr, uint_t flags)
442 {
443 	rctl_qty_t nlwps;
444 
445 	ASSERT(MUTEX_HELD(&p->p_lock));
446 	ASSERT(MUTEX_HELD(&p->p_zone->zone_nlwps_lock));
447 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
448 	if (e->rcep_p.proj == NULL)
449 		return (0);
450 
451 	nlwps = e->rcep_p.proj->kpj_nlwps;
452 	if (nlwps + incr > rcntl->rcv_value)
453 		return (1);
454 
455 	return (0);
456 }
457 
458 /*ARGSUSED*/
459 static int
460 project_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
461     rctl_qty_t nv)
462 {
463 	ASSERT(MUTEX_HELD(&p->p_lock));
464 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
465 	if (e->rcep_p.proj == NULL)
466 		return (0);
467 
468 	e->rcep_p.proj->kpj_nlwps_ctl = nv;
469 	return (0);
470 }
471 
472 static rctl_ops_t project_lwps_ops = {
473 	rcop_no_action,
474 	project_lwps_usage,
475 	project_lwps_set,
476 	project_lwps_test,
477 };
478 
479 /*ARGSUSED*/
480 static rctl_qty_t
481 project_ntasks_usage(rctl_t *r, proc_t *p)
482 {
483 	kproject_t *pj;
484 	rctl_qty_t ntasks;
485 
486 	ASSERT(MUTEX_HELD(&p->p_lock));
487 	pj = p->p_task->tk_proj;
488 	mutex_enter(&p->p_zone->zone_nlwps_lock);
489 	ntasks = pj->kpj_ntasks;
490 	mutex_exit(&p->p_zone->zone_nlwps_lock);
491 
492 	return (ntasks);
493 }
494 
495 /*ARGSUSED*/
496 static int
497 project_ntasks_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
498     rctl_qty_t incr, uint_t flags)
499 {
500 	rctl_qty_t ntasks;
501 
502 	ASSERT(MUTEX_HELD(&p->p_lock));
503 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
504 	ntasks = e->rcep_p.proj->kpj_ntasks;
505 	if (ntasks + incr > rcntl->rcv_value)
506 		return (1);
507 
508 	return (0);
509 }
510 
511 /*ARGSUSED*/
512 static int
513 project_ntasks_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
514     rctl_qty_t nv)
515 {
516 	ASSERT(MUTEX_HELD(&p->p_lock));
517 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
518 	e->rcep_p.proj->kpj_ntasks_ctl = nv;
519 	return (0);
520 }
521 
522 static rctl_ops_t project_tasks_ops = {
523 	rcop_no_action,
524 	project_ntasks_usage,
525 	project_ntasks_set,
526 	project_ntasks_test,
527 };
528 
529 /*
530  * project.max-shm-memory resource control support.
531  */
532 
533 /*ARGSUSED*/
534 static int
535 project_shmmax_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
536     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
537 {
538 	rctl_qty_t v;
539 	ASSERT(MUTEX_HELD(&p->p_lock));
540 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
541 	v = e->rcep_p.proj->kpj_data.kpd_shmmax + inc;
542 	if (v > rval->rcv_value)
543 		return (1);
544 
545 	return (0);
546 }
547 
548 static rctl_ops_t project_shmmax_ops = {
549 	rcop_no_action,
550 	rcop_no_usage,
551 	rcop_no_set,
552 	project_shmmax_test
553 };
554 
555 /*
556  * project.max-shm-ids resource control support.
557  */
558 
559 /*ARGSUSED*/
560 static int
561 project_shmmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
562     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
563 {
564 	rctl_qty_t v;
565 	ASSERT(MUTEX_HELD(&p->p_lock));
566 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
567 	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_shmmni + inc;
568 	if (v > rval->rcv_value)
569 		return (1);
570 
571 	return (0);
572 }
573 
574 static rctl_ops_t project_shmmni_ops = {
575 	rcop_no_action,
576 	rcop_no_usage,
577 	rcop_no_set,
578 	project_shmmni_test
579 };
580 
581 /*
582  * project.max-sem-ids resource control support.
583  */
584 
585 /*ARGSUSED*/
586 static int
587 project_semmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
588     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
589 {
590 	rctl_qty_t v;
591 	ASSERT(MUTEX_HELD(&p->p_lock));
592 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
593 	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_semmni + inc;
594 	if (v > rval->rcv_value)
595 		return (1);
596 
597 	return (0);
598 }
599 
600 static rctl_ops_t project_semmni_ops = {
601 	rcop_no_action,
602 	rcop_no_usage,
603 	rcop_no_set,
604 	project_semmni_test
605 };
606 
607 /*
608  * project.max-msg-ids resource control support.
609  */
610 
611 /*ARGSUSED*/
612 static int
613 project_msgmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
614     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
615 {
616 	rctl_qty_t v;
617 	ASSERT(MUTEX_HELD(&p->p_lock));
618 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
619 	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_msgmni + inc;
620 	if (v > rval->rcv_value)
621 		return (1);
622 
623 	return (0);
624 }
625 
626 static rctl_ops_t project_msgmni_ops = {
627 	rcop_no_action,
628 	rcop_no_usage,
629 	rcop_no_set,
630 	project_msgmni_test
631 };
632 
633 /*ARGSUSED*/
634 static rctl_qty_t
635 project_locked_mem_usage(rctl_t *rctl, struct proc *p)
636 {
637 	rctl_qty_t q;
638 	ASSERT(MUTEX_HELD(&p->p_lock));
639 	mutex_enter(&p->p_zone->zone_rctl_lock);
640 	q = p->p_task->tk_proj->kpj_data.kpd_locked_mem;
641 	mutex_exit(&p->p_zone->zone_rctl_lock);
642 	return (q);
643 }
644 
645 /*ARGSUSED*/
646 static int
647 project_locked_mem_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
648     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
649 {
650 	rctl_qty_t q;
651 	ASSERT(MUTEX_HELD(&p->p_lock));
652 	ASSERT(MUTEX_HELD(&p->p_zone->zone_rctl_lock));
653 	q = p->p_task->tk_proj->kpj_data.kpd_locked_mem;
654 	if (q + inc > rval->rcv_value)
655 		return (1);
656 	return (0);
657 }
658 
659 /*ARGSUSED*/
660 static int
661 project_locked_mem_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
662     rctl_qty_t nv)
663 {
664 	ASSERT(MUTEX_HELD(&p->p_lock));
665 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
666 	if (e->rcep_p.proj == NULL)
667 		return (0);
668 
669 	e->rcep_p.proj->kpj_data.kpd_locked_mem_ctl = nv;
670 	return (0);
671 }
672 
673 static rctl_ops_t project_locked_mem_ops = {
674 	rcop_no_action,
675 	project_locked_mem_usage,
676 	project_locked_mem_set,
677 	project_locked_mem_test
678 };
679 
680 /*
681  * project.max-contracts resource control support.
682  */
683 
684 /*ARGSUSED*/
685 static int
686 project_contract_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
687     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
688 {
689 	rctl_qty_t v;
690 
691 	ASSERT(MUTEX_HELD(&p->p_lock));
692 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
693 
694 	v = e->rcep_p.proj->kpj_data.kpd_contract + inc;
695 
696 	if ((p->p_task != NULL) && (p->p_task->tk_proj) != NULL &&
697 	    (v > rval->rcv_value))
698 		return (1);
699 
700 	return (0);
701 }
702 
703 static rctl_ops_t project_contract_ops = {
704 	rcop_no_action,
705 	rcop_no_usage,
706 	rcop_no_set,
707 	project_contract_test
708 };
709 
710 /*ARGSUSED*/
711 static int
712 project_crypto_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
713     rctl_val_t *rval, rctl_qty_t incr, uint_t flags)
714 {
715 	rctl_qty_t v;
716 	ASSERT(MUTEX_HELD(&p->p_lock));
717 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
718 	v = e->rcep_p.proj->kpj_data.kpd_crypto_mem + incr;
719 	if (v > rval->rcv_value)
720 		return (1);
721 	return (0);
722 }
723 
724 static rctl_ops_t project_crypto_mem_ops = {
725 	rcop_no_action,
726 	rcop_no_usage,
727 	rcop_no_set,
728 	project_crypto_test
729 };
730 
731 /*
732  * void project_init(void)
733  *
734  * Overview
735  *   Initialize the project subsystem, including the primordial project 0 entry.
736  *   Register generic project resource controls, if any.
737  *
738  * Return values
739  *   None.
740  *
741  * Caller's context
742  *   Safe for KM_SLEEP allocations.
743  */
744 void
745 project_init(void)
746 {
747 	rctl_qty_t shmmni, shmmax, qty;
748 	boolean_t check;
749 
750 	projects_hash = mod_hash_create_extended("projects_hash",
751 	    project_hash_size, mod_hash_null_keydtor, project_hash_val_dtor,
752 	    project_hash_by_id,
753 	    (void *)(uintptr_t)mod_hash_iddata_gen(project_hash_size),
754 	    project_hash_key_cmp, KM_SLEEP);
755 
756 	rc_project_cpu_shares = rctl_register("project.cpu-shares",
757 	    RCENTITY_PROJECT, RCTL_GLOBAL_SIGNAL_NEVER |
758 	    RCTL_GLOBAL_DENY_NEVER | RCTL_GLOBAL_NOBASIC |
759 	    RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER,
760 	    FSS_MAXSHARES, FSS_MAXSHARES,
761 	    &project_cpu_shares_ops);
762 	rctl_add_default_limit("project.cpu-shares", 1, RCPRIV_PRIVILEGED,
763 	    RCTL_LOCAL_NOACTION);
764 
765 	rc_project_nlwps = rctl_register("project.max-lwps", RCENTITY_PROJECT,
766 	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
767 	    INT_MAX, INT_MAX, &project_lwps_ops);
768 
769 	rc_project_ntasks = rctl_register("project.max-tasks", RCENTITY_PROJECT,
770 	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
771 	    INT_MAX, INT_MAX, &project_tasks_ops);
772 
773 	/*
774 	 * This rctl handle is used by /dev/crypto. It is here rather than
775 	 * in misc/kcf or the drv/crypto module because resource controls
776 	 * currently don't allow modules to be unloaded, and the control
777 	 * must be registered before init starts.
778 	 */
779 	rc_project_crypto_mem = rctl_register("project.max-crypto-memory",
780 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
781 	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX,
782 	    &project_crypto_mem_ops);
783 
784 	/*
785 	 * Default to a quarter of the machine's memory
786 	 */
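	/*
	 * (availrmem_initial counts pages; shifting left by PAGESHIFT - 2
	 * yields the byte count divided by four.)
	 */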
787 	qty = availrmem_initial << (PAGESHIFT - 2);
788 	rctl_add_default_limit("project.max-crypto-memory", qty,
789 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
790 
791 	/*
792 	 * System V IPC resource controls
793 	 */
794 	rc_project_semmni = rctl_register("project.max-sem-ids",
795 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
796 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_semmni_ops);
797 	rctl_add_legacy_limit("project.max-sem-ids", "semsys",
798 	    "seminfo_semmni", 128, IPC_IDS_MAX);
799 
800 	rc_project_msgmni = rctl_register("project.max-msg-ids",
801 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
802 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_msgmni_ops);
803 	rctl_add_legacy_limit("project.max-msg-ids", "msgsys",
804 	    "msginfo_msgmni", 128, IPC_IDS_MAX);
805 
806 	rc_project_shmmni = rctl_register("project.max-shm-ids",
807 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
808 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_shmmni_ops);
809 	rctl_add_legacy_limit("project.max-shm-ids", "shmsys",
810 	    "shminfo_shmmni", 128, IPC_IDS_MAX);
811 
812 	rc_project_shmmax = rctl_register("project.max-shm-memory",
813 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
814 	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX, &project_shmmax_ops);
815 
816 	check = B_FALSE;
817 	if (!mod_sysvar("shmsys", "shminfo_shmmni", &shmmni))
818 		shmmni = 100;
819 	else
820 		check = B_TRUE;
821 	if (!mod_sysvar("shmsys", "shminfo_shmmax", &shmmax))
822 		shmmax = 0x800000;
823 	else
824 		check = B_TRUE;
825 
826 	/*
827 	 * Default to a quarter of the machine's memory
828 	 */
829 	qty = availrmem_initial << (PAGESHIFT - 2);
830 	if (check) {
831 		if ((shmmax > 0) && (UINT64_MAX / shmmax <= shmmni))
832 			qty = UINT64_MAX;
833 		else if (shmmni * shmmax > qty)
834 			qty = shmmni * shmmax;
835 	}
836 	rctl_add_default_limit("project.max-shm-memory", qty,
837 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
838 
839 	/*
840 	 * Event Ports resource controls
841 	 */
842 
843 	rc_project_portids = rctl_register("project.max-port-ids",
844 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
845 	    RCTL_GLOBAL_COUNT, PORT_MAX_PORTS, PORT_MAX_PORTS,
846 	    &rctl_absolute_ops);
847 	rctl_add_default_limit("project.max-port-ids", PORT_DEFAULT_PORTS,
848 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
849 
850 	/*
851 	 * Resource control for locked memory
852 	 */
853 	rc_project_locked_mem = rctl_register(
854 	    "project.max-locked-memory", RCENTITY_PROJECT,
855 	    RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES,
856 	    UINT64_MAX, UINT64_MAX, &project_locked_mem_ops);
857 
858 	/* Default value equals that of max-shm-memory. */
859 	rctl_add_default_limit("project.max-locked-memory", qty,
860 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
861 
862 	/*
863 	 * Per project limit on contracts.
864 	 */
865 	rc_project_contract = rctl_register("project.max-contracts",
866 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_COUNT,
867 	    INT_MAX, INT_MAX, &project_contract_ops);
868 	rctl_add_default_limit("project.max-contracts", 10000,
869 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
870 
871 	t0.t_proj = proj0p = project_hold_by_id(0, GLOBAL_ZONEID,
872 	    PROJECT_HOLD_INSERT);
873 
874 	mutex_enter(&p0.p_lock);
875 	proj0p->kpj_nlwps = p0.p_lwpcnt;
876 	mutex_exit(&p0.p_lock);
877 	proj0p->kpj_ntasks = 1;
878 }
879