xref: /titanic_50/usr/src/uts/common/os/project.c (revision ecd343b647e2ba2d0bf8f09646e721f05eb752aa)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/project.h>
29 #include <sys/modhash.h>
30 #include <sys/modctl.h>
31 #include <sys/kmem.h>
32 #include <sys/kstat.h>
33 #include <sys/atomic.h>
34 #include <sys/cmn_err.h>
35 #include <sys/proc.h>
36 #include <sys/rctl.h>
37 #include <sys/sunddi.h>
38 #include <sys/fss.h>
39 #include <sys/systm.h>
40 #include <sys/ipc_impl.h>
41 #include <sys/port_kernel.h>
42 #include <sys/task.h>
43 #include <sys/zone.h>
44 #include <sys/cpucaps.h>
45 
46 int project_hash_size = 64;
47 static kmutex_t project_hash_lock;
48 static kmutex_t projects_list_lock;
49 static mod_hash_t *projects_hash;
50 static kproject_t *projects_list;
51 
52 rctl_hndl_t rc_project_cpu_shares;
53 rctl_hndl_t rc_project_cpu_cap;
54 rctl_hndl_t rc_project_nlwps;
55 rctl_hndl_t rc_project_ntasks;
56 rctl_hndl_t rc_project_msgmni;
57 rctl_hndl_t rc_project_semmni;
58 rctl_hndl_t rc_project_shmmax;
59 rctl_hndl_t rc_project_shmmni;
60 rctl_hndl_t rc_project_portids;
61 rctl_hndl_t rc_project_locked_mem;
62 rctl_hndl_t rc_project_contract;
63 rctl_hndl_t rc_project_crypto_mem;
64 
65 /*
66  * Dummy structure used when comparing projects.  Its layout must remain
67  * identical to that of the first two fields of kproject_t.
68  */
69 struct project_zone {
70 	projid_t	kpj_id;
71 	zoneid_t	kpj_zoneid;
72 };
73 
74 /*
75  * Projects
76  *
77  *   A dictionary of all active projects is maintained by the kernel so that we
78  *   may track project usage and limits.  (By an active project, we mean a
79  *   project associated with one or more tasks, and therefore with one or more
80  *   processes.) We build the dictionary on top of the mod_hash facility, since
81  *   project additions and deletions are relatively rare events.  An
82  *   integer-to-pointer mapping is maintained within the hash, representing the
83  *   map from project id to project structure.  All projects, including the
84  *   primordial "project 0", are allocated via the project_hold_by_id()
85  *   interface.
86  *
87  *   Currently, the project contains a reference count; the project ID, which is
88  *   examined by the extended accounting subsystem as well as /proc; a resource
89  *   control set, which contains the allowable values (and actions on exceeding
90  *   those values) for controlled project-level resources on the system; and a
91  *   number of CPU shares, which is used by the fair share scheduling class
92  *   (FSS) to support its proportion-based scheduling algorithm.
93  *
94  * Reference counting convention
95  *   The dictionary entry does not itself count as a reference--only references
96  *   outside of the subsystem are tallied.  At the drop of the final external
97  *   reference, the project entry is removed.  The reference counter keeps
98  *   track of the number of threads *and* tasks within a project.
99  *
100  * Locking
101  *   Walking the doubly-linked project list must be done while holding
102  *   projects_list_lock.  Thus, any dereference of kpj_next or kpj_prev must be
103  *   under projects_list_lock.
104  *
105  *   If both the hash lock, project_hash_lock, and the list lock are to be
106  *   acquired, the hash lock is to be acquired first.
107  */
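/*
 * Illustrative sketch (not part of this file's logic): the conventions above
 * imply the following patterns for an external consumer and for code within
 * this subsystem that needs both locks.  Names such as "projid" and "zonep"
 * are placeholders.
 *
 *	kproject_t *pj;
 *
 *	pj = project_hold_by_id(projid, zonep, PROJECT_HOLD_FIND);
 *	if (pj != NULL) {
 *		... use pj; the hold keeps the entry in the dictionary ...
 *		project_rele(pj);
 *	}
 *
 *	Within this file, when both locks are required:
 *
 *	mutex_enter(&project_hash_lock);	(hash lock first)
 *	mutex_enter(&projects_list_lock);	(then the list lock)
 *	... dereference kpj_next/kpj_prev ...
 *	mutex_exit(&projects_list_lock);
 *	mutex_exit(&project_hash_lock);
 */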
108 
109 static kstat_t *project_kstat_create(kproject_t *pj, zone_t *zone);
110 static void project_kstat_delete(kproject_t *pj);
111 
112 static void
113 project_data_init(kproject_data_t *data)
114 {
115 	/*
116 	 * Initialize subsystem-specific data
117 	 */
118 	data->kpd_shmmax = 0;
119 	data->kpd_ipc.ipcq_shmmni = 0;
120 	data->kpd_ipc.ipcq_semmni = 0;
121 	data->kpd_ipc.ipcq_msgmni = 0;
122 	data->kpd_locked_mem = 0;
123 	data->kpd_locked_mem_ctl = UINT64_MAX;
124 	data->kpd_contract = 0;
125 	data->kpd_crypto_mem = 0;
126 	data->kpd_crypto_mem_ctl = UINT64_MAX;
127 	data->kpd_lockedmem_kstat = NULL;
128 }
129 
130 /*ARGSUSED*/
131 static uint_t
132 project_hash_by_id(void *hash_data, mod_hash_key_t key)
133 {
134 	struct project_zone *pz = key;
135 	uint_t mykey;
136 
137 	/*
138 	 * Merge the zoneid and projectid together into a 32-bit quantity, and
139 	 * then pass that in to the existing idhash.
140 	 */
141 	mykey = (pz->kpj_zoneid << 16) | pz->kpj_id;
142 	return (mod_hash_byid(hash_data, (mod_hash_key_t)(uintptr_t)mykey));
143 }
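/*
 * Worked example of the key composition above (values chosen arbitrarily):
 * project id 100 in zone 3 hashes on the 32-bit key (3 << 16) | 100, i.e.
 * 0x30064, so equal project ids in different zones fall on distinct keys.
 */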
144 
145 static int
146 project_hash_key_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
147 {
148 	struct project_zone *pz1 = key1, *pz2 = key2;
149 	int retval;
150 
151 	return ((int)((retval = pz1->kpj_id - pz2->kpj_id) != 0 ? retval :
152 	    pz1->kpj_zoneid - pz2->kpj_zoneid));
153 }
154 
155 static void
156 project_hash_val_dtor(mod_hash_val_t val)
157 {
158 	kproject_t *kp = (kproject_t *)val;
159 
160 	ASSERT(kp->kpj_count == 0);
161 	ASSERT(kp->kpj_cpucap == NULL);
162 	kmem_free(kp, sizeof (kproject_t));
163 }
164 
165 /*
166  * kproject_t *project_hold(kproject_t *)
167  *
168  * Overview
169  *   Record that an additional reference on the indicated project has been
170  *   taken.
171  *
172  * Return values
173  *   A pointer to the indicated project.
174  *
175  * Caller's context
176  *   project_hash_lock must not be held across the project_hold() call.
177  */
178 kproject_t *
179 project_hold(kproject_t *p)
180 {
181 	mutex_enter(&project_hash_lock);
182 	ASSERT(p != NULL);
183 	p->kpj_count++;
184 	ASSERT(p->kpj_count != 0);
185 	mutex_exit(&project_hash_lock);
186 	return (p);
187 }
188 
189 /*
190  * kproject_t *project_hold_by_id(projid_t, zone_t *, int)
191  *
192  * Overview
193  *   project_hold_by_id() performs a look-up in the dictionary of projects
194  *   active on the system by the specified project ID and zone, and puts a
195  *   hold on it.  The third argument defines the desired behavior when a
196  *   project with the given project ID cannot be found:
197  *
198  *   PROJECT_HOLD_INSERT	A new entry is made in the dictionary and the
199  *   				project is added to the global list.
200  *
201  *   PROJECT_HOLD_FIND		Return NULL.
202  *
203  *   The project is returned with its reference count incremented by one.
204  *   A new project derives its resource controls from those of project 0.
205  *
206  * Return values
207  *   A pointer to the held project.
208  *
209  * Caller's context
210  *   Caller must be in a context suitable for KM_SLEEP allocations.
211  */
212 kproject_t *
213 project_hold_by_id(projid_t id, zone_t *zone, int flag)
214 {
215 	kproject_t *spare_p;
216 	kproject_t *p;
217 	mod_hash_hndl_t hndl;
218 	rctl_set_t *set;
219 	rctl_alloc_gp_t *gp;
220 	rctl_entity_p_t e;
221 	struct project_zone pz;
222 	boolean_t create = B_FALSE;
223 	kstat_t *ksp;
224 
225 	pz.kpj_id = id;
226 	pz.kpj_zoneid = zone->zone_id;
227 
228 	if (flag == PROJECT_HOLD_FIND) {
229 		mutex_enter(&project_hash_lock);
230 
231 		if (mod_hash_find(projects_hash, (mod_hash_key_t)&pz,
232 		    (mod_hash_val_t *)&p) == MH_ERR_NOTFOUND)
233 			p = NULL;
234 		else
235 			p->kpj_count++;
236 
237 		mutex_exit(&project_hash_lock);
238 		return (p);
239 	}
240 
241 	ASSERT(flag == PROJECT_HOLD_INSERT);
242 
243 	spare_p = kmem_zalloc(sizeof (kproject_t), KM_SLEEP);
244 	set = rctl_set_create();
245 
246 	gp = rctl_set_init_prealloc(RCENTITY_PROJECT);
247 
248 	(void) mod_hash_reserve(projects_hash, &hndl);
249 
250 	mutex_enter(&curproc->p_lock);
251 	mutex_enter(&project_hash_lock);
252 	if (mod_hash_find(projects_hash, (mod_hash_key_t)&pz,
253 	    (mod_hash_val_t *)&p) == MH_ERR_NOTFOUND) {
254 
255 		p = spare_p;
256 		p->kpj_id = id;
257 		p->kpj_zone = zone;
258 		p->kpj_zoneid = zone->zone_id;
259 		p->kpj_count = 0;
260 		p->kpj_shares = 1;
261 		p->kpj_nlwps = 0;
262 		p->kpj_ntasks = 0;
263 		p->kpj_nlwps_ctl = INT_MAX;
264 		p->kpj_ntasks_ctl = INT_MAX;
265 		project_data_init(&p->kpj_data);
266 		e.rcep_p.proj = p;
267 		e.rcep_t = RCENTITY_PROJECT;
268 		p->kpj_rctls = rctl_set_init(RCENTITY_PROJECT, curproc, &e,
269 		    set, gp);
270 		mutex_exit(&curproc->p_lock);
271 
272 		if (mod_hash_insert_reserve(projects_hash, (mod_hash_key_t)p,
273 		    (mod_hash_val_t)p, hndl))
274 			panic("unable to insert project %d(%p)", id, (void *)p);
275 
276 		/*
277 		 * Insert project into global project list.
278 		 */
279 		mutex_enter(&projects_list_lock);
280 		if (id != 0 || zone != &zone0) {
281 			p->kpj_next = projects_list;
282 			p->kpj_prev = projects_list->kpj_prev;
283 			p->kpj_prev->kpj_next = p;
284 			projects_list->kpj_prev = p;
285 		} else {
286 			/*
287 			 * Special case: primordial hold on project 0.
288 			 */
289 			p->kpj_next = p;
290 			p->kpj_prev = p;
291 			projects_list = p;
292 		}
293 		mutex_exit(&projects_list_lock);
294 		create = B_TRUE;
295 	} else {
296 		mutex_exit(&curproc->p_lock);
297 		mod_hash_cancel(projects_hash, &hndl);
298 		kmem_free(spare_p, sizeof (kproject_t));
299 		rctl_set_free(set);
300 	}
301 
302 	rctl_prealloc_destroy(gp);
303 	p->kpj_count++;
304 	mutex_exit(&project_hash_lock);
305 
306 	/*
307 	 * The kstat stores the project's zone name, as zoneids may change
308 	 * across reboots.
309 	 */
310 	if (create == B_TRUE) {
311 		/*
312 		 * Inform CPU caps framework of the new project
313 		 */
314 		cpucaps_project_add(p);
315 		/*
316 		 * Set up project kstats
317 		 */
318 		ksp = project_kstat_create(p, zone);
319 		mutex_enter(&project_hash_lock);
320 		ASSERT(p->kpj_data.kpd_lockedmem_kstat == NULL);
321 		p->kpj_data.kpd_lockedmem_kstat = ksp;
322 		mutex_exit(&project_hash_lock);
323 	}
324 	return (p);
325 }
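/*
 * Illustrative sketch (not compiled here) of the two flag behaviors
 * documented above; "id" and "zonep" are placeholder variables:
 *
 *	kproject_t *pj;
 *
 *	pj = project_hold_by_id(id, zonep, PROJECT_HOLD_FIND);
 *	if (pj == NULL) {
 *		... no such active project; nothing was created ...
 *	}
 *
 *	pj = project_hold_by_id(id, zonep, PROJECT_HOLD_INSERT);
 *	... pj is non-NULL; an entry was created if none existed ...
 *	project_rele(pj);
 */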
326 
327 /*
328  * void project_rele(kproject_t *)
329  *
330  * Overview
331  *   Advertise that one external reference to this project is no longer needed.
332  *
333  * Return values
334  *   None.
335  *
336  * Caller's context
337  *   No restriction on context.
338  */
339 void
340 project_rele(kproject_t *p)
341 {
342 	mutex_enter(&project_hash_lock);
343 	ASSERT(p->kpj_count != 0);
344 	p->kpj_count--;
345 	if (p->kpj_count == 0) {
346 
347 		/*
348 		 * Remove project from global list.
349 		 */
350 		mutex_enter(&projects_list_lock);
351 		p->kpj_next->kpj_prev = p->kpj_prev;
352 		p->kpj_prev->kpj_next = p->kpj_next;
353 		if (projects_list == p)
354 			projects_list = p->kpj_next;
355 		mutex_exit(&projects_list_lock);
356 
357 		cpucaps_project_remove(p);
358 
359 		rctl_set_free(p->kpj_rctls);
360 		project_kstat_delete(p);
361 
362 		if (mod_hash_destroy(projects_hash, (mod_hash_key_t)p))
363 			panic("unable to delete project %d zone %d", p->kpj_id,
364 			    p->kpj_zoneid);
365 
366 	}
367 	mutex_exit(&project_hash_lock);
368 }
369 
370 /*
371  * int project_walk_all(zoneid_t, int (*)(kproject_t *, void *), void *)
372  *
373  * Overview
374  *   Walk the project list for the given zoneid with a callback.
375  *
376  * Return values
377  *   -1 for an invalid walk; otherwise, the number of projects visited.
378  *
379  * Caller's context
380  *   projects_list_lock must not be held, as it is acquired by
381  *   project_walk_all().  Accordingly, callbacks may not perform KM_SLEEP
382  *   allocations.
383  */
384 int
385 project_walk_all(zoneid_t zoneid, int (*cb)(kproject_t *, void *),
386     void *walk_data)
387 {
388 	int cnt = 0;
389 	kproject_t *kp = proj0p;
390 
391 	mutex_enter(&projects_list_lock);
392 	do {
393 		if (zoneid != ALL_ZONES && kp->kpj_zoneid != zoneid)
394 			continue;
395 		if (cb(kp, walk_data) == -1) {
396 			cnt = -1;
397 			break;
398 		} else {
399 			cnt++;
400 		}
401 	} while ((kp = kp->kpj_next) != proj0p);
402 	mutex_exit(&projects_list_lock);
403 	return (cnt);
404 }
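/*
 * Minimal illustrative callback for project_walk_all() (not part of this
 * file).  Because the walk holds projects_list_lock, the callback must not
 * block or perform KM_SLEEP allocations:
 *
 *	static int
 *	count_projects_cb(kproject_t *kpj, void *arg)
 *	{
 *		(*(uint_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint_t n = 0;
 *	(void) project_walk_all(ALL_ZONES, count_projects_cb, &n);
 */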
405 
406 /*
407  * projid_t curprojid(void)
408  *
409  * Overview
410  *   Return the project ID of the current thread.
411  *
412  * Caller's context
413  *   No restrictions.
414  */
415 projid_t
416 curprojid()
417 {
418 	return (ttoproj(curthread)->kpj_id);
419 }
420 
421 /*
422  * project.cpu-shares resource control support.
423  */
424 /*ARGSUSED*/
425 static rctl_qty_t
426 project_cpu_shares_usage(rctl_t *rctl, struct proc *p)
427 {
428 	ASSERT(MUTEX_HELD(&p->p_lock));
429 	return (p->p_task->tk_proj->kpj_shares);
430 }
431 
432 /*ARGSUSED*/
433 static int
434 project_cpu_shares_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
435     rctl_qty_t nv)
436 {
437 	ASSERT(MUTEX_HELD(&p->p_lock));
438 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
439 	if (e->rcep_p.proj == NULL)
440 		return (0);
441 
442 	e->rcep_p.proj->kpj_shares = nv;
443 
444 	return (0);
445 }
446 
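/*
 * Each rctl_ops_t vector below supplies, in order, the action, usage, set,
 * and test handlers for its control; the rcop_no_* entries mark operations
 * the control does not implement.
 */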
447 static rctl_ops_t project_cpu_shares_ops = {
448 	rcop_no_action,
449 	project_cpu_shares_usage,
450 	project_cpu_shares_set,
451 	rcop_no_test
452 };
453 
454 
455 /*
456  * project.cpu-cap resource control support.
457  */
458 /*ARGSUSED*/
459 static rctl_qty_t
460 project_cpu_cap_get(rctl_t *rctl, struct proc *p)
461 {
462 	ASSERT(MUTEX_HELD(&p->p_lock));
463 	return (cpucaps_project_get(p->p_task->tk_proj));
464 }
465 
466 /*ARGSUSED*/
467 static int
468 project_cpu_cap_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
469     rctl_qty_t nv)
470 {
471 	kproject_t *kpj = e->rcep_p.proj;
472 
473 	ASSERT(MUTEX_HELD(&p->p_lock));
474 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
475 	if (kpj == NULL)
476 		return (0);
477 
478 	/*
479 	 * Set the cap to the new value.
480 	 */
481 	return (cpucaps_project_set(kpj, nv));
482 }
483 
484 static rctl_ops_t project_cpu_cap_ops = {
485 	rcop_no_action,
486 	project_cpu_cap_get,
487 	project_cpu_cap_set,
488 	rcop_no_test
489 };
490 
491 /*ARGSUSED*/
492 static rctl_qty_t
493 project_lwps_usage(rctl_t *r, proc_t *p)
494 {
495 	kproject_t *pj;
496 	rctl_qty_t nlwps;
497 
498 	ASSERT(MUTEX_HELD(&p->p_lock));
499 	pj = p->p_task->tk_proj;
500 	mutex_enter(&p->p_zone->zone_nlwps_lock);
501 	nlwps = pj->kpj_nlwps;
502 	mutex_exit(&p->p_zone->zone_nlwps_lock);
503 
504 	return (nlwps);
505 }
506 
507 /*ARGSUSED*/
508 static int
509 project_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
510     rctl_qty_t incr, uint_t flags)
511 {
512 	rctl_qty_t nlwps;
513 
514 	ASSERT(MUTEX_HELD(&p->p_lock));
515 	ASSERT(MUTEX_HELD(&p->p_zone->zone_nlwps_lock));
516 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
517 	if (e->rcep_p.proj == NULL)
518 		return (0);
519 
520 	nlwps = e->rcep_p.proj->kpj_nlwps;
521 	if (nlwps + incr > rcntl->rcv_value)
522 		return (1);
523 
524 	return (0);
525 }
526 
527 /*ARGSUSED*/
528 static int
529 project_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
530     rctl_qty_t nv) {
531 
532 	ASSERT(MUTEX_HELD(&p->p_lock));
533 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
534 	if (e->rcep_p.proj == NULL)
535 		return (0);
536 
537 	e->rcep_p.proj->kpj_nlwps_ctl = nv;
538 	return (0);
539 }
540 
541 static rctl_ops_t project_lwps_ops = {
542 	rcop_no_action,
543 	project_lwps_usage,
544 	project_lwps_set,
545 	project_lwps_test,
546 };
547 
548 /*ARGSUSED*/
549 static rctl_qty_t
550 project_ntasks_usage(rctl_t *r, proc_t *p)
551 {
552 	kproject_t *pj;
553 	rctl_qty_t ntasks;
554 
555 	ASSERT(MUTEX_HELD(&p->p_lock));
556 	pj = p->p_task->tk_proj;
557 	mutex_enter(&p->p_zone->zone_nlwps_lock);
558 	ntasks = pj->kpj_ntasks;
559 	mutex_exit(&p->p_zone->zone_nlwps_lock);
560 
561 	return (ntasks);
562 }
563 
564 /*ARGSUSED*/
565 static int
566 project_ntasks_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
567     rctl_qty_t incr, uint_t flags)
568 {
569 	rctl_qty_t ntasks;
570 
571 	ASSERT(MUTEX_HELD(&p->p_lock));
572 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
573 	ntasks = e->rcep_p.proj->kpj_ntasks;
574 	if (ntasks + incr > rcntl->rcv_value)
575 		return (1);
576 
577 	return (0);
578 }
579 
580 /*ARGSUSED*/
581 static int
582 project_ntasks_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
583     rctl_qty_t nv)
584 {
585 	ASSERT(MUTEX_HELD(&p->p_lock));
586 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
587 	e->rcep_p.proj->kpj_ntasks_ctl = nv;
588 	return (0);
589 }
590 
591 static rctl_ops_t project_tasks_ops = {
592 	rcop_no_action,
593 	project_ntasks_usage,
594 	project_ntasks_set,
595 	project_ntasks_test,
596 };
597 
598 /*
599  * project.max-shm-memory resource control support.
600  */
601 
602 /*ARGSUSED*/
603 static int
604 project_shmmax_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
605     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
606 {
607 	rctl_qty_t v;
608 	ASSERT(MUTEX_HELD(&p->p_lock));
609 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
610 	v = e->rcep_p.proj->kpj_data.kpd_shmmax + inc;
611 	if (v > rval->rcv_value)
612 		return (1);
613 
614 	return (0);
615 }
616 
617 static rctl_ops_t project_shmmax_ops = {
618 	rcop_no_action,
619 	rcop_no_usage,
620 	rcop_no_set,
621 	project_shmmax_test
622 };
623 
624 /*
625  * project.max-shm-ids resource control support.
626  */
627 
628 /*ARGSUSED*/
629 static int
630 project_shmmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
631     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
632 {
633 	rctl_qty_t v;
634 	ASSERT(MUTEX_HELD(&p->p_lock));
635 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
636 	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_shmmni + inc;
637 	if (v > rval->rcv_value)
638 		return (1);
639 
640 	return (0);
641 }
642 
643 static rctl_ops_t project_shmmni_ops = {
644 	rcop_no_action,
645 	rcop_no_usage,
646 	rcop_no_set,
647 	project_shmmni_test
648 };
649 
650 /*
651  * project.max-sem-ids resource control support.
652  */
653 
654 /*ARGSUSED*/
655 static int
656 project_semmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
657     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
658 {
659 	rctl_qty_t v;
660 	ASSERT(MUTEX_HELD(&p->p_lock));
661 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
662 	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_semmni + inc;
663 	if (v > rval->rcv_value)
664 		return (1);
665 
666 	return (0);
667 }
668 
669 static rctl_ops_t project_semmni_ops = {
670 	rcop_no_action,
671 	rcop_no_usage,
672 	rcop_no_set,
673 	project_semmni_test
674 };
675 
676 /*
677  * project.max-msg-ids resource control support.
678  */
679 
680 /*ARGSUSED*/
681 static int
682 project_msgmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
683     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
684 {
685 	rctl_qty_t v;
686 	ASSERT(MUTEX_HELD(&p->p_lock));
687 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
688 	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_msgmni + inc;
689 	if (v > rval->rcv_value)
690 		return (1);
691 
692 	return (0);
693 }
694 
695 static rctl_ops_t project_msgmni_ops = {
696 	rcop_no_action,
697 	rcop_no_usage,
698 	rcop_no_set,
699 	project_msgmni_test
700 };
701 
702 /*ARGSUSED*/
703 static rctl_qty_t
704 project_locked_mem_usage(rctl_t *rctl, struct proc *p)
705 {
706 	rctl_qty_t q;
707 	ASSERT(MUTEX_HELD(&p->p_lock));
708 	mutex_enter(&p->p_zone->zone_mem_lock);
709 	q = p->p_task->tk_proj->kpj_data.kpd_locked_mem;
710 	mutex_exit(&p->p_zone->zone_mem_lock);
711 	return (q);
712 }
713 
714 /*ARGSUSED*/
715 static int
716 project_locked_mem_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
717     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
718 {
719 	rctl_qty_t q;
720 	ASSERT(MUTEX_HELD(&p->p_lock));
721 	ASSERT(MUTEX_HELD(&p->p_zone->zone_mem_lock));
722 	q = p->p_task->tk_proj->kpj_data.kpd_locked_mem;
723 	if (q + inc > rval->rcv_value)
724 		return (1);
725 	return (0);
726 }
727 
728 /*ARGSUSED*/
729 static int
730 project_locked_mem_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
731     rctl_qty_t nv)
732 {
733 	ASSERT(MUTEX_HELD(&p->p_lock));
734 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
735 	if (e->rcep_p.proj == NULL)
736 		return (0);
737 
738 	e->rcep_p.proj->kpj_data.kpd_locked_mem_ctl = nv;
739 	return (0);
740 }
741 
742 static rctl_ops_t project_locked_mem_ops = {
743 	rcop_no_action,
744 	project_locked_mem_usage,
745 	project_locked_mem_set,
746 	project_locked_mem_test
747 };
748 
749 /*
750  * project.max-contracts resource control support.
751  */
752 
753 /*ARGSUSED*/
754 static int
755 project_contract_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
756     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
757 {
758 	rctl_qty_t v;
759 
760 	ASSERT(MUTEX_HELD(&p->p_lock));
761 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
762 
763 	v = e->rcep_p.proj->kpj_data.kpd_contract + inc;
764 
765 	if ((p->p_task != NULL) && (p->p_task->tk_proj) != NULL &&
766 	    (v > rval->rcv_value))
767 		return (1);
768 
769 	return (0);
770 }
771 
772 static rctl_ops_t project_contract_ops = {
773 	rcop_no_action,
774 	rcop_no_usage,
775 	rcop_no_set,
776 	project_contract_test
777 };
778 
779 /*ARGSUSED*/
780 static rctl_qty_t
781 project_crypto_usage(rctl_t *r, proc_t *p)
782 {
783 	ASSERT(MUTEX_HELD(&p->p_lock));
784 	return (p->p_task->tk_proj->kpj_data.kpd_crypto_mem);
785 }
786 
787 /*ARGSUSED*/
788 static int
789 project_crypto_set(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
790     rctl_qty_t nv)
791 {
792 	ASSERT(MUTEX_HELD(&p->p_lock));
793 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
794 	if (e->rcep_p.proj == NULL)
795 		return (0);
796 
797 	e->rcep_p.proj->kpj_data.kpd_crypto_mem_ctl = nv;
798 	return (0);
799 }
800 
801 /*ARGSUSED*/
802 static int
803 project_crypto_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
804     rctl_val_t *rval, rctl_qty_t incr, uint_t flags)
805 {
806 	rctl_qty_t v;
807 	ASSERT(MUTEX_HELD(&p->p_lock));
808 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
809 	v = e->rcep_p.proj->kpj_data.kpd_crypto_mem + incr;
810 	if (v > rval->rcv_value)
811 		return (1);
812 	return (0);
813 }
814 
815 static rctl_ops_t project_crypto_mem_ops = {
816 	rcop_no_action,
817 	project_crypto_usage,
818 	project_crypto_set,
819 	project_crypto_test
820 };
821 
822 /*
823  * void project_init(void)
824  *
825  * Overview
826  *   Initialize the project subsystem, including the primordial project 0 entry.
827  *   Register generic project resource controls, if any.
828  *
829  * Return values
830  *   None.
831  *
832  * Caller's context
833  *   Safe for KM_SLEEP allocations.
834  */
835 void
836 project_init(void)
837 {
838 	rctl_qty_t shmmni, shmmax, qty;
839 	boolean_t check;
840 
841 	projects_hash = mod_hash_create_extended("projects_hash",
842 	    project_hash_size, mod_hash_null_keydtor, project_hash_val_dtor,
843 	    project_hash_by_id,
844 	    (void *)(uintptr_t)mod_hash_iddata_gen(project_hash_size),
845 	    project_hash_key_cmp, KM_SLEEP);
846 
847 	rc_project_cpu_shares = rctl_register("project.cpu-shares",
848 	    RCENTITY_PROJECT, RCTL_GLOBAL_SIGNAL_NEVER |
849 	    RCTL_GLOBAL_DENY_NEVER | RCTL_GLOBAL_NOBASIC |
850 	    RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER,
851 	    FSS_MAXSHARES, FSS_MAXSHARES,
852 	    &project_cpu_shares_ops);
853 	rctl_add_default_limit("project.cpu-shares", 1, RCPRIV_PRIVILEGED,
854 	    RCTL_LOCAL_NOACTION);
855 
856 	rc_project_cpu_cap = rctl_register("project.cpu-cap",
857 	    RCENTITY_PROJECT, RCTL_GLOBAL_SIGNAL_NEVER |
858 	    RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
859 	    RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER |
860 	    RCTL_GLOBAL_INFINITE,
861 	    MAXCAP, MAXCAP, &project_cpu_cap_ops);
862 
863 	rc_project_nlwps = rctl_register("project.max-lwps", RCENTITY_PROJECT,
864 	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
865 	    INT_MAX, INT_MAX, &project_lwps_ops);
866 
867 	rc_project_ntasks = rctl_register("project.max-tasks", RCENTITY_PROJECT,
868 	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
869 	    INT_MAX, INT_MAX, &project_tasks_ops);
870 
871 	/*
872 	 * This rctl handle is used by /dev/crypto. It is here rather than
873 	 * in misc/kcf or the drv/crypto module because resource controls
874 	 * currently don't allow modules to be unloaded, and the control
875 	 * must be registered before init starts.
876 	 */
877 	rc_project_crypto_mem = rctl_register("project.max-crypto-memory",
878 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
879 	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX,
880 	    &project_crypto_mem_ops);
881 
882 	/*
883 	 * Default to a quarter of the machine's memory
884 	 */
885 	qty = availrmem_initial << (PAGESHIFT - 2);
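	/*
	 * (Illustrative arithmetic: availrmem_initial counts pages, so a
	 * shift by PAGESHIFT would yield bytes and the "- 2" divides by
	 * four.  With 4K pages, PAGESHIFT == 12, an availrmem_initial of
	 * 1M pages gives qty = 1M << 10 = 1 GB, a quarter of 4 GB.)
	 */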
886 	rctl_add_default_limit("project.max-crypto-memory", qty,
887 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
888 
889 	/*
890 	 * System V IPC resource controls
891 	 */
892 	rc_project_semmni = rctl_register("project.max-sem-ids",
893 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
894 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_semmni_ops);
895 	rctl_add_legacy_limit("project.max-sem-ids", "semsys",
896 	    "seminfo_semmni", 128, IPC_IDS_MAX);
897 
898 	rc_project_msgmni = rctl_register("project.max-msg-ids",
899 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
900 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_msgmni_ops);
901 	rctl_add_legacy_limit("project.max-msg-ids", "msgsys",
902 	    "msginfo_msgmni", 128, IPC_IDS_MAX);
903 
904 	rc_project_shmmni = rctl_register("project.max-shm-ids",
905 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
906 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_shmmni_ops);
907 	rctl_add_legacy_limit("project.max-shm-ids", "shmsys",
908 	    "shminfo_shmmni", 128, IPC_IDS_MAX);
909 
910 	rc_project_shmmax = rctl_register("project.max-shm-memory",
911 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
912 	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX, &project_shmmax_ops);
913 
914 	check = B_FALSE;
915 	if (!mod_sysvar("shmsys", "shminfo_shmmni", &shmmni))
916 		shmmni = 100;
917 	else
918 		check = B_TRUE;
919 	if (!mod_sysvar("shmsys", "shminfo_shmmax", &shmmax))
920 		shmmax = 0x800000;
921 	else
922 		check = B_TRUE;
923 
924 	/*
925 	 * Default to a quarter of the machine's memory
926 	 */
927 	qty = availrmem_initial << (PAGESHIFT - 2);
928 	if (check) {
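		/*
		 * When a legacy tunable was found, raise the default so it
		 * covers shmmni segments of shmmax bytes each.  The division
		 * below detects the case where shmmni * shmmax would
		 * overflow a uint64_t, in which case the default is clamped
		 * to UINT64_MAX.
		 */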
929 		if ((shmmax > 0) && (UINT64_MAX / shmmax <= shmmni))
930 			qty = UINT64_MAX;
931 		else if (shmmni * shmmax > qty)
932 			qty = shmmni * shmmax;
933 	}
934 	rctl_add_default_limit("project.max-shm-memory", qty,
935 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
936 
937 	/*
938 	 * Event Ports resource controls
939 	 */
940 
941 	rc_project_portids = rctl_register("project.max-port-ids",
942 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
943 	    RCTL_GLOBAL_COUNT, PORT_MAX_PORTS, PORT_MAX_PORTS,
944 	    &rctl_absolute_ops);
945 	rctl_add_default_limit("project.max-port-ids", PORT_DEFAULT_PORTS,
946 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
947 
948 	/*
949 	 * Resource control for locked memory
950 	 */
951 	rc_project_locked_mem = rctl_register(
952 	    "project.max-locked-memory", RCENTITY_PROJECT,
953 	    RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES,
954 	    UINT64_MAX, UINT64_MAX, &project_locked_mem_ops);
955 
956 	/* Default value equals that of max-shm-memory. */
957 	rctl_add_default_limit("project.max-locked-memory", qty,
958 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
959 
960 	/*
961 	 * Per project limit on contracts.
962 	 */
963 	rc_project_contract = rctl_register("project.max-contracts",
964 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
965 	    RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX, &project_contract_ops);
966 	rctl_add_default_limit("project.max-contracts", 10000,
967 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
968 
969 	t0.t_proj = proj0p = project_hold_by_id(0, &zone0,
970 	    PROJECT_HOLD_INSERT);
971 
972 	mutex_enter(&p0.p_lock);
973 	proj0p->kpj_nlwps = p0.p_lwpcnt;
974 	mutex_exit(&p0.p_lock);
975 	proj0p->kpj_ntasks = 1;
976 }
977 
978 static int
979 project_lockedmem_kstat_update(kstat_t *ksp, int rw)
980 {
981 	kproject_t *pj = ksp->ks_private;
982 	kproject_kstat_t *kpk = ksp->ks_data;
983 
984 	if (rw == KSTAT_WRITE)
985 		return (EACCES);
986 
987 	kpk->kpk_usage.value.ui64 = pj->kpj_data.kpd_locked_mem;
988 	kpk->kpk_value.value.ui64 = pj->kpj_data.kpd_locked_mem_ctl;
989 	return (0);
990 }
991 
992 static kstat_t *
993 project_kstat_create(kproject_t *pj, zone_t *zone)
994 {
995 	kstat_t *ksp;
996 	kproject_kstat_t *kpk;
997 	char *zonename = zone->zone_name;
998 
999 	ksp = rctl_kstat_create_project(pj, "lockedmem", KSTAT_TYPE_NAMED,
1000 	    sizeof (kproject_kstat_t) / sizeof (kstat_named_t),
1001 	    KSTAT_FLAG_VIRTUAL);
1002 
1003 	if (ksp == NULL)
1004 		return (NULL);
1005 
1006 	kpk = ksp->ks_data = kmem_alloc(sizeof (kproject_kstat_t), KM_SLEEP);
1007 	ksp->ks_data_size += strlen(zonename) + 1;
1008 	kstat_named_init(&kpk->kpk_zonename, "zonename", KSTAT_DATA_STRING);
1009 	kstat_named_setstr(&kpk->kpk_zonename, zonename);
1010 	kstat_named_init(&kpk->kpk_usage, "usage", KSTAT_DATA_UINT64);
1011 	kstat_named_init(&kpk->kpk_value, "value", KSTAT_DATA_UINT64);
1012 	ksp->ks_update = project_lockedmem_kstat_update;
1013 	ksp->ks_private = pj;
1014 	kstat_install(ksp);
1015 
1016 	return (ksp);
1017 }
1018 
1019 static void
1020 project_kstat_delete(kproject_t *pj)
1021 {
1022 	void *data;
1023 
1024 	if (pj->kpj_data.kpd_lockedmem_kstat != NULL) {
1025 		data = pj->kpj_data.kpd_lockedmem_kstat->ks_data;
1026 		kstat_delete(pj->kpj_data.kpd_lockedmem_kstat);
1027 		kmem_free(data, sizeof (kproject_kstat_t));
1028 	}
1029 	pj->kpj_data.kpd_lockedmem_kstat = NULL;
1030 }
1031