/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/project.h>
#include <sys/modhash.h>
#include <sys/modctl.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/proc.h>
#include <sys/rctl.h>
#include <sys/sunddi.h>
#include <sys/fss.h>
#include <sys/systm.h>
#include <sys/ipc_impl.h>
#include <sys/port_kernel.h>
#include <sys/task.h>
#include <sys/zone.h>

int project_hash_size = 64;
static kmutex_t project_hash_lock;
static kmutex_t projects_list_lock;
static mod_hash_t *projects_hash;
static kproject_t *projects_list;

rctl_hndl_t rc_project_cpu_shares;
rctl_hndl_t rc_project_nlwps;
rctl_hndl_t rc_project_ntasks;
rctl_hndl_t rc_project_msgmni;
rctl_hndl_t rc_project_semmni;
rctl_hndl_t rc_project_shmmax;
rctl_hndl_t rc_project_shmmni;
rctl_hndl_t rc_project_portids;
rctl_hndl_t rc_project_locked_mem;
rctl_hndl_t rc_project_contract;
rctl_hndl_t rc_project_crypto_mem;

/*
 * Dummy structure used when comparing projects.  This structure must be kept
 * identical to the first two fields of kproject_t.
 */
struct project_zone {
	projid_t	kpj_id;
	zoneid_t	kpj_zoneid;
};

/*
 * Projects
 *
 *   A dictionary of all active projects is maintained by the kernel so that we
 *   may track project usage and limits.  (By an active project, we mean a
 *   project associated with one or more tasks, and therefore with one or more
 *   processes.) We build the dictionary on top of the mod_hash facility, since
 *   project additions and deletions are relatively rare events.  An
 *   integer-to-pointer mapping is maintained within the hash, representing the
 *   map from project id to project structure.  All projects, including the
 *   primordial "project 0", are allocated via the project_hold_by_id()
 *   interface.
 *
 *   Currently, the project contains a reference count; the project ID, which is
 *   examined by the extended accounting subsystem as well as /proc; a resource
 *   control set, which contains the allowable values (and actions on exceeding
 *   those values) for controlled project-level resources on the system; and a
 *   number of CPU shares, which is used by the fair share scheduling class
 *   (FSS) to support its proportion-based scheduling algorithm.
 *
 * Reference counting convention
 *   The dictionary entry does not itself count as a reference--only references
 *   outside of the subsystem are tallied.  At the drop of the final external
 *   reference, the project entry is removed.  The reference counter keeps
 *   track of the number of threads *and* tasks within a project.
 *
 * Locking
 *   Walking the doubly-linked project list must be done while holding
 *   projects_list_lock.  Thus, any dereference of kpj_next or kpj_prev must be
 *   under projects_list_lock.
 *
 *   If both the hash lock, project_hash_lock, and the list lock are to be
 *   acquired, the hash lock is to be acquired first.
 */

static kstat_t *project_kstat_create(kproject_t *pj, zone_t *zone);
static void project_kstat_delete(kproject_t *pj);

static void
project_data_init(kproject_data_t *data)
{
	/*
	 * Initialize subsystem-specific data
	 */
	data->kpd_shmmax = 0;
	data->kpd_ipc.ipcq_shmmni = 0;
	data->kpd_ipc.ipcq_semmni = 0;
	data->kpd_ipc.ipcq_msgmni = 0;
	data->kpd_locked_mem = 0;
	data->kpd_locked_mem_ctl = UINT64_MAX;
	data->kpd_contract = 0;
	data->kpd_crypto_mem = 0;
	data->kpd_lockedmem_kstat = NULL;
}

/*ARGSUSED*/
static uint_t
project_hash_by_id(void *hash_data, mod_hash_key_t key)
{
	struct project_zone *pz = key;
	uint_t mykey;

	/*
	 * Merge the zoneid and projectid into a single 32-bit quantity, and
	 * then pass that in to the existing idhash.
	 */
	mykey = (pz->kpj_zoneid << 16) | pz->kpj_id;
	return (mod_hash_byid(hash_data, (mod_hash_key_t)(uintptr_t)mykey));
}

static int
project_hash_key_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
{
	struct project_zone *pz1 = key1, *pz2 = key2;
	int retval;

	return ((int)((retval = pz1->kpj_id - pz2->kpj_id) != 0 ? retval :
	    pz1->kpj_zoneid - pz2->kpj_zoneid));
}

static void
project_hash_val_dtor(mod_hash_val_t val)
{
	kproject_t *kp = (kproject_t *)val;

	ASSERT(kp->kpj_count == 0);
	kmem_free(kp, sizeof (kproject_t));
}

/*
 * kproject_t *project_hold(kproject_t *)
 *
 * Overview
 *   Record that an additional reference on the indicated project has been
 *   taken.
 *
 * Return values
 *   A pointer to the indicated project.
 *
 * Caller's context
 *   project_hash_lock must not be held across the project_hold() call.
 */
kproject_t *
project_hold(kproject_t *p)
{
	mutex_enter(&project_hash_lock);
	ASSERT(p != NULL);
	p->kpj_count++;
	ASSERT(p->kpj_count != 0);
	mutex_exit(&project_hash_lock);
	return (p);
}

/*
 * kproject_t *project_hold_by_id(projid_t, zone_t *, int)
 *
 * Overview
 *   project_hold_by_id() looks up the project with the specified project ID
 *   and zone in the dictionary of projects active on the system and places a
 *   hold on it.  The third argument defines the desired behavior when a
 *   project with the given project ID cannot be found:
 *
 *   PROJECT_HOLD_INSERT	A new entry is made in the dictionary and the
 *   				project is added to the global list.
 *
 *   PROJECT_HOLD_FIND		Return NULL.
 *
 *   The project is returned with its reference count incremented by one.
 *   A new project derives its resource controls from those of project 0.
 *
 * Return values
 *   A pointer to the held project.
 *
 * Caller's context
 *   Caller must be in a context suitable for KM_SLEEP allocations.
 */
kproject_t *
project_hold_by_id(projid_t id, zone_t *zone, int flag)
{
	kproject_t *spare_p;
	kproject_t *p;
	mod_hash_hndl_t hndl;
	rctl_set_t *set;
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;
	struct project_zone pz;
	boolean_t create = B_FALSE;
	kstat_t *ksp;

	pz.kpj_id = id;
	pz.kpj_zoneid = zone->zone_id;

	if (flag == PROJECT_HOLD_FIND) {
		mutex_enter(&project_hash_lock);

		if (mod_hash_find(projects_hash, (mod_hash_key_t)&pz,
		    (mod_hash_val_t *)&p) == MH_ERR_NOTFOUND)
			p = NULL;
		else
			p->kpj_count++;

		mutex_exit(&project_hash_lock);
		return (p);
	}

	ASSERT(flag == PROJECT_HOLD_INSERT);

	spare_p = kmem_zalloc(sizeof (kproject_t), KM_SLEEP);
	set = rctl_set_create();

	gp = rctl_set_init_prealloc(RCENTITY_PROJECT);

	(void) mod_hash_reserve(projects_hash, &hndl);

	mutex_enter(&curproc->p_lock);
	mutex_enter(&project_hash_lock);
	if (mod_hash_find(projects_hash, (mod_hash_key_t)&pz,
	    (mod_hash_val_t *)&p) == MH_ERR_NOTFOUND) {

		p = spare_p;
		p->kpj_id = id;
		p->kpj_zoneid = zone->zone_id;
		p->kpj_count = 0;
		p->kpj_shares = 1;
		p->kpj_nlwps = 0;
		p->kpj_ntasks = 0;
		p->kpj_nlwps_ctl = INT_MAX;
		p->kpj_ntasks_ctl = INT_MAX;
		project_data_init(&p->kpj_data);
		e.rcep_p.proj = p;
		e.rcep_t = RCENTITY_PROJECT;
		p->kpj_rctls = rctl_set_init(RCENTITY_PROJECT, curproc, &e,
		    set, gp);
		mutex_exit(&curproc->p_lock);

		if (mod_hash_insert_reserve(projects_hash, (mod_hash_key_t)p,
		    (mod_hash_val_t)p, hndl))
			panic("unable to insert project %d(%p)", id, (void *)p);

		/*
		 * Insert project into global project list.
		 */
		mutex_enter(&projects_list_lock);
		if (id != 0 || zone != &zone0) {
			p->kpj_next = projects_list;
			p->kpj_prev = projects_list->kpj_prev;
			p->kpj_prev->kpj_next = p;
			projects_list->kpj_prev = p;
		} else {
			/*
			 * Special case: primordial hold on project 0.
			 */
			p->kpj_next = p;
			p->kpj_prev = p;
			projects_list = p;
		}
		mutex_exit(&projects_list_lock);
		create = B_TRUE;
	} else {
		mutex_exit(&curproc->p_lock);
		mod_hash_cancel(projects_hash, &hndl);
		kmem_free(spare_p, sizeof (kproject_t));
		rctl_set_free(set);
	}

	rctl_prealloc_destroy(gp);
	p->kpj_count++;
	mutex_exit(&project_hash_lock);

	/*
	 * The kstat stores the project's zone name, as zoneids may change
	 * across reboots.
	 */
	if (create == B_TRUE) {
		ksp = project_kstat_create(p, zone);
		mutex_enter(&project_hash_lock);
		ASSERT(p->kpj_data.kpd_lockedmem_kstat == NULL);
		p->kpj_data.kpd_lockedmem_kstat = ksp;
		mutex_exit(&project_hash_lock);
	}
	return (p);
}

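/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a consumer that needs a project entry takes a hold, uses the
 * structure, and drops the hold when finished.  The wrapper and its name
 * below are hypothetical; the real consumers of this interface live in
 * task.c and related code.  It is fenced out so it is never compiled.
 */
#if 0
static void
example_touch_project(projid_t id, zone_t *zone)
{
	kproject_t *pj;

	/*
	 * PROJECT_HOLD_INSERT creates the dictionary entry if this is the
	 * first reference; PROJECT_HOLD_FIND would instead return NULL.
	 */
	pj = project_hold_by_id(id, zone, PROJECT_HOLD_INSERT);

	/* ... examine pj->kpj_shares, pj->kpj_rctls, etc. ... */

	/* Dropping the final external reference removes the entry. */
	project_rele(pj);
}
#endif
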
/*
 * void project_rele(kproject_t *)
 *
 * Overview
 *   Advertise that one external reference to this project is no longer needed.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   No restriction on context.
 */
void
project_rele(kproject_t *p)
{
	mutex_enter(&project_hash_lock);
	ASSERT(p->kpj_count != 0);
	p->kpj_count--;
	if (p->kpj_count == 0) {

		/*
		 * Remove project from global list.
		 */
		mutex_enter(&projects_list_lock);
		p->kpj_next->kpj_prev = p->kpj_prev;
		p->kpj_prev->kpj_next = p->kpj_next;
		if (projects_list == p)
			projects_list = p->kpj_next;
		mutex_exit(&projects_list_lock);

		rctl_set_free(p->kpj_rctls);
		project_kstat_delete(p);

		if (mod_hash_destroy(projects_hash, (mod_hash_key_t)p))
			panic("unable to delete project %d zone %d", p->kpj_id,
			    p->kpj_zoneid);

	}
	mutex_exit(&project_hash_lock);
}

/*
 * int project_walk_all(zoneid_t, int (*)(kproject_t *, void *), void *)
 *
 * Overview
 *   Walk the project list for the given zoneid with a callback.
 *
 * Return values
 *   -1 for an invalid walk, number of projects visited otherwise.
 *
 * Caller's context
 *   projects_list_lock must not be held, as it is acquired by
 *   project_walk_all().  Accordingly, callbacks may not perform KM_SLEEP
 *   allocations.
 */
int
project_walk_all(zoneid_t zoneid, int (*cb)(kproject_t *, void *),
    void *walk_data)
{
	int cnt = 0;
	kproject_t *kp = proj0p;

	mutex_enter(&projects_list_lock);
	do {
		if (zoneid != ALL_ZONES && kp->kpj_zoneid != zoneid)
			continue;
		if (cb(kp, walk_data) == -1) {
			cnt = -1;
			break;
		} else {
			cnt++;
		}
	} while ((kp = kp->kpj_next) != proj0p);
	mutex_exit(&projects_list_lock);
	return (cnt);
}

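/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): summing the CPU shares of the active projects in a zone with
 * project_walk_all().  The walk runs with projects_list_lock held, so the
 * callback does no blocking allocation.  The names below are hypothetical,
 * and the block is fenced out so it is never compiled.
 */
#if 0
static int
example_sum_shares_cb(kproject_t *kp, void *arg)
{
	/* Accumulate this project's share count; return 0 to keep walking. */
	*(uint64_t *)arg += kp->kpj_shares;
	return (0);
}

static uint64_t
example_zone_total_shares(zoneid_t zoneid)
{
	uint64_t shares = 0;

	/* project_walk_all() returns -1 only for an invalid walk. */
	(void) project_walk_all(zoneid, example_sum_shares_cb, &shares);
	return (shares);
}
#endif
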
/*
 * projid_t curprojid(void)
 *
 * Overview
 *   Return the project ID of the current thread.
 *
 * Caller's context
 *   No restrictions.
 */
projid_t
curprojid()
{
	return (ttoproj(curthread)->kpj_id);
}

/*
 * project.cpu-shares resource control support.
 */
/*ARGSUSED*/
static rctl_qty_t
project_cpu_shares_usage(rctl_t *rctl, struct proc *p)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	return (p->p_task->tk_proj->kpj_shares);
}

/*ARGSUSED*/
static int
project_cpu_shares_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_qty_t nv)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	if (e->rcep_p.proj == NULL)
		return (0);

	e->rcep_p.proj->kpj_shares = nv;

	return (0);
}


static rctl_ops_t project_cpu_shares_ops = {
	rcop_no_action,
	project_cpu_shares_usage,
	project_cpu_shares_set,
	rcop_no_test
};

/*ARGSUSED*/
static rctl_qty_t
project_lwps_usage(rctl_t *r, proc_t *p)
{
	kproject_t *pj;
	rctl_qty_t nlwps;

	ASSERT(MUTEX_HELD(&p->p_lock));
	pj = p->p_task->tk_proj;
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	nlwps = pj->kpj_nlwps;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	return (nlwps);
}

/*ARGSUSED*/
static int
project_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
    rctl_qty_t incr, uint_t flags)
{
	rctl_qty_t nlwps;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(MUTEX_HELD(&p->p_zone->zone_nlwps_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	if (e->rcep_p.proj == NULL)
		return (0);

	nlwps = e->rcep_p.proj->kpj_nlwps;
	if (nlwps + incr > rcntl->rcv_value)
		return (1);

	return (0);
}

/*ARGSUSED*/
static int
project_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_qty_t nv)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	if (e->rcep_p.proj == NULL)
		return (0);

	e->rcep_p.proj->kpj_nlwps_ctl = nv;
	return (0);
}

static rctl_ops_t project_lwps_ops = {
	rcop_no_action,
	project_lwps_usage,
	project_lwps_set,
	project_lwps_test,
};

/*ARGSUSED*/
static rctl_qty_t
project_ntasks_usage(rctl_t *r, proc_t *p)
{
	kproject_t *pj;
	rctl_qty_t ntasks;

	ASSERT(MUTEX_HELD(&p->p_lock));
	pj = p->p_task->tk_proj;
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	ntasks = pj->kpj_ntasks;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	return (ntasks);
}

/*ARGSUSED*/
static int
project_ntasks_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
    rctl_qty_t incr, uint_t flags)
{
	rctl_qty_t ntasks;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	ntasks = e->rcep_p.proj->kpj_ntasks;
	if (ntasks + incr > rcntl->rcv_value)
		return (1);

	return (0);
}

/*ARGSUSED*/
static int
project_ntasks_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_qty_t nv)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	e->rcep_p.proj->kpj_ntasks_ctl = nv;
	return (0);
}

static rctl_ops_t project_tasks_ops = {
	rcop_no_action,
	project_ntasks_usage,
	project_ntasks_set,
	project_ntasks_test,
};

/*
 * project.max-shm-memory resource control support.
 */

/*ARGSUSED*/
static int
project_shmmax_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
{
	rctl_qty_t v;
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	v = e->rcep_p.proj->kpj_data.kpd_shmmax + inc;
	if (v > rval->rcv_value)
		return (1);

	return (0);
}

static rctl_ops_t project_shmmax_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_shmmax_test
};

/*
 * project.max-shm-ids resource control support.
 */

/*ARGSUSED*/
static int
project_shmmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
{
	rctl_qty_t v;
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_shmmni + inc;
	if (v > rval->rcv_value)
		return (1);

	return (0);
}

static rctl_ops_t project_shmmni_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_shmmni_test
};

/*
 * project.max-sem-ids resource control support.
 */

/*ARGSUSED*/
static int
project_semmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
{
	rctl_qty_t v;
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_semmni + inc;
	if (v > rval->rcv_value)
		return (1);

	return (0);
}

static rctl_ops_t project_semmni_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_semmni_test
};

/*
 * project.max-msg-ids resource control support.
 */

/*ARGSUSED*/
static int
project_msgmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
{
	rctl_qty_t v;
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_msgmni + inc;
	if (v > rval->rcv_value)
		return (1);

	return (0);
}

static rctl_ops_t project_msgmni_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_msgmni_test
};

/*ARGSUSED*/
static rctl_qty_t
project_locked_mem_usage(rctl_t *rctl, struct proc *p)
{
	rctl_qty_t q;
	ASSERT(MUTEX_HELD(&p->p_lock));
	mutex_enter(&p->p_zone->zone_mem_lock);
	q = p->p_task->tk_proj->kpj_data.kpd_locked_mem;
	mutex_exit(&p->p_zone->zone_mem_lock);
	return (q);
}

/*ARGSUSED*/
static int
project_locked_mem_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
{
	rctl_qty_t q;
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(MUTEX_HELD(&p->p_zone->zone_mem_lock));
	q = p->p_task->tk_proj->kpj_data.kpd_locked_mem;
	if (q + inc > rval->rcv_value)
		return (1);
	return (0);
}

/*ARGSUSED*/
static int
project_locked_mem_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_qty_t nv)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	if (e->rcep_p.proj == NULL)
		return (0);

	e->rcep_p.proj->kpj_data.kpd_locked_mem_ctl = nv;
	return (0);
}

static rctl_ops_t project_locked_mem_ops = {
	rcop_no_action,
	project_locked_mem_usage,
	project_locked_mem_set,
	project_locked_mem_test
};

/*
 * project.max-contracts resource control support.
 */

/*ARGSUSED*/
static int
project_contract_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
{
	rctl_qty_t v;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);

	v = e->rcep_p.proj->kpj_data.kpd_contract + inc;

	if ((p->p_task != NULL) && (p->p_task->tk_proj) != NULL &&
	    (v > rval->rcv_value))
		return (1);

	return (0);
}

static rctl_ops_t project_contract_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_contract_test
};

/*ARGSUSED*/
static int
project_crypto_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
    rctl_val_t *rval, rctl_qty_t incr, uint_t flags)
{
	rctl_qty_t v;
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_PROJECT);
	v = e->rcep_p.proj->kpj_data.kpd_crypto_mem + incr;
	if (v > rval->rcv_value)
		return (1);
	return (0);
}

static rctl_ops_t project_crypto_mem_ops = {
	rcop_no_action,
	rcop_no_usage,
	rcop_no_set,
	project_crypto_test
};

/*
 * void project_init(void)
 *
 * Overview
 *   Initialize the project subsystem, including the primordial project 0 entry.
 *   Register generic project resource controls, if any.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   Safe for KM_SLEEP allocations.
 */
void
project_init(void)
{
	rctl_qty_t shmmni, shmmax, qty;
	boolean_t check;

	projects_hash = mod_hash_create_extended("projects_hash",
	    project_hash_size, mod_hash_null_keydtor, project_hash_val_dtor,
	    project_hash_by_id,
	    (void *)(uintptr_t)mod_hash_iddata_gen(project_hash_size),
	    project_hash_key_cmp, KM_SLEEP);

	rc_project_cpu_shares = rctl_register("project.cpu-shares",
	    RCENTITY_PROJECT, RCTL_GLOBAL_SIGNAL_NEVER |
	    RCTL_GLOBAL_DENY_NEVER | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER,
	    FSS_MAXSHARES, FSS_MAXSHARES,
	    &project_cpu_shares_ops);
	rctl_add_default_limit("project.cpu-shares", 1, RCPRIV_PRIVILEGED,
	    RCTL_LOCAL_NOACTION);

	rc_project_nlwps = rctl_register("project.max-lwps", RCENTITY_PROJECT,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
	    INT_MAX, INT_MAX, &project_lwps_ops);

	rc_project_ntasks = rctl_register("project.max-tasks", RCENTITY_PROJECT,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
	    INT_MAX, INT_MAX, &project_tasks_ops);

	/*
	 * This rctl handle is used by /dev/crypto. It is here rather than
	 * in misc/kcf or the drv/crypto module because resource controls
	 * currently don't allow modules to be unloaded, and the control
	 * must be registered before init starts.
	 */
	rc_project_crypto_mem = rctl_register("project.max-crypto-memory",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX,
	    &project_crypto_mem_ops);

	/*
	 * Default to a quarter of the machine's memory
	 */
	qty = availrmem_initial << (PAGESHIFT - 2);
	rctl_add_default_limit("project.max-crypto-memory", qty,
	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);

	/*
	 * System V IPC resource controls
	 */
	rc_project_semmni = rctl_register("project.max-sem-ids",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_semmni_ops);
	rctl_add_legacy_limit("project.max-sem-ids", "semsys",
	    "seminfo_semmni", 128, IPC_IDS_MAX);

	rc_project_msgmni = rctl_register("project.max-msg-ids",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_msgmni_ops);
	rctl_add_legacy_limit("project.max-msg-ids", "msgsys",
	    "msginfo_msgmni", 128, IPC_IDS_MAX);

	rc_project_shmmni = rctl_register("project.max-shm-ids",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_shmmni_ops);
	rctl_add_legacy_limit("project.max-shm-ids", "shmsys",
	    "shminfo_shmmni", 128, IPC_IDS_MAX);

	rc_project_shmmax = rctl_register("project.max-shm-memory",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX, &project_shmmax_ops);

	check = B_FALSE;
	if (!mod_sysvar("shmsys", "shminfo_shmmni", &shmmni))
		shmmni = 100;
	else
		check = B_TRUE;
	if (!mod_sysvar("shmsys", "shminfo_shmmax", &shmmax))
		shmmax = 0x800000;
	else
		check = B_TRUE;

	/*
	 * Default to a quarter of the machine's memory
	 */
	qty = availrmem_initial << (PAGESHIFT - 2);
	if (check) {
		if ((shmmax > 0) && (UINT64_MAX / shmmax <= shmmni))
			qty = UINT64_MAX;
		else if (shmmni * shmmax > qty)
			qty = shmmni * shmmax;
	}
	rctl_add_default_limit("project.max-shm-memory", qty,
	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);

	/*
	 * Event Ports resource controls
	 */

	rc_project_portids = rctl_register("project.max-port-ids",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_COUNT, PORT_MAX_PORTS, PORT_MAX_PORTS,
	    &rctl_absolute_ops);
	rctl_add_default_limit("project.max-port-ids", PORT_DEFAULT_PORTS,
	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);

	/*
	 * Resource control for locked memory
	 */
	rc_project_locked_mem = rctl_register(
	    "project.max-locked-memory", RCENTITY_PROJECT,
	    RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES,
	    UINT64_MAX, UINT64_MAX, &project_locked_mem_ops);

	/* Default value equals that of max-shm-memory. */
	rctl_add_default_limit("project.max-locked-memory", qty,
	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);

	/*
	 * Per project limit on contracts.
	 */
	rc_project_contract = rctl_register("project.max-contracts",
	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_COUNT,
	    INT_MAX, INT_MAX, &project_contract_ops);
	rctl_add_default_limit("project.max-contracts", 10000,
	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);

	t0.t_proj = proj0p = project_hold_by_id(0, &zone0,
	    PROJECT_HOLD_INSERT);

	mutex_enter(&p0.p_lock);
	proj0p->kpj_nlwps = p0.p_lwpcnt;
	mutex_exit(&p0.p_lock);
	proj0p->kpj_ntasks = 1;
}

static int
project_lockedmem_kstat_update(kstat_t *ksp, int rw)
{
	kproject_t *pj = ksp->ks_private;
	kproject_kstat_t *kpk = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	kpk->kpk_usage.value.ui64 = pj->kpj_data.kpd_locked_mem;
	kpk->kpk_value.value.ui64 = pj->kpj_data.kpd_locked_mem_ctl;
	return (0);
}

static kstat_t *
project_kstat_create(kproject_t *pj, zone_t *zone)
{
	kstat_t *ksp;
	kproject_kstat_t *kpk;
	char *zonename = zone->zone_name;

	ksp = rctl_kstat_create_project(pj, "lockedmem", KSTAT_TYPE_NAMED,
	    sizeof (kproject_kstat_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp == NULL)
		return (NULL);

	kpk = ksp->ks_data = kmem_alloc(sizeof (kproject_kstat_t), KM_SLEEP);
	ksp->ks_data_size += strlen(zonename) + 1;
	kstat_named_init(&kpk->kpk_zonename, "zonename", KSTAT_DATA_STRING);
	kstat_named_setstr(&kpk->kpk_zonename, zonename);
	kstat_named_init(&kpk->kpk_usage, "usage", KSTAT_DATA_UINT64);
	kstat_named_init(&kpk->kpk_value, "value", KSTAT_DATA_UINT64);
	ksp->ks_update = project_lockedmem_kstat_update;
	ksp->ks_private = pj;
	kstat_install(ksp);

	return (ksp);
}

static void
project_kstat_delete(kproject_t *pj)
{
	void *data;

	if (pj->kpj_data.kpd_lockedmem_kstat != NULL) {
		data = pj->kpj_data.kpd_lockedmem_kstat->ks_data;
		kstat_delete(pj->kpj_data.kpd_lockedmem_kstat);
		kmem_free(data, sizeof (kproject_kstat_t));
	}
	pj->kpj_data.kpd_lockedmem_kstat = NULL;
}
952