xref: /illumos-gate/usr/src/uts/common/os/project.c (revision 8533946bd264dca901fdf56bf3da1d81e728b423)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2016, Joyent, Inc.
24  */
25 
26 #include <sys/project.h>
27 #include <sys/modhash.h>
28 #include <sys/modctl.h>
29 #include <sys/kmem.h>
30 #include <sys/kstat.h>
31 #include <sys/atomic.h>
32 #include <sys/cmn_err.h>
33 #include <sys/proc.h>
34 #include <sys/rctl.h>
35 #include <sys/sunddi.h>
36 #include <sys/fss.h>
37 #include <sys/systm.h>
38 #include <sys/ipc_impl.h>
39 #include <sys/port_kernel.h>
40 #include <sys/task.h>
41 #include <sys/zone.h>
42 #include <sys/cpucaps.h>
43 #include <sys/klpd.h>
44 
45 int project_hash_size = 64;
46 static kmutex_t project_hash_lock;
47 static kmutex_t projects_list_lock;
48 static mod_hash_t *projects_hash;
49 static kproject_t *projects_list;
50 
51 rctl_hndl_t rc_project_cpu_shares;
52 rctl_hndl_t rc_project_cpu_cap;
53 rctl_hndl_t rc_project_nlwps;
54 rctl_hndl_t rc_project_nprocs;
55 rctl_hndl_t rc_project_ntasks;
56 rctl_hndl_t rc_project_msgmni;
57 rctl_hndl_t rc_project_semmni;
58 rctl_hndl_t rc_project_shmmax;
59 rctl_hndl_t rc_project_shmmni;
60 rctl_hndl_t rc_project_portids;
61 rctl_hndl_t rc_project_locked_mem;
62 rctl_hndl_t rc_project_contract;
63 rctl_hndl_t rc_project_crypto_mem;
64 
65 /*
66  * Dummy structure used when comparing projects.  This structure must be kept
67  * identical to the first two fields of kproject_t.
68  */
69 struct project_zone {
70 	projid_t	kpj_id;
71 	zoneid_t	kpj_zoneid;
72 };
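/*
 * Illustrative only (not part of the original source): the layout requirement
 * above could be checked at compile time with offsetof()-based assertions,
 * for example using the CTASSERT() macro from <sys/debug.h>:
 *
 *	CTASSERT(offsetof(struct project_zone, kpj_id) ==
 *	    offsetof(kproject_t, kpj_id));
 *	CTASSERT(offsetof(struct project_zone, kpj_zoneid) ==
 *	    offsetof(kproject_t, kpj_zoneid));
 */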
73 
74 /*
75  * Projects
76  *
77  *   A dictionary of all active projects is maintained by the kernel so that we
78  *   may track project usage and limits.  (By an active project, we mean a
79  *   project associated with one or more tasks, and therefore with one or more
80  *   processes.) We build the dictionary on top of the mod_hash facility, since
81  *   project additions and deletions are relatively rare events.  An
82  *   integer-to-pointer mapping is maintained within the hash, representing the
83  *   map from project id to project structure.  All projects, including the
84  *   primordial "project 0", are allocated via the project_hold_by_id()
85  *   interface.
86  *
87  *   Currently, the project contains a reference count; the project ID, which is
88  *   examined by the extended accounting subsystem as well as /proc; a resource
89  *   control set, which contains the allowable values (and actions on exceeding
90  *   those values) for controlled project-level resources on the system; and a
91  *   number of CPU shares, which is used by the fair share scheduling class
92  *   (FSS) to support its proportion-based scheduling algorithm.
93  *
94  * Reference counting convention
95  *   The dictionary entry does not itself count as a reference--only references
96  *   outside of the subsystem are tallied.  At the drop of the final external
97  *   reference, the project entry is removed.  The reference counter keeps
98  *   track of the number of threads *and* tasks within a project.
99  *
100  * Locking
101  *   Walking the doubly-linked project list must be done while holding
102  *   projects_list_lock.  Thus, any dereference of kpj_next or kpj_prev must be
103  *   under projects_list_lock.
104  *
105  *   If both the hash lock, project_hash_lock, and the list lock are to be
106  *   acquired, the hash lock is to be acquired first.
107  */
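/*
 * Illustrative sketch (not part of the original source) of the lock ordering
 * described above, for code that needs both the hash and the list:
 *
 *	mutex_enter(&project_hash_lock);	(hash lock first)
 *	mutex_enter(&projects_list_lock);	(then the list lock)
 *	... look up entries, walk kpj_next / kpj_prev ...
 *	mutex_exit(&projects_list_lock);
 *	mutex_exit(&project_hash_lock);
 */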
108 
109 static void project_kstat_create(kproject_t *pj, zone_t *zone);
110 static void project_kstat_delete(kproject_t *pj);
111 
112 static void
113 project_data_init(kproject_data_t *data)
114 {
115 	/*
116 	 * Initialize subsystem-specific data
117 	 */
118 	data->kpd_shmmax = 0;
119 	data->kpd_ipc.ipcq_shmmni = 0;
120 	data->kpd_ipc.ipcq_semmni = 0;
121 	data->kpd_ipc.ipcq_msgmni = 0;
122 	data->kpd_locked_mem = 0;
123 	data->kpd_locked_mem_ctl = UINT64_MAX;
124 	data->kpd_contract = 0;
125 	data->kpd_crypto_mem = 0;
126 	data->kpd_crypto_mem_ctl = UINT64_MAX;
127 	data->kpd_lockedmem_kstat = NULL;
128 	data->kpd_nprocs_kstat = NULL;
129 }
130 
131 /*ARGSUSED*/
132 static uint_t
133 project_hash_by_id(void *hash_data, mod_hash_key_t key)
134 {
135 	struct project_zone *pz = key;
136 	uint_t mykey;
137 
138 	/*
139 	 * Merge the zoneid and projectid together into a 32-bit quantity, and
140 	 * then pass that in to the existing idhash.
141 	 */
142 	mykey = (pz->kpj_zoneid << 16) | pz->kpj_id;
143 	return (mod_hash_byid(hash_data, (mod_hash_key_t)(uintptr_t)mykey));
144 }
145 
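/*
 * Compare two hash keys (struct project_zone): order by project id first,
 * then by zone id; the result is zero only when both match.
 */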
146 static int
147 project_hash_key_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
148 {
149 	struct project_zone *pz1 = key1, *pz2 = key2;
150 	int retval;
151 
152 	return ((int)((retval = pz1->kpj_id - pz2->kpj_id) != 0 ? retval :
153 	    pz1->kpj_zoneid - pz2->kpj_zoneid));
154 }
155 
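/*
 * Hash value destructor, invoked when an entry is removed from the hash.
 * By this point the project must have no remaining references and its
 * CPU cap state must already have been torn down.
 */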
156 static void
157 project_hash_val_dtor(mod_hash_val_t val)
158 {
159 	kproject_t *kp = (kproject_t *)val;
160 
161 	ASSERT(kp->kpj_count == 0);
162 	ASSERT(kp->kpj_cpucap == NULL);
163 	kmem_free(kp, sizeof (kproject_t));
164 }
165 
166 /*
167  * kproject_t *project_hold(kproject_t *)
168  *
169  * Overview
170  *   Record that an additional reference on the indicated project has been
171  *   taken.
172  *
173  * Return values
174  *   A pointer to the indicated project.
175  *
176  * Caller's context
177  *   project_hash_lock must not be held across the project_hold() call.
178  */
179 kproject_t *
180 project_hold(kproject_t *p)
181 {
182 	mutex_enter(&project_hash_lock);
183 	ASSERT(p != NULL);
184 	p->kpj_count++;
185 	ASSERT(p->kpj_count != 0);
186 	mutex_exit(&project_hash_lock);
187 	return (p);
188 }
189 
190 /*
191  * kproject_t *project_hold_by_id(projid_t, zone_t *, int)
192  *
193  * Overview
194  *   project_hold_by_id() performs a look-up in the dictionary of projects
195  *   active on the system by the specified project ID and zone, and puts a
196  *   hold on it.  The third argument defines the desired behavior when a
197  *   project with the given project ID cannot be found:
198  *
199  *   PROJECT_HOLD_INSERT	A new entry is made in the dictionary and the
200  *   				project is added to the global list.
201  *
202  *   PROJECT_HOLD_FIND		Return NULL.
203  *
204  *   The project is returned with its reference count incremented by one.
205  *   A new project derives its resource controls from those of project 0.
206  *
207  * Return values
208  *   A pointer to the held project.
209  *
210  * Caller's context
211  *   Caller must be in a context suitable for KM_SLEEP allocations.
212  */
213 kproject_t *
214 project_hold_by_id(projid_t id, zone_t *zone, int flag)
215 {
216 	kproject_t *spare_p;
217 	kproject_t *p;
218 	mod_hash_hndl_t hndl;
219 	rctl_set_t *set;
220 	rctl_alloc_gp_t *gp;
221 	rctl_entity_p_t e;
222 	struct project_zone pz;
223 	boolean_t create = B_FALSE;
224 
225 	pz.kpj_id = id;
226 	pz.kpj_zoneid = zone->zone_id;
227 
228 	if (flag == PROJECT_HOLD_FIND) {
229 		mutex_enter(&project_hash_lock);
230 
231 		if (mod_hash_find(projects_hash, (mod_hash_key_t)&pz,
232 		    (mod_hash_val_t)&p) == MH_ERR_NOTFOUND)
233 			p = NULL;
234 		else
235 			p->kpj_count++;
236 
237 		mutex_exit(&project_hash_lock);
238 		return (p);
239 	}
240 
241 	ASSERT(flag == PROJECT_HOLD_INSERT);
242 
243 	spare_p = kmem_zalloc(sizeof (kproject_t), KM_SLEEP);
244 	set = rctl_set_create();
245 
246 	gp = rctl_set_init_prealloc(RCENTITY_PROJECT);
247 
248 	(void) mod_hash_reserve(projects_hash, &hndl);
249 
250 	mutex_enter(&curproc->p_lock);
251 	mutex_enter(&project_hash_lock);
252 	if (mod_hash_find(projects_hash, (mod_hash_key_t)&pz,
253 	    (mod_hash_val_t *)&p) == MH_ERR_NOTFOUND) {
254 
255 		p = spare_p;
256 		p->kpj_id = id;
257 		p->kpj_zone = zone;
258 		p->kpj_zoneid = zone->zone_id;
259 		p->kpj_count = 0;
260 		p->kpj_shares = 1;
261 		p->kpj_nlwps = 0;
262 		p->kpj_nprocs = 0;
263 		p->kpj_ntasks = 0;
264 		p->kpj_nlwps_ctl = INT_MAX;
265 		p->kpj_nprocs_ctl = INT_MAX;
266 		p->kpj_ntasks_ctl = INT_MAX;
267 		project_data_init(&p->kpj_data);
268 		e.rcep_p.proj = p;
269 		e.rcep_t = RCENTITY_PROJECT;
270 		p->kpj_rctls = rctl_set_init(RCENTITY_PROJECT, curproc, &e,
271 		    set, gp);
272 		mutex_exit(&curproc->p_lock);
273 
274 		if (mod_hash_insert_reserve(projects_hash, (mod_hash_key_t)p,
275 		    (mod_hash_val_t)p, hndl))
276 			panic("unable to insert project %d(%p)", id, (void *)p);
277 
278 		/*
279 		 * Insert project into global project list.
280 		 */
281 		mutex_enter(&projects_list_lock);
282 		if (id != 0 || zone != &zone0) {
283 			p->kpj_next = projects_list;
284 			p->kpj_prev = projects_list->kpj_prev;
285 			p->kpj_prev->kpj_next = p;
286 			projects_list->kpj_prev = p;
287 		} else {
288 			/*
289 			 * Special case: primordial hold on project 0.
290 			 */
291 			p->kpj_next = p;
292 			p->kpj_prev = p;
293 			projects_list = p;
294 		}
295 		mutex_exit(&projects_list_lock);
296 		create = B_TRUE;
297 	} else {
298 		mutex_exit(&curproc->p_lock);
299 		mod_hash_cancel(projects_hash, &hndl);
300 		kmem_free(spare_p, sizeof (kproject_t));
301 		rctl_set_free(set);
302 	}
303 
304 	rctl_prealloc_destroy(gp);
305 	p->kpj_count++;
306 	mutex_exit(&project_hash_lock);
307 
308 	/*
309 	 * The kstat stores the project's zone name, as zoneids may change
310 	 * across reboots.
311 	 */
312 	if (create == B_TRUE) {
313 		/*
314 		 * Inform CPU caps framework of the new project
315 		 */
316 		cpucaps_project_add(p);
317 		/*
318 		 * Set up project kstats
319 		 */
320 		project_kstat_create(p, zone);
321 	}
322 	return (p);
323 }
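/*
 * Illustrative usage (not part of the original source): a caller that only
 * wants to look up an existing project, without creating one, might do
 *
 *	kproject_t *kpj;
 *
 *	kpj = project_hold_by_id(projid, zone, PROJECT_HOLD_FIND);
 *	if (kpj != NULL) {
 *		... examine kpj ...
 *		project_rele(kpj);
 *	}
 *
 * With PROJECT_HOLD_INSERT the call always returns a held project, creating
 * a new entry (with resource controls derived from project 0) when none
 * exists.
 */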
324 
325 /*
326  * void project_rele(kproject_t *)
327  *
328  * Overview
329  *   Advertise that one external reference to this project is no longer needed.
330  *
331  * Return values
332  *   None.
333  *
334  * Caller's context
335  *   No restriction on context.
336  */
337 void
338 project_rele(kproject_t *p)
339 {
340 	mutex_enter(&project_hash_lock);
341 	ASSERT(p->kpj_count != 0);
342 	p->kpj_count--;
343 	if (p->kpj_count == 0) {
344 
345 		/*
346 		 * Remove project from global list.
347 		 */
348 		ASSERT(p->kpj_nprocs == 0);
349 
350 		mutex_enter(&projects_list_lock);
351 		p->kpj_next->kpj_prev = p->kpj_prev;
352 		p->kpj_prev->kpj_next = p->kpj_next;
353 		if (projects_list == p)
354 			projects_list = p->kpj_next;
355 		mutex_exit(&projects_list_lock);
356 
357 		cpucaps_project_remove(p);
358 
359 		rctl_set_free(p->kpj_rctls);
360 		project_kstat_delete(p);
361 
362 		if (p->kpj_klpd != NULL)
363 			klpd_freelist(&p->kpj_klpd);
364 
365 		if (mod_hash_destroy(projects_hash, (mod_hash_key_t)p))
366 			panic("unable to delete project %d zone %d", p->kpj_id,
367 			    p->kpj_zoneid);
368 
369 	}
370 	mutex_exit(&project_hash_lock);
371 }
372 
373 /*
374  * int project_walk_all(zoneid_t, int (*)(kproject_t *, void *), void *)
375  *
376  * Overview
377  *   Walk the project list for the given zoneid with a callback.
378  *
379  * Return values
380  *   -1 for an aborted walk, otherwise the number of projects visited.
381  *
382  * Caller's context
383  *   projects_list_lock must not be held, as it is acquired by
384  *   project_walk_all().  Accordingly, callbacks may not perform KM_SLEEP
385  *   allocations.
386  */
387 int
388 project_walk_all(zoneid_t zoneid, int (*cb)(kproject_t *, void *),
389     void *walk_data)
390 {
391 	int cnt = 0;
392 	kproject_t *kp = proj0p;
393 
394 	mutex_enter(&projects_list_lock);
395 	do {
396 		if (zoneid != ALL_ZONES && kp->kpj_zoneid != zoneid)
397 			continue;
398 		if (cb(kp, walk_data) == -1) {
399 			cnt = -1;
400 			break;
401 		} else {
402 			cnt++;
403 		}
404 	} while ((kp = kp->kpj_next) != proj0p);
405 	mutex_exit(&projects_list_lock);
406 	return (cnt);
407 }
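/*
 * Illustrative sketch (not part of the original source) of a walker callback.
 * Callbacks run with projects_list_lock held, so they must not block or
 * perform KM_SLEEP allocations:
 *
 *	static int
 *	count_lwps_cb(kproject_t *kpj, void *arg)
 *	{
 *		uint64_t *totalp = arg;
 *
 *		*totalp += kpj->kpj_nlwps;
 *		return (0);
 *	}
 *
 * invoked as project_walk_all(getzoneid(), count_lwps_cb, &total); returning
 * -1 from the callback would abort the walk.
 */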
408 
409 /*
410  * projid_t curprojid(void)
411  *
412  * Overview
413  *   Return the project ID of the current thread.
414  *
415  * Caller's context
416  *   No restrictions.
417  */
418 projid_t
419 curprojid()
420 {
421 	return (ttoproj(curthread)->kpj_id);
422 }
423 
424 /*
425  * project.cpu-shares resource control support.
426  */
427 /*ARGSUSED*/
428 static rctl_qty_t
429 project_cpu_shares_usage(rctl_t *rctl, struct proc *p)
430 {
431 	ASSERT(MUTEX_HELD(&p->p_lock));
432 	return (p->p_task->tk_proj->kpj_shares);
433 }
434 
435 /*ARGSUSED*/
436 static int
437 project_cpu_shares_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
438     rctl_qty_t nv)
439 {
440 	ASSERT(MUTEX_HELD(&p->p_lock));
441 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
442 	if (e->rcep_p.proj == NULL)
443 		return (0);
444 
445 	e->rcep_p.proj->kpj_shares = nv;
446 
447 	return (0);
448 }
449 
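/*
 * Each rctl_ops_t vector below is laid out as { action, usage, set, test };
 * the rcop_no_* entries mark operations that a given control does not
 * support.
 */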
450 static rctl_ops_t project_cpu_shares_ops = {
451 	rcop_no_action,
452 	project_cpu_shares_usage,
453 	project_cpu_shares_set,
454 	rcop_no_test
455 };
456 
457 
458 /*
459  * project.cpu-cap resource control support.
460  */
461 /*ARGSUSED*/
462 static rctl_qty_t
463 project_cpu_cap_get(rctl_t *rctl, struct proc *p)
464 {
465 	ASSERT(MUTEX_HELD(&p->p_lock));
466 	return (cpucaps_project_get(p->p_task->tk_proj));
467 }
468 
469 /*ARGSUSED*/
470 static int
471 project_cpu_cap_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
472     rctl_qty_t nv)
473 {
474 	kproject_t *kpj = e->rcep_p.proj;
475 
476 	ASSERT(MUTEX_HELD(&p->p_lock));
477 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
478 	if (kpj == NULL)
479 		return (0);
480 
481 	/*
482 	 * Set the cap to the new value.
483 	 */
484 	return (cpucaps_project_set(kpj,  nv));
485 }
486 
487 static rctl_ops_t project_cpu_cap_ops = {
488 	rcop_no_action,
489 	project_cpu_cap_get,
490 	project_cpu_cap_set,
491 	rcop_no_test
492 };
493 
494 /*ARGSUSED*/
495 static rctl_qty_t
496 project_lwps_usage(rctl_t *r, proc_t *p)
497 {
498 	kproject_t *pj;
499 	rctl_qty_t nlwps;
500 
501 	ASSERT(MUTEX_HELD(&p->p_lock));
502 	pj = p->p_task->tk_proj;
503 	mutex_enter(&p->p_zone->zone_nlwps_lock);
504 	nlwps = pj->kpj_nlwps;
505 	mutex_exit(&p->p_zone->zone_nlwps_lock);
506 
507 	return (nlwps);
508 }
509 
510 /*ARGSUSED*/
511 static int
512 project_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
513     rctl_qty_t incr, uint_t flags)
514 {
515 	rctl_qty_t nlwps;
516 
517 	ASSERT(MUTEX_HELD(&p->p_lock));
518 	ASSERT(MUTEX_HELD(&p->p_zone->zone_nlwps_lock));
519 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
520 	if (e->rcep_p.proj == NULL)
521 		return (0);
522 
523 	nlwps = e->rcep_p.proj->kpj_nlwps;
524 	if (nlwps + incr > rcntl->rcv_value)
525 		return (1);
526 
527 	return (0);
528 }
529 
530 /*ARGSUSED*/
531 static int
532 project_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
533     rctl_qty_t nv)
534 {
535 	ASSERT(MUTEX_HELD(&p->p_lock));
536 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
537 	if (e->rcep_p.proj == NULL)
538 		return (0);
539 
540 	e->rcep_p.proj->kpj_nlwps_ctl = nv;
541 	return (0);
542 }
543 
544 static rctl_ops_t project_lwps_ops = {
545 	rcop_no_action,
546 	project_lwps_usage,
547 	project_lwps_set,
548 	project_lwps_test,
549 };
550 
551 /*ARGSUSED*/
552 static rctl_qty_t
553 project_procs_usage(rctl_t *r, proc_t *p)
554 {
555 	kproject_t *pj;
556 	rctl_qty_t nprocs;
557 
558 	ASSERT(MUTEX_HELD(&p->p_lock));
559 	pj = p->p_task->tk_proj;
560 	mutex_enter(&p->p_zone->zone_nlwps_lock);
561 	nprocs = pj->kpj_nprocs;
562 	mutex_exit(&p->p_zone->zone_nlwps_lock);
563 
564 	return (nprocs);
565 }
566 
567 /*ARGSUSED*/
568 static int
569 project_procs_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
570     rctl_qty_t incr, uint_t flags)
571 {
572 	rctl_qty_t nprocs;
573 
574 	ASSERT(MUTEX_HELD(&p->p_lock));
575 	ASSERT(MUTEX_HELD(&p->p_zone->zone_nlwps_lock));
576 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
577 	if (e->rcep_p.proj == NULL)
578 		return (0);
579 
580 	nprocs = e->rcep_p.proj->kpj_nprocs;
581 	if (nprocs + incr > rcntl->rcv_value)
582 		return (1);
583 
584 	return (0);
585 }
586 
587 /*ARGSUSED*/
588 static int
589 project_procs_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
590     rctl_qty_t nv)
591 {
592 	ASSERT(MUTEX_HELD(&p->p_lock));
593 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
594 	if (e->rcep_p.proj == NULL)
595 		return (0);
596 
597 	e->rcep_p.proj->kpj_nprocs_ctl = nv;
598 	return (0);
599 }
600 
601 static rctl_ops_t project_procs_ops = {
602 	rcop_no_action,
603 	project_procs_usage,
604 	project_procs_set,
605 	project_procs_test,
606 };
607 
608 /*ARGSUSED*/
609 static rctl_qty_t
610 project_ntasks_usage(rctl_t *r, proc_t *p)
611 {
612 	kproject_t *pj;
613 	rctl_qty_t ntasks;
614 
615 	ASSERT(MUTEX_HELD(&p->p_lock));
616 	pj = p->p_task->tk_proj;
617 	mutex_enter(&p->p_zone->zone_nlwps_lock);
618 	ntasks = pj->kpj_ntasks;
619 	mutex_exit(&p->p_zone->zone_nlwps_lock);
620 
621 	return (ntasks);
622 }
623 
624 /*ARGSUSED*/
625 static int
626 project_ntasks_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
627     rctl_qty_t incr, uint_t flags)
628 {
629 	rctl_qty_t ntasks;
630 
631 	ASSERT(MUTEX_HELD(&p->p_lock));
632 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
633 	ntasks = e->rcep_p.proj->kpj_ntasks;
634 	if (ntasks + incr > rcntl->rcv_value)
635 		return (1);
636 
637 	return (0);
638 }
639 
640 /*ARGSUSED*/
641 static int
642 project_ntasks_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
643     rctl_qty_t nv)
644 {
645 	ASSERT(MUTEX_HELD(&p->p_lock));
646 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
647 	e->rcep_p.proj->kpj_ntasks_ctl = nv;
648 	return (0);
649 }
650 
651 static rctl_ops_t project_tasks_ops = {
652 	rcop_no_action,
653 	project_ntasks_usage,
654 	project_ntasks_set,
655 	project_ntasks_test,
656 };
657 
658 /*
659  * project.max-shm-memory resource control support.
660  */
661 
662 /*ARGSUSED*/
663 static rctl_qty_t
664 project_shmmax_usage(rctl_t *rctl, struct proc *p)
665 {
666 	ASSERT(MUTEX_HELD(&p->p_lock));
667 	return (p->p_task->tk_proj->kpj_data.kpd_shmmax);
668 }
669 
670 /*ARGSUSED*/
671 static int
672 project_shmmax_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
673     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
674 {
675 	rctl_qty_t v;
676 	ASSERT(MUTEX_HELD(&p->p_lock));
677 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
678 	v = e->rcep_p.proj->kpj_data.kpd_shmmax + inc;
679 	if (v > rval->rcv_value)
680 		return (1);
681 
682 	return (0);
683 }
684 
685 static rctl_ops_t project_shmmax_ops = {
686 	rcop_no_action,
687 	project_shmmax_usage,
688 	rcop_no_set,
689 	project_shmmax_test
690 };
691 
692 /*
693  * project.max-shm-ids resource control support.
694  */
695 
696 /*ARGSUSED*/
697 static rctl_qty_t
698 project_shmmni_usage(rctl_t *rctl, struct proc *p)
699 {
700 	ASSERT(MUTEX_HELD(&p->p_lock));
701 	return (p->p_task->tk_proj->kpj_data.kpd_ipc.ipcq_shmmni);
702 }
703 
704 /*ARGSUSED*/
705 static int
706 project_shmmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
707     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
708 {
709 	rctl_qty_t v;
710 	ASSERT(MUTEX_HELD(&p->p_lock));
711 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
712 	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_shmmni + inc;
713 	if (v > rval->rcv_value)
714 		return (1);
715 
716 	return (0);
717 }
718 
719 static rctl_ops_t project_shmmni_ops = {
720 	rcop_no_action,
721 	project_shmmni_usage,
722 	rcop_no_set,
723 	project_shmmni_test
724 };
725 
726 /*
727  * project.max-sem-ids resource control support.
728  */
729 
730 /*ARGSUSED*/
731 static rctl_qty_t
732 project_semmni_usage(rctl_t *rctl, struct proc *p)
733 {
734 	ASSERT(MUTEX_HELD(&p->p_lock));
735 	return (p->p_task->tk_proj->kpj_data.kpd_ipc.ipcq_semmni);
736 }
737 
738 /*ARGSUSED*/
739 static int
740 project_semmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
741     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
742 {
743 	rctl_qty_t v;
744 	ASSERT(MUTEX_HELD(&p->p_lock));
745 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
746 	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_semmni + inc;
747 	if (v > rval->rcv_value)
748 		return (1);
749 
750 	return (0);
751 }
752 
753 static rctl_ops_t project_semmni_ops = {
754 	rcop_no_action,
755 	project_semmni_usage,
756 	rcop_no_set,
757 	project_semmni_test
758 };
759 
760 /*
761  * project.max-msg-ids resource control support.
762  */
763 
764 /*ARGSUSED*/
765 static rctl_qty_t
766 project_msgmni_usage(rctl_t *rctl, struct proc *p)
767 {
768 	ASSERT(MUTEX_HELD(&p->p_lock));
769 	return (p->p_task->tk_proj->kpj_data.kpd_ipc.ipcq_msgmni);
770 }
771 
772 /*ARGSUSED*/
773 static int
774 project_msgmni_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
775     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
776 {
777 	rctl_qty_t v;
778 	ASSERT(MUTEX_HELD(&p->p_lock));
779 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
780 	v = e->rcep_p.proj->kpj_data.kpd_ipc.ipcq_msgmni + inc;
781 	if (v > rval->rcv_value)
782 		return (1);
783 
784 	return (0);
785 }
786 
787 static rctl_ops_t project_msgmni_ops = {
788 	rcop_no_action,
789 	project_msgmni_usage,
790 	rcop_no_set,
791 	project_msgmni_test
792 };
793 
794 /*ARGSUSED*/
795 static rctl_qty_t
796 project_locked_mem_usage(rctl_t *rctl, struct proc *p)
797 {
798 	rctl_qty_t q;
799 	ASSERT(MUTEX_HELD(&p->p_lock));
800 	mutex_enter(&p->p_zone->zone_mem_lock);
801 	q = p->p_task->tk_proj->kpj_data.kpd_locked_mem;
802 	mutex_exit(&p->p_zone->zone_mem_lock);
803 	return (q);
804 }
805 
806 /*ARGSUSED*/
807 static int
808 project_locked_mem_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
809     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
810 {
811 	rctl_qty_t q;
812 	ASSERT(MUTEX_HELD(&p->p_lock));
813 	ASSERT(MUTEX_HELD(&p->p_zone->zone_mem_lock));
814 	q = p->p_task->tk_proj->kpj_data.kpd_locked_mem;
815 	if (q + inc > rval->rcv_value)
816 		return (1);
817 	return (0);
818 }
819 
820 /*ARGSUSED*/
821 static int
822 project_locked_mem_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
823     rctl_qty_t nv)
824 {
825 	ASSERT(MUTEX_HELD(&p->p_lock));
826 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
827 	if (e->rcep_p.proj == NULL)
828 		return (0);
829 
830 	e->rcep_p.proj->kpj_data.kpd_locked_mem_ctl = nv;
831 	return (0);
832 }
833 
834 static rctl_ops_t project_locked_mem_ops = {
835 	rcop_no_action,
836 	project_locked_mem_usage,
837 	project_locked_mem_set,
838 	project_locked_mem_test
839 };
840 
841 /*
842  * project.max-contracts resource control support.
843  */
844 
845 /*ARGSUSED*/
846 static int
847 project_contract_test(struct rctl *rctl, struct proc *p, rctl_entity_p_t *e,
848     rctl_val_t *rval, rctl_qty_t inc, uint_t flags)
849 {
850 	rctl_qty_t v;
851 
852 	ASSERT(MUTEX_HELD(&p->p_lock));
853 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
854 
855 	v = e->rcep_p.proj->kpj_data.kpd_contract + inc;
856 
857 	if ((p->p_task != NULL) && (p->p_task->tk_proj) != NULL &&
858 	    (v > rval->rcv_value))
859 		return (1);
860 
861 	return (0);
862 }
863 
864 static rctl_ops_t project_contract_ops = {
865 	rcop_no_action,
866 	rcop_no_usage,
867 	rcop_no_set,
868 	project_contract_test
869 };
870 
871 /*ARGSUSED*/
872 static rctl_qty_t
873 project_crypto_usage(rctl_t *r, proc_t *p)
874 {
875 	ASSERT(MUTEX_HELD(&p->p_lock));
876 	return (p->p_task->tk_proj->kpj_data.kpd_crypto_mem);
877 }
878 
879 /*ARGSUSED*/
880 static int
881 project_crypto_set(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
882     rctl_qty_t nv)
883 {
884 	ASSERT(MUTEX_HELD(&p->p_lock));
885 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
886 	if (e->rcep_p.proj == NULL)
887 		return (0);
888 
889 	e->rcep_p.proj->kpj_data.kpd_crypto_mem_ctl = nv;
890 	return (0);
891 }
892 
893 /*ARGSUSED*/
894 static int
895 project_crypto_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
896     rctl_val_t *rval, rctl_qty_t incr, uint_t flags)
897 {
898 	rctl_qty_t v;
899 	ASSERT(MUTEX_HELD(&p->p_lock));
900 	ASSERT(e->rcep_t == RCENTITY_PROJECT);
901 	v = e->rcep_p.proj->kpj_data.kpd_crypto_mem + incr;
902 	if (v > rval->rcv_value)
903 		return (1);
904 	return (0);
905 }
906 
907 static rctl_ops_t project_crypto_mem_ops = {
908 	rcop_no_action,
909 	project_crypto_usage,
910 	project_crypto_set,
911 	project_crypto_test
912 };
913 
914 /*
915  * void project_init(void)
916  *
917  * Overview
918  *   Initialize the project subsystem, including the primordial project 0 entry.
919  *   Register generic project resource controls, if any.
920  *
921  * Return values
922  *   None.
923  *
924  * Caller's context
925  *   Safe for KM_SLEEP allocations.
926  */
927 void
928 project_init(void)
929 {
930 	rctl_qty_t shmmni, shmmax, qty;
931 	boolean_t check;
932 
933 	projects_hash = mod_hash_create_extended("projects_hash",
934 	    project_hash_size, mod_hash_null_keydtor, project_hash_val_dtor,
935 	    project_hash_by_id,
936 	    (void *)(uintptr_t)mod_hash_iddata_gen(project_hash_size),
937 	    project_hash_key_cmp, KM_SLEEP);
938 
939 	rc_project_cpu_shares = rctl_register("project.cpu-shares",
940 	    RCENTITY_PROJECT, RCTL_GLOBAL_SIGNAL_NEVER |
941 	    RCTL_GLOBAL_DENY_NEVER | RCTL_GLOBAL_NOBASIC |
942 	    RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER,
943 	    FSS_MAXSHARES, FSS_MAXSHARES,
944 	    &project_cpu_shares_ops);
945 	rctl_add_default_limit("project.cpu-shares", 1, RCPRIV_PRIVILEGED,
946 	    RCTL_LOCAL_NOACTION);
947 
948 	rc_project_cpu_cap = rctl_register("project.cpu-cap",
949 	    RCENTITY_PROJECT, RCTL_GLOBAL_SIGNAL_NEVER |
950 	    RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
951 	    RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER |
952 	    RCTL_GLOBAL_INFINITE,
953 	    MAXCAP, MAXCAP, &project_cpu_cap_ops);
954 
955 	rc_project_nlwps = rctl_register("project.max-lwps", RCENTITY_PROJECT,
956 	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
957 	    INT_MAX, INT_MAX, &project_lwps_ops);
958 
959 	rc_project_nprocs = rctl_register("project.max-processes",
960 	    RCENTITY_PROJECT, RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC |
961 	    RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX, &project_procs_ops);
962 
963 	rc_project_ntasks = rctl_register("project.max-tasks", RCENTITY_PROJECT,
964 	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
965 	    INT_MAX, INT_MAX, &project_tasks_ops);
966 
967 	/*
968 	 * This rctl handle is used by /dev/crypto. It is here rather than
969 	 * in misc/kcf or the drv/crypto module because resource controls
970 	 * currently don't allow modules to be unloaded, and the control
971 	 * must be registered before init starts.
972 	 */
973 	rc_project_crypto_mem = rctl_register("project.max-crypto-memory",
974 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
975 	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX,
976 	    &project_crypto_mem_ops);
977 
978 	/*
979 	 * Default to a quarter of the machine's memory
980 	 */
981 	qty = availrmem_initial << (PAGESHIFT - 2);
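	/*
	 * Worked example (illustrative): availrmem_initial is in pages, so
	 * the shift above by (PAGESHIFT - 2) multiplies by PAGESIZE and
	 * divides by four.  With 4 KB pages (PAGESHIFT == 12) and 262144
	 * pages (1 GB) initially available, qty is 262144 << 10, or 256 MB.
	 */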
982 	rctl_add_default_limit("project.max-crypto-memory", qty,
983 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
984 
985 	/*
986 	 * System V IPC resource controls
987 	 */
988 	rc_project_semmni = rctl_register("project.max-sem-ids",
989 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
990 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_semmni_ops);
991 	rctl_add_legacy_limit("project.max-sem-ids", "semsys",
992 	    "seminfo_semmni", 128, IPC_IDS_MAX);
993 
994 	rc_project_msgmni = rctl_register("project.max-msg-ids",
995 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
996 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_msgmni_ops);
997 	rctl_add_legacy_limit("project.max-msg-ids", "msgsys",
998 	    "msginfo_msgmni", 128, IPC_IDS_MAX);
999 
1000 	rc_project_shmmni = rctl_register("project.max-shm-ids",
1001 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
1002 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &project_shmmni_ops);
1003 	rctl_add_legacy_limit("project.max-shm-ids", "shmsys",
1004 	    "shminfo_shmmni", 128, IPC_IDS_MAX);
1005 
1006 	rc_project_shmmax = rctl_register("project.max-shm-memory",
1007 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
1008 	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX, &project_shmmax_ops);
1009 
1010 	check = B_FALSE;
1011 	if (!mod_sysvar("shmsys", "shminfo_shmmni", &shmmni))
1012 		shmmni = 100;
1013 	else
1014 		check = B_TRUE;
1015 	if (!mod_sysvar("shmsys", "shminfo_shmmax", &shmmax))
1016 		shmmax = 0x800000;
1017 	else
1018 		check = B_TRUE;
1019 
1020 	/*
1021 	 * Default to a quarter of the machine's memory
1022 	 */
1023 	qty = availrmem_initial << (PAGESHIFT - 2);
1024 	if (check) {
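		/*
		 * Guard against 64-bit overflow: if shmmni * shmmax could
		 * overflow a uint64_t, use UINT64_MAX rather than computing
		 * the product.
		 */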
1025 		if ((shmmax > 0) && (UINT64_MAX / shmmax <= shmmni))
1026 			qty = UINT64_MAX;
1027 		else if (shmmni * shmmax > qty)
1028 			qty = shmmni * shmmax;
1029 	}
1030 	rctl_add_default_limit("project.max-shm-memory", qty,
1031 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
1032 
1033 	/*
1034 	 * Event Ports resource controls
1035 	 */
1036 
1037 	rc_project_portids = rctl_register("project.max-port-ids",
1038 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
1039 	    RCTL_GLOBAL_COUNT, PORT_MAX_PORTS, PORT_MAX_PORTS,
1040 	    &rctl_absolute_ops);
1041 	rctl_add_default_limit("project.max-port-ids", PORT_DEFAULT_PORTS,
1042 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
1043 
1044 	/*
1045 	 * Resource control for locked memory
1046 	 */
1047 	rc_project_locked_mem = rctl_register(
1048 	    "project.max-locked-memory", RCENTITY_PROJECT,
1049 	    RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES,
1050 	    UINT64_MAX, UINT64_MAX, &project_locked_mem_ops);
1051 
1052 	/*
1053 	 * Per project limit on contracts.
1054 	 */
1055 	rc_project_contract = rctl_register("project.max-contracts",
1056 	    RCENTITY_PROJECT, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
1057 	    RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX, &project_contract_ops);
1058 	rctl_add_default_limit("project.max-contracts", 10000,
1059 	    RCPRIV_PRIVILEGED, RCTL_LOCAL_DENY);
1060 
1061 	t0.t_proj = proj0p = project_hold_by_id(0, &zone0,
1062 	    PROJECT_HOLD_INSERT);
1063 
1064 	mutex_enter(&p0.p_lock);
1065 	proj0p->kpj_nlwps = p0.p_lwpcnt;
1066 	mutex_exit(&p0.p_lock);
1067 	proj0p->kpj_nprocs = 1;
1068 	proj0p->kpj_ntasks = 1;
1069 }
1070 
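/*
 * kstat update callbacks for the project kstats created below.  Both are
 * read-only (writes return EACCES) and report the project's current usage
 * and its current control (limit) value.
 */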
1071 static int
1072 project_lockedmem_kstat_update(kstat_t *ksp, int rw)
1073 {
1074 	kproject_t *pj = ksp->ks_private;
1075 	kproject_kstat_t *kpk = ksp->ks_data;
1076 
1077 	if (rw == KSTAT_WRITE)
1078 		return (EACCES);
1079 
1080 	kpk->kpk_usage.value.ui64 = pj->kpj_data.kpd_locked_mem;
1081 	kpk->kpk_value.value.ui64 = pj->kpj_data.kpd_locked_mem_ctl;
1082 	return (0);
1083 }
1084 
1085 static int
1086 project_nprocs_kstat_update(kstat_t *ksp, int rw)
1087 {
1088 	kproject_t *pj = ksp->ks_private;
1089 	kproject_kstat_t *kpk = ksp->ks_data;
1090 
1091 	if (rw == KSTAT_WRITE)
1092 		return (EACCES);
1093 
1094 	kpk->kpk_usage.value.ui64 = pj->kpj_nprocs;
1095 	kpk->kpk_value.value.ui64 = pj->kpj_nprocs_ctl;
1096 	return (0);
1097 }
1098 
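/*
 * Create and install a named, virtual kstat for the project, with "zonename",
 * "usage", and "value" fields; the caller-supplied update function fills in
 * usage and value on each read.
 */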
1099 static kstat_t *
1100 project_kstat_create_common(kproject_t *pj, char *name, char *zonename,
1101     int (*updatefunc) (kstat_t *, int))
1102 {
1103 	kstat_t *ksp;
1104 	kproject_kstat_t *kpk;
1105 
1106 	ksp = rctl_kstat_create_project(pj, name, KSTAT_TYPE_NAMED,
1107 	    sizeof (kproject_kstat_t) / sizeof (kstat_named_t),
1108 	    KSTAT_FLAG_VIRTUAL);
1109 
1110 	if (ksp == NULL)
1111 		return (NULL);
1112 
1113 	kpk = ksp->ks_data = kmem_alloc(sizeof (kproject_kstat_t), KM_SLEEP);
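	/*
	 * The zone name is stored as a KSTAT_DATA_STRING; its bytes live
	 * outside the named-kstat array, so account for them in ks_data_size.
	 */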
1114 	ksp->ks_data_size += strlen(zonename) + 1;
1115 	kstat_named_init(&kpk->kpk_zonename, "zonename", KSTAT_DATA_STRING);
1116 	kstat_named_setstr(&kpk->kpk_zonename, zonename);
1117 	kstat_named_init(&kpk->kpk_usage, "usage", KSTAT_DATA_UINT64);
1118 	kstat_named_init(&kpk->kpk_value, "value", KSTAT_DATA_UINT64);
1119 	ksp->ks_update = updatefunc;
1120 	ksp->ks_private = pj;
1121 	kstat_install(ksp);
1122 	return (ksp);
1123 }
1124 
1125 static void
1126 project_kstat_create(kproject_t *pj, zone_t *zone)
1127 {
1128 	kstat_t *ksp_lockedmem;
1129 	kstat_t *ksp_nprocs;
1130 
1131 	ksp_lockedmem = project_kstat_create_common(pj, "lockedmem",
1132 	    zone->zone_name, project_lockedmem_kstat_update);
1133 	ksp_nprocs = project_kstat_create_common(pj, "nprocs",
1134 	    zone->zone_name, project_nprocs_kstat_update);
1135 
1136 	mutex_enter(&project_hash_lock);
1137 	ASSERT(pj->kpj_data.kpd_lockedmem_kstat == NULL);
1138 	pj->kpj_data.kpd_lockedmem_kstat = ksp_lockedmem;
1139 	ASSERT(pj->kpj_data.kpd_nprocs_kstat == NULL);
1140 	pj->kpj_data.kpd_nprocs_kstat = ksp_nprocs;
1141 	mutex_exit(&project_hash_lock);
1142 }
1143 
1144 static void
1145 project_kstat_delete_common(kstat_t **kstat)
1146 {
1147 	void *data;
1148 
1149 	if (*kstat != NULL) {
1150 		data = (*kstat)->ks_data;
1151 		kstat_delete(*kstat);
1152 		kmem_free(data, sizeof (kproject_kstat_t));
1153 		*kstat = NULL;
1154 	}
1155 }
1156 
1157 static void
1158 project_kstat_delete(kproject_t *pj)
1159 {
1160 	project_kstat_delete_common(&pj->kpj_data.kpd_lockedmem_kstat);
1161 	project_kstat_delete_common(&pj->kpj_data.kpd_nprocs_kstat);
1162 }
1163