/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/atomic.h>
#include <sys/callb.h>
#include <sys/cmn_err.h>
#include <sys/exacct.h>
#include <sys/id_space.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/modhash.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/project.h>
#include <sys/rctl.h>
#include <sys/systm.h>
#include <sys/task.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/zone.h>
#include <sys/cpuvar.h>
#include <sys/fss.h>
#include <sys/class.h>

/*
 * Tasks
 *
 *   A task is a collection of processes, associated with a common project ID
 *   and related by a common initial parent.  The task primarily represents a
 *   natural process sequence with known resource usage, although it can also be
 *   viewed as a convenient grouping of processes for signal delivery, processor
 *   binding, and administrative operations.
 *
 * Membership and observership
 *   We can conceive of situations where processes outside of the task may wish
 *   to examine the resource usage of the task.  Similarly, a number of the
 *   administrative operations on a task can be performed by processes that are
 *   not members of the task.  Accordingly, we must design a locking strategy
 *   where observers of the task, who wish to examine or operate on the task,
 *   and members of the task, who can perform the mentioned operations as well
 *   as leave the task, see a consistent and correct representation of the task
 *   at all times.
 *
 * Locking
 *   Because task membership is a new relation between processes, its locking
 *   becomes an additional responsibility of the pidlock/p_lock locking
 *   sequence; however, tasks closely resemble sessions and the session locking
 *   model is mostly appropriate for the interaction of tasks, processes, and
 *   procfs.
 *
 *   kmutex_t task_hash_lock
 *     task_hash_lock is a global lock protecting the contents of the task
 *     ID-to-task pointer hash.  Holders of task_hash_lock must not attempt to
 *     acquire pidlock or p_lock.
 *   uint_t tk_hold_count
 *     tk_hold_count, the number of members and observers of the current task,
 *     must be manipulated atomically.
 *   proc_t *tk_memb_list
 *   proc_t *p_tasknext
 *   proc_t *p_taskprev
 *     The task's membership list is protected by pidlock, and is therefore
 *     always acquired before any of its members' p_lock mutexes.  The p_task
 *     member of the proc structure is protected by pidlock or p_lock for
 *     reading, and by both pidlock and p_lock for modification, as is done for
 *     p_sessp.  The key point is that only the process itself, and no other
 *     entity on the system, can modify its p_task.  (/proc will use prlock()
 *     to prevent the process from leaving, as opposed to pidlock.)
 *   kmutex_t tk_usage_lock
 *     tk_usage_lock is a per-task lock protecting the contents of the task
 *     usage structure and the tk_nlwps counter for the task.max-lwps resource
 *     control.
 */

int task_hash_size = 256;
static kmutex_t task_hash_lock;
static mod_hash_t *task_hash;

static id_space_t *taskid_space;	/* global taskid space */
static kmem_cache_t *task_cache;	/* kmem cache for task structures */

rctl_hndl_t rc_task_lwps;
rctl_hndl_t rc_task_nprocs;
rctl_hndl_t rc_task_cpu_time;

/*
 * Resource usage is committed using task queues; if taskq_dispatch() fails
 * due to resource constraints, the task is placed on a list for background
 * processing by the task_commit_thread() backup thread.
 */
static kmutex_t task_commit_lock;	/* protects list pointers and cv */
static kcondvar_t task_commit_cv;	/* wakeup task_commit_thread */
static task_t *task_commit_head = NULL;
static task_t *task_commit_tail = NULL;
kthread_t *task_commit_thread;

static void task_commit(void);
static kstat_t *task_kstat_create(task_t *, zone_t *);
static void task_kstat_delete(task_t *);

/*
 * static rctl_qty_t task_lwps_usage(rctl_t *r, proc_t *p)
 *
 * Overview
 *   task_lwps_usage() is the usage operation for the resource control
 *   associated with the number of LWPs in a task.
 *
 * Return values
 *   The number of LWPs in the given task is returned.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */
/*ARGSUSED*/
static rctl_qty_t
task_lwps_usage(rctl_t *r, proc_t *p)
{
	task_t *t;
	rctl_qty_t nlwps;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = p->p_task;
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	nlwps = t->tk_nlwps;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	return (nlwps);
}

/*
 * static int task_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
 *   rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
 *
 * Overview
 *   task_lwps_test() is the test-if-valid-increment for the resource control
 *   for the number of LWPs in a task.
 *
 * Return values
 *   0 if the threshold limit was not passed, 1 if the limit was passed.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */
/*ARGSUSED*/
static int
task_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
    rctl_qty_t incr, uint_t flags)
{
	rctl_qty_t nlwps;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_TASK);
	if (e->rcep_p.task == NULL)
		return (0);

	ASSERT(MUTEX_HELD(&(e->rcep_p.task->tk_zone->zone_nlwps_lock)));
	nlwps = e->rcep_p.task->tk_nlwps;

	if (nlwps + incr > rcntl->rcv_value)
		return (1);

	return (0);
}

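/*
 * static int task_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
 *   rctl_qty_t nv)
 *
 * Overview
 *   task_lwps_set() is the set operation for the resource control associated
 *   with the number of LWPs in a task; it records the new control value in
 *   tk_nlwps_ctl.
 *
 * Return values
 *   0 is always returned.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */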
/*ARGSUSED*/
static int
task_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e, rctl_qty_t nv)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_TASK);
	if (e->rcep_p.task == NULL)
		return (0);

	e->rcep_p.task->tk_nlwps_ctl = nv;
	return (0);
}

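/*
 * static rctl_qty_t task_nprocs_usage(rctl_t *r, proc_t *p)
 *
 * Overview
 *   task_nprocs_usage() is the usage operation for the resource control
 *   associated with the number of processes in a task.
 *
 * Return values
 *   The number of processes in the given task is returned.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */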
/*ARGSUSED*/
static rctl_qty_t
task_nprocs_usage(rctl_t *r, proc_t *p)
{
	task_t *t;
	rctl_qty_t nprocs;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = p->p_task;
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	nprocs = t->tk_nprocs;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	return (nprocs);
}

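/*
 * static int task_nprocs_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
 *   rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
 *
 * Overview
 *   task_nprocs_test() is the test-if-valid-increment for the resource
 *   control for the number of processes in a task.
 *
 * Return values
 *   0 if the threshold limit was not passed, 1 if the limit was passed.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */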
/*ARGSUSED*/
static int
task_nprocs_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
    rctl_qty_t incr, uint_t flags)
{
	rctl_qty_t nprocs;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_TASK);
	if (e->rcep_p.task == NULL)
		return (0);

	ASSERT(MUTEX_HELD(&(e->rcep_p.task->tk_zone->zone_nlwps_lock)));
	nprocs = e->rcep_p.task->tk_nprocs;

	if (nprocs + incr > rcntl->rcv_value)
		return (1);

	return (0);
}

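/*
 * static int task_nprocs_set(rctl_t *rctl, struct proc *p,
 *   rctl_entity_p_t *e, rctl_qty_t nv)
 *
 * Overview
 *   task_nprocs_set() is the set operation for the resource control
 *   associated with the number of processes in a task; it records the new
 *   control value in tk_nprocs_ctl.
 *
 * Return values
 *   0 is always returned.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */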
/*ARGSUSED*/
static int
task_nprocs_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_qty_t nv)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_TASK);
	if (e->rcep_p.task == NULL)
		return (0);

	e->rcep_p.task->tk_nprocs_ctl = nv;
	return (0);
}

/*
 * static rctl_qty_t task_cpu_time_usage(rctl_t *r, proc_t *p)
 *
 * Overview
 *   task_cpu_time_usage() is the usage operation for the resource control
 *   associated with the total accrued CPU seconds for a task.
 *
 * Return values
 *   The number of CPU seconds consumed by the task is returned.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */
/*ARGSUSED*/
static rctl_qty_t
task_cpu_time_usage(rctl_t *r, proc_t *p)
{
	task_t *t = p->p_task;

	ASSERT(MUTEX_HELD(&p->p_lock));
	return (t->tk_cpu_time);
}

/*
 * rctl_qty_t task_cpu_time_incr(task_t *t, rctl_qty_t incr)
 *
 * Overview
 *   task_cpu_time_incr() increments the amount of CPU time used
 *   by this task.
 *
 * Return values
 *   The task's total accrued CPU time in seconds if a second or more has
 *   accumulated, 0 otherwise.
 *
 * Caller's context
 *   This is called by the clock tick accounting function to charge
 *   CPU time to a task.
 */
rctl_qty_t
task_cpu_time_incr(task_t *t, rctl_qty_t incr)
{
	rctl_qty_t ret = 0;

	mutex_enter(&t->tk_cpu_time_lock);
	t->tk_cpu_ticks += incr;
	if (t->tk_cpu_ticks >= hz) {
		t->tk_cpu_time += t->tk_cpu_ticks / hz;
		t->tk_cpu_ticks = t->tk_cpu_ticks % hz;
		ret = t->tk_cpu_time;
	}
	mutex_exit(&t->tk_cpu_time_lock);

	return (ret);
}

/*
 * static int task_cpu_time_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
 *   struct rctl_val *rcntl, rctl_qty_t incr, uint_t flags)
 *
 * Overview
 *   task_cpu_time_test() is the test-if-valid-increment for the resource
 *   control for the total accrued CPU seconds for a task.
 *
 * Return values
 *   0 if the threshold limit was not passed, 1 if the limit was passed.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */
/*ARGSUSED*/
static int
task_cpu_time_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
    struct rctl_val *rcntl, rctl_qty_t incr, uint_t flags)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_TASK);
	if (e->rcep_p.task == NULL)
		return (0);

	if (incr >= rcntl->rcv_value)
		return (1);

	return (0);
}

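/*
 * static task_t *task_find(taskid_t id, zoneid_t zoneid)
 *
 * Overview
 *   task_find() looks up a task by its ID in the task hash.  If zoneid is
 *   not ALL_ZONES, the task must additionally belong to the given zone.
 *
 * Return values
 *   A pointer to the matching task, or NULL if no task with the given ID
 *   (visible from the given zone) exists.
 *
 * Caller's context
 *   task_hash_lock must be held across the call.
 */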
static task_t *
task_find(taskid_t id, zoneid_t zoneid)
{
	task_t *tk;

	ASSERT(MUTEX_HELD(&task_hash_lock));

	if (mod_hash_find(task_hash, (mod_hash_key_t)(uintptr_t)id,
	    (mod_hash_val_t *)&tk) == MH_ERR_NOTFOUND ||
	    (zoneid != ALL_ZONES && zoneid != tk->tk_zone->zone_id))
		return (NULL);

	return (tk);
}

/*
 * task_hold_by_id(), task_hold_by_id_zone()
 *
 * Overview
 *   task_hold_by_id() is used to take a reference on a task by its task id,
 *   supporting the various system call interfaces for obtaining resource data,
 *   delivering signals, and so forth.
 *
 * Return values
 *   Returns a pointer to the task_t with taskid_t id.  The task is returned
 *   with its hold count incremented by one.  Returns NULL if there
 *   is no task with the requested id.
 *
 * Caller's context
 *   Caller must not be holding task_hash_lock.  No restrictions on context.
 */
task_t *
task_hold_by_id_zone(taskid_t id, zoneid_t zoneid)
{
	task_t *tk;

	mutex_enter(&task_hash_lock);
	if ((tk = task_find(id, zoneid)) != NULL)
		atomic_add_32(&tk->tk_hold_count, 1);
	mutex_exit(&task_hash_lock);

	return (tk);
}

task_t *
task_hold_by_id(taskid_t id)
{
	zoneid_t zoneid;

	if (INGLOBALZONE(curproc))
		zoneid = ALL_ZONES;
	else
		zoneid = getzoneid();
	return (task_hold_by_id_zone(id, zoneid));
}

/*
 * void task_hold(task_t *)
 *
 * Overview
 *   task_hold() is used to take an additional reference to the given task.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   No restriction on context.
 */
void
task_hold(task_t *tk)
{
	atomic_add_32(&tk->tk_hold_count, 1);
}

/*
 * void task_rele(task_t *)
 *
 * Overview
 *   task_rele() relinquishes a reference on the given task, which was acquired
 *   via task_hold() or task_hold_by_id().  If this is the last member or
 *   observer of the task, dispatch it for commitment via the accounting
 *   subsystem.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   Caller must not be holding the task_hash_lock.
 */
void
task_rele(task_t *tk)
{
	mutex_enter(&task_hash_lock);
	if (atomic_add_32_nv(&tk->tk_hold_count, -1) > 0) {
		mutex_exit(&task_hash_lock);
		return;
	}

	ASSERT(tk->tk_nprocs == 0);

	mutex_enter(&tk->tk_zone->zone_nlwps_lock);
	tk->tk_proj->kpj_ntasks--;
	mutex_exit(&tk->tk_zone->zone_nlwps_lock);

	task_kstat_delete(tk);

	if (mod_hash_destroy(task_hash,
	    (mod_hash_key_t)(uintptr_t)tk->tk_tkid) != 0)
		panic("unable to delete task %d", tk->tk_tkid);
	mutex_exit(&task_hash_lock);

	/*
	 * At this point, there are no members or observers of the task, so we
	 * can safely send it on for commitment to the accounting subsystem.
	 * The task will be destroyed in task_end() subsequent to commitment.
	 * Since we may be called with pidlock held, taskq_dispatch() cannot
	 * sleep. Commitment is handled by a backup thread in case dispatching
	 * the task fails.
	 */
	if (taskq_dispatch(exacct_queue, exacct_commit_task, tk,
	    TQ_NOSLEEP | TQ_NOQUEUE) == NULL) {
		mutex_enter(&task_commit_lock);
		if (task_commit_head == NULL) {
			task_commit_head = task_commit_tail = tk;
		} else {
			task_commit_tail->tk_commit_next = tk;
			task_commit_tail = tk;
		}
		cv_signal(&task_commit_cv);
		mutex_exit(&task_commit_lock);
	}
}

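/*
 * Example (non-normative sketch): the expected pairing of a temporary hold
 * with task_rele() for an observer looking up a task by ID.  The "examine"
 * step is hypothetical; only the hold/rele protocol itself is prescribed by
 * this file.
 *
 *	task_t *tk;
 *
 *	if ((tk = task_hold_by_id(id)) != NULL) {
 *		... examine or operate on tk ...
 *		task_rele(tk);
 *	}
 */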
/*
 * task_t *task_create(projid_t, zone_t *)
 *
 * Overview
 *   A process constructing a new task calls task_create() to construct and
 *   preinitialize the task for the appropriate destination project.  Only one
 *   task, the primordial task0, is not created with task_create().
 *
 * Return values
 *   A pointer to the newly created, preinitialized task.
 *
 * Caller's context
 *   Caller's context should be safe for KM_SLEEP allocations.
 *   The caller should appropriately bump the kpj_ntasks counter on the
 *   project that contains this task.
 */
task_t *
task_create(projid_t projid, zone_t *zone)
{
	task_t *tk = kmem_cache_alloc(task_cache, KM_SLEEP);
	task_t *ancestor_tk;
	taskid_t tkid;
	task_usage_t *tu = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	mod_hash_hndl_t hndl;
	rctl_set_t *set = rctl_set_create();
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;

	bzero(tk, sizeof (task_t));

	tk->tk_tkid = tkid = id_alloc(taskid_space);
	tk->tk_nlwps = 0;
	tk->tk_nlwps_ctl = INT_MAX;
	tk->tk_nprocs = 0;
	tk->tk_nprocs_ctl = INT_MAX;
	tk->tk_usage = tu;
	tk->tk_inherited = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	tk->tk_proj = project_hold_by_id(projid, zone, PROJECT_HOLD_INSERT);
	tk->tk_flags = TASK_NORMAL;
	tk->tk_commit_next = NULL;

	/*
	 * Copy ancestor task's resource controls.
	 */
	zone_task_hold(zone);
	mutex_enter(&curproc->p_lock);
	ancestor_tk = curproc->p_task;
	task_hold(ancestor_tk);
	tk->tk_zone = zone;
	mutex_exit(&curproc->p_lock);

	for (;;) {
		gp = rctl_set_dup_prealloc(ancestor_tk->tk_rctls);

		mutex_enter(&ancestor_tk->tk_rctls->rcs_lock);
		if (rctl_set_dup_ready(ancestor_tk->tk_rctls, gp))
			break;

		mutex_exit(&ancestor_tk->tk_rctls->rcs_lock);

		rctl_prealloc_destroy(gp);
	}

	/*
	 * At this point, curproc does not have the appropriate linkage
	 * through the task to the project. So, rctl_set_dup should only
	 * copy the rctls, and leave the callbacks for later.
	 */
	e.rcep_p.task = tk;
	e.rcep_t = RCENTITY_TASK;
	tk->tk_rctls = rctl_set_dup(ancestor_tk->tk_rctls, curproc, curproc, &e,
	    set, gp, RCD_DUP);
	mutex_exit(&ancestor_tk->tk_rctls->rcs_lock);

	rctl_prealloc_destroy(gp);

	/*
	 * Record the ancestor task's ID for use by extended accounting.
	 */
	tu->tu_anctaskid = ancestor_tk->tk_tkid;
	task_rele(ancestor_tk);

	/*
	 * Put new task structure in the hash table.
	 */
	(void) mod_hash_reserve(task_hash, &hndl);
	mutex_enter(&task_hash_lock);
	ASSERT(task_find(tkid, zone->zone_id) == NULL);
	if (mod_hash_insert_reserve(task_hash, (mod_hash_key_t)(uintptr_t)tkid,
	    (mod_hash_val_t *)tk, hndl) != 0) {
		mod_hash_cancel(task_hash, &hndl);
		panic("unable to insert task %d(%p)", tkid, (void *)tk);
	}
	mutex_exit(&task_hash_lock);

	tk->tk_nprocs_kstat = task_kstat_create(tk, zone);
	return (tk);
}

/*
 * void task_attach(task_t *, proc_t *)
 *
 * Overview
 *   task_attach() is used to attach a process to a task; this operation is only
 *   performed as a result of a fork() or settaskid() system call.  The proc_t's
 *   p_tasknext and p_taskprev fields will be set such that the proc_t is a
 *   member of the doubly-linked list of proc_t's that make up the task.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   pidlock and p->p_lock must be held on entry.
 */
void
task_attach(task_t *tk, proc_t *p)
{
	proc_t *first, *prev;
	ASSERT(tk != NULL);
	ASSERT(p != NULL);
	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&p->p_lock));

	if (tk->tk_memb_list == NULL) {
		p->p_tasknext = p;
		p->p_taskprev = p;
	} else {
		first = tk->tk_memb_list;
		prev = first->p_taskprev;
		first->p_taskprev = p;
		p->p_tasknext = first;
		p->p_taskprev = prev;
		prev->p_tasknext = p;
	}
	tk->tk_memb_list = p;
	task_hold(tk);
	p->p_task = tk;
}

/*
 * task_begin()
 *
 * Overview
 *   A process constructing a new task calls task_begin() to initialize the
 *   task, by attaching itself as a member.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   pidlock and p_lock must be held across the call to task_begin().
 */
void
task_begin(task_t *tk, proc_t *p)
{
	timestruc_t ts;
	task_usage_t *tu;
	rctl_entity_p_t e;

	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&p->p_lock));

	mutex_enter(&tk->tk_usage_lock);
	tu = tk->tk_usage;
	gethrestime(&ts);
	tu->tu_startsec = (uint64_t)ts.tv_sec;
	tu->tu_startnsec = (uint64_t)ts.tv_nsec;
	mutex_exit(&tk->tk_usage_lock);

	/*
	 * Join process to the task as a member.
	 */
	task_attach(tk, p);

	/*
	 * Now that the linkage from process to task is complete, do the
	 * required callback for the task rctl set.
	 */
	e.rcep_p.task = tk;
	e.rcep_t = RCENTITY_TASK;
	(void) rctl_set_dup(NULL, NULL, p, &e, tk->tk_rctls, NULL,
	    RCD_CALLBACK);
}

/*
 * void task_detach(proc_t *)
 *
 * Overview
 *   task_detach() removes the specified process from its task.  task_detach()
 *   sets the process's task membership to NULL, in anticipation of a final exit
 *   or of joining a new task.  Because task_rele() requires a context safe for
 *   KM_SLEEP allocations, a task_detach() is followed by a subsequent
 *   task_rele() once appropriate context is available.
 *
 *   Because task_detach() involves relinquishing the process's membership in
 *   the project, any observational rctls the process may have had on the task
 *   or project are destroyed.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   pidlock and p_lock are held across task_detach().
 */
void
task_detach(proc_t *p)
{
	task_t *tk = p->p_task;

	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(p->p_task != NULL);
	ASSERT(tk->tk_memb_list != NULL);

	if (tk->tk_memb_list == p)
		tk->tk_memb_list = p->p_tasknext;
	if (tk->tk_memb_list == p)
		tk->tk_memb_list = NULL;
	p->p_taskprev->p_tasknext = p->p_tasknext;
	p->p_tasknext->p_taskprev = p->p_taskprev;

	rctl_set_tearoff(p->p_task->tk_rctls, p);
	rctl_set_tearoff(p->p_task->tk_proj->kpj_rctls, p);

	p->p_task = NULL;
	p->p_tasknext = p->p_taskprev = NULL;
}

/*
 * task_change(task_t *, proc_t *)
 *
 * Overview
 *   task_change() removes the specified process from its current task.  The
 *   process is then attached to the specified task.  This routine is called
 *   from settaskid() when a process is being moved to a new task.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   pidlock and p_lock are held across task_change().
 */
void
task_change(task_t *newtk, proc_t *p)
{
	task_t *oldtk = p->p_task;

	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(oldtk != NULL);
	ASSERT(oldtk->tk_memb_list != NULL);

	mutex_enter(&oldtk->tk_zone->zone_nlwps_lock);
	oldtk->tk_nlwps -= p->p_lwpcnt;
	oldtk->tk_nprocs--;
	mutex_exit(&oldtk->tk_zone->zone_nlwps_lock);

	mutex_enter(&newtk->tk_zone->zone_nlwps_lock);
	newtk->tk_nlwps += p->p_lwpcnt;
	newtk->tk_nprocs++;
	mutex_exit(&newtk->tk_zone->zone_nlwps_lock);

	task_detach(p);
	task_begin(newtk, p);
	exacct_move_mstate(p, oldtk, newtk);
}

/*
 * task_end()
 *
 * Overview
 *   task_end() contains the actions executed once the final member of a task
 *   has released the task and all actions connected with the task, such as
 *   committing an accounting record to a file, have completed.  It is called
 *   by the known last consumer of the task information.  Additionally,
 *   task_end() must never refer to any process in the system.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   No restrictions on context, beyond those given above.
 */
void
task_end(task_t *tk)
{
	ASSERT(tk->tk_hold_count == 0);

	project_rele(tk->tk_proj);
	kmem_free(tk->tk_usage, sizeof (task_usage_t));
	kmem_free(tk->tk_inherited, sizeof (task_usage_t));
	if (tk->tk_prevusage != NULL)
		kmem_free(tk->tk_prevusage, sizeof (task_usage_t));
	if (tk->tk_zoneusage != NULL)
		kmem_free(tk->tk_zoneusage, sizeof (task_usage_t));
	rctl_set_free(tk->tk_rctls);
	id_free(taskid_space, tk->tk_tkid);
	zone_task_rele(tk->tk_zone);
	kmem_cache_free(task_cache, tk);
}

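/*
 * static void changeproj(proc_t *, kproject_t *, zone_t *, void *, void *)
 *
 * Overview
 *   changeproj() walks the thread list of the given process and rebinds each
 *   thread to the new project, taking a project hold for every thread and
 *   releasing the hold on its old project.  Threads waiting on a project
 *   wait queue are kicked so they do not remain on the wrong queue, and
 *   fss_changeproj() is called to update fair-share scheduler state.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   pidlock and p->p_lock must be held across the call.
 */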
static void
changeproj(proc_t *p, kproject_t *kpj, zone_t *zone, void *projbuf,
    void *zonebuf)
{
	kproject_t *oldkpj;
	kthread_t *t;

	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((t = p->p_tlist) != NULL) {
		do {
			(void) project_hold(kpj);

			thread_lock(t);
			oldkpj = ttoproj(t);

			/*
			 * Kick this thread so that it doesn't sit
			 * on the wrong wait queue.
			 */
			if (ISWAITING(t))
				setrun_locked(t);

			/*
			 * The thread wants to go on the project wait queue, but
			 * the waitq is changing.
			 */
			if (t->t_schedflag & TS_PROJWAITQ)
				t->t_schedflag &= ~TS_PROJWAITQ;

			t->t_proj = kpj;
			t->t_pre_sys = 1;		/* For cred update */
			thread_unlock(t);
			fss_changeproj(t, kpj, zone, projbuf, zonebuf);

			project_rele(oldkpj);
		} while ((t = t->t_forw) != p->p_tlist);
	}
}

/*
 * task_join()
 *
 * Overview
 *   task_join() contains the actions that must be executed when the first
 *   member (curproc) of a newly created task joins it.  It must never fail.
 *
 *   The caller must make sure holdlwps() is called so that all other lwps are
 *   stopped prior to calling this function.
 *
 *   NB: It returns with curproc->p_lock held.
 *
 * Return values
 *   Pointer to the old task.
 *
 * Caller's context
 *   cpu_lock must be held entering the function.  It will acquire pidlock,
 *   p_crlock and p_lock during execution.
 */
task_t *
task_join(task_t *tk, uint_t flags)
{
	proc_t *p = ttoproc(curthread);
	task_t *prev_tk;
	void *projbuf, *zonebuf;
	zone_t *zone = tk->tk_zone;
	projid_t projid = tk->tk_proj->kpj_id;
	cred_t *oldcr;

	/*
	 * We can't know for sure if holdlwps() was called, but we can check to
	 * ensure we're single-threaded.
	 */
	ASSERT(curthread == p->p_agenttp || p->p_lwprcnt == 1);

	/*
	 * Changing the credential is always hard because we cannot
	 * allocate memory while holding locks, yet we don't know in
	 * advance whether a change is needed.  We first take a reference
	 * to the current cred if its project ID must change, then create
	 * a duplicate credential with the updated project ID and install
	 * it, dropping both our temporary hold on the old cred and the
	 * process's own reference to it.
	 */
	mutex_enter(&p->p_crlock);
	if (crgetprojid(p->p_cred) == projid)
		oldcr = NULL;
	else
		crhold(oldcr = p->p_cred);
	mutex_exit(&p->p_crlock);

	if (oldcr != NULL) {
		cred_t *newcr = crdup(oldcr);
		crsetprojid(newcr, projid);
		crfree(oldcr);

		mutex_enter(&p->p_crlock);
		oldcr = p->p_cred;
		p->p_cred = newcr;
		mutex_exit(&p->p_crlock);
		crfree(oldcr);
	}

	/*
	 * Make sure that the number of processor sets is constant
	 * across this operation.
	 */
	ASSERT(MUTEX_HELD(&cpu_lock));

	projbuf = fss_allocbuf(FSS_NPSET_BUF, FSS_ALLOC_PROJ);
	zonebuf = fss_allocbuf(FSS_NPSET_BUF, FSS_ALLOC_ZONE);

	mutex_enter(&pidlock);
	mutex_enter(&p->p_lock);

	prev_tk = p->p_task;
	task_change(tk, p);

	/*
	 * Now move threads one by one to their new project.
	 */
	changeproj(p, tk->tk_proj, zone, projbuf, zonebuf);
	if (flags & TASK_FINAL)
		p->p_task->tk_flags |= TASK_FINAL;

	mutex_exit(&pidlock);

	fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
	fss_freebuf(projbuf, FSS_ALLOC_PROJ);
	return (prev_tk);
}

/*
 * rctl ops vectors
 */
static rctl_ops_t task_lwps_ops = {
	rcop_no_action,
	task_lwps_usage,
	task_lwps_set,
	task_lwps_test
};

static rctl_ops_t task_procs_ops = {
	rcop_no_action,
	task_nprocs_usage,
	task_nprocs_set,
	task_nprocs_test
};

static rctl_ops_t task_cpu_time_ops = {
	rcop_no_action,
	task_cpu_time_usage,
	rcop_no_set,
	task_cpu_time_test
};

/*
 * void task_init(void)
 *
 * Overview
 *   task_init() initializes task-related hashes, caches, and the task id
 *   space.  Additionally, task_init() establishes p0 as a member of task0.
 *   Called by main().
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   task_init() must be called prior to MP startup.
 */
void
task_init(void)
{
	proc_t *p = &p0;
	mod_hash_hndl_t hndl;
	rctl_set_t *set;
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;

	/*
	 * Initialize task_cache and taskid_space.
	 */
	task_cache = kmem_cache_create("task_cache", sizeof (task_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	taskid_space = id_space_create("taskid_space", 0, MAX_TASKID);

	/*
	 * Initialize task hash table.
	 */
	task_hash = mod_hash_create_idhash("task_hash", task_hash_size,
	    mod_hash_null_valdtor);

	/*
	 * Initialize task-based rctls.
	 */
	rc_task_lwps = rctl_register("task.max-lwps", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX,
	    &task_lwps_ops);
	rc_task_nprocs = rctl_register("task.max-processes", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX,
	    &task_procs_ops);
	rc_task_cpu_time = rctl_register("task.max-cpu-time", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_DENY_NEVER |
	    RCTL_GLOBAL_CPU_TIME | RCTL_GLOBAL_INFINITE |
	    RCTL_GLOBAL_UNOBSERVABLE | RCTL_GLOBAL_SECONDS, UINT64_MAX,
	    UINT64_MAX, &task_cpu_time_ops);

	/*
	 * Create task0 and place p0 in it as a member.
	 */
	task0p = kmem_cache_alloc(task_cache, KM_SLEEP);
	bzero(task0p, sizeof (task_t));

	task0p->tk_tkid = id_alloc(taskid_space);
	task0p->tk_usage = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	task0p->tk_inherited = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	task0p->tk_proj = project_hold_by_id(0, &zone0,
	    PROJECT_HOLD_INSERT);
	task0p->tk_flags = TASK_NORMAL;
	task0p->tk_nlwps = p->p_lwpcnt;
	task0p->tk_nprocs = 1;
	task0p->tk_zone = global_zone;
	task0p->tk_commit_next = NULL;

	set = rctl_set_create();
	gp = rctl_set_init_prealloc(RCENTITY_TASK);
	mutex_enter(&curproc->p_lock);
	e.rcep_p.task = task0p;
	e.rcep_t = RCENTITY_TASK;
	task0p->tk_rctls = rctl_set_init(RCENTITY_TASK, curproc, &e, set, gp);
	mutex_exit(&curproc->p_lock);
	rctl_prealloc_destroy(gp);

	(void) mod_hash_reserve(task_hash, &hndl);
	mutex_enter(&task_hash_lock);
	ASSERT(task_find(task0p->tk_tkid, GLOBAL_ZONEID) == NULL);
	if (mod_hash_insert_reserve(task_hash,
	    (mod_hash_key_t)(uintptr_t)task0p->tk_tkid,
	    (mod_hash_val_t *)task0p, hndl) != 0) {
		mod_hash_cancel(task_hash, &hndl);
		panic("unable to insert task %d(%p)", task0p->tk_tkid,
		    (void *)task0p);
	}
	mutex_exit(&task_hash_lock);

	task0p->tk_memb_list = p;

	task0p->tk_nprocs_kstat = task_kstat_create(task0p, task0p->tk_zone);

	/*
	 * Initialize task pointers for p0, including doubly linked list of task
	 * members.
	 */
	p->p_task = task0p;
	p->p_taskprev = p->p_tasknext = p;
	task_hold(task0p);
}

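/*
 * static int task_nprocs_kstat_update(kstat_t *ksp, int rw)
 *
 * Overview
 *   task_nprocs_kstat_update() is the ks_update callback for the per-task
 *   "nprocs" kstat; it refreshes the usage and value fields from tk_nprocs
 *   and tk_nprocs_ctl.
 *
 * Return values
 *   0 on success; EACCES if a write was attempted (the kstat is read-only).
 */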
static int
task_nprocs_kstat_update(kstat_t *ksp, int rw)
{
	task_t *tk = ksp->ks_private;
	task_kstat_t *ktk = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ktk->ktk_usage.value.ui64 = tk->tk_nprocs;
	ktk->ktk_value.value.ui64 = tk->tk_nprocs_ctl;
	return (0);
}

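/*
 * static kstat_t *task_kstat_create(task_t *tk, zone_t *zone)
 *
 * Overview
 *   task_kstat_create() creates and installs the "nprocs" rctl kstat for the
 *   given task, recording the owning zone's name in the kstat data.
 *
 * Return values
 *   A pointer to the installed kstat, or NULL if the kstat could not be
 *   created.
 */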
static kstat_t *
task_kstat_create(task_t *tk, zone_t *zone)
{
	kstat_t	*ksp;
	task_kstat_t *ktk;
	char *zonename = zone->zone_name;

	ksp = rctl_kstat_create_task(tk, "nprocs", KSTAT_TYPE_NAMED,
	    sizeof (task_kstat_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp == NULL)
		return (NULL);

	ktk = ksp->ks_data = kmem_alloc(sizeof (task_kstat_t), KM_SLEEP);
	ksp->ks_data_size += strlen(zonename) + 1;
	kstat_named_init(&ktk->ktk_zonename, "zonename", KSTAT_DATA_STRING);
	kstat_named_setstr(&ktk->ktk_zonename, zonename);
	kstat_named_init(&ktk->ktk_usage, "usage", KSTAT_DATA_UINT64);
	kstat_named_init(&ktk->ktk_value, "value", KSTAT_DATA_UINT64);
	ksp->ks_update = task_nprocs_kstat_update;
	ksp->ks_private = tk;
	kstat_install(ksp);

	return (ksp);
}

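/*
 * static void task_kstat_delete(task_t *tk)
 *
 * Overview
 *   task_kstat_delete() removes the task's "nprocs" kstat, if present, and
 *   frees the associated kstat data.
 *
 * Return values
 *   None.
 */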
static void
task_kstat_delete(task_t *tk)
{
	void *data;

	if (tk->tk_nprocs_kstat != NULL) {
		data = tk->tk_nprocs_kstat->ks_data;
		kstat_delete(tk->tk_nprocs_kstat);
		kmem_free(data, sizeof (task_kstat_t));
		tk->tk_nprocs_kstat = NULL;
	}
}

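/*
 * void task_commit_thread_init(void)
 *
 * Overview
 *   task_commit_thread_init() initializes the lock and condition variable
 *   used by the commit list and creates task_commit_thread, the backup
 *   thread that commits task resource usage when taskq_dispatch() fails.
 *
 * Return values
 *   None.
 */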
void
task_commit_thread_init(void)
{
	mutex_init(&task_commit_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&task_commit_cv, NULL, CV_DEFAULT, NULL);
	task_commit_thread = thread_create(NULL, 0, task_commit, NULL, 0,
	    &p0, TS_RUN, minclsyspri);
}

/*
 * Backup thread to commit task resource usage when taskq_dispatch() fails.
 */
static void
task_commit(void)
{
	callb_cpr_t cprinfo;

	CALLB_CPR_INIT(&cprinfo, &task_commit_lock, callb_generic_cpr,
	    "task_commit_thread");

	mutex_enter(&task_commit_lock);

	for (;;) {
		while (task_commit_head == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&task_commit_cv, &task_commit_lock);
			CALLB_CPR_SAFE_END(&cprinfo, &task_commit_lock);
		}
		while (task_commit_head != NULL) {
			task_t *tk;

			tk = task_commit_head;
			task_commit_head = task_commit_head->tk_commit_next;
			if (task_commit_head == NULL)
				task_commit_tail = NULL;
			mutex_exit(&task_commit_lock);
			exacct_commit_task(tk);
			mutex_enter(&task_commit_lock);
		}
	}
}
1129