xref: /freebsd/sys/kern/kern_racct.c (revision 6829dae12bb055451fa467da4589c43bd03b1e64)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2010 The FreeBSD Foundation
5  * All rights reserved.
6  *
7  * This software was developed by Edward Tomasz Napierala under sponsorship
8  * from the FreeBSD Foundation.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  * $FreeBSD$
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "opt_sched.h"
38 
39 #include <sys/param.h>
40 #include <sys/buf.h>
41 #include <sys/systm.h>
42 #include <sys/eventhandler.h>
43 #include <sys/jail.h>
44 #include <sys/kernel.h>
45 #include <sys/kthread.h>
46 #include <sys/lock.h>
47 #include <sys/loginclass.h>
48 #include <sys/malloc.h>
49 #include <sys/mutex.h>
50 #include <sys/proc.h>
51 #include <sys/racct.h>
52 #include <sys/resourcevar.h>
53 #include <sys/sbuf.h>
54 #include <sys/sched.h>
55 #include <sys/sdt.h>
56 #include <sys/smp.h>
57 #include <sys/sx.h>
58 #include <sys/sysctl.h>
59 #include <sys/sysent.h>
60 #include <sys/sysproto.h>
61 #include <sys/umtx.h>
62 #include <machine/smp.h>
63 
64 #ifdef RCTL
65 #include <sys/rctl.h>
66 #endif
67 
68 #ifdef RACCT
69 
70 FEATURE(racct, "Resource Accounting");
71 
72 /*
73  * Do not block processes that have their %cpu usage <= pcpu_threshold.
74  */
75 static int pcpu_threshold = 1;
76 #ifdef RACCT_DEFAULT_TO_DISABLED
77 bool __read_frequently racct_enable = false;
78 #else
79 bool __read_frequently racct_enable = true;
80 #endif
81 
82 SYSCTL_NODE(_kern, OID_AUTO, racct, CTLFLAG_RW, 0, "Resource Accounting");
83 SYSCTL_BOOL(_kern_racct, OID_AUTO, enable, CTLFLAG_RDTUN, &racct_enable,
84     0, "Enable RACCT/RCTL");
85 SYSCTL_UINT(_kern_racct, OID_AUTO, pcpu_threshold, CTLFLAG_RW, &pcpu_threshold,
86     0, "Processes with higher %cpu usage than this value can be throttled.");
87 
88 /*
89  * How many seconds to wait before using the scheduler's %cpu calculations.
90  * When a process starts, we compute its %cpu usage by dividing its runtime
91  * by the process wall clock time.  After RACCT_PCPU_SECS pass, we use the
92  * value provided by the scheduler.
93  */
94 #define RACCT_PCPU_SECS		3
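
/*
 * Illustrative sketch of the estimate used before RACCT_PCPU_SECS have
 * elapsed: divide the accumulated runtime by the wall clock time.  It
 * mirrors the pct_estimate arithmetic in racct_proc_exit() and racctd()
 * below; the helper itself is hypothetical.
 */
static uint64_t __unused
racct_pcpu_estimate_sketch(uint64_t runtime, const struct timeval *wallclock)
{
	uint64_t wallclock_usec;

	wallclock_usec = (uint64_t)wallclock->tv_sec * 1000000 +
	    wallclock->tv_usec;
	if (wallclock_usec == 0)
		return (0);

	/*
	 * 'runtime' is in microseconds; the result is a percentage scaled
	 * by one million, which is how RACCT_PCTCPU is stored.
	 */
	return ((1000000 * runtime * 100) / wallclock_usec);
}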
95 
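/*
 * The racct lock protects the r_resources[] counters in every racct
 * container.  Where both locks are needed, it is acquired after the proc
 * lock; see for example racct_add() and racctd() below.
 */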
96 struct mtx racct_lock;
97 MTX_SYSINIT(racct_lock, &racct_lock, "racct lock", MTX_DEF);
98 
99 static uma_zone_t racct_zone;
100 
101 static void racct_sub_racct(struct racct *dest, const struct racct *src);
102 static void racct_sub_cred_locked(struct ucred *cred, int resource,
103 		uint64_t amount);
104 static void racct_add_cred_locked(struct ucred *cred, int resource,
105 		uint64_t amount);
106 
107 SDT_PROVIDER_DEFINE(racct);
108 SDT_PROBE_DEFINE3(racct, , rusage, add,
109     "struct proc *", "int", "uint64_t");
110 SDT_PROBE_DEFINE3(racct, , rusage, add__failure,
111     "struct proc *", "int", "uint64_t");
112 SDT_PROBE_DEFINE3(racct, , rusage, add__buf,
113     "struct proc *", "const struct buf *", "int");
114 SDT_PROBE_DEFINE3(racct, , rusage, add__cred,
115     "struct ucred *", "int", "uint64_t");
116 SDT_PROBE_DEFINE3(racct, , rusage, add__force,
117     "struct proc *", "int", "uint64_t");
118 SDT_PROBE_DEFINE3(racct, , rusage, set,
119     "struct proc *", "int", "uint64_t");
120 SDT_PROBE_DEFINE3(racct, , rusage, set__failure,
121     "struct proc *", "int", "uint64_t");
122 SDT_PROBE_DEFINE3(racct, , rusage, set__force,
123     "struct proc *", "int", "uint64_t");
124 SDT_PROBE_DEFINE3(racct, , rusage, sub,
125     "struct proc *", "int", "uint64_t");
126 SDT_PROBE_DEFINE3(racct, , rusage, sub__cred,
127     "struct ucred *", "int", "uint64_t");
128 SDT_PROBE_DEFINE1(racct, , racct, create,
129     "struct racct *");
130 SDT_PROBE_DEFINE1(racct, , racct, destroy,
131     "struct racct *");
132 SDT_PROBE_DEFINE2(racct, , racct, join,
133     "struct racct *", "struct racct *");
134 SDT_PROBE_DEFINE2(racct, , racct, join__failure,
135     "struct racct *", "struct racct *");
136 SDT_PROBE_DEFINE2(racct, , racct, leave,
137     "struct racct *", "struct racct *");
138 
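/*
 * Per-resource properties, as used throughout this file: RECLAIMABLE
 * resources are reset to zero when a process exits and must be zero when a
 * container is destroyed; INHERITABLE resources are copied from parent to
 * child in racct_proc_fork(); DENIABLE resource allocations may be refused
 * by RCTL rules in racct_add() and racct_set(); SLOPPY and DECAYING
 * resources are exempt from the consistency assertions and are clamped at
 * zero instead; IN_MILLIONS resources are stored scaled by one million.
 */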
139 int racct_types[] = {
140 	[RACCT_CPU] =
141 		RACCT_IN_MILLIONS,
142 	[RACCT_DATA] =
143 		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
144 	[RACCT_STACK] =
145 		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
146 	[RACCT_CORE] =
147 		RACCT_DENIABLE,
148 	[RACCT_RSS] =
149 		RACCT_RECLAIMABLE,
150 	[RACCT_MEMLOCK] =
151 		RACCT_RECLAIMABLE | RACCT_DENIABLE,
152 	[RACCT_NPROC] =
153 		RACCT_RECLAIMABLE | RACCT_DENIABLE,
154 	[RACCT_NOFILE] =
155 		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
156 	[RACCT_VMEM] =
157 		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
158 	[RACCT_NPTS] =
159 		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
160 	[RACCT_SWAP] =
161 		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
162 	[RACCT_NTHR] =
163 		RACCT_RECLAIMABLE | RACCT_DENIABLE,
164 	[RACCT_MSGQQUEUED] =
165 		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
166 	[RACCT_MSGQSIZE] =
167 		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
168 	[RACCT_NMSGQ] =
169 		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
170 	[RACCT_NSEM] =
171 		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
172 	[RACCT_NSEMOP] =
173 		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
174 	[RACCT_NSHM] =
175 		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
176 	[RACCT_SHMSIZE] =
177 		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
178 	[RACCT_WALLCLOCK] =
179 		RACCT_IN_MILLIONS,
180 	[RACCT_PCTCPU] =
181 		RACCT_DECAYING | RACCT_DENIABLE | RACCT_IN_MILLIONS,
182 	[RACCT_READBPS] =
183 		RACCT_DECAYING,
184 	[RACCT_WRITEBPS] =
185 		RACCT_DECAYING,
186 	[RACCT_READIOPS] =
187 		RACCT_DECAYING,
188 	[RACCT_WRITEIOPS] =
189 		RACCT_DECAYING };
190 
191 static const fixpt_t RACCT_DECAY_FACTOR = 0.3 * FSCALE;
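
/*
 * Illustrative sketch: RACCT_DECAY_FACTOR is 0.3 expressed in fixed point,
 * so one decay step multiplies a value by roughly 0.3.  The hypothetical
 * helper below performs the same arithmetic racct_decay_callback() applies
 * once per second to the per-credential RACCT_PCTCPU counters.
 */
static int64_t __unused
racct_decay_step_sketch(int64_t value)
{

	/* E.g. 50% (50000000 in millionths) decays to roughly 15%. */
	return (value * RACCT_DECAY_FACTOR / FSCALE);
}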
192 
193 #ifdef SCHED_4BSD
194 /*
195  * Contains intermediate values for %cpu calculations to avoid using floating
196  * point in the kernel.
197  * ccpu_exp[k] = FSCALE * (ccpu/FSCALE)^k = FSCALE * exp(-k/20)
198  * It is needed only for the 4BSD scheduler; in ULE, ccpu equals zero,
199  * so the calculations are more straightforward.
200  */
201 fixpt_t ccpu_exp[] = {
202 	[0] = FSCALE * 1,
203 	[1] = FSCALE * 0.95122942450071400909,
204 	[2] = FSCALE * 0.90483741803595957316,
205 	[3] = FSCALE * 0.86070797642505780722,
206 	[4] = FSCALE * 0.81873075307798185866,
207 	[5] = FSCALE * 0.77880078307140486824,
208 	[6] = FSCALE * 0.74081822068171786606,
209 	[7] = FSCALE * 0.70468808971871343435,
210 	[8] = FSCALE * 0.67032004603563930074,
211 	[9] = FSCALE * 0.63762815162177329314,
212 	[10] = FSCALE * 0.60653065971263342360,
213 	[11] = FSCALE * 0.57694981038048669531,
214 	[12] = FSCALE * 0.54881163609402643262,
215 	[13] = FSCALE * 0.52204577676101604789,
216 	[14] = FSCALE * 0.49658530379140951470,
217 	[15] = FSCALE * 0.47236655274101470713,
218 	[16] = FSCALE * 0.44932896411722159143,
219 	[17] = FSCALE * 0.42741493194872666992,
220 	[18] = FSCALE * 0.40656965974059911188,
221 	[19] = FSCALE * 0.38674102345450120691,
222 	[20] = FSCALE * 0.36787944117144232159,
223 	[21] = FSCALE * 0.34993774911115535467,
224 	[22] = FSCALE * 0.33287108369807955328,
225 	[23] = FSCALE * 0.31663676937905321821,
226 	[24] = FSCALE * 0.30119421191220209664,
227 	[25] = FSCALE * 0.28650479686019010032,
228 	[26] = FSCALE * 0.27253179303401260312,
229 	[27] = FSCALE * 0.25924026064589150757,
230 	[28] = FSCALE * 0.24659696394160647693,
231 	[29] = FSCALE * 0.23457028809379765313,
232 	[30] = FSCALE * 0.22313016014842982893,
233 	[31] = FSCALE * 0.21224797382674305771,
234 	[32] = FSCALE * 0.20189651799465540848,
235 	[33] = FSCALE * 0.19204990862075411423,
236 	[34] = FSCALE * 0.18268352405273465022,
237 	[35] = FSCALE * 0.17377394345044512668,
238 	[36] = FSCALE * 0.16529888822158653829,
239 	[37] = FSCALE * 0.15723716631362761621,
240 	[38] = FSCALE * 0.14956861922263505264,
241 	[39] = FSCALE * 0.14227407158651357185,
242 	[40] = FSCALE * 0.13533528323661269189,
243 	[41] = FSCALE * 0.12873490358780421886,
244 	[42] = FSCALE * 0.12245642825298191021,
245 	[43] = FSCALE * 0.11648415777349695786,
246 	[44] = FSCALE * 0.11080315836233388333,
247 	[45] = FSCALE * 0.10539922456186433678,
248 	[46] = FSCALE * 0.10025884372280373372,
249 	[47] = FSCALE * 0.09536916221554961888,
250 	[48] = FSCALE * 0.09071795328941250337,
251 	[49] = FSCALE * 0.08629358649937051097,
252 	[50] = FSCALE * 0.08208499862389879516,
253 	[51] = FSCALE * 0.07808166600115315231,
254 	[52] = FSCALE * 0.07427357821433388042,
255 	[53] = FSCALE * 0.07065121306042958674,
256 	[54] = FSCALE * 0.06720551273974976512,
257 	[55] = FSCALE * 0.06392786120670757270,
258 	[56] = FSCALE * 0.06081006262521796499,
259 	[57] = FSCALE * 0.05784432087483846296,
260 	[58] = FSCALE * 0.05502322005640722902,
261 	[59] = FSCALE * 0.05233970594843239308,
262 	[60] = FSCALE * 0.04978706836786394297,
263 	[61] = FSCALE * 0.04735892439114092119,
264 	[62] = FSCALE * 0.04504920239355780606,
265 	[63] = FSCALE * 0.04285212686704017991,
266 	[64] = FSCALE * 0.04076220397836621516,
267 	[65] = FSCALE * 0.03877420783172200988,
268 	[66] = FSCALE * 0.03688316740124000544,
269 	[67] = FSCALE * 0.03508435410084502588,
270 	[68] = FSCALE * 0.03337326996032607948,
271 	[69] = FSCALE * 0.03174563637806794323,
272 	[70] = FSCALE * 0.03019738342231850073,
273 	[71] = FSCALE * 0.02872463965423942912,
274 	[72] = FSCALE * 0.02732372244729256080,
275 	[73] = FSCALE * 0.02599112877875534358,
276 	[74] = FSCALE * 0.02472352647033939120,
277 	[75] = FSCALE * 0.02351774585600910823,
278 	[76] = FSCALE * 0.02237077185616559577,
279 	[77] = FSCALE * 0.02127973643837716938,
280 	[78] = FSCALE * 0.02024191144580438847,
281 	[79] = FSCALE * 0.01925470177538692429,
282 	[80] = FSCALE * 0.01831563888873418029,
283 	[81] = FSCALE * 0.01742237463949351138,
284 	[82] = FSCALE * 0.01657267540176124754,
285 	[83] = FSCALE * 0.01576441648485449082,
286 	[84] = FSCALE * 0.01499557682047770621,
287 	[85] = FSCALE * 0.01426423390899925527,
288 	[86] = FSCALE * 0.01356855901220093175,
289 	[87] = FSCALE * 0.01290681258047986886,
290 	[88] = FSCALE * 0.01227733990306844117,
291 	[89] = FSCALE * 0.01167856697039544521,
292 	[90] = FSCALE * 0.01110899653824230649,
293 	[91] = FSCALE * 0.01056720438385265337,
294 	[92] = FSCALE * 0.01005183574463358164,
295 	[93] = FSCALE * 0.00956160193054350793,
296 	[94] = FSCALE * 0.00909527710169581709,
297 	[95] = FSCALE * 0.00865169520312063417,
298 	[96] = FSCALE * 0.00822974704902002884,
299 	[97] = FSCALE * 0.00782837754922577143,
300 	[98] = FSCALE * 0.00744658307092434051,
301 	[99] = FSCALE * 0.00708340892905212004,
302 	[100] = FSCALE * 0.00673794699908546709,
303 	[101] = FSCALE * 0.00640933344625638184,
304 	[102] = FSCALE * 0.00609674656551563610,
305 	[103] = FSCALE * 0.00579940472684214321,
306 	[104] = FSCALE * 0.00551656442076077241,
307 	[105] = FSCALE * 0.00524751839918138427,
308 	[106] = FSCALE * 0.00499159390691021621,
309 	[107] = FSCALE * 0.00474815099941147558,
310 	[108] = FSCALE * 0.00451658094261266798,
311 	[109] = FSCALE * 0.00429630469075234057,
312 	[110] = FSCALE * 0.00408677143846406699,
313 };
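
/*
 * Illustrative sketch: consecutive ccpu_exp[] entries differ by a constant
 * factor of exp(-1/20), so an entry can be rebuilt by repeated fixed-point
 * multiplication, the same kind of scaling racct_getpcpu() does with
 * "(pctcpu * ccpu_exp[1]) >> FSHIFT".  The hypothetical helper below is
 * only approximate because of truncation.
 */
static fixpt_t __unused
ccpu_exp_sketch(int k)
{
	fixpt_t v;
	int i;

	v = FSCALE;	/* exp(-0 / 20) == 1 */
	for (i = 0; i < k; i++)
		v = (v * ccpu_exp[1]) >> FSHIFT;
	return (v);
}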
314 #endif
315 
316 #define	CCPU_EXP_MAX	110
317 
318 /*
319  * This function is analogous to the getpcpu() function in the ps(1) command.
320  * They should both calculate in the same way so that the racct %cpu
321  * calculations are consistent with the values shown by the ps(1) tool.
322  * The calculations are more complex in the 4BSD scheduler because of the value
323  * of the ccpu variable.  In ULE it is defined to be zero, which saves us some
324  * work.
325  */
326 static uint64_t
327 racct_getpcpu(struct proc *p, u_int pcpu)
328 {
329 	u_int swtime;
330 #ifdef SCHED_4BSD
331 	fixpt_t pctcpu, pctcpu_next;
332 #endif
333 #ifdef SMP
334 	struct pcpu *pc;
335 	int found;
336 #endif
337 	fixpt_t p_pctcpu;
338 	struct thread *td;
339 
340 	ASSERT_RACCT_ENABLED();
341 
342 	/*
343 	 * If the process is swapped out, we count its %cpu usage as zero.
344 	 * This behaviour is consistent with the userland ps(1) tool.
345 	 */
346 	if ((p->p_flag & P_INMEM) == 0)
347 		return (0);
348 	swtime = (ticks - p->p_swtick) / hz;
349 
350 	/*
351 	 * For short-lived processes, sched_pctcpu() returns small
352 	 * values even when the process is cpu intensive.  Therefore we
353 	 * use our own estimate in this case.
354 	 */
355 	if (swtime < RACCT_PCPU_SECS)
356 		return (pcpu);
357 
358 	p_pctcpu = 0;
359 	FOREACH_THREAD_IN_PROC(p, td) {
360 		if (td == PCPU_GET(idlethread))
361 			continue;
362 #ifdef SMP
363 		found = 0;
364 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
365 			if (td == pc->pc_idlethread) {
366 				found = 1;
367 				break;
368 			}
369 		}
370 		if (found)
371 			continue;
372 #endif
373 		thread_lock(td);
374 #ifdef SCHED_4BSD
375 		pctcpu = sched_pctcpu(td);
376 		/* Also count the current, not yet finished second. */
377 		pctcpu_next = (pctcpu * ccpu_exp[1]) >> FSHIFT;
378 		pctcpu_next += sched_pctcpu_delta(td);
379 		p_pctcpu += max(pctcpu, pctcpu_next);
380 #else
381 		/*
382 		 * In ULE the %cpu statistics are updated on every
383 		 * sched_pctcpu() call.  So special calculations to
384 		 * account for the latest (unfinished) second are
385 		 * not needed.
386 		 */
387 		p_pctcpu += sched_pctcpu(td);
388 #endif
389 		thread_unlock(td);
390 	}
391 
392 #ifdef SCHED_4BSD
393 	if (swtime <= CCPU_EXP_MAX)
394 		return ((100 * (uint64_t)p_pctcpu * 1000000) /
395 		    (FSCALE - ccpu_exp[swtime]));
396 #endif
397 
398 	return ((100 * (uint64_t)p_pctcpu * 1000000) / FSCALE);
399 }
400 
401 static void
402 racct_add_racct(struct racct *dest, const struct racct *src)
403 {
404 	int i;
405 
406 	ASSERT_RACCT_ENABLED();
407 	RACCT_LOCK_ASSERT();
408 
409 	/*
410 	 * Update resource usage in dest.
411 	 */
412 	for (i = 0; i <= RACCT_MAX; i++) {
413 		KASSERT(dest->r_resources[i] >= 0,
414 		    ("%s: resource %d propagation meltdown: dest < 0",
415 		    __func__, i));
416 		KASSERT(src->r_resources[i] >= 0,
417 		    ("%s: resource %d propagation meltdown: src < 0",
418 		    __func__, i));
419 		dest->r_resources[i] += src->r_resources[i];
420 	}
421 }
422 
423 static void
424 racct_sub_racct(struct racct *dest, const struct racct *src)
425 {
426 	int i;
427 
428 	ASSERT_RACCT_ENABLED();
429 	RACCT_LOCK_ASSERT();
430 
431 	/*
432 	 * Update resource usage in dest.
433 	 */
434 	for (i = 0; i <= RACCT_MAX; i++) {
435 		if (!RACCT_IS_SLOPPY(i) && !RACCT_IS_DECAYING(i)) {
436 			KASSERT(dest->r_resources[i] >= 0,
437 			    ("%s: resource %d propagation meltdown: dest < 0",
438 			    __func__, i));
439 			KASSERT(src->r_resources[i] >= 0,
440 			    ("%s: resource %d propagation meltdown: src < 0",
441 			    __func__, i));
442 			KASSERT(src->r_resources[i] <= dest->r_resources[i],
443 			    ("%s: resource %d propagation meltdown: src > dest",
444 			    __func__, i));
445 		}
446 		if (RACCT_CAN_DROP(i)) {
447 			dest->r_resources[i] -= src->r_resources[i];
448 			if (dest->r_resources[i] < 0)
449 				dest->r_resources[i] = 0;
450 		}
451 	}
452 }
453 
454 void
455 racct_create(struct racct **racctp)
456 {
457 
458 	if (!racct_enable)
459 		return;
460 
461 	SDT_PROBE1(racct, , racct, create, racctp);
462 
463 	KASSERT(*racctp == NULL, ("racct already allocated"));
464 
465 	*racctp = uma_zalloc(racct_zone, M_WAITOK | M_ZERO);
466 }
467 
468 static void
469 racct_destroy_locked(struct racct **racctp)
470 {
471 	struct racct *racct;
472 	int i;
473 
474 	ASSERT_RACCT_ENABLED();
475 
476 	SDT_PROBE1(racct, , racct, destroy, racctp);
477 
478 	RACCT_LOCK_ASSERT();
479 	KASSERT(racctp != NULL, ("NULL racctp"));
480 	KASSERT(*racctp != NULL, ("NULL racct"));
481 
482 	racct = *racctp;
483 
484 	for (i = 0; i <= RACCT_MAX; i++) {
485 		if (RACCT_IS_SLOPPY(i))
486 			continue;
487 		if (!RACCT_IS_RECLAIMABLE(i))
488 			continue;
489 		KASSERT(racct->r_resources[i] == 0,
490 		    ("destroying non-empty racct: "
491 		    "%ju allocated for resource %d\n",
492 		    racct->r_resources[i], i));
493 	}
494 	uma_zfree(racct_zone, racct);
495 	*racctp = NULL;
496 }
497 
498 void
499 racct_destroy(struct racct **racct)
500 {
501 
502 	if (!racct_enable)
503 		return;
504 
505 	RACCT_LOCK();
506 	racct_destroy_locked(racct);
507 	RACCT_UNLOCK();
508 }
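
/*
 * Illustrative sketch of the create/destroy pairing expected by the
 * functions above: the container pointer must be NULL before racct_create()
 * and is reset to NULL by racct_destroy().  The helper is hypothetical.
 */
static void __unused
racct_lifecycle_sketch(void)
{
	struct racct *r;

	r = NULL;
	racct_create(&r);	/* No-op unless racct_enable is set. */
	/* ... charge and release resources against 'r' ... */
	racct_destroy(&r);	/* Asserts reclaimable resources are zero. */
}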
509 
510 /*
511  * Increase consumption of 'resource' by 'amount' for 'racct',
512  * but not its parents.  Unlike in other cases, 'amount' here
513  * may be less than zero.
514  */
515 static void
516 racct_adjust_resource(struct racct *racct, int resource,
517     int64_t amount)
518 {
519 
520 	ASSERT_RACCT_ENABLED();
521 	RACCT_LOCK_ASSERT();
522 	KASSERT(racct != NULL, ("NULL racct"));
523 
524 	racct->r_resources[resource] += amount;
525 	if (racct->r_resources[resource] < 0) {
526 		KASSERT(RACCT_IS_SLOPPY(resource) || RACCT_IS_DECAYING(resource),
527 		    ("%s: resource %d usage < 0", __func__, resource));
528 		racct->r_resources[resource] = 0;
529 	}
530 
531 	/*
532 	 * There are some cases where the racct %cpu resource would grow
533 	 * beyond 100% per core.  For example in racct_proc_exit() we add
534 	 * the process %cpu usage to the ucred racct containers.  If too
535 	 * many processes terminated in a short time span, the ucred %cpu
536 	 * resource could grow too much.  Also, the 4BSD scheduler sometimes
537 	 * returns more than 100% cpu usage for a thread.  So we set a sane
538 	 * boundary here of 100% times the maximum number of CPUs.
539 	 */
540 	if ((resource == RACCT_PCTCPU) &&
541 	    (racct->r_resources[RACCT_PCTCPU] > 100 * 1000000 * (int64_t)MAXCPU))
542 		racct->r_resources[RACCT_PCTCPU] = 100 * 1000000 * (int64_t)MAXCPU;
543 }
544 
545 static int
546 racct_add_locked(struct proc *p, int resource, uint64_t amount, int force)
547 {
548 #ifdef RCTL
549 	int error;
550 #endif
551 
552 	ASSERT_RACCT_ENABLED();
553 
554 	/*
555 	 * We need proc lock to dereference p->p_ucred.
556 	 */
557 	PROC_LOCK_ASSERT(p, MA_OWNED);
558 
559 #ifdef RCTL
560 	error = rctl_enforce(p, resource, amount);
561 	if (error && !force && RACCT_IS_DENIABLE(resource)) {
562 		SDT_PROBE3(racct, , rusage, add__failure, p, resource, amount);
563 		return (error);
564 	}
565 #endif
566 	racct_adjust_resource(p->p_racct, resource, amount);
567 	racct_add_cred_locked(p->p_ucred, resource, amount);
568 
569 	return (0);
570 }
571 
572 /*
573  * Increase allocation of 'resource' by 'amount' for process 'p'.
574  * Return 0 if it's below limits, or an errno value if it's not.
575  */
576 int
577 racct_add(struct proc *p, int resource, uint64_t amount)
578 {
579 	int error;
580 
581 	if (!racct_enable)
582 		return (0);
583 
584 	SDT_PROBE3(racct, , rusage, add, p, resource, amount);
585 
586 	RACCT_LOCK();
587 	error = racct_add_locked(p, resource, amount, 0);
588 	RACCT_UNLOCK();
589 	return (error);
590 }
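
/*
 * Illustrative sketch of the usual calling pattern for a deniable,
 * reclaimable resource such as RACCT_NOFILE: charge it up front, back off
 * if the limits deny it, and release it with racct_sub() when the object
 * goes away.  The helper and its error handling are hypothetical.
 */
static int __unused
racct_charge_sketch(struct proc *p)
{
	int error;

	PROC_LOCK(p);
	error = racct_add(p, RACCT_NOFILE, 1);
	PROC_UNLOCK(p);
	if (error != 0)
		return (error);

	/* ... create the object being accounted for ... */

	PROC_LOCK(p);
	racct_sub(p, RACCT_NOFILE, 1);	/* On the release path. */
	PROC_UNLOCK(p);
	return (0);
}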
591 
592 /*
593  * Increase allocation of 'resource' by 'amount' for process 'p'.
594  * Doesn't check for limits and never fails.
595  */
596 void
597 racct_add_force(struct proc *p, int resource, uint64_t amount)
598 {
599 
600 	if (!racct_enable)
601 		return;
602 
603 	SDT_PROBE3(racct, , rusage, add__force, p, resource, amount);
604 
605 	RACCT_LOCK();
606 	racct_add_locked(p, resource, amount, 1);
607 	RACCT_UNLOCK();
608 }
609 
610 static void
611 racct_add_cred_locked(struct ucred *cred, int resource, uint64_t amount)
612 {
613 	struct prison *pr;
614 
615 	ASSERT_RACCT_ENABLED();
616 
617 	racct_adjust_resource(cred->cr_ruidinfo->ui_racct, resource, amount);
618 	for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent)
619 		racct_adjust_resource(pr->pr_prison_racct->prr_racct, resource,
620 		    amount);
621 	racct_adjust_resource(cred->cr_loginclass->lc_racct, resource, amount);
622 }
623 
624 /*
625  * Increase allocation of 'resource' by 'amount' for credential 'cred'.
626  * Doesn't check for limits and never fails.
627  */
628 void
629 racct_add_cred(struct ucred *cred, int resource, uint64_t amount)
630 {
631 
632 	if (!racct_enable)
633 		return;
634 
635 	SDT_PROBE3(racct, , rusage, add__cred, cred, resource, amount);
636 
637 	RACCT_LOCK();
638 	racct_add_cred_locked(cred, resource, amount);
639 	RACCT_UNLOCK();
640 }
641 
642 /*
643  * Account for disk IO resource consumption.  Checks the limits,
644  * but never fails, because disk IO limits are undeniable.
645  */
646 void
647 racct_add_buf(struct proc *p, const struct buf *bp, int is_write)
648 {
649 
650 	ASSERT_RACCT_ENABLED();
651 	PROC_LOCK_ASSERT(p, MA_OWNED);
652 
653 	SDT_PROBE3(racct, , rusage, add__buf, p, bp, is_write);
654 
655 	RACCT_LOCK();
656 	if (is_write) {
657 		racct_add_locked(curproc, RACCT_WRITEBPS, bp->b_bcount, 1);
658 		racct_add_locked(curproc, RACCT_WRITEIOPS, 1, 1);
659 	} else {
660 		racct_add_locked(curproc, RACCT_READBPS, bp->b_bcount, 1);
661 		racct_add_locked(curproc, RACCT_READIOPS, 1, 1);
662 	}
663 	RACCT_UNLOCK();
664 }
665 
666 static int
667 racct_set_locked(struct proc *p, int resource, uint64_t amount, int force)
668 {
669 	int64_t old_amount, decayed_amount, diff_proc, diff_cred;
670 #ifdef RCTL
671 	int error;
672 #endif
673 
674 	ASSERT_RACCT_ENABLED();
675 
676 	/*
677 	 * We need proc lock to dereference p->p_ucred.
678 	 */
679 	PROC_LOCK_ASSERT(p, MA_OWNED);
680 
681 	old_amount = p->p_racct->r_resources[resource];
682 	/*
683 	 * The diffs may be negative.
684 	 */
685 	diff_proc = amount - old_amount;
686 	if (resource == RACCT_PCTCPU) {
687 		/*
688 		 * Resources in per-credential racct containers may decay.
689 		 * If this is the case, we need to calculate the difference
690 		 * between the new amount and the proportional value of the
691 		 * old amount that has decayed in the ucred racct containers.
692 		 */
693 		decayed_amount = old_amount * RACCT_DECAY_FACTOR / FSCALE;
694 		diff_cred = amount - decayed_amount;
695 	} else
696 		diff_cred = diff_proc;
697 #ifdef notyet
698 	KASSERT(diff_proc >= 0 || RACCT_CAN_DROP(resource),
699 	    ("%s: usage of non-droppable resource %d dropping", __func__,
700 	     resource));
701 #endif
702 #ifdef RCTL
703 	if (diff_proc > 0) {
704 		error = rctl_enforce(p, resource, diff_proc);
705 		if (error && !force && RACCT_IS_DENIABLE(resource)) {
706 			SDT_PROBE3(racct, , rusage, set__failure, p, resource,
707 			    amount);
708 			return (error);
709 		}
710 	}
711 #endif
712 	racct_adjust_resource(p->p_racct, resource, diff_proc);
713 	if (diff_cred > 0)
714 		racct_add_cred_locked(p->p_ucred, resource, diff_cred);
715 	else if (diff_cred < 0)
716 		racct_sub_cred_locked(p->p_ucred, resource, -diff_cred);
717 
718 	return (0);
719 }
720 
721 /*
722  * Set allocation of 'resource' to 'amount' for process 'p'.
723  * Return 0 if it's below limits, or an errno value if it's not.
724  *
725  * Note that decreasing the allocation always returns 0,
726  * even if it's above the limit.
727  */
728 int
729 racct_set_unlocked(struct proc *p, int resource, uint64_t amount)
730 {
731 	int error;
732 
733 	ASSERT_RACCT_ENABLED();
734 	PROC_LOCK(p);
735 	error = racct_set(p, resource, amount);
736 	PROC_UNLOCK(p);
737 	return (error);
738 }
739 
740 int
741 racct_set(struct proc *p, int resource, uint64_t amount)
742 {
743 	int error;
744 
745 	if (!racct_enable)
746 		return (0);
747 
748 	SDT_PROBE3(racct, , rusage, set__force, p, resource, amount);
749 
750 	RACCT_LOCK();
751 	error = racct_set_locked(p, resource, amount, 0);
752 	RACCT_UNLOCK();
753 	return (error);
754 }
755 
756 void
757 racct_set_force(struct proc *p, int resource, uint64_t amount)
758 {
759 
760 	if (!racct_enable)
761 		return;
762 
763 	SDT_PROBE3(racct, , rusage, set, p, resource, amount);
764 
765 	RACCT_LOCK();
766 	racct_set_locked(p, resource, amount, 1);
767 	RACCT_UNLOCK();
768 }
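
/*
 * Illustrative sketch: racct_set() is meant for "level" style resources,
 * where the caller knows the new total rather than an increment, and the
 * functions above derive the signed difference themselves.  The helper and
 * its parameter are hypothetical.
 */
static int __unused
racct_set_level_sketch(struct proc *p, uint64_t new_vmem_size)
{
	int error;

	PROC_LOCK(p);
	/* Growing may be denied; shrinking always succeeds. */
	error = racct_set(p, RACCT_VMEM, new_vmem_size);
	PROC_UNLOCK(p);
	return (error);
}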
769 
770 /*
771  * Returns the amount of 'resource' the process 'p' can keep allocated.
772  * Allocating more than that would be denied, unless the resource
773  * is marked undeniable.  The amount of resource already allocated
774  * does not matter.
775  */
776 uint64_t
777 racct_get_limit(struct proc *p, int resource)
778 {
779 #ifdef RCTL
780 	uint64_t available;
781 
782 	if (!racct_enable)
783 		return (UINT64_MAX);
784 
785 	RACCT_LOCK();
786 	available = rctl_get_limit(p, resource);
787 	RACCT_UNLOCK();
788 
789 	return (available);
790 #else
791 
792 	return (UINT64_MAX);
793 #endif
794 }
795 
796 /*
797  * Returns the amount of 'resource' the process 'p' can keep allocated.
798  * Allocating more than that would be denied, unless the resource
799  * is marked undeniable.  The amount of resource already allocated
800  * does matter.
801  */
802 uint64_t
803 racct_get_available(struct proc *p, int resource)
804 {
805 #ifdef RCTL
806 	uint64_t available;
807 
808 	if (!racct_enable)
809 		return (UINT64_MAX);
810 
811 	RACCT_LOCK();
812 	available = rctl_get_available(p, resource);
813 	RACCT_UNLOCK();
814 
815 	return (available);
816 #else
817 
818 	return (UINT64_MAX);
819 #endif
820 }
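
/*
 * Illustrative sketch of a caller that clamps a requested allocation to
 * what the limits still allow instead of failing outright.  The helper is
 * hypothetical; with RCTL compiled out the functions above simply report
 * "unlimited".  The proc lock is held here to keep the credentials stable,
 * mirroring racct_add() above.
 */
static uint64_t __unused
racct_clamp_sketch(struct proc *p, int resource, uint64_t wanted)
{
	uint64_t available;

	PROC_LOCK(p);
	available = racct_get_available(p, resource);
	PROC_UNLOCK(p);
	return (wanted < available ? wanted : available);
}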
821 
822 /*
823  * Returns amount of the %cpu resource that process 'p' can add to its %cpu
824  * utilization.  Adding more than that would lead to the process being
825  * throttled.
826  */
827 static int64_t
828 racct_pcpu_available(struct proc *p)
829 {
830 #ifdef RCTL
831 	uint64_t available;
832 
833 	ASSERT_RACCT_ENABLED();
834 
835 	RACCT_LOCK();
836 	available = rctl_pcpu_available(p);
837 	RACCT_UNLOCK();
838 
839 	return (available);
840 #else
841 
842 	return (INT64_MAX);
843 #endif
844 }
845 
846 /*
847  * Decrease allocation of 'resource' by 'amount' for process 'p'.
848  */
849 void
850 racct_sub(struct proc *p, int resource, uint64_t amount)
851 {
852 
853 	if (!racct_enable)
854 		return;
855 
856 	SDT_PROBE3(racct, , rusage, sub, p, resource, amount);
857 
858 	/*
859 	 * We need proc lock to dereference p->p_ucred.
860 	 */
861 	PROC_LOCK_ASSERT(p, MA_OWNED);
862 	KASSERT(RACCT_CAN_DROP(resource),
863 	    ("%s: called for non-droppable resource %d", __func__, resource));
864 
865 	RACCT_LOCK();
866 	KASSERT(amount <= p->p_racct->r_resources[resource],
867 	    ("%s: freeing %ju of resource %d, which is more "
868 	     "than allocated %jd for %s (pid %d)", __func__, amount, resource,
869 	    (intmax_t)p->p_racct->r_resources[resource], p->p_comm, p->p_pid));
870 
871 	racct_adjust_resource(p->p_racct, resource, -amount);
872 	racct_sub_cred_locked(p->p_ucred, resource, amount);
873 	RACCT_UNLOCK();
874 }
875 
876 static void
877 racct_sub_cred_locked(struct ucred *cred, int resource, uint64_t amount)
878 {
879 	struct prison *pr;
880 
881 	ASSERT_RACCT_ENABLED();
882 
883 	racct_adjust_resource(cred->cr_ruidinfo->ui_racct, resource, -amount);
884 	for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent)
885 		racct_adjust_resource(pr->pr_prison_racct->prr_racct, resource,
886 		    -amount);
887 	racct_adjust_resource(cred->cr_loginclass->lc_racct, resource, -amount);
888 }
889 
890 /*
891  * Decrease allocation of 'resource' by 'amount' for credential 'cred'.
892  */
893 void
894 racct_sub_cred(struct ucred *cred, int resource, uint64_t amount)
895 {
896 
897 	if (!racct_enable)
898 		return;
899 
900 	SDT_PROBE3(racct, , rusage, sub__cred, cred, resource, amount);
901 
902 #ifdef notyet
903 	KASSERT(RACCT_CAN_DROP(resource),
904 	    ("%s: called for resource %d which can not drop", __func__,
905 	     resource));
906 #endif
907 
908 	RACCT_LOCK();
909 	racct_sub_cred_locked(cred, resource, amount);
910 	RACCT_UNLOCK();
911 }
912 
913 /*
914  * Inherit resource usage information from the parent process.
915  */
916 int
917 racct_proc_fork(struct proc *parent, struct proc *child)
918 {
919 	int i, error = 0;
920 
921 	if (!racct_enable)
922 		return (0);
923 
924 	/*
925 	 * Create racct for the child process.
926 	 */
927 	racct_create(&child->p_racct);
928 
929 	PROC_LOCK(parent);
930 	PROC_LOCK(child);
931 	RACCT_LOCK();
932 
933 #ifdef RCTL
934 	error = rctl_proc_fork(parent, child);
935 	if (error != 0)
936 		goto out;
937 #endif
938 
939 	/* Init process cpu time. */
940 	child->p_prev_runtime = 0;
941 	child->p_throttled = 0;
942 
943 	/*
944 	 * Inherit resource usage.
945 	 */
946 	for (i = 0; i <= RACCT_MAX; i++) {
947 		if (parent->p_racct->r_resources[i] == 0 ||
948 		    !RACCT_IS_INHERITABLE(i))
949 			continue;
950 
951 		error = racct_set_locked(child, i,
952 		    parent->p_racct->r_resources[i], 0);
953 		if (error != 0)
954 			goto out;
955 	}
956 
957 	error = racct_add_locked(child, RACCT_NPROC, 1, 0);
958 	error += racct_add_locked(child, RACCT_NTHR, 1, 0);
959 
960 out:
961 	RACCT_UNLOCK();
962 	PROC_UNLOCK(child);
963 	PROC_UNLOCK(parent);
964 
965 	if (error != 0)
966 		racct_proc_exit(child);
967 
968 	return (error);
969 }
970 
971 /*
972  * Called at the end of fork1(), to handle rules that require the process
973  * to be fully initialized.
974  */
975 void
976 racct_proc_fork_done(struct proc *child)
977 {
978 
979 	if (!racct_enable)
980 		return;
981 
982 #ifdef RCTL
983 	PROC_LOCK(child);
984 	RACCT_LOCK();
985 	rctl_enforce(child, RACCT_NPROC, 0);
986 	rctl_enforce(child, RACCT_NTHR, 0);
987 	RACCT_UNLOCK();
988 	PROC_UNLOCK(child);
989 #endif
990 }
991 
992 void
993 racct_proc_exit(struct proc *p)
994 {
995 	struct timeval wallclock;
996 	uint64_t pct_estimate, pct, runtime;
997 	int i;
998 
999 	if (!racct_enable)
1000 		return;
1001 
1002 	PROC_LOCK(p);
1003 	/*
1004 	 * We don't need to calculate rux; proc_reap() has already done this.
1005 	 */
1006 	runtime = cputick2usec(p->p_rux.rux_runtime);
1007 #ifdef notyet
1008 	KASSERT(runtime >= p->p_prev_runtime, ("runtime < p_prev_runtime"));
1009 #else
1010 	if (runtime < p->p_prev_runtime)
1011 		runtime = p->p_prev_runtime;
1012 #endif
1013 	microuptime(&wallclock);
1014 	timevalsub(&wallclock, &p->p_stats->p_start);
1015 	if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
1016 		pct_estimate = (1000000 * runtime * 100) /
1017 		    ((uint64_t)wallclock.tv_sec * 1000000 +
1018 		    wallclock.tv_usec);
1019 	} else
1020 		pct_estimate = 0;
1021 	pct = racct_getpcpu(p, pct_estimate);
1022 
1023 	RACCT_LOCK();
1024 	racct_set_locked(p, RACCT_CPU, runtime, 0);
1025 	racct_add_cred_locked(p->p_ucred, RACCT_PCTCPU, pct);
1026 
1027 	KASSERT(p->p_racct->r_resources[RACCT_RSS] == 0,
1028 	    ("process reaped with %ju allocated for RSS\n",
1029 	    p->p_racct->r_resources[RACCT_RSS]));
1030 	for (i = 0; i <= RACCT_MAX; i++) {
1031 		if (p->p_racct->r_resources[i] == 0)
1032 			continue;
1033 		if (!RACCT_IS_RECLAIMABLE(i))
1034 			continue;
1035 		racct_set_locked(p, i, 0, 0);
1036 	}
1037 
1038 #ifdef RCTL
1039 	rctl_racct_release(p->p_racct);
1040 #endif
1041 	racct_destroy_locked(&p->p_racct);
1042 	RACCT_UNLOCK();
1043 	PROC_UNLOCK(p);
1044 }
1045 
1046 /*
1047  * Called after credentials change, to move resource utilization
1048  * between raccts.
1049  */
1050 void
1051 racct_proc_ucred_changed(struct proc *p, struct ucred *oldcred,
1052     struct ucred *newcred)
1053 {
1054 	struct uidinfo *olduip, *newuip;
1055 	struct loginclass *oldlc, *newlc;
1056 	struct prison *oldpr, *newpr, *pr;
1057 
1058 	if (!racct_enable)
1059 		return;
1060 
1061 	PROC_LOCK_ASSERT(p, MA_OWNED);
1062 
1063 	newuip = newcred->cr_ruidinfo;
1064 	olduip = oldcred->cr_ruidinfo;
1065 	newlc = newcred->cr_loginclass;
1066 	oldlc = oldcred->cr_loginclass;
1067 	newpr = newcred->cr_prison;
1068 	oldpr = oldcred->cr_prison;
1069 
1070 	RACCT_LOCK();
1071 	if (newuip != olduip) {
1072 		racct_sub_racct(olduip->ui_racct, p->p_racct);
1073 		racct_add_racct(newuip->ui_racct, p->p_racct);
1074 	}
1075 	if (newlc != oldlc) {
1076 		racct_sub_racct(oldlc->lc_racct, p->p_racct);
1077 		racct_add_racct(newlc->lc_racct, p->p_racct);
1078 	}
1079 	if (newpr != oldpr) {
1080 		for (pr = oldpr; pr != NULL; pr = pr->pr_parent)
1081 			racct_sub_racct(pr->pr_prison_racct->prr_racct,
1082 			    p->p_racct);
1083 		for (pr = newpr; pr != NULL; pr = pr->pr_parent)
1084 			racct_add_racct(pr->pr_prison_racct->prr_racct,
1085 			    p->p_racct);
1086 	}
1087 	RACCT_UNLOCK();
1088 }
1089 
1090 void
1091 racct_move(struct racct *dest, struct racct *src)
1092 {
1093 
1094 	ASSERT_RACCT_ENABLED();
1095 
1096 	RACCT_LOCK();
1097 	racct_add_racct(dest, src);
1098 	racct_sub_racct(src, src);
1099 	RACCT_UNLOCK();
1100 }
1101 
1102 void
1103 racct_proc_throttled(struct proc *p)
1104 {
1105 
1106 	ASSERT_RACCT_ENABLED();
1107 
1108 	PROC_LOCK(p);
1109 	while (p->p_throttled != 0) {
1110 		msleep(p->p_racct, &p->p_mtx, 0, "racct",
1111 		    p->p_throttled < 0 ? 0 : p->p_throttled);
1112 		if (p->p_throttled > 0)
1113 			p->p_throttled = 0;
1114 	}
1115 	PROC_UNLOCK(p);
1116 }
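
/*
 * Illustrative sketch of the caller-side check that pairs with the function
 * above.  The comment on racct_proc_throttle() below says the sleep happens
 * on the return-to-userspace path, so a hypothetical hook there would look
 * roughly like this.
 */
static void __unused
racct_userret_sketch(struct proc *p)
{

	if (racct_enable && p->p_throttled != 0)
		racct_proc_throttled(p);
}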
1117 
1118 /*
1119  * Make the process sleep in userret() for 'timeout' ticks.  Setting
1120  * timeout to -1 makes it sleep until woken up by racct_proc_wakeup().
1121  */
1122 void
1123 racct_proc_throttle(struct proc *p, int timeout)
1124 {
1125 	struct thread *td;
1126 #ifdef SMP
1127 	int cpuid;
1128 #endif
1129 
1130 	KASSERT(timeout != 0, ("timeout %d", timeout));
1131 	ASSERT_RACCT_ENABLED();
1132 	PROC_LOCK_ASSERT(p, MA_OWNED);
1133 
1134 	/*
1135 	 * Do not block kernel processes.  Also do not block processes with
1136 	 * low %cpu utilization to improve interactivity.
1137 	 */
1138 	if ((p->p_flag & (P_SYSTEM | P_KPROC)) != 0)
1139 		return;
1140 
1141 	if (p->p_throttled < 0 || (timeout > 0 && p->p_throttled > timeout))
1142 		return;
1143 
1144 	p->p_throttled = timeout;
1145 
1146 	FOREACH_THREAD_IN_PROC(p, td) {
1147 		thread_lock(td);
1148 		switch (td->td_state) {
1149 		case TDS_RUNQ:
1150 			/*
1151 			 * If the thread is on the scheduler run-queue, we
1152 			 * cannot just remove it from there.  So we set the flag
1153 			 * TDF_NEEDRESCHED for the thread, so that once it is
1154 			 * running, it is taken off the cpu as soon as possible.
1155 			 */
1156 			td->td_flags |= TDF_NEEDRESCHED;
1157 			break;
1158 		case TDS_RUNNING:
1159 			/*
1160 			 * If the thread is running, we request a context
1161 			 * switch for it by setting the TDF_NEEDRESCHED flag.
1162 			 */
1163 			td->td_flags |= TDF_NEEDRESCHED;
1164 #ifdef SMP
1165 			cpuid = td->td_oncpu;
1166 			if ((cpuid != NOCPU) && (td != curthread))
1167 				ipi_cpu(cpuid, IPI_AST);
1168 #endif
1169 			break;
1170 		default:
1171 			break;
1172 		}
1173 		thread_unlock(td);
1174 	}
1175 }
1176 
1177 static void
1178 racct_proc_wakeup(struct proc *p)
1179 {
1180 
1181 	ASSERT_RACCT_ENABLED();
1182 
1183 	PROC_LOCK_ASSERT(p, MA_OWNED);
1184 
1185 	if (p->p_throttled != 0) {
1186 		p->p_throttled = 0;
1187 		wakeup(p->p_racct);
1188 	}
1189 }
1190 
1191 static void
1192 racct_decay_callback(struct racct *racct, void *dummy1, void *dummy2)
1193 {
1194 	int64_t r_old, r_new;
1195 
1196 	ASSERT_RACCT_ENABLED();
1197 	RACCT_LOCK_ASSERT();
1198 
1199 #ifdef RCTL
1200 	rctl_throttle_decay(racct, RACCT_READBPS);
1201 	rctl_throttle_decay(racct, RACCT_WRITEBPS);
1202 	rctl_throttle_decay(racct, RACCT_READIOPS);
1203 	rctl_throttle_decay(racct, RACCT_WRITEIOPS);
1204 #endif
1205 
1206 	r_old = racct->r_resources[RACCT_PCTCPU];
1207 
1208 	/* If there is nothing to decay, just exit. */
1209 	if (r_old <= 0)
1210 		return;
1211 
1212 	r_new = r_old * RACCT_DECAY_FACTOR / FSCALE;
1213 	racct->r_resources[RACCT_PCTCPU] = r_new;
1214 }
1215 
1216 static void
1217 racct_decay_pre(void)
1218 {
1219 
1220 	RACCT_LOCK();
1221 }
1222 
1223 static void
1224 racct_decay_post(void)
1225 {
1226 
1227 	RACCT_UNLOCK();
1228 }
1229 
1230 static void
1231 racct_decay(void)
1232 {
1233 
1234 	ASSERT_RACCT_ENABLED();
1235 
1236 	ui_racct_foreach(racct_decay_callback, racct_decay_pre,
1237 	    racct_decay_post, NULL, NULL);
1238 	loginclass_racct_foreach(racct_decay_callback, racct_decay_pre,
1239 	    racct_decay_post, NULL, NULL);
1240 	prison_racct_foreach(racct_decay_callback, racct_decay_pre,
1241 	    racct_decay_post, NULL, NULL);
1242 }
1243 
1244 static void
1245 racctd(void)
1246 {
1247 	struct thread *td;
1248 	struct proc *p;
1249 	struct timeval wallclock;
1250 	uint64_t pct, pct_estimate, runtime;
1251 
1252 	ASSERT_RACCT_ENABLED();
1253 
1254 	for (;;) {
1255 		racct_decay();
1256 
1257 		sx_slock(&allproc_lock);
1258 
1259 		sx_slock(&zombproc_lock);
1260 		LIST_FOREACH(p, &zombproc, p_list) {
1261 			PROC_LOCK(p);
1262 			racct_set(p, RACCT_PCTCPU, 0);
1263 			PROC_UNLOCK(p);
1264 		}
1265 		sx_sunlock(&zombproc_lock);
1266 
1267 		FOREACH_PROC_IN_SYSTEM(p) {
1268 			PROC_LOCK(p);
1269 			if (p->p_state != PRS_NORMAL) {
1270 				PROC_UNLOCK(p);
1271 				continue;
1272 			}
1273 
1274 			microuptime(&wallclock);
1275 			timevalsub(&wallclock, &p->p_stats->p_start);
1276 			PROC_STATLOCK(p);
1277 			FOREACH_THREAD_IN_PROC(p, td)
1278 				ruxagg(p, td);
1279 			runtime = cputick2usec(p->p_rux.rux_runtime);
1280 			PROC_STATUNLOCK(p);
1281 #ifdef notyet
1282 			KASSERT(runtime >= p->p_prev_runtime,
1283 			    ("runtime < p_prev_runtime"));
1284 #else
1285 			if (runtime < p->p_prev_runtime)
1286 				runtime = p->p_prev_runtime;
1287 #endif
1288 			p->p_prev_runtime = runtime;
1289 			if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
1290 				pct_estimate = (1000000 * runtime * 100) /
1291 				    ((uint64_t)wallclock.tv_sec * 1000000 +
1292 				    wallclock.tv_usec);
1293 			} else
1294 				pct_estimate = 0;
1295 			pct = racct_getpcpu(p, pct_estimate);
1296 			RACCT_LOCK();
1297 #ifdef RCTL
1298 			rctl_throttle_decay(p->p_racct, RACCT_READBPS);
1299 			rctl_throttle_decay(p->p_racct, RACCT_WRITEBPS);
1300 			rctl_throttle_decay(p->p_racct, RACCT_READIOPS);
1301 			rctl_throttle_decay(p->p_racct, RACCT_WRITEIOPS);
1302 #endif
1303 			racct_set_locked(p, RACCT_PCTCPU, pct, 1);
1304 			racct_set_locked(p, RACCT_CPU, runtime, 0);
1305 			racct_set_locked(p, RACCT_WALLCLOCK,
1306 			    (uint64_t)wallclock.tv_sec * 1000000 +
1307 			    wallclock.tv_usec, 0);
1308 			RACCT_UNLOCK();
1309 			PROC_UNLOCK(p);
1310 		}
1311 
1312 		/*
1313 		 * To ensure that processes are throttled in a fair way, we need
1314 		 * to iterate over all processes again and check the limits
1315 		 * for %cpu resource only after ucred racct containers have been
1316 		 * properly filled.
1317 		 */
1318 		FOREACH_PROC_IN_SYSTEM(p) {
1319 			PROC_LOCK(p);
1320 			if (p->p_state != PRS_NORMAL) {
1321 				PROC_UNLOCK(p);
1322 				continue;
1323 			}
1324 
1325 			if (racct_pcpu_available(p) <= 0) {
1326 				if (p->p_racct->r_resources[RACCT_PCTCPU] >
1327 				    pcpu_threshold)
1328 					racct_proc_throttle(p, -1);
1329 			} else if (p->p_throttled == -1) {
1330 				racct_proc_wakeup(p);
1331 			}
1332 			PROC_UNLOCK(p);
1333 		}
1334 		sx_sunlock(&allproc_lock);
1335 		pause("-", hz);
1336 	}
1337 }
1338 
1339 static struct kproc_desc racctd_kp = {
1340 	"racctd",
1341 	racctd,
1342 	NULL
1343 };
1344 
1345 static void
1346 racctd_init(void)
1347 {
1348 	if (!racct_enable)
1349 		return;
1350 
1351 	kproc_start(&racctd_kp);
1352 }
1353 SYSINIT(racctd, SI_SUB_RACCTD, SI_ORDER_FIRST, racctd_init, NULL);
1354 
1355 static void
1356 racct_init(void)
1357 {
1358 	if (!racct_enable)
1359 		return;
1360 
1361 	racct_zone = uma_zcreate("racct", sizeof(struct racct),
1362 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1363 	/*
1364 	 * XXX: Move this somewhere.
1365 	 */
1366 	prison0.pr_prison_racct = prison_racct_find("0");
1367 }
1368 SYSINIT(racct, SI_SUB_RACCT, SI_ORDER_FIRST, racct_init, NULL);
1369 
1370 #endif /* !RACCT */
1371