/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 The FreeBSD Foundation
 *
 * This software was developed by Edward Tomasz Napierala under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/loginclass.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/umtxvar.h>
#include <machine/smp.h>

#ifdef RCTL
#include <sys/rctl.h>
#endif

FEATURE(racct, "Resource Accounting");
/*
 * Do not block processes that have their %cpu usage <= pcpu_threshold.
 */
static int pcpu_threshold = 1;
#ifdef RACCT_DEFAULT_TO_DISABLED
bool __read_frequently racct_enable = false;
#else
bool __read_frequently racct_enable = true;
#endif

SYSCTL_NODE(_kern, OID_AUTO, racct, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Resource Accounting");
SYSCTL_BOOL(_kern_racct, OID_AUTO, enable, CTLFLAG_RDTUN, &racct_enable,
    0, "Enable RACCT/RCTL");
SYSCTL_UINT(_kern_racct, OID_AUTO, pcpu_threshold, CTLFLAG_RW, &pcpu_threshold,
    0, "Processes with higher %cpu usage than this value can be throttled.");
/*
 * How many seconds to wait before we start using the scheduler's %cpu
 * calculations.  When a process starts, we compute its %cpu usage by
 * dividing its runtime by the process wall clock time.  After
 * RACCT_PCPU_SECS have passed, we use the value provided by the scheduler.
 */
#define RACCT_PCPU_SECS 3
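
/*
 * For example (illustrative numbers): a process that has consumed 500000us
 * of CPU time over one second of wall clock time during that startup window
 * gets pct_estimate = (1000000 * 500000 * 100) / 1000000 = 50 * 1000000,
 * i.e. 50% expressed in the RACCT_IN_MILLIONS units used by RACCT_PCTCPU;
 * see the pct_estimate calculations in racct_proc_exit() and racctd().
 */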

struct mtx racct_lock;
MTX_SYSINIT(racct_lock, &racct_lock, "racct lock", MTX_DEF);

static uma_zone_t racct_zone;

static void racct_sub_racct(struct racct *dest, const struct racct *src);
static void racct_sub_cred_locked(struct ucred *cred, int resource,
    uint64_t amount);
static void racct_add_cred_locked(struct ucred *cred, int resource,
    uint64_t amount);

SDT_PROVIDER_DEFINE(racct);
SDT_PROBE_DEFINE3(racct, , rusage, add,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, , rusage, add__failure,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, , rusage, add__buf,
    "struct proc *", "const struct buf *", "int");
SDT_PROBE_DEFINE3(racct, , rusage, add__cred,
    "struct ucred *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, , rusage, add__force,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, , rusage, set,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, , rusage, set__failure,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, , rusage, set__force,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, , rusage, sub,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, , rusage, sub__cred,
    "struct ucred *", "int", "uint64_t");
SDT_PROBE_DEFINE1(racct, , racct, create,
    "struct racct *");
SDT_PROBE_DEFINE1(racct, , racct, destroy,
    "struct racct *");
SDT_PROBE_DEFINE2(racct, , racct, join,
    "struct racct *", "struct racct *");
SDT_PROBE_DEFINE2(racct, , racct, join__failure,
    "struct racct *", "struct racct *");
SDT_PROBE_DEFINE2(racct, , racct, leave,
    "struct racct *", "struct racct *");
int racct_types[] = {
        [RACCT_CPU] =
                RACCT_IN_MILLIONS,
        [RACCT_DATA] =
                RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
        [RACCT_STACK] =
                RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
        [RACCT_CORE] =
                RACCT_DENIABLE,
        [RACCT_RSS] =
                RACCT_RECLAIMABLE,
        [RACCT_MEMLOCK] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE,
        [RACCT_NPROC] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE,
        [RACCT_NOFILE] =
                RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
        [RACCT_VMEM] =
                RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
        [RACCT_NPTS] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_SWAP] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_NTHR] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE,
        [RACCT_MSGQQUEUED] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_MSGQSIZE] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_NMSGQ] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_NSEM] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_NSEMOP] =
                RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
        [RACCT_NSHM] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_SHMSIZE] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_WALLCLOCK] =
                RACCT_IN_MILLIONS,
        [RACCT_PCTCPU] =
                RACCT_DECAYING | RACCT_DENIABLE | RACCT_IN_MILLIONS,
        [RACCT_READBPS] =
                RACCT_DECAYING,
        [RACCT_WRITEBPS] =
                RACCT_DECAYING,
        [RACCT_READIOPS] =
                RACCT_DECAYING,
        [RACCT_WRITEIOPS] =
                RACCT_DECAYING };

static const fixpt_t RACCT_DECAY_FACTOR = 0.3 * FSCALE;
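
/*
 * Each pass of racctd() scales the per-credential %cpu usage by
 * RACCT_DECAY_FACTOR / FSCALE, i.e. down to 30% of its previous value,
 * before fresh per-process values are added back in; see
 * racct_decay_callback().  For example (illustrative numbers), a stale
 * value of 90 * 1000000 (90%) decays to 27% after one pass and to 8.1%
 * after two.
 */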

#ifdef SCHED_4BSD
/*
 * Contains intermediate values for %cpu calculations to avoid using floating
 * point in the kernel.
 * ccpu_exp[k] = FSCALE * (ccpu/FSCALE)^k = FSCALE * exp(-k/20)
 * It is needed only for the 4BSD scheduler; in ULE, ccpu is defined to be
 * zero, so the calculations are more straightforward.
 */
fixpt_t ccpu_exp[] = {
        [0] = FSCALE * 1,
        [1] = FSCALE * 0.95122942450071400909,
        [2] = FSCALE * 0.90483741803595957316,
        [3] = FSCALE * 0.86070797642505780722,
        [4] = FSCALE * 0.81873075307798185866,
        [5] = FSCALE * 0.77880078307140486824,
        [6] = FSCALE * 0.74081822068171786606,
        [7] = FSCALE * 0.70468808971871343435,
        [8] = FSCALE * 0.67032004603563930074,
        [9] = FSCALE * 0.63762815162177329314,
        [10] = FSCALE * 0.60653065971263342360,
        [11] = FSCALE * 0.57694981038048669531,
        [12] = FSCALE * 0.54881163609402643262,
        [13] = FSCALE * 0.52204577676101604789,
        [14] = FSCALE * 0.49658530379140951470,
        [15] = FSCALE * 0.47236655274101470713,
        [16] = FSCALE * 0.44932896411722159143,
        [17] = FSCALE * 0.42741493194872666992,
        [18] = FSCALE * 0.40656965974059911188,
        [19] = FSCALE * 0.38674102345450120691,
        [20] = FSCALE * 0.36787944117144232159,
        [21] = FSCALE * 0.34993774911115535467,
        [22] = FSCALE * 0.33287108369807955328,
        [23] = FSCALE * 0.31663676937905321821,
        [24] = FSCALE * 0.30119421191220209664,
        [25] = FSCALE * 0.28650479686019010032,
        [26] = FSCALE * 0.27253179303401260312,
        [27] = FSCALE * 0.25924026064589150757,
        [28] = FSCALE * 0.24659696394160647693,
        [29] = FSCALE * 0.23457028809379765313,
        [30] = FSCALE * 0.22313016014842982893,
        [31] = FSCALE * 0.21224797382674305771,
        [32] = FSCALE * 0.20189651799465540848,
        [33] = FSCALE * 0.19204990862075411423,
        [34] = FSCALE * 0.18268352405273465022,
        [35] = FSCALE * 0.17377394345044512668,
        [36] = FSCALE * 0.16529888822158653829,
        [37] = FSCALE * 0.15723716631362761621,
        [38] = FSCALE * 0.14956861922263505264,
        [39] = FSCALE * 0.14227407158651357185,
        [40] = FSCALE * 0.13533528323661269189,
        [41] = FSCALE * 0.12873490358780421886,
        [42] = FSCALE * 0.12245642825298191021,
        [43] = FSCALE * 0.11648415777349695786,
        [44] = FSCALE * 0.11080315836233388333,
        [45] = FSCALE * 0.10539922456186433678,
        [46] = FSCALE * 0.10025884372280373372,
        [47] = FSCALE * 0.09536916221554961888,
        [48] = FSCALE * 0.09071795328941250337,
        [49] = FSCALE * 0.08629358649937051097,
        [50] = FSCALE * 0.08208499862389879516,
        [51] = FSCALE * 0.07808166600115315231,
        [52] = FSCALE * 0.07427357821433388042,
        [53] = FSCALE * 0.07065121306042958674,
        [54] = FSCALE * 0.06720551273974976512,
        [55] = FSCALE * 0.06392786120670757270,
        [56] = FSCALE * 0.06081006262521796499,
        [57] = FSCALE * 0.05784432087483846296,
        [58] = FSCALE * 0.05502322005640722902,
        [59] = FSCALE * 0.05233970594843239308,
        [60] = FSCALE * 0.04978706836786394297,
        [61] = FSCALE * 0.04735892439114092119,
        [62] = FSCALE * 0.04504920239355780606,
        [63] = FSCALE * 0.04285212686704017991,
        [64] = FSCALE * 0.04076220397836621516,
        [65] = FSCALE * 0.03877420783172200988,
        [66] = FSCALE * 0.03688316740124000544,
        [67] = FSCALE * 0.03508435410084502588,
        [68] = FSCALE * 0.03337326996032607948,
        [69] = FSCALE * 0.03174563637806794323,
        [70] = FSCALE * 0.03019738342231850073,
        [71] = FSCALE * 0.02872463965423942912,
        [72] = FSCALE * 0.02732372244729256080,
        [73] = FSCALE * 0.02599112877875534358,
        [74] = FSCALE * 0.02472352647033939120,
        [75] = FSCALE * 0.02351774585600910823,
        [76] = FSCALE * 0.02237077185616559577,
        [77] = FSCALE * 0.02127973643837716938,
        [78] = FSCALE * 0.02024191144580438847,
        [79] = FSCALE * 0.01925470177538692429,
        [80] = FSCALE * 0.01831563888873418029,
        [81] = FSCALE * 0.01742237463949351138,
        [82] = FSCALE * 0.01657267540176124754,
        [83] = FSCALE * 0.01576441648485449082,
        [84] = FSCALE * 0.01499557682047770621,
        [85] = FSCALE * 0.01426423390899925527,
        [86] = FSCALE * 0.01356855901220093175,
        [87] = FSCALE * 0.01290681258047986886,
        [88] = FSCALE * 0.01227733990306844117,
        [89] = FSCALE * 0.01167856697039544521,
        [90] = FSCALE * 0.01110899653824230649,
        [91] = FSCALE * 0.01056720438385265337,
        [92] = FSCALE * 0.01005183574463358164,
        [93] = FSCALE * 0.00956160193054350793,
        [94] = FSCALE * 0.00909527710169581709,
        [95] = FSCALE * 0.00865169520312063417,
        [96] = FSCALE * 0.00822974704902002884,
        [97] = FSCALE * 0.00782837754922577143,
        [98] = FSCALE * 0.00744658307092434051,
        [99] = FSCALE * 0.00708340892905212004,
        [100] = FSCALE * 0.00673794699908546709,
        [101] = FSCALE * 0.00640933344625638184,
        [102] = FSCALE * 0.00609674656551563610,
        [103] = FSCALE * 0.00579940472684214321,
        [104] = FSCALE * 0.00551656442076077241,
        [105] = FSCALE * 0.00524751839918138427,
        [106] = FSCALE * 0.00499159390691021621,
        [107] = FSCALE * 0.00474815099941147558,
        [108] = FSCALE * 0.00451658094261266798,
        [109] = FSCALE * 0.00429630469075234057,
        [110] = FSCALE * 0.00408677143846406699,
};
#endif

#define CCPU_EXP_MAX 110
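
/*
 * In the 4BSD case, racct_getpcpu() below divides the summed thread pctcpu
 * by (FSCALE - ccpu_exp[swtime]) to normalize the decaying average for a
 * process that has only been swapped in for 'swtime' seconds.  For example
 * (illustrative numbers), with swtime = 20 the divisor is roughly
 * 0.632 * FSCALE, since ccpu_exp[20] is about 0.368 * FSCALE.
 */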

/*
 * This function is analogous to the getpcpu() function in the ps(1) command.
 * They should both calculate the %cpu in the same way, so that the racct
 * %cpu values are consistent with those shown by the ps(1) tool.
 * The calculations are more complex in the 4BSD scheduler because of the
 * value of the ccpu variable.  In ULE it is defined to be zero, which saves
 * us some work.
 */
static uint64_t
racct_getpcpu(struct proc *p, u_int pcpu)
{
        u_int swtime;
#ifdef SCHED_4BSD
        fixpt_t pctcpu, pctcpu_next;
#endif
        fixpt_t p_pctcpu;
        struct thread *td;

        ASSERT_RACCT_ENABLED();
        KASSERT((p->p_flag & P_IDLEPROC) == 0,
            ("racct_getpcpu: idle process %p", p));

        swtime = (ticks - p->p_swtick) / hz;

        /*
         * For short-lived processes, sched_pctcpu() returns small values
         * even for CPU-intensive processes.  Therefore we use our own
         * estimate in this case.
         */
        if (swtime < RACCT_PCPU_SECS)
                return (pcpu);

        p_pctcpu = 0;
        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
#ifdef SCHED_4BSD
                pctcpu = sched_pctcpu(td);
                /* Count also the yet unfinished second. */
                pctcpu_next = (pctcpu * ccpu_exp[1]) >> FSHIFT;
                pctcpu_next += sched_pctcpu_delta(td);
                p_pctcpu += max(pctcpu, pctcpu_next);
#else
                /*
                 * In ULE the %cpu statistics are updated on every
                 * sched_pctcpu() call.  So special calculations to
                 * account for the latest (unfinished) second are
                 * not needed.
                 */
                p_pctcpu += sched_pctcpu(td);
#endif
                thread_unlock(td);
        }

#ifdef SCHED_4BSD
        if (swtime <= CCPU_EXP_MAX)
                return ((100 * (uint64_t)p_pctcpu * 1000000) /
                    (FSCALE - ccpu_exp[swtime]));
#endif

        return ((100 * (uint64_t)p_pctcpu * 1000000) / FSCALE);
}

static void
racct_add_racct(struct racct *dest, const struct racct *src)
{
        int i;

        ASSERT_RACCT_ENABLED();
        RACCT_LOCK_ASSERT();

        /*
         * Update resource usage in dest.
         */
        for (i = 0; i <= RACCT_MAX; i++) {
                KASSERT(dest->r_resources[i] >= 0,
                    ("%s: resource %d propagation meltdown: dest < 0",
                    __func__, i));
                KASSERT(src->r_resources[i] >= 0,
                    ("%s: resource %d propagation meltdown: src < 0",
                    __func__, i));
                dest->r_resources[i] += src->r_resources[i];
        }
}

static void
racct_sub_racct(struct racct *dest, const struct racct *src)
{
        int i;

        ASSERT_RACCT_ENABLED();
        RACCT_LOCK_ASSERT();

        /*
         * Update resource usage in dest.
         */
        for (i = 0; i <= RACCT_MAX; i++) {
                if (!RACCT_IS_SLOPPY(i) && !RACCT_IS_DECAYING(i)) {
                        KASSERT(dest->r_resources[i] >= 0,
                            ("%s: resource %d propagation meltdown: dest < 0",
                            __func__, i));
                        KASSERT(src->r_resources[i] >= 0,
                            ("%s: resource %d propagation meltdown: src < 0",
                            __func__, i));
                        KASSERT(src->r_resources[i] <= dest->r_resources[i],
                            ("%s: resource %d propagation meltdown: src > dest",
                            __func__, i));
                }
                if (RACCT_CAN_DROP(i)) {
                        dest->r_resources[i] -= src->r_resources[i];
                        if (dest->r_resources[i] < 0)
                                dest->r_resources[i] = 0;
                }
        }
}

void
racct_create(struct racct **racctp)
{

        if (!racct_enable)
                return;

        SDT_PROBE1(racct, , racct, create, racctp);

        KASSERT(*racctp == NULL, ("racct already allocated"));

        *racctp = uma_zalloc(racct_zone, M_WAITOK | M_ZERO);
}

static void
racct_destroy_locked(struct racct **racctp)
{
        struct racct *racct;
        int i;

        ASSERT_RACCT_ENABLED();

        SDT_PROBE1(racct, , racct, destroy, racctp);

        RACCT_LOCK_ASSERT();
        KASSERT(racctp != NULL, ("NULL racctp"));
        KASSERT(*racctp != NULL, ("NULL racct"));

        racct = *racctp;

        for (i = 0; i <= RACCT_MAX; i++) {
                if (RACCT_IS_SLOPPY(i))
                        continue;
                if (!RACCT_IS_RECLAIMABLE(i))
                        continue;
                KASSERT(racct->r_resources[i] == 0,
                    ("destroying non-empty racct: "
                    "%ju allocated for resource %d\n",
                    racct->r_resources[i], i));
        }
        uma_zfree(racct_zone, racct);
        *racctp = NULL;
}

void
racct_destroy(struct racct **racct)
{

        if (!racct_enable)
                return;

        RACCT_LOCK();
        racct_destroy_locked(racct);
        RACCT_UNLOCK();
}

/*
 * Increase consumption of 'resource' by 'amount' for 'racct',
 * but not for its parents.  Unlike in other cases, 'amount' here
 * may be less than zero.
 */
static void
racct_adjust_resource(struct racct *racct, int resource,
    int64_t amount)
{

        ASSERT_RACCT_ENABLED();
        RACCT_LOCK_ASSERT();
        KASSERT(racct != NULL, ("NULL racct"));

        racct->r_resources[resource] += amount;
        if (racct->r_resources[resource] < 0) {
                KASSERT(RACCT_IS_SLOPPY(resource) || RACCT_IS_DECAYING(resource),
                    ("%s: resource %d usage < 0", __func__, resource));
                racct->r_resources[resource] = 0;
        }

        /*
         * There are some cases where the racct %cpu resource would grow
         * beyond 100% per core.  For example, in racct_proc_exit() we add
         * the process %cpu usage to the ucred racct containers.  If too
         * many processes terminate in a short time span, the ucred %cpu
         * resource could grow too much.  Also, the 4BSD scheduler sometimes
         * returns more than 100% cpu usage for a single thread.  So we clamp
         * the value here to 100% times the maximum number of CPUs.
         */
        if ((resource == RACCT_PCTCPU) &&
            (racct->r_resources[RACCT_PCTCPU] > 100 * 1000000 * (int64_t)MAXCPU))
                racct->r_resources[RACCT_PCTCPU] = 100 * 1000000 * (int64_t)MAXCPU;
}

static int
racct_add_locked(struct proc *p, int resource, uint64_t amount, int force)
{
#ifdef RCTL
        int error;
#endif

        ASSERT_RACCT_ENABLED();

        /*
         * We need proc lock to dereference p->p_ucred.
         */
        PROC_LOCK_ASSERT(p, MA_OWNED);

#ifdef RCTL
        error = rctl_enforce(p, resource, amount);
        if (error && !force && RACCT_IS_DENIABLE(resource)) {
                SDT_PROBE3(racct, , rusage, add__failure, p, resource, amount);
                return (error);
        }
#endif
        racct_adjust_resource(p->p_racct, resource, amount);
        racct_add_cred_locked(p->p_ucred, resource, amount);

        return (0);
}

/*
 * Increase allocation of 'resource' by 'amount' for process 'p'.
 * Return 0 if it's below limits, or errno, if it's not.
 */
int
racct_add(struct proc *p, int resource, uint64_t amount)
{
        int error;

        if (!racct_enable)
                return (0);

        SDT_PROBE3(racct, , rusage, add, p, resource, amount);

        RACCT_LOCK();
        error = racct_add_locked(p, resource, amount, 0);
        RACCT_UNLOCK();
        return (error);
}

/*
 * Increase allocation of 'resource' by 'amount' for process 'p'.
 * Doesn't check for limits and never fails.
 */
void
racct_add_force(struct proc *p, int resource, uint64_t amount)
{

        if (!racct_enable)
                return;

        SDT_PROBE3(racct, , rusage, add__force, p, resource, amount);

        RACCT_LOCK();
        racct_add_locked(p, resource, amount, 1);
        RACCT_UNLOCK();
}

static void
racct_add_cred_locked(struct ucred *cred, int resource, uint64_t amount)
{
        struct prison *pr;

        ASSERT_RACCT_ENABLED();

        racct_adjust_resource(cred->cr_ruidinfo->ui_racct, resource, amount);
        for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent)
                racct_adjust_resource(pr->pr_prison_racct->prr_racct, resource,
                    amount);
        racct_adjust_resource(cred->cr_loginclass->lc_racct, resource, amount);
}

/*
 * Increase allocation of 'resource' by 'amount' for credential 'cred'.
 * Doesn't check for limits and never fails.
 */
void
racct_add_cred(struct ucred *cred, int resource, uint64_t amount)
{

        if (!racct_enable)
                return;

        SDT_PROBE3(racct, , rusage, add__cred, cred, resource, amount);

        RACCT_LOCK();
        racct_add_cred_locked(cred, resource, amount);
        RACCT_UNLOCK();
}

/*
 * Account for disk IO resource consumption.  Checks for limits,
 * but never fails, due to disk limits being undeniable.
 */
void
racct_add_buf(struct proc *p, const struct buf *bp, int is_write)
{

        ASSERT_RACCT_ENABLED();
        PROC_LOCK_ASSERT(p, MA_OWNED);

        SDT_PROBE3(racct, , rusage, add__buf, p, bp, is_write);

        RACCT_LOCK();
        if (is_write) {
                racct_add_locked(curproc, RACCT_WRITEBPS, bp->b_bcount, 1);
                racct_add_locked(curproc, RACCT_WRITEIOPS, 1, 1);
        } else {
                racct_add_locked(curproc, RACCT_READBPS, bp->b_bcount, 1);
                racct_add_locked(curproc, RACCT_READIOPS, 1, 1);
        }
        RACCT_UNLOCK();
}

static int
racct_set_locked(struct proc *p, int resource, uint64_t amount, int force)
{
        int64_t old_amount, decayed_amount, diff_proc, diff_cred;
#ifdef RCTL
        int error;
#endif

        ASSERT_RACCT_ENABLED();

        /*
         * We need proc lock to dereference p->p_ucred.
         */
        PROC_LOCK_ASSERT(p, MA_OWNED);

        old_amount = p->p_racct->r_resources[resource];
        /*
         * The diffs may be negative.
         */
        diff_proc = amount - old_amount;
        if (resource == RACCT_PCTCPU) {
                /*
                 * Resources in per-credential racct containers may decay.
                 * If this is the case, we need to calculate the difference
                 * between the new amount and the proportional value of the
                 * old amount that has decayed in the ucred racct containers.
                 */
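                /*
                 * For example (illustrative numbers): with the 0.3 decay
                 * factor, an old RACCT_PCTCPU value of 40 * 1000000 and a
                 * new amount of 35 * 1000000 give diff_proc = -5 * 1000000,
                 * while diff_cred = (35 - 0.3 * 40) * 1000000 = 23 * 1000000.
                 */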
                decayed_amount = old_amount * RACCT_DECAY_FACTOR / FSCALE;
                diff_cred = amount - decayed_amount;
        } else
                diff_cred = diff_proc;
#ifdef notyet
        KASSERT(diff_proc >= 0 || RACCT_CAN_DROP(resource),
            ("%s: usage of non-droppable resource %d dropping", __func__,
            resource));
#endif
#ifdef RCTL
        if (diff_proc > 0) {
                error = rctl_enforce(p, resource, diff_proc);
                if (error && !force && RACCT_IS_DENIABLE(resource)) {
                        SDT_PROBE3(racct, , rusage, set__failure, p, resource,
                            amount);
                        return (error);
                }
        }
#endif
        racct_adjust_resource(p->p_racct, resource, diff_proc);
        if (diff_cred > 0)
                racct_add_cred_locked(p->p_ucred, resource, diff_cred);
        else if (diff_cred < 0)
                racct_sub_cred_locked(p->p_ucred, resource, -diff_cred);

        return (0);
}

/*
 * Set allocation of 'resource' to 'amount' for process 'p'.
 * Return 0 if it's below limits, or errno, if it's not.
 *
 * Note that decreasing the allocation always returns 0,
 * even if it's above the limit.
 */
int
racct_set_unlocked(struct proc *p, int resource, uint64_t amount)
{
        int error;

        ASSERT_RACCT_ENABLED();
        PROC_LOCK(p);
        error = racct_set(p, resource, amount);
        PROC_UNLOCK(p);
        return (error);
}

int
racct_set(struct proc *p, int resource, uint64_t amount)
{
        int error;

        if (!racct_enable)
                return (0);

        SDT_PROBE3(racct, , rusage, set__force, p, resource, amount);

        RACCT_LOCK();
        error = racct_set_locked(p, resource, amount, 0);
        RACCT_UNLOCK();
        return (error);
}

void
racct_set_force(struct proc *p, int resource, uint64_t amount)
{

        if (!racct_enable)
                return;

        SDT_PROBE3(racct, , rusage, set, p, resource, amount);

        RACCT_LOCK();
        racct_set_locked(p, resource, amount, 1);
        RACCT_UNLOCK();
}

/*
 * Returns amount of 'resource' the process 'p' can keep allocated.
 * Allocating more than that would be denied, unless the resource
 * is marked undeniable.  Amount of already allocated resource does
 * not matter.
 */
uint64_t
racct_get_limit(struct proc *p, int resource)
{
#ifdef RCTL
        uint64_t available;

        if (!racct_enable)
                return (UINT64_MAX);

        RACCT_LOCK();
        available = rctl_get_limit(p, resource);
        RACCT_UNLOCK();

        return (available);
#else

        return (UINT64_MAX);
#endif
}

/*
 * Returns amount of 'resource' the process 'p' can keep allocated.
 * Allocating more than that would be denied, unless the resource
 * is marked undeniable.  Amount of already allocated resource does
 * matter.
 */
uint64_t
racct_get_available(struct proc *p, int resource)
{
#ifdef RCTL
        uint64_t available;

        if (!racct_enable)
                return (UINT64_MAX);

        RACCT_LOCK();
        available = rctl_get_available(p, resource);
        RACCT_UNLOCK();

        return (available);
#else

        return (UINT64_MAX);
#endif
}

/*
 * Returns amount of the %cpu resource that process 'p' can add to its %cpu
 * utilization.  Adding more than that would lead to the process being
 * throttled.
 */
static int64_t
racct_pcpu_available(struct proc *p)
{
#ifdef RCTL
        uint64_t available;

        ASSERT_RACCT_ENABLED();

        RACCT_LOCK();
        available = rctl_pcpu_available(p);
        RACCT_UNLOCK();

        return (available);
#else

        return (INT64_MAX);
#endif
}

/*
 * Decrease allocation of 'resource' by 'amount' for process 'p'.
 */
void
racct_sub(struct proc *p, int resource, uint64_t amount)
{

        if (!racct_enable)
                return;

        SDT_PROBE3(racct, , rusage, sub, p, resource, amount);

        /*
         * We need proc lock to dereference p->p_ucred.
         */
        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT(RACCT_CAN_DROP(resource),
            ("%s: called for non-droppable resource %d", __func__, resource));

        RACCT_LOCK();
        KASSERT(amount <= p->p_racct->r_resources[resource],
            ("%s: freeing %ju of resource %d, which is more "
            "than allocated %jd for %s (pid %d)", __func__, amount, resource,
            (intmax_t)p->p_racct->r_resources[resource], p->p_comm, p->p_pid));

        racct_adjust_resource(p->p_racct, resource, -amount);
        racct_sub_cred_locked(p->p_ucred, resource, amount);
        RACCT_UNLOCK();
}

static void
racct_sub_cred_locked(struct ucred *cred, int resource, uint64_t amount)
{
        struct prison *pr;

        ASSERT_RACCT_ENABLED();

        racct_adjust_resource(cred->cr_ruidinfo->ui_racct, resource, -amount);
        for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent)
                racct_adjust_resource(pr->pr_prison_racct->prr_racct, resource,
                    -amount);
        racct_adjust_resource(cred->cr_loginclass->lc_racct, resource, -amount);
}

/*
 * Decrease allocation of 'resource' by 'amount' for credential 'cred'.
 */
void
racct_sub_cred(struct ucred *cred, int resource, uint64_t amount)
{

        if (!racct_enable)
                return;

        SDT_PROBE3(racct, , rusage, sub__cred, cred, resource, amount);

#ifdef notyet
        KASSERT(RACCT_CAN_DROP(resource),
            ("%s: called for resource %d which can not drop", __func__,
            resource));
#endif

        RACCT_LOCK();
        racct_sub_cred_locked(cred, resource, amount);
        RACCT_UNLOCK();
}

/*
 * Inherit resource usage information from the parent process.
 */
int
racct_proc_fork(struct proc *parent, struct proc *child)
{
        int i, error = 0;

        if (!racct_enable)
                return (0);

        /*
         * Create racct for the child process.
         */
        racct_create(&child->p_racct);

        PROC_LOCK(parent);
        PROC_LOCK(child);
        RACCT_LOCK();

#ifdef RCTL
        error = rctl_proc_fork(parent, child);
        if (error != 0)
                goto out;
#endif

        /* Init process cpu time. */
        child->p_prev_runtime = 0;
        child->p_throttled = 0;

        /*
         * Inherit resource usage.
         */
        for (i = 0; i <= RACCT_MAX; i++) {
                if (parent->p_racct->r_resources[i] == 0 ||
                    !RACCT_IS_INHERITABLE(i))
                        continue;

                error = racct_set_locked(child, i,
                    parent->p_racct->r_resources[i], 0);
                if (error != 0)
                        goto out;
        }

        error = racct_add_locked(child, RACCT_NPROC, 1, 0);
        error += racct_add_locked(child, RACCT_NTHR, 1, 0);

out:
        RACCT_UNLOCK();
        PROC_UNLOCK(child);
        PROC_UNLOCK(parent);

        if (error != 0)
                racct_proc_exit(child);

        return (error);
}

/*
 * Called at the end of fork1(), to handle rules that require the process
 * to be fully initialized.
 */
void
racct_proc_fork_done(struct proc *child)
{

        if (!racct_enable)
                return;

#ifdef RCTL
        PROC_LOCK(child);
        RACCT_LOCK();
        rctl_enforce(child, RACCT_NPROC, 0);
        rctl_enforce(child, RACCT_NTHR, 0);
        RACCT_UNLOCK();
        PROC_UNLOCK(child);
#endif
}

void
racct_proc_exit(struct proc *p)
{
        struct timeval wallclock;
        uint64_t pct_estimate, pct, runtime;
        int i;

        if (!racct_enable)
                return;

        PROC_LOCK(p);
        /*
         * We don't need to calculate rux; proc_reap() has already done this.
         */
        runtime = cputick2usec(p->p_rux.rux_runtime);
#ifdef notyet
        KASSERT(runtime >= p->p_prev_runtime, ("runtime < p_prev_runtime"));
#else
        if (runtime < p->p_prev_runtime)
                runtime = p->p_prev_runtime;
#endif
        microuptime(&wallclock);
        timevalsub(&wallclock, &p->p_stats->p_start);
        if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
                pct_estimate = (1000000 * runtime * 100) /
                    ((uint64_t)wallclock.tv_sec * 1000000 +
                    wallclock.tv_usec);
        } else
                pct_estimate = 0;
        pct = racct_getpcpu(p, pct_estimate);

        RACCT_LOCK();
        racct_set_locked(p, RACCT_CPU, runtime, 0);
        racct_add_cred_locked(p->p_ucred, RACCT_PCTCPU, pct);

        KASSERT(p->p_racct->r_resources[RACCT_RSS] == 0,
            ("process reaped with %ju allocated for RSS\n",
            p->p_racct->r_resources[RACCT_RSS]));
        for (i = 0; i <= RACCT_MAX; i++) {
                if (p->p_racct->r_resources[i] == 0)
                        continue;
                if (!RACCT_IS_RECLAIMABLE(i))
                        continue;
                racct_set_locked(p, i, 0, 0);
        }

#ifdef RCTL
        rctl_racct_release(p->p_racct);
#endif
        racct_destroy_locked(&p->p_racct);
        RACCT_UNLOCK();
        PROC_UNLOCK(p);
}

/*
 * Called after credentials change, to move resource utilisation
 * between raccts.
 */
void
racct_proc_ucred_changed(struct proc *p, struct ucred *oldcred,
    struct ucred *newcred)
{
        struct uidinfo *olduip, *newuip;
        struct loginclass *oldlc, *newlc;
        struct prison *oldpr, *newpr, *pr;

        if (!racct_enable)
                return;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        newuip = newcred->cr_ruidinfo;
        olduip = oldcred->cr_ruidinfo;
        newlc = newcred->cr_loginclass;
        oldlc = oldcred->cr_loginclass;
        newpr = newcred->cr_prison;
        oldpr = oldcred->cr_prison;

        RACCT_LOCK();
        if (newuip != olduip) {
                racct_sub_racct(olduip->ui_racct, p->p_racct);
                racct_add_racct(newuip->ui_racct, p->p_racct);
        }
        if (newlc != oldlc) {
                racct_sub_racct(oldlc->lc_racct, p->p_racct);
                racct_add_racct(newlc->lc_racct, p->p_racct);
        }
        if (newpr != oldpr) {
                for (pr = oldpr; pr != NULL; pr = pr->pr_parent)
                        racct_sub_racct(pr->pr_prison_racct->prr_racct,
                            p->p_racct);
                for (pr = newpr; pr != NULL; pr = pr->pr_parent)
                        racct_add_racct(pr->pr_prison_racct->prr_racct,
                            p->p_racct);
        }
        RACCT_UNLOCK();
}

void
racct_move(struct racct *dest, struct racct *src)
{

        ASSERT_RACCT_ENABLED();

        RACCT_LOCK();
        racct_add_racct(dest, src);
        racct_sub_racct(src, src);
        RACCT_UNLOCK();
}

static void
ast_racct(struct thread *td, int tda __unused)
{
        struct proc *p;

        ASSERT_RACCT_ENABLED();

        p = td->td_proc;
        if (p->p_throttled == 0)
                return;

        PROC_LOCK(p);
        while (p->p_throttled != 0) {
                msleep(p->p_racct, &p->p_mtx, 0, "racct",
                    p->p_throttled < 0 ? 0 : p->p_throttled);
                if (p->p_throttled > 0)
                        p->p_throttled = 0;
        }
        PROC_UNLOCK(p);
}

/*
 * Make the process sleep in userret() for 'timeout' ticks.  Setting
 * timeout to -1 makes it sleep until woken up by racct_proc_wakeup().
 */
void
racct_proc_throttle(struct proc *p, int timeout)
{
        struct thread *td;
#ifdef SMP
        int cpuid;
#endif

        KASSERT(timeout != 0, ("timeout %d", timeout));
        ASSERT_RACCT_ENABLED();
        PROC_LOCK_ASSERT(p, MA_OWNED);

        /*
         * Do not block kernel processes.  Also do not block processes with
         * low %cpu utilization to improve interactivity.
         */
        if ((p->p_flag & (P_SYSTEM | P_KPROC)) != 0)
                return;

        if (p->p_throttled < 0 || (timeout > 0 && p->p_throttled > timeout))
                return;

        p->p_throttled = timeout;

        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                ast_sched_locked(td, TDA_RACCT);

                switch (TD_GET_STATE(td)) {
                case TDS_RUNQ:
                        /*
                         * If the thread is on the scheduler run-queue, we can
                         * not just remove it from there.  So we set the flag
                         * TDA_SCHED for the thread, so that once it is
                         * running, it is taken off the cpu as soon as possible.
                         */
                        ast_sched_locked(td, TDA_SCHED);
                        break;
                case TDS_RUNNING:
                        /*
                         * If the thread is running, we request a context
                         * switch for it by setting the TDA_SCHED flag.
                         */
                        ast_sched_locked(td, TDA_SCHED);
#ifdef SMP
                        cpuid = td->td_oncpu;
                        if ((cpuid != NOCPU) && (td != curthread))
                                ipi_cpu(cpuid, IPI_AST);
#endif
                        break;
                default:
                        break;
                }
                thread_unlock(td);
        }
}

static void
racct_proc_wakeup(struct proc *p)
{

        ASSERT_RACCT_ENABLED();

        PROC_LOCK_ASSERT(p, MA_OWNED);

        if (p->p_throttled != 0) {
                p->p_throttled = 0;
                wakeup(p->p_racct);
        }
}

static void
racct_decay_callback(struct racct *racct, void *dummy1, void *dummy2)
{
        int64_t r_old, r_new;

        ASSERT_RACCT_ENABLED();
        RACCT_LOCK_ASSERT();

#ifdef RCTL
        rctl_throttle_decay(racct, RACCT_READBPS);
        rctl_throttle_decay(racct, RACCT_WRITEBPS);
        rctl_throttle_decay(racct, RACCT_READIOPS);
        rctl_throttle_decay(racct, RACCT_WRITEIOPS);
#endif

        r_old = racct->r_resources[RACCT_PCTCPU];

        /* If there is nothing to decay, just exit. */
        if (r_old <= 0)
                return;

        r_new = r_old * RACCT_DECAY_FACTOR / FSCALE;
        racct->r_resources[RACCT_PCTCPU] = r_new;
}

static void
racct_decay_pre(void)
{

        RACCT_LOCK();
}

static void
racct_decay_post(void)
{

        RACCT_UNLOCK();
}

static void
racct_decay(void)
{

        ASSERT_RACCT_ENABLED();

        ui_racct_foreach(racct_decay_callback, racct_decay_pre,
            racct_decay_post, NULL, NULL);
        loginclass_racct_foreach(racct_decay_callback, racct_decay_pre,
            racct_decay_post, NULL, NULL);
        prison_racct_foreach(racct_decay_callback, racct_decay_pre,
            racct_decay_post, NULL, NULL);
}

static void
racctd(void)
{
        struct thread *td;
        struct proc *p;
        struct timeval wallclock;
        uint64_t pct, pct_estimate, runtime;

        ASSERT_RACCT_ENABLED();

        for (;;) {
                racct_decay();

                sx_slock(&allproc_lock);

                FOREACH_PROC_IN_SYSTEM(p) {
                        PROC_LOCK(p);
                        if (p->p_state != PRS_NORMAL ||
                            (p->p_flag & P_IDLEPROC) != 0) {
                                if (p->p_state == PRS_ZOMBIE)
                                        racct_set(p, RACCT_PCTCPU, 0);
                                PROC_UNLOCK(p);
                                continue;
                        }

                        microuptime(&wallclock);
                        timevalsub(&wallclock, &p->p_stats->p_start);
                        PROC_STATLOCK(p);
                        FOREACH_THREAD_IN_PROC(p, td)
                                ruxagg(p, td);
                        runtime = cputick2usec(p->p_rux.rux_runtime);
                        PROC_STATUNLOCK(p);
#ifdef notyet
                        KASSERT(runtime >= p->p_prev_runtime,
                            ("runtime < p_prev_runtime"));
#else
                        if (runtime < p->p_prev_runtime)
                                runtime = p->p_prev_runtime;
#endif
                        p->p_prev_runtime = runtime;
                        if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
                                pct_estimate = (1000000 * runtime * 100) /
                                    ((uint64_t)wallclock.tv_sec * 1000000 +
                                    wallclock.tv_usec);
                        } else
                                pct_estimate = 0;
                        pct = racct_getpcpu(p, pct_estimate);
                        RACCT_LOCK();
#ifdef RCTL
                        rctl_throttle_decay(p->p_racct, RACCT_READBPS);
                        rctl_throttle_decay(p->p_racct, RACCT_WRITEBPS);
                        rctl_throttle_decay(p->p_racct, RACCT_READIOPS);
                        rctl_throttle_decay(p->p_racct, RACCT_WRITEIOPS);
#endif
                        racct_set_locked(p, RACCT_PCTCPU, pct, 1);
                        racct_set_locked(p, RACCT_CPU, runtime, 0);
                        racct_set_locked(p, RACCT_WALLCLOCK,
                            (uint64_t)wallclock.tv_sec * 1000000 +
                            wallclock.tv_usec, 0);
                        RACCT_UNLOCK();
                        PROC_UNLOCK(p);
                }

                /*
                 * To ensure that processes are throttled in a fair way, we
                 * need to iterate over all processes again and check the
                 * %cpu resource limits only after the ucred racct containers
                 * have been properly filled.
                 */
                FOREACH_PROC_IN_SYSTEM(p) {
                        PROC_LOCK(p);
                        if (p->p_state != PRS_NORMAL) {
                                PROC_UNLOCK(p);
                                continue;
                        }

                        if (racct_pcpu_available(p) <= 0) {
                                if (p->p_racct->r_resources[RACCT_PCTCPU] >
                                    pcpu_threshold)
                                        racct_proc_throttle(p, -1);
                        } else if (p->p_throttled == -1) {
                                racct_proc_wakeup(p);
                        }
                        PROC_UNLOCK(p);
                }
                sx_sunlock(&allproc_lock);
                pause("-", hz);
        }
}

static struct kproc_desc racctd_kp = {
        "racctd",
        racctd,
        NULL
};

static void
racctd_init(void)
{
        if (!racct_enable)
                return;

        kproc_start(&racctd_kp);
}
SYSINIT(racctd, SI_SUB_RACCTD, SI_ORDER_FIRST, racctd_init, NULL);

static void
racct_init(void)
{
        if (!racct_enable)
                return;

        racct_zone = uma_zcreate("racct", sizeof(struct racct),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        ast_register(TDA_RACCT, ASTR_ASTF_REQUIRED, 0, ast_racct);

        /*
         * XXX: Move this somewhere.
         */
        prison0.pr_prison_racct = prison_racct_find("0");
}
SYSINIT(racct, SI_SUB_RACCT, SI_ORDER_FIRST, racct_init, NULL);