/*-
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Edward Tomasz Napierala under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/loginclass.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/umtx.h>
#include <machine/smp.h>

#ifdef RCTL
#include <sys/rctl.h>
#endif

#ifdef RACCT

FEATURE(racct, "Resource Accounting");

/*
 * Do not block processes that have their %cpu usage <= pcpu_threshold.
 */
static int pcpu_threshold = 1;

SYSCTL_NODE(_kern, OID_AUTO, racct, CTLFLAG_RW, 0, "Resource Accounting");
SYSCTL_UINT(_kern_racct, OID_AUTO, pcpu_threshold, CTLFLAG_RW, &pcpu_threshold,
    0, "Processes with higher %cpu usage than this value can be throttled.");

/*
 * How many seconds it takes before we start using the scheduler's %cpu
 * calculations.  When a process starts, we compute its %cpu usage by
 * dividing its runtime by the process wall clock time.  After
 * RACCT_PCPU_SECS pass, we use the value provided by the scheduler.
 */
#define	RACCT_PCPU_SECS		3
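/*
 * For example, under the early estimate described above a process that has
 * consumed 1 second of CPU time during its first 2 seconds of wall clock
 * time is charged roughly 50% until RACCT_PCPU_SECS seconds have elapsed,
 * after which the scheduler's own decayed average takes over.
 */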
static struct mtx racct_lock;
MTX_SYSINIT(racct_lock, &racct_lock, "racct lock", MTX_DEF);

static uma_zone_t racct_zone;

static void racct_sub_racct(struct racct *dest, const struct racct *src);
static void racct_sub_cred_locked(struct ucred *cred, int resource,
		uint64_t amount);
static void racct_add_cred_locked(struct ucred *cred, int resource,
		uint64_t amount);

SDT_PROVIDER_DEFINE(racct);
SDT_PROBE_DEFINE3(racct, kernel, rusage, add, add, "struct proc *", "int",
    "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, add_failure, add-failure,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, add_cred, add-cred, "struct ucred *",
    "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, add_force, add-force, "struct proc *",
    "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, set, set, "struct proc *", "int",
    "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, set_failure, set-failure,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, sub, sub, "struct proc *", "int",
    "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, sub_cred, sub-cred, "struct ucred *",
    "int", "uint64_t");
SDT_PROBE_DEFINE1(racct, kernel, racct, create, create, "struct racct *");
SDT_PROBE_DEFINE1(racct, kernel, racct, destroy, destroy, "struct racct *");
SDT_PROBE_DEFINE2(racct, kernel, racct, join, join, "struct racct *",
    "struct racct *");
SDT_PROBE_DEFINE2(racct, kernel, racct, join_failure, join-failure,
    "struct racct *", "struct racct *");
SDT_PROBE_DEFINE2(racct, kernel, racct, leave, leave, "struct racct *",
    "struct racct *");

int racct_types[] = {
	[RACCT_CPU] =
		RACCT_IN_MILLIONS,
	[RACCT_DATA] =
		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
	[RACCT_STACK] =
		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
	[RACCT_CORE] =
		RACCT_DENIABLE,
	[RACCT_RSS] =
		RACCT_RECLAIMABLE,
	[RACCT_MEMLOCK] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE,
	[RACCT_NPROC] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE,
	[RACCT_NOFILE] =
		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
	[RACCT_VMEM] =
		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
	[RACCT_NPTS] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_SWAP] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_NTHR] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE,
	[RACCT_MSGQQUEUED] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_MSGQSIZE] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_NMSGQ] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_NSEM] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_NSEMOP] =
		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
	[RACCT_NSHM] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_SHMSIZE] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_WALLCLOCK] =
		RACCT_IN_MILLIONS,
	[RACCT_PCTCPU] =
		RACCT_DECAYING | RACCT_DENIABLE | RACCT_IN_MILLIONS };

static const fixpt_t RACCT_DECAY_FACTOR = 0.3 * FSCALE;
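/*
 * With the usual FSHIFT of 11 (FSCALE == 2048), RACCT_DECAY_FACTOR works
 * out to about 614, i.e. each decay step retains roughly 30% of the
 * previous value.
 */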
#ifdef SCHED_4BSD
/*
 * Contains intermediate values for %cpu calculations to avoid using floating
 * point in the kernel.
 * ccpu_exp[k] = FSCALE * (ccpu/FSCALE)^k = FSCALE * exp(-k/20)
 * It is needed only for the 4BSD scheduler, because in ULE ccpu equals
 * zero, so the calculations are more straightforward.
 */
fixpt_t ccpu_exp[] = {
	[0] = FSCALE * 1,
	[1] = FSCALE * 0.95122942450071400909,
	[2] = FSCALE * 0.90483741803595957316,
	[3] = FSCALE * 0.86070797642505780722,
	[4] = FSCALE * 0.81873075307798185866,
	[5] = FSCALE * 0.77880078307140486824,
	[6] = FSCALE * 0.74081822068171786606,
	[7] = FSCALE * 0.70468808971871343435,
	[8] = FSCALE * 0.67032004603563930074,
	[9] = FSCALE * 0.63762815162177329314,
	[10] = FSCALE * 0.60653065971263342360,
	[11] = FSCALE * 0.57694981038048669531,
	[12] = FSCALE * 0.54881163609402643262,
	[13] = FSCALE * 0.52204577676101604789,
	[14] = FSCALE * 0.49658530379140951470,
	[15] = FSCALE * 0.47236655274101470713,
	[16] = FSCALE * 0.44932896411722159143,
	[17] = FSCALE * 0.42741493194872666992,
	[18] = FSCALE * 0.40656965974059911188,
	[19] = FSCALE * 0.38674102345450120691,
	[20] = FSCALE * 0.36787944117144232159,
	[21] = FSCALE * 0.34993774911115535467,
	[22] = FSCALE * 0.33287108369807955328,
	[23] = FSCALE * 0.31663676937905321821,
	[24] = FSCALE * 0.30119421191220209664,
	[25] = FSCALE * 0.28650479686019010032,
	[26] = FSCALE * 0.27253179303401260312,
	[27] = FSCALE * 0.25924026064589150757,
	[28] = FSCALE * 0.24659696394160647693,
	[29] = FSCALE * 0.23457028809379765313,
	[30] = FSCALE * 0.22313016014842982893,
	[31] = FSCALE * 0.21224797382674305771,
	[32] = FSCALE * 0.20189651799465540848,
	[33] = FSCALE * 0.19204990862075411423,
	[34] = FSCALE * 0.18268352405273465022,
	[35] = FSCALE * 0.17377394345044512668,
	[36] = FSCALE * 0.16529888822158653829,
	[37] = FSCALE * 0.15723716631362761621,
	[38] = FSCALE * 0.14956861922263505264,
	[39] = FSCALE * 0.14227407158651357185,
	[40] = FSCALE * 0.13533528323661269189,
	[41] = FSCALE * 0.12873490358780421886,
	[42] = FSCALE * 0.12245642825298191021,
	[43] = FSCALE * 0.11648415777349695786,
	[44] = FSCALE * 0.11080315836233388333,
	[45] = FSCALE * 0.10539922456186433678,
	[46] = FSCALE * 0.10025884372280373372,
	[47] = FSCALE * 0.09536916221554961888,
	[48] = FSCALE * 0.09071795328941250337,
	[49] = FSCALE * 0.08629358649937051097,
	[50] = FSCALE * 0.08208499862389879516,
	[51] = FSCALE * 0.07808166600115315231,
	[52] = FSCALE * 0.07427357821433388042,
	[53] = FSCALE * 0.07065121306042958674,
	[54] = FSCALE * 0.06720551273974976512,
	[55] = FSCALE * 0.06392786120670757270,
	[56] = FSCALE * 0.06081006262521796499,
	[57] = FSCALE * 0.05784432087483846296,
	[58] = FSCALE * 0.05502322005640722902,
	[59] = FSCALE * 0.05233970594843239308,
	[60] = FSCALE * 0.04978706836786394297,
	[61] = FSCALE * 0.04735892439114092119,
	[62] = FSCALE * 0.04504920239355780606,
	[63] = FSCALE * 0.04285212686704017991,
	[64] = FSCALE * 0.04076220397836621516,
	[65] = FSCALE * 0.03877420783172200988,
	[66] = FSCALE * 0.03688316740124000544,
	[67] = FSCALE * 0.03508435410084502588,
	[68] = FSCALE * 0.03337326996032607948,
	[69] = FSCALE * 0.03174563637806794323,
	[70] = FSCALE * 0.03019738342231850073,
	[71] = FSCALE * 0.02872463965423942912,
	[72] = FSCALE * 0.02732372244729256080,
	[73] = FSCALE * 0.02599112877875534358,
	[74] = FSCALE * 0.02472352647033939120,
	[75] = FSCALE * 0.02351774585600910823,
	[76] = FSCALE * 0.02237077185616559577,
	[77] = FSCALE * 0.02127973643837716938,
	[78] = FSCALE * 0.02024191144580438847,
	[79] = FSCALE * 0.01925470177538692429,
	[80] = FSCALE * 0.01831563888873418029,
	[81] = FSCALE * 0.01742237463949351138,
	[82] = FSCALE * 0.01657267540176124754,
	[83] = FSCALE * 0.01576441648485449082,
	[84] = FSCALE * 0.01499557682047770621,
	[85] = FSCALE * 0.01426423390899925527,
	[86] = FSCALE * 0.01356855901220093175,
	[87] = FSCALE * 0.01290681258047986886,
	[88] = FSCALE * 0.01227733990306844117,
	[89] = FSCALE * 0.01167856697039544521,
	[90] = FSCALE * 0.01110899653824230649,
	[91] = FSCALE * 0.01056720438385265337,
	[92] = FSCALE * 0.01005183574463358164,
	[93] = FSCALE * 0.00956160193054350793,
	[94] = FSCALE * 0.00909527710169581709,
	[95] = FSCALE * 0.00865169520312063417,
	[96] = FSCALE * 0.00822974704902002884,
	[97] = FSCALE * 0.00782837754922577143,
	[98] = FSCALE * 0.00744658307092434051,
	[99] = FSCALE * 0.00708340892905212004,
	[100] = FSCALE * 0.00673794699908546709,
	[101] = FSCALE * 0.00640933344625638184,
	[102] = FSCALE * 0.00609674656551563610,
	[103] = FSCALE * 0.00579940472684214321,
	[104] = FSCALE * 0.00551656442076077241,
	[105] = FSCALE * 0.00524751839918138427,
	[106] = FSCALE * 0.00499159390691021621,
	[107] = FSCALE * 0.00474815099941147558,
	[108] = FSCALE * 0.00451658094261266798,
	[109] = FSCALE * 0.00429630469075234057,
	[110] = FSCALE * 0.00408677143846406699,
};
#endif

#define	CCPU_EXP_MAX	110
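/*
 * Sanity check for the table above: ccpu_exp[20] is FSCALE * exp(-20/20),
 * i.e. roughly 0.368 * FSCALE, consistent with the formula given in the
 * comment preceding the table.
 */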
/*
 * This function is analogous to the getpcpu() function in the ps(1) command.
 * They should both calculate in the same way so that the racct %cpu
 * calculations are consistent with the values shown by the ps(1) tool.
 * The calculations are more complex in the 4BSD scheduler because of the
 * value of the ccpu variable.  In ULE it is defined to be zero which saves
 * us some work.
 */
static uint64_t
racct_getpcpu(struct proc *p, u_int pcpu)
{
	u_int swtime;
#ifdef SCHED_4BSD
	fixpt_t pctcpu, pctcpu_next;
#endif
#ifdef SMP
	struct pcpu *pc;
	int found;
#endif
	fixpt_t p_pctcpu;
	struct thread *td;

	/*
	 * If the process is swapped out, we count its %cpu usage as zero.
	 * This behaviour is consistent with the userland ps(1) tool.
	 */
	if ((p->p_flag & P_INMEM) == 0)
		return (0);
	swtime = (ticks - p->p_swtick) / hz;

	/*
	 * For short-lived processes, sched_pctcpu() returns small values
	 * even for CPU-intensive processes.  Therefore we use our own
	 * estimate in this case.
	 */
	if (swtime < RACCT_PCPU_SECS)
		return (pcpu);

	p_pctcpu = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td == PCPU_GET(idlethread))
			continue;
#ifdef SMP
		found = 0;
		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
			if (td == pc->pc_idlethread) {
				found = 1;
				break;
			}
		}
		if (found)
			continue;
#endif
		thread_lock(td);
#ifdef SCHED_4BSD
		pctcpu = sched_pctcpu(td);
		/* Count also the yet unfinished second. */
		pctcpu_next = (pctcpu * ccpu_exp[1]) >> FSHIFT;
		pctcpu_next += sched_pctcpu_delta(td);
		p_pctcpu += max(pctcpu, pctcpu_next);
#else
		/*
		 * In ULE the %cpu statistics are updated on every
		 * sched_pctcpu() call.  So special calculations to
		 * account for the latest (unfinished) second are
		 * not needed.
		 */
		p_pctcpu += sched_pctcpu(td);
#endif
		thread_unlock(td);
	}

#ifdef SCHED_4BSD
	if (swtime <= CCPU_EXP_MAX)
		return ((100 * (uint64_t)p_pctcpu * 1000000) /
		    (FSCALE - ccpu_exp[swtime]));
#endif

	return ((100 * (uint64_t)p_pctcpu * 1000000) / FSCALE);
}
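/*
 * Note that the value returned above follows the RACCT_IN_MILLIONS
 * convention used for RACCT_PCTCPU: full utilization of one CPU is
 * reported as 100 * 1000000.  In the 4BSD case the division by
 * (FSCALE - ccpu_exp[swtime]) mirrors the correction applied by ps(1):
 * a process resident for only swtime seconds cannot have accumulated the
 * full decayed average yet, so for swtime == 20, for instance, the raw
 * value is scaled up by roughly 1 / (1 - 0.368), or about 1.6.
 */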
static void
racct_add_racct(struct racct *dest, const struct racct *src)
{
	int i;

	mtx_assert(&racct_lock, MA_OWNED);

	/*
	 * Update resource usage in dest.
	 */
	for (i = 0; i <= RACCT_MAX; i++) {
		KASSERT(dest->r_resources[i] >= 0,
		    ("%s: resource %d propagation meltdown: dest < 0",
		    __func__, i));
		KASSERT(src->r_resources[i] >= 0,
		    ("%s: resource %d propagation meltdown: src < 0",
		    __func__, i));
		dest->r_resources[i] += src->r_resources[i];
	}
}

static void
racct_sub_racct(struct racct *dest, const struct racct *src)
{
	int i;

	mtx_assert(&racct_lock, MA_OWNED);

	/*
	 * Update resource usage in dest.
	 */
	for (i = 0; i <= RACCT_MAX; i++) {
		if (!RACCT_IS_SLOPPY(i) && !RACCT_IS_DECAYING(i)) {
			KASSERT(dest->r_resources[i] >= 0,
			    ("%s: resource %d propagation meltdown: dest < 0",
			    __func__, i));
			KASSERT(src->r_resources[i] >= 0,
			    ("%s: resource %d propagation meltdown: src < 0",
			    __func__, i));
			KASSERT(src->r_resources[i] <= dest->r_resources[i],
			    ("%s: resource %d propagation meltdown: src > dest",
			    __func__, i));
		}
		if (RACCT_CAN_DROP(i)) {
			dest->r_resources[i] -= src->r_resources[i];
			if (dest->r_resources[i] < 0) {
				KASSERT(RACCT_IS_SLOPPY(i) ||
				    RACCT_IS_DECAYING(i),
				    ("%s: resource %d usage < 0", __func__, i));
				dest->r_resources[i] = 0;
			}
		}
	}
}

void
racct_create(struct racct **racctp)
{

	SDT_PROBE(racct, kernel, racct, create, racctp, 0, 0, 0, 0);

	KASSERT(*racctp == NULL, ("racct already allocated"));

	*racctp = uma_zalloc(racct_zone, M_WAITOK | M_ZERO);
}

static void
racct_destroy_locked(struct racct **racctp)
{
	int i;
	struct racct *racct;

	SDT_PROBE(racct, kernel, racct, destroy, racctp, 0, 0, 0, 0);

	mtx_assert(&racct_lock, MA_OWNED);
	KASSERT(racctp != NULL, ("NULL racctp"));
	KASSERT(*racctp != NULL, ("NULL racct"));

	racct = *racctp;

	for (i = 0; i <= RACCT_MAX; i++) {
		if (RACCT_IS_SLOPPY(i))
			continue;
		if (!RACCT_IS_RECLAIMABLE(i))
			continue;
		KASSERT(racct->r_resources[i] == 0,
		    ("destroying non-empty racct: "
		    "%ju allocated for resource %d\n",
		    racct->r_resources[i], i));
	}
	uma_zfree(racct_zone, racct);
	*racctp = NULL;
}

void
racct_destroy(struct racct **racct)
{

	mtx_lock(&racct_lock);
	racct_destroy_locked(racct);
	mtx_unlock(&racct_lock);
}

/*
 * Increase consumption of 'resource' by 'amount' for 'racct'
 * and all its parents.  Unlike other cases, 'amount' here
 * may be less than zero.
 */
static void
racct_alloc_resource(struct racct *racct, int resource,
    uint64_t amount)
{

	mtx_assert(&racct_lock, MA_OWNED);
	KASSERT(racct != NULL, ("NULL racct"));

	racct->r_resources[resource] += amount;
	if (racct->r_resources[resource] < 0) {
		KASSERT(RACCT_IS_SLOPPY(resource) || RACCT_IS_DECAYING(resource),
		    ("%s: resource %d usage < 0", __func__, resource));
		racct->r_resources[resource] = 0;
	}

	/*
	 * There are some cases where the racct %cpu resource would grow
	 * beyond 100%.  For example, in racct_proc_exit() we add the process
	 * %cpu usage to the ucred racct containers; if too many processes
	 * terminate in a short time span, the ucred %cpu resource could grow
	 * too much.  Also, the 4BSD scheduler sometimes reports more than
	 * 100% cpu usage for a thread.  So we set a boundary here to 100%.
	 */
	if ((resource == RACCT_PCTCPU) &&
	    (racct->r_resources[RACCT_PCTCPU] > 100 * 1000000))
		racct->r_resources[RACCT_PCTCPU] = 100 * 1000000;
}
static int
racct_add_locked(struct proc *p, int resource, uint64_t amount)
{
#ifdef RCTL
	int error;
#endif

	SDT_PROBE(racct, kernel, rusage, add, p, resource, amount, 0, 0);

	/*
	 * We need proc lock to dereference p->p_ucred.
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);

#ifdef RCTL
	error = rctl_enforce(p, resource, amount);
	if (error && RACCT_IS_DENIABLE(resource)) {
		SDT_PROBE(racct, kernel, rusage, add_failure, p, resource,
		    amount, 0, 0);
		return (error);
	}
#endif
	racct_alloc_resource(p->p_racct, resource, amount);
	racct_add_cred_locked(p->p_ucred, resource, amount);

	return (0);
}

/*
 * Increase allocation of 'resource' by 'amount' for process 'p'.
 * Return 0 if it's below limits, or errno, if it's not.
 */
int
racct_add(struct proc *p, int resource, uint64_t amount)
{
	int error;

	mtx_lock(&racct_lock);
	error = racct_add_locked(p, resource, amount);
	mtx_unlock(&racct_lock);
	return (error);
}
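/*
 * Illustrative sketch (not lifted from any particular caller): a subsystem
 * would typically charge a resource before performing the allocation and
 * undo the charge when the allocation is denied or later released, e.g.:
 *
 *	PROC_LOCK(p);
 *	error = racct_add(p, RACCT_NOFILE, 1);
 *	PROC_UNLOCK(p);
 *	if (error != 0)
 *		return (error);
 *	...
 *	PROC_LOCK(p);
 *	racct_sub(p, RACCT_NOFILE, 1);
 *	PROC_UNLOCK(p);
 */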
static void
racct_add_cred_locked(struct ucred *cred, int resource, uint64_t amount)
{
	struct prison *pr;

	SDT_PROBE(racct, kernel, rusage, add_cred, cred, resource, amount,
	    0, 0);

	racct_alloc_resource(cred->cr_ruidinfo->ui_racct, resource, amount);
	for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent)
		racct_alloc_resource(pr->pr_prison_racct->prr_racct, resource,
		    amount);
	racct_alloc_resource(cred->cr_loginclass->lc_racct, resource, amount);
}

/*
 * Increase allocation of 'resource' by 'amount' for credential 'cred'.
 * Doesn't check for limits and never fails.
 *
 * XXX: Shouldn't this ever return an error?
 */
void
racct_add_cred(struct ucred *cred, int resource, uint64_t amount)
{

	mtx_lock(&racct_lock);
	racct_add_cred_locked(cred, resource, amount);
	mtx_unlock(&racct_lock);
}

/*
 * Increase allocation of 'resource' by 'amount' for process 'p'.
 * Doesn't check for limits and never fails.
 */
void
racct_add_force(struct proc *p, int resource, uint64_t amount)
{

	SDT_PROBE(racct, kernel, rusage, add_force, p, resource, amount, 0, 0);

	/*
	 * We need proc lock to dereference p->p_ucred.
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);

	mtx_lock(&racct_lock);
	racct_alloc_resource(p->p_racct, resource, amount);
	mtx_unlock(&racct_lock);
	racct_add_cred(p->p_ucred, resource, amount);
}

static int
racct_set_locked(struct proc *p, int resource, uint64_t amount)
{
	int64_t old_amount, decayed_amount;
	int64_t diff_proc, diff_cred;
#ifdef RCTL
	int error;
#endif

	SDT_PROBE(racct, kernel, rusage, set, p, resource, amount, 0, 0);

	/*
	 * We need proc lock to dereference p->p_ucred.
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);

	old_amount = p->p_racct->r_resources[resource];
	/*
	 * The diffs may be negative.
	 */
	diff_proc = amount - old_amount;
	if (RACCT_IS_DECAYING(resource)) {
		/*
		 * Resources in per-credential racct containers may decay.
		 * If this is the case, we need to calculate the difference
		 * between the new amount and the proportional value of the
		 * old amount that has decayed in the ucred racct containers.
		 */
		decayed_amount = old_amount * RACCT_DECAY_FACTOR / FSCALE;
		diff_cred = amount - decayed_amount;
	} else
		diff_cred = diff_proc;
#ifdef notyet
	KASSERT(diff_proc >= 0 || RACCT_CAN_DROP(resource),
	    ("%s: usage of non-droppable resource %d dropping", __func__,
	    resource));
#endif
#ifdef RCTL
	if (diff_proc > 0) {
		error = rctl_enforce(p, resource, diff_proc);
		if (error && RACCT_IS_DENIABLE(resource)) {
			SDT_PROBE(racct, kernel, rusage, set_failure, p,
			    resource, amount, 0, 0);
			return (error);
		}
	}
#endif
	racct_alloc_resource(p->p_racct, resource, diff_proc);
	if (diff_cred > 0)
		racct_add_cred_locked(p->p_ucred, resource, diff_cred);
	else if (diff_cred < 0)
		racct_sub_cred_locked(p->p_ucred, resource, -diff_cred);

	return (0);
}

/*
 * Set allocation of 'resource' to 'amount' for process 'p'.
 * Return 0 if it's below limits, or errno, if it's not.
 *
 * Note that decreasing the allocation always returns 0,
 * even if it's above the limit.
 */
int
racct_set(struct proc *p, int resource, uint64_t amount)
{
	int error;

	mtx_lock(&racct_lock);
	error = racct_set_locked(p, resource, amount);
	mtx_unlock(&racct_lock);
	return (error);
}
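/*
 * Worked example of the decay handling in racct_set_locked() above: if the
 * old RACCT_PCTCPU value was 40 * 1000000 and the new sample is 10 * 1000000,
 * then diff_proc is -30 * 1000000, while diff_cred is computed against the
 * decayed old value (roughly 0.3 * 40 * 1000000 == 12 * 1000000) and
 * therefore comes out at roughly -2 * 1000000.
 */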
static void
racct_set_force_locked(struct proc *p, int resource, uint64_t amount)
{
	int64_t old_amount, decayed_amount;
	int64_t diff_proc, diff_cred;

	SDT_PROBE(racct, kernel, rusage, set, p, resource, amount, 0, 0);

	/*
	 * We need proc lock to dereference p->p_ucred.
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);

	old_amount = p->p_racct->r_resources[resource];
	/*
	 * The diffs may be negative.
	 */
	diff_proc = amount - old_amount;
	if (RACCT_IS_DECAYING(resource)) {
		/*
		 * Resources in per-credential racct containers may decay.
		 * If this is the case, we need to calculate the difference
		 * between the new amount and the proportional value of the
		 * old amount that has decayed in the ucred racct containers.
		 */
		decayed_amount = old_amount * RACCT_DECAY_FACTOR / FSCALE;
		diff_cred = amount - decayed_amount;
	} else
		diff_cred = diff_proc;

	racct_alloc_resource(p->p_racct, resource, diff_proc);
	if (diff_cred > 0)
		racct_add_cred_locked(p->p_ucred, resource, diff_cred);
	else if (diff_cred < 0)
		racct_sub_cred_locked(p->p_ucred, resource, -diff_cred);
}

void
racct_set_force(struct proc *p, int resource, uint64_t amount)
{
	mtx_lock(&racct_lock);
	racct_set_force_locked(p, resource, amount);
	mtx_unlock(&racct_lock);
}

/*
 * Returns amount of 'resource' the process 'p' can keep allocated.
 * Allocating more than that would be denied, unless the resource
 * is marked undeniable.  Amount of already allocated resource does
 * not matter.
 */
uint64_t
racct_get_limit(struct proc *p, int resource)
{

#ifdef RCTL
	return (rctl_get_limit(p, resource));
#else
	return (UINT64_MAX);
#endif
}

/*
 * Returns amount of 'resource' the process 'p' can keep allocated.
 * Allocating more than that would be denied, unless the resource
 * is marked undeniable.  Amount of already allocated resource does
 * matter.
 */
uint64_t
racct_get_available(struct proc *p, int resource)
{

#ifdef RCTL
	return (rctl_get_available(p, resource));
#else
	return (UINT64_MAX);
#endif
}

/*
 * Returns amount of the %cpu resource that process 'p' can add to its %cpu
 * utilization.  Adding more than that would lead to the process being
 * throttled.
 */
static int64_t
racct_pcpu_available(struct proc *p)
{

#ifdef RCTL
	return (rctl_pcpu_available(p));
#else
	return (INT64_MAX);
#endif
}

/*
 * Decrease allocation of 'resource' by 'amount' for process 'p'.
 */
void
racct_sub(struct proc *p, int resource, uint64_t amount)
{

	SDT_PROBE(racct, kernel, rusage, sub, p, resource, amount, 0, 0);

	/*
	 * We need proc lock to dereference p->p_ucred.
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(RACCT_CAN_DROP(resource),
	    ("%s: called for non-droppable resource %d", __func__, resource));

	mtx_lock(&racct_lock);
	KASSERT(amount <= p->p_racct->r_resources[resource],
	    ("%s: freeing %ju of resource %d, which is more "
	    "than allocated %jd for %s (pid %d)", __func__, amount, resource,
	    (intmax_t)p->p_racct->r_resources[resource], p->p_comm, p->p_pid));

	racct_alloc_resource(p->p_racct, resource, -amount);
	racct_sub_cred_locked(p->p_ucred, resource, amount);
	mtx_unlock(&racct_lock);
}

static void
racct_sub_cred_locked(struct ucred *cred, int resource, uint64_t amount)
{
	struct prison *pr;

	SDT_PROBE(racct, kernel, rusage, sub_cred, cred, resource, amount,
	    0, 0);

#ifdef notyet
	KASSERT(RACCT_CAN_DROP(resource),
	    ("%s: called for resource %d which can not drop", __func__,
	    resource));
#endif

	racct_alloc_resource(cred->cr_ruidinfo->ui_racct, resource, -amount);
	for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent)
		racct_alloc_resource(pr->pr_prison_racct->prr_racct, resource,
		    -amount);
	racct_alloc_resource(cred->cr_loginclass->lc_racct, resource, -amount);
}

/*
 * Decrease allocation of 'resource' by 'amount' for credential 'cred'.
 */
void
racct_sub_cred(struct ucred *cred, int resource, uint64_t amount)
{

	mtx_lock(&racct_lock);
	racct_sub_cred_locked(cred, resource, amount);
	mtx_unlock(&racct_lock);
}
/*
 * Inherit resource usage information from the parent process.
 */
int
racct_proc_fork(struct proc *parent, struct proc *child)
{
	int i, error = 0;

	/*
	 * Create racct for the child process.
	 */
	racct_create(&child->p_racct);

	PROC_LOCK(parent);
	PROC_LOCK(child);
	mtx_lock(&racct_lock);

#ifdef RCTL
	error = rctl_proc_fork(parent, child);
	if (error != 0)
		goto out;
#endif

	/* Init process cpu time. */
	child->p_prev_runtime = 0;
	child->p_throttled = 0;

	/*
	 * Inherit resource usage.
	 */
	for (i = 0; i <= RACCT_MAX; i++) {
		if (parent->p_racct->r_resources[i] == 0 ||
		    !RACCT_IS_INHERITABLE(i))
			continue;

		error = racct_set_locked(child, i,
		    parent->p_racct->r_resources[i]);
		if (error != 0)
			goto out;
	}

	error = racct_add_locked(child, RACCT_NPROC, 1);
	error += racct_add_locked(child, RACCT_NTHR, 1);

out:
	mtx_unlock(&racct_lock);
	PROC_UNLOCK(child);
	PROC_UNLOCK(parent);

	if (error != 0)
		racct_proc_exit(child);

	return (error);
}

/*
 * Called at the end of fork1(), to handle rules that require the process
 * to be fully initialized.
 */
void
racct_proc_fork_done(struct proc *child)
{

#ifdef RCTL
	PROC_LOCK(child);
	mtx_lock(&racct_lock);
	rctl_enforce(child, RACCT_NPROC, 0);
	rctl_enforce(child, RACCT_NTHR, 0);
	mtx_unlock(&racct_lock);
	PROC_UNLOCK(child);
#endif
}

void
racct_proc_exit(struct proc *p)
{
	int i;
	uint64_t runtime;
	struct timeval wallclock;
	uint64_t pct_estimate, pct;

	PROC_LOCK(p);
	/*
	 * We don't need to calculate rux, proc_reap() has already done this.
	 */
	runtime = cputick2usec(p->p_rux.rux_runtime);
#ifdef notyet
	KASSERT(runtime >= p->p_prev_runtime, ("runtime < p_prev_runtime"));
#else
	if (runtime < p->p_prev_runtime)
		runtime = p->p_prev_runtime;
#endif
	microuptime(&wallclock);
	timevalsub(&wallclock, &p->p_stats->p_start);
	if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
		pct_estimate = (1000000 * runtime * 100) /
		    ((uint64_t)wallclock.tv_sec * 1000000 +
		    wallclock.tv_usec);
	} else
		pct_estimate = 0;
	pct = racct_getpcpu(p, pct_estimate);

	mtx_lock(&racct_lock);
	racct_set_locked(p, RACCT_CPU, runtime);
	racct_add_cred_locked(p->p_ucred, RACCT_PCTCPU, pct);

	for (i = 0; i <= RACCT_MAX; i++) {
		if (p->p_racct->r_resources[i] == 0)
			continue;
		if (!RACCT_IS_RECLAIMABLE(i))
			continue;
		racct_set_locked(p, i, 0);
	}

	mtx_unlock(&racct_lock);
	PROC_UNLOCK(p);

#ifdef RCTL
	rctl_racct_release(p->p_racct);
#endif
	racct_destroy(&p->p_racct);
}
/*
 * Called after credentials change, to move resource utilisation
 * between raccts.
 */
void
racct_proc_ucred_changed(struct proc *p, struct ucred *oldcred,
    struct ucred *newcred)
{
	struct uidinfo *olduip, *newuip;
	struct loginclass *oldlc, *newlc;
	struct prison *oldpr, *newpr, *pr;

	PROC_LOCK_ASSERT(p, MA_NOTOWNED);

	newuip = newcred->cr_ruidinfo;
	olduip = oldcred->cr_ruidinfo;
	newlc = newcred->cr_loginclass;
	oldlc = oldcred->cr_loginclass;
	newpr = newcred->cr_prison;
	oldpr = oldcred->cr_prison;

	mtx_lock(&racct_lock);
	if (newuip != olduip) {
		racct_sub_racct(olduip->ui_racct, p->p_racct);
		racct_add_racct(newuip->ui_racct, p->p_racct);
	}
	if (newlc != oldlc) {
		racct_sub_racct(oldlc->lc_racct, p->p_racct);
		racct_add_racct(newlc->lc_racct, p->p_racct);
	}
	if (newpr != oldpr) {
		for (pr = oldpr; pr != NULL; pr = pr->pr_parent)
			racct_sub_racct(pr->pr_prison_racct->prr_racct,
			    p->p_racct);
		for (pr = newpr; pr != NULL; pr = pr->pr_parent)
			racct_add_racct(pr->pr_prison_racct->prr_racct,
			    p->p_racct);
	}
	mtx_unlock(&racct_lock);

#ifdef RCTL
	rctl_proc_ucred_changed(p, newcred);
#endif
}

void
racct_move(struct racct *dest, struct racct *src)
{

	mtx_lock(&racct_lock);

	racct_add_racct(dest, src);
	racct_sub_racct(src, src);

	mtx_unlock(&racct_lock);
}

static void
racct_proc_throttle(struct proc *p)
{
	struct thread *td;
#ifdef SMP
	int cpuid;
#endif

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Do not block kernel processes.  Also do not block processes with
	 * low %cpu utilization to improve interactivity.
	 */
	if (((p->p_flag & (P_SYSTEM | P_KTHREAD)) != 0) ||
	    (p->p_racct->r_resources[RACCT_PCTCPU] <= pcpu_threshold))
		return;
	p->p_throttled = 1;

	FOREACH_THREAD_IN_PROC(p, td) {
		switch (td->td_state) {
		case TDS_RUNQ:
			/*
			 * If the thread is on the scheduler run-queue, we can
			 * not just remove it from there.  So we set the flag
			 * TDF_NEEDRESCHED for the thread, so that once it is
			 * running, it is taken off the cpu as soon as possible.
			 */
			thread_lock(td);
			td->td_flags |= TDF_NEEDRESCHED;
			thread_unlock(td);
			break;
		case TDS_RUNNING:
			/*
			 * If the thread is running, we request a context
			 * switch for it by setting the TDF_NEEDRESCHED flag.
			 */
			thread_lock(td);
			td->td_flags |= TDF_NEEDRESCHED;
#ifdef SMP
			cpuid = td->td_oncpu;
			if ((cpuid != NOCPU) && (td != curthread))
				ipi_cpu(cpuid, IPI_AST);
#endif
			thread_unlock(td);
			break;
		default:
			break;
		}
	}
}

static void
racct_proc_wakeup(struct proc *p)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (p->p_throttled) {
		p->p_throttled = 0;
		wakeup(p->p_racct);
	}
}

static void
racct_decay_resource(struct racct *racct, void *res, void *dummy)
{
	int resource;
	int64_t r_old, r_new;

	resource = *(int *)res;
	r_old = racct->r_resources[resource];

	/* If there is nothing to decay, just exit. */
	if (r_old <= 0)
		return;

	mtx_lock(&racct_lock);
	r_new = r_old * RACCT_DECAY_FACTOR / FSCALE;
	racct->r_resources[resource] = r_new;
	mtx_unlock(&racct_lock);
}

static void
racct_decay(int resource)
{
	ui_racct_foreach(racct_decay_resource, &resource, NULL);
	loginclass_racct_foreach(racct_decay_resource, &resource, NULL);
	prison_racct_foreach(racct_decay_resource, &resource, NULL);
}
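/*
 * racctd() below calls racct_decay(RACCT_PCTCPU) roughly once per second,
 * so %cpu accumulated in the per-uid, per-loginclass and per-jail containers
 * fades geometrically: after three passes only about 0.3^3, i.e. under 3%,
 * of a stale contribution remains.
 */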
static void
racctd(void)
{
	struct thread *td;
	struct proc *p;
	struct timeval wallclock;
	uint64_t runtime;
	uint64_t pct, pct_estimate;

	for (;;) {
		racct_decay(RACCT_PCTCPU);

		sx_slock(&allproc_lock);

		LIST_FOREACH(p, &zombproc, p_list) {
			PROC_LOCK(p);
			racct_set(p, RACCT_PCTCPU, 0);
			PROC_UNLOCK(p);
		}

		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL) {
				PROC_UNLOCK(p);
				continue;
			}

			microuptime(&wallclock);
			timevalsub(&wallclock, &p->p_stats->p_start);
			PROC_SLOCK(p);
			FOREACH_THREAD_IN_PROC(p, td)
				ruxagg(p, td);
			runtime = cputick2usec(p->p_rux.rux_runtime);
			PROC_SUNLOCK(p);
#ifdef notyet
			KASSERT(runtime >= p->p_prev_runtime,
			    ("runtime < p_prev_runtime"));
#else
			if (runtime < p->p_prev_runtime)
				runtime = p->p_prev_runtime;
#endif
			p->p_prev_runtime = runtime;
			if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
				pct_estimate = (1000000 * runtime * 100) /
				    ((uint64_t)wallclock.tv_sec * 1000000 +
				    wallclock.tv_usec);
			} else
				pct_estimate = 0;
			pct = racct_getpcpu(p, pct_estimate);
			mtx_lock(&racct_lock);
			racct_set_force_locked(p, RACCT_PCTCPU, pct);
			racct_set_locked(p, RACCT_CPU, runtime);
			racct_set_locked(p, RACCT_WALLCLOCK,
			    (uint64_t)wallclock.tv_sec * 1000000 +
			    wallclock.tv_usec);
			mtx_unlock(&racct_lock);
			PROC_UNLOCK(p);
		}

		/*
		 * To ensure that processes are throttled in a fair way, we
		 * need to iterate over all processes again and check the
		 * limits for the %cpu resource only after the ucred racct
		 * containers have been properly filled.
		 */
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL) {
				PROC_UNLOCK(p);
				continue;
			}

			if (racct_pcpu_available(p) <= 0)
				racct_proc_throttle(p);
			else if (p->p_throttled)
				racct_proc_wakeup(p);
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		pause("-", hz);
	}
}

static struct kproc_desc racctd_kp = {
	"racctd",
	racctd,
	NULL
};
SYSINIT(racctd, SI_SUB_RACCTD, SI_ORDER_FIRST, kproc_start, &racctd_kp);

static void
racct_init(void)
{

	racct_zone = uma_zcreate("racct", sizeof(struct racct),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	/*
	 * XXX: Move this somewhere.
	 */
	prison0.pr_prison_racct = prison_racct_find("0");
}
SYSINIT(racct, SI_SUB_RACCT, SI_ORDER_FIRST, racct_init, NULL);
#else /* !RACCT */

int
racct_add(struct proc *p, int resource, uint64_t amount)
{

	return (0);
}

void
racct_add_cred(struct ucred *cred, int resource, uint64_t amount)
{
}

void
racct_add_force(struct proc *p, int resource, uint64_t amount)
{

	return;
}

int
racct_set(struct proc *p, int resource, uint64_t amount)
{

	return (0);
}

void
racct_set_force(struct proc *p, int resource, uint64_t amount)
{
}

void
racct_sub(struct proc *p, int resource, uint64_t amount)
{
}

void
racct_sub_cred(struct ucred *cred, int resource, uint64_t amount)
{
}

uint64_t
racct_get_limit(struct proc *p, int resource)
{

	return (UINT64_MAX);
}

uint64_t
racct_get_available(struct proc *p, int resource)
{

	return (UINT64_MAX);
}

void
racct_create(struct racct **racctp)
{
}

void
racct_destroy(struct racct **racctp)
{
}

int
racct_proc_fork(struct proc *parent, struct proc *child)
{

	return (0);
}

void
racct_proc_fork_done(struct proc *child)
{
}

void
racct_proc_exit(struct proc *p)
{
}

#endif /* !RACCT */