/*-
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Edward Tomasz Napierala under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/loginclass.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/umtx.h>
#include <machine/smp.h>

#ifdef RCTL
#include <sys/rctl.h>
#endif

#ifdef RACCT

FEATURE(racct, "Resource Accounting");

/*
 * Do not block processes that have their %cpu usage <= pcpu_threshold.
 */
static int pcpu_threshold = 1;
#ifdef RACCT_DEFAULT_TO_DISABLED
int racct_enable = 0;
#else
int racct_enable = 1;
#endif

SYSCTL_NODE(_kern, OID_AUTO, racct, CTLFLAG_RW, 0, "Resource Accounting");
SYSCTL_UINT(_kern_racct, OID_AUTO, enable, CTLFLAG_RDTUN, &racct_enable,
    0, "Enable RACCT/RCTL");
SYSCTL_UINT(_kern_racct, OID_AUTO, pcpu_threshold, CTLFLAG_RW, &pcpu_threshold,
    0, "Processes with higher %cpu usage than this value can be throttled.");

/*
 * How many seconds to wait before trusting the scheduler's %cpu
 * calculations.  When a process starts, we compute its %cpu usage by
 * dividing its runtime by the process wall clock time.  After
 * RACCT_PCPU_SECS pass, we use the value provided by the scheduler.
 */
#define RACCT_PCPU_SECS         3
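
/*
 * Illustrative note (not part of the original source): the fallback estimate
 * used during those first seconds is simply runtime divided by wall clock
 * time.  For example, a process that is two seconds old and has used one
 * second of CPU time comes out as
 * (1000000 * 1000000 * 100) / 2000000 == 50 * 1000000, i.e. 50% in the
 * RACCT_IN_MILLIONS encoding of RACCT_PCTCPU; see the pct_estimate
 * computation in racctd() and racct_proc_exit() below.
 */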

static struct mtx racct_lock;
MTX_SYSINIT(racct_lock, &racct_lock, "racct lock", MTX_DEF);

static uma_zone_t racct_zone;

static void racct_sub_racct(struct racct *dest, const struct racct *src);
static void racct_sub_cred_locked(struct ucred *cred, int resource,
    uint64_t amount);
static void racct_add_cred_locked(struct ucred *cred, int resource,
    uint64_t amount);

SDT_PROVIDER_DEFINE(racct);
SDT_PROBE_DEFINE3(racct, kernel, rusage, add, "struct proc *", "int",
    "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, add__failure,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, add__cred, "struct ucred *",
    "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, add__force, "struct proc *",
    "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, set, "struct proc *", "int",
    "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, set__failure,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, sub, "struct proc *", "int",
    "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, sub__cred, "struct ucred *",
    "int", "uint64_t");
SDT_PROBE_DEFINE1(racct, kernel, racct, create, "struct racct *");
SDT_PROBE_DEFINE1(racct, kernel, racct, destroy, "struct racct *");
SDT_PROBE_DEFINE2(racct, kernel, racct, join, "struct racct *",
    "struct racct *");
SDT_PROBE_DEFINE2(racct, kernel, racct, join__failure,
    "struct racct *", "struct racct *");
SDT_PROBE_DEFINE2(racct, kernel, racct, leave, "struct racct *",
    "struct racct *");

int racct_types[] = {
        [RACCT_CPU] =
                RACCT_IN_MILLIONS,
        [RACCT_DATA] =
                RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
        [RACCT_STACK] =
                RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
        [RACCT_CORE] =
                RACCT_DENIABLE,
        [RACCT_RSS] =
                RACCT_RECLAIMABLE,
        [RACCT_MEMLOCK] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE,
        [RACCT_NPROC] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE,
        [RACCT_NOFILE] =
                RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
        [RACCT_VMEM] =
                RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
        [RACCT_NPTS] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_SWAP] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_NTHR] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE,
        [RACCT_MSGQQUEUED] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_MSGQSIZE] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_NMSGQ] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_NSEM] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_NSEMOP] =
                RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
        [RACCT_NSHM] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_SHMSIZE] =
                RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
        [RACCT_WALLCLOCK] =
                RACCT_IN_MILLIONS,
        [RACCT_PCTCPU] =
                RACCT_DECAYING | RACCT_DENIABLE | RACCT_IN_MILLIONS };

static const fixpt_t RACCT_DECAY_FACTOR = 0.3 * FSCALE;
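
/*
 * Illustrative note (not part of the original source): racctd() below applies
 * this factor roughly once per second via racct_decay(), so a decaying
 * resource such as RACCT_PCTCPU in the per-uid, per-login-class and per-jail
 * containers is scaled down to about 30% of its previous value on every pass,
 * before fresh per-process readings are folded back in by
 * racct_set_force_locked().
 */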

#ifdef SCHED_4BSD
/*
 * Contains intermediate values for %cpu calculations to avoid using floating
 * point in the kernel.
 * ccpu_exp[k] = FSCALE * (ccpu/FSCALE)^k = FSCALE * exp(-k/20)
 * It is needed only for the 4BSD scheduler, because in ULE the ccpu is zero,
 * so the calculations are more straightforward.
 */
fixpt_t ccpu_exp[] = {
        [0] = FSCALE * 1,
        [1] = FSCALE * 0.95122942450071400909,
        [2] = FSCALE * 0.90483741803595957316,
        [3] = FSCALE * 0.86070797642505780722,
        [4] = FSCALE * 0.81873075307798185866,
        [5] = FSCALE * 0.77880078307140486824,
        [6] = FSCALE * 0.74081822068171786606,
        [7] = FSCALE * 0.70468808971871343435,
        [8] = FSCALE * 0.67032004603563930074,
        [9] = FSCALE * 0.63762815162177329314,
        [10] = FSCALE * 0.60653065971263342360,
        [11] = FSCALE * 0.57694981038048669531,
        [12] = FSCALE * 0.54881163609402643262,
        [13] = FSCALE * 0.52204577676101604789,
        [14] = FSCALE * 0.49658530379140951470,
        [15] = FSCALE * 0.47236655274101470713,
        [16] = FSCALE * 0.44932896411722159143,
        [17] = FSCALE * 0.42741493194872666992,
        [18] = FSCALE * 0.40656965974059911188,
        [19] = FSCALE * 0.38674102345450120691,
        [20] = FSCALE * 0.36787944117144232159,
        [21] = FSCALE * 0.34993774911115535467,
        [22] = FSCALE * 0.33287108369807955328,
        [23] = FSCALE * 0.31663676937905321821,
        [24] = FSCALE * 0.30119421191220209664,
        [25] = FSCALE * 0.28650479686019010032,
        [26] = FSCALE * 0.27253179303401260312,
        [27] = FSCALE * 0.25924026064589150757,
        [28] = FSCALE * 0.24659696394160647693,
        [29] = FSCALE * 0.23457028809379765313,
        [30] = FSCALE * 0.22313016014842982893,
        [31] = FSCALE * 0.21224797382674305771,
        [32] = FSCALE * 0.20189651799465540848,
        [33] = FSCALE * 0.19204990862075411423,
        [34] = FSCALE * 0.18268352405273465022,
        [35] = FSCALE * 0.17377394345044512668,
        [36] = FSCALE * 0.16529888822158653829,
        [37] = FSCALE * 0.15723716631362761621,
        [38] = FSCALE * 0.14956861922263505264,
        [39] = FSCALE * 0.14227407158651357185,
        [40] = FSCALE * 0.13533528323661269189,
        [41] = FSCALE * 0.12873490358780421886,
        [42] = FSCALE * 0.12245642825298191021,
        [43] = FSCALE * 0.11648415777349695786,
        [44] = FSCALE * 0.11080315836233388333,
        [45] = FSCALE * 0.10539922456186433678,
        [46] = FSCALE * 0.10025884372280373372,
        [47] = FSCALE * 0.09536916221554961888,
        [48] = FSCALE * 0.09071795328941250337,
        [49] = FSCALE * 0.08629358649937051097,
        [50] = FSCALE * 0.08208499862389879516,
        [51] = FSCALE * 0.07808166600115315231,
        [52] = FSCALE * 0.07427357821433388042,
        [53] = FSCALE * 0.07065121306042958674,
        [54] = FSCALE * 0.06720551273974976512,
        [55] = FSCALE * 0.06392786120670757270,
        [56] = FSCALE * 0.06081006262521796499,
        [57] = FSCALE * 0.05784432087483846296,
        [58] = FSCALE * 0.05502322005640722902,
        [59] = FSCALE * 0.05233970594843239308,
        [60] = FSCALE * 0.04978706836786394297,
        [61] = FSCALE * 0.04735892439114092119,
        [62] = FSCALE * 0.04504920239355780606,
        [63] = FSCALE * 0.04285212686704017991,
        [64] = FSCALE * 0.04076220397836621516,
        [65] = FSCALE * 0.03877420783172200988,
        [66] = FSCALE * 0.03688316740124000544,
        [67] = FSCALE * 0.03508435410084502588,
        [68] = FSCALE * 0.03337326996032607948,
        [69] = FSCALE * 0.03174563637806794323,
        [70] = FSCALE * 0.03019738342231850073,
        [71] = FSCALE * 0.02872463965423942912,
        [72] = FSCALE * 0.02732372244729256080,
        [73] = FSCALE * 0.02599112877875534358,
        [74] = FSCALE * 0.02472352647033939120,
        [75] = FSCALE * 0.02351774585600910823,
        [76] = FSCALE * 0.02237077185616559577,
        [77] = FSCALE * 0.02127973643837716938,
        [78] = FSCALE * 0.02024191144580438847,
        [79] = FSCALE * 0.01925470177538692429,
        [80] = FSCALE * 0.01831563888873418029,
        [81] = FSCALE * 0.01742237463949351138,
        [82] = FSCALE * 0.01657267540176124754,
        [83] = FSCALE * 0.01576441648485449082,
        [84] = FSCALE * 0.01499557682047770621,
        [85] = FSCALE * 0.01426423390899925527,
        [86] = FSCALE * 0.01356855901220093175,
        [87] = FSCALE * 0.01290681258047986886,
        [88] = FSCALE * 0.01227733990306844117,
        [89] = FSCALE * 0.01167856697039544521,
        [90] = FSCALE * 0.01110899653824230649,
        [91] = FSCALE * 0.01056720438385265337,
        [92] = FSCALE * 0.01005183574463358164,
        [93] = FSCALE * 0.00956160193054350793,
        [94] = FSCALE * 0.00909527710169581709,
        [95] = FSCALE * 0.00865169520312063417,
        [96] = FSCALE * 0.00822974704902002884,
        [97] = FSCALE * 0.00782837754922577143,
        [98] = FSCALE * 0.00744658307092434051,
        [99] = FSCALE * 0.00708340892905212004,
        [100] = FSCALE * 0.00673794699908546709,
        [101] = FSCALE * 0.00640933344625638184,
        [102] = FSCALE * 0.00609674656551563610,
        [103] = FSCALE * 0.00579940472684214321,
        [104] = FSCALE * 0.00551656442076077241,
        [105] = FSCALE * 0.00524751839918138427,
        [106] = FSCALE * 0.00499159390691021621,
        [107] = FSCALE * 0.00474815099941147558,
        [108] = FSCALE * 0.00451658094261266798,
        [109] = FSCALE * 0.00429630469075234057,
        [110] = FSCALE * 0.00408677143846406699,
};
#endif

#define CCPU_EXP_MAX            110

/*
 * This function is analogous to the getpcpu() function in the ps(1) command.
 * They should both calculate in the same way so that the racct %cpu
 * calculations are consistent with the values shown by the ps(1) tool.
 * The calculations are more complex in the 4BSD scheduler because of the
 * value of the ccpu variable.  In ULE it is defined to be zero, which saves
 * us some work.
 */
static uint64_t
racct_getpcpu(struct proc *p, u_int pcpu)
{
        u_int swtime;
#ifdef SCHED_4BSD
        fixpt_t pctcpu, pctcpu_next;
#endif
#ifdef SMP
        struct pcpu *pc;
        int found;
#endif
        fixpt_t p_pctcpu;
        struct thread *td;

        ASSERT_RACCT_ENABLED();

        /*
         * If the process is swapped out, we count its %cpu usage as zero.
         * This behaviour is consistent with the userland ps(1) tool.
         */
        if ((p->p_flag & P_INMEM) == 0)
                return (0);
        swtime = (ticks - p->p_swtick) / hz;

        /*
         * For short-lived processes, sched_pctcpu() returns small values
         * even for cpu intensive processes.  Therefore we use our own
         * estimate in this case.
         */
        if (swtime < RACCT_PCPU_SECS)
                return (pcpu);

        p_pctcpu = 0;
        FOREACH_THREAD_IN_PROC(p, td) {
                if (td == PCPU_GET(idlethread))
                        continue;
#ifdef SMP
                found = 0;
                STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                        if (td == pc->pc_idlethread) {
                                found = 1;
                                break;
                        }
                }
                if (found)
                        continue;
#endif
                thread_lock(td);
#ifdef SCHED_4BSD
                pctcpu = sched_pctcpu(td);
                /* Count also the yet unfinished second. */
                pctcpu_next = (pctcpu * ccpu_exp[1]) >> FSHIFT;
                pctcpu_next += sched_pctcpu_delta(td);
                p_pctcpu += max(pctcpu, pctcpu_next);
#else
                /*
                 * In ULE the %cpu statistics are updated on every
                 * sched_pctcpu() call.  So special calculations to
                 * account for the latest (unfinished) second are
                 * not needed.
                 */
                p_pctcpu += sched_pctcpu(td);
#endif
                thread_unlock(td);
        }

#ifdef SCHED_4BSD
        if (swtime <= CCPU_EXP_MAX)
                return ((100 * (uint64_t)p_pctcpu * 1000000) /
                    (FSCALE - ccpu_exp[swtime]));
#endif

        return ((100 * (uint64_t)p_pctcpu * 1000000) / FSCALE);
}
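
/*
 * Illustrative note (not part of the original source): sched_pctcpu()
 * returns a fixed-point fraction of FSCALE, so a single thread using a full
 * CPU (p_pctcpu == FSCALE) converts above to 100 * 1000000, matching the
 * RACCT_IN_MILLIONS encoding of RACCT_PCTCPU.
 */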

static void
racct_add_racct(struct racct *dest, const struct racct *src)
{
        int i;

        ASSERT_RACCT_ENABLED();
        mtx_assert(&racct_lock, MA_OWNED);

        /*
         * Update resource usage in dest.
         */
        for (i = 0; i <= RACCT_MAX; i++) {
                KASSERT(dest->r_resources[i] >= 0,
                    ("%s: resource %d propagation meltdown: dest < 0",
                    __func__, i));
                KASSERT(src->r_resources[i] >= 0,
                    ("%s: resource %d propagation meltdown: src < 0",
                    __func__, i));
                dest->r_resources[i] += src->r_resources[i];
        }
}

static void
racct_sub_racct(struct racct *dest, const struct racct *src)
{
        int i;

        ASSERT_RACCT_ENABLED();
        mtx_assert(&racct_lock, MA_OWNED);

        /*
         * Update resource usage in dest.
         */
        for (i = 0; i <= RACCT_MAX; i++) {
                if (!RACCT_IS_SLOPPY(i) && !RACCT_IS_DECAYING(i)) {
                        KASSERT(dest->r_resources[i] >= 0,
                            ("%s: resource %d propagation meltdown: dest < 0",
                            __func__, i));
                        KASSERT(src->r_resources[i] >= 0,
                            ("%s: resource %d propagation meltdown: src < 0",
                            __func__, i));
                        KASSERT(src->r_resources[i] <= dest->r_resources[i],
                            ("%s: resource %d propagation meltdown: src > dest",
                            __func__, i));
                }
                if (RACCT_CAN_DROP(i)) {
                        dest->r_resources[i] -= src->r_resources[i];
                        if (dest->r_resources[i] < 0) {
                                KASSERT(RACCT_IS_SLOPPY(i) ||
                                    RACCT_IS_DECAYING(i),
                                    ("%s: resource %d usage < 0", __func__, i));
                                dest->r_resources[i] = 0;
                        }
                }
        }
}

void
racct_create(struct racct **racctp)
{

        if (!racct_enable)
                return;

        SDT_PROBE1(racct, kernel, racct, create, racctp);

        KASSERT(*racctp == NULL, ("racct already allocated"));

        *racctp = uma_zalloc(racct_zone, M_WAITOK | M_ZERO);
}

static void
racct_destroy_locked(struct racct **racctp)
{
        int i;
        struct racct *racct;

        ASSERT_RACCT_ENABLED();

        SDT_PROBE1(racct, kernel, racct, destroy, racctp);

        mtx_assert(&racct_lock, MA_OWNED);
        KASSERT(racctp != NULL, ("NULL racctp"));
        KASSERT(*racctp != NULL, ("NULL racct"));

        racct = *racctp;

        for (i = 0; i <= RACCT_MAX; i++) {
                if (RACCT_IS_SLOPPY(i))
                        continue;
                if (!RACCT_IS_RECLAIMABLE(i))
                        continue;
                KASSERT(racct->r_resources[i] == 0,
                    ("destroying non-empty racct: "
                    "%ju allocated for resource %d\n",
                    racct->r_resources[i], i));
        }
        uma_zfree(racct_zone, racct);
        *racctp = NULL;
}

void
racct_destroy(struct racct **racct)
{

        if (!racct_enable)
                return;

        mtx_lock(&racct_lock);
        racct_destroy_locked(racct);
        mtx_unlock(&racct_lock);
}
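
/*
 * Illustrative note (not part of the original source): besides the
 * per-process racct, usage is mirrored into three per-credential
 * hierarchies: the real uid (cr_ruidinfo->ui_racct), the login class
 * (cr_loginclass->lc_racct) and the jail chain (cr_prison and all of its
 * ancestors).  racct_add_cred_locked() and racct_sub_cred_locked() below
 * walk all three.
 */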

/*
 * Increase consumption of 'resource' by 'amount' for 'racct',
 * but not for its parents.  Unlike the other cases, 'amount'
 * here may be less than zero.
 */
static void
racct_adjust_resource(struct racct *racct, int resource,
    uint64_t amount)
{

        ASSERT_RACCT_ENABLED();
        mtx_assert(&racct_lock, MA_OWNED);
        KASSERT(racct != NULL, ("NULL racct"));

        racct->r_resources[resource] += amount;
        if (racct->r_resources[resource] < 0) {
                KASSERT(RACCT_IS_SLOPPY(resource) || RACCT_IS_DECAYING(resource),
                    ("%s: resource %d usage < 0", __func__, resource));
                racct->r_resources[resource] = 0;
        }

        /*
         * There are some cases where the racct %cpu resource would grow
         * beyond 100%.  For example in racct_proc_exit() we add the process
         * %cpu usage to the ucred racct containers.  If too many processes
         * terminated in a short time span, the ucred %cpu resource could
         * grow too much.  Also, the 4BSD scheduler sometimes returns more
         * than 100% cpu usage for a thread.  So we set a boundary here
         * to 100%.
         */
        if ((resource == RACCT_PCTCPU) &&
            (racct->r_resources[RACCT_PCTCPU] > 100 * 1000000))
                racct->r_resources[RACCT_PCTCPU] = 100 * 1000000;
}

static int
racct_add_locked(struct proc *p, int resource, uint64_t amount)
{
#ifdef RCTL
        int error;
#endif

        ASSERT_RACCT_ENABLED();

        SDT_PROBE3(racct, kernel, rusage, add, p, resource, amount);

        /*
         * We need proc lock to dereference p->p_ucred.
         */
        PROC_LOCK_ASSERT(p, MA_OWNED);

#ifdef RCTL
        error = rctl_enforce(p, resource, amount);
        if (error && RACCT_IS_DENIABLE(resource)) {
                SDT_PROBE3(racct, kernel, rusage, add__failure, p, resource,
                    amount);
                return (error);
        }
#endif
        racct_adjust_resource(p->p_racct, resource, amount);
        racct_add_cred_locked(p->p_ucred, resource, amount);

        return (0);
}

/*
 * Increase allocation of 'resource' by 'amount' for process 'p'.
 * Return 0 if it's below limits, or errno, if it's not.
 */
int
racct_add(struct proc *p, int resource, uint64_t amount)
{
        int error;

        if (!racct_enable)
                return (0);

        mtx_lock(&racct_lock);
        error = racct_add_locked(p, resource, amount);
        mtx_unlock(&racct_lock);
        return (error);
}
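
/*
 * Illustrative note (not part of the original source): racct_add_locked()
 * above consults RCTL before charging; an over-limit request is refused
 * only for resources marked RACCT_DENIABLE, otherwise the usage is
 * recorded regardless of the outcome.
 */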

static void
racct_add_cred_locked(struct ucred *cred, int resource, uint64_t amount)
{
        struct prison *pr;

        ASSERT_RACCT_ENABLED();

        SDT_PROBE3(racct, kernel, rusage, add__cred, cred, resource, amount);

        racct_adjust_resource(cred->cr_ruidinfo->ui_racct, resource, amount);
        for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent)
                racct_adjust_resource(pr->pr_prison_racct->prr_racct, resource,
                    amount);
        racct_adjust_resource(cred->cr_loginclass->lc_racct, resource, amount);
}

/*
 * Increase allocation of 'resource' by 'amount' for credential 'cred'.
 * Doesn't check for limits and never fails.
 *
 * XXX: Shouldn't this ever return an error?
 */
void
racct_add_cred(struct ucred *cred, int resource, uint64_t amount)
{

        if (!racct_enable)
                return;

        mtx_lock(&racct_lock);
        racct_add_cred_locked(cred, resource, amount);
        mtx_unlock(&racct_lock);
}

/*
 * Increase allocation of 'resource' by 'amount' for process 'p'.
 * Doesn't check for limits and never fails.
 */
void
racct_add_force(struct proc *p, int resource, uint64_t amount)
{

        if (!racct_enable)
                return;

        SDT_PROBE3(racct, kernel, rusage, add__force, p, resource, amount);

        /*
         * We need proc lock to dereference p->p_ucred.
         */
        PROC_LOCK_ASSERT(p, MA_OWNED);

        mtx_lock(&racct_lock);
        racct_adjust_resource(p->p_racct, resource, amount);
        mtx_unlock(&racct_lock);
        racct_add_cred(p->p_ucred, resource, amount);
}

static int
racct_set_locked(struct proc *p, int resource, uint64_t amount)
{
        int64_t old_amount, decayed_amount;
        int64_t diff_proc, diff_cred;
#ifdef RCTL
        int error;
#endif

        ASSERT_RACCT_ENABLED();

        SDT_PROBE3(racct, kernel, rusage, set, p, resource, amount);

        /*
         * We need proc lock to dereference p->p_ucred.
         */
        PROC_LOCK_ASSERT(p, MA_OWNED);

        old_amount = p->p_racct->r_resources[resource];
        /*
         * The diffs may be negative.
         */
        diff_proc = amount - old_amount;
        if (RACCT_IS_DECAYING(resource)) {
                /*
                 * Resources in per-credential racct containers may decay.
                 * If this is the case, we need to calculate the difference
                 * between the new amount and the proportional value of the
                 * old amount that has decayed in the ucred racct containers.
                 */
                decayed_amount = old_amount * RACCT_DECAY_FACTOR / FSCALE;
                diff_cred = amount - decayed_amount;
        } else
                diff_cred = diff_proc;
#ifdef notyet
        KASSERT(diff_proc >= 0 || RACCT_CAN_DROP(resource),
            ("%s: usage of non-droppable resource %d dropping", __func__,
            resource));
#endif
#ifdef RCTL
        if (diff_proc > 0) {
                error = rctl_enforce(p, resource, diff_proc);
                if (error && RACCT_IS_DENIABLE(resource)) {
                        SDT_PROBE3(racct, kernel, rusage, set__failure, p,
                            resource, amount);
                        return (error);
                }
        }
#endif
        racct_adjust_resource(p->p_racct, resource, diff_proc);
        if (diff_cred > 0)
                racct_add_cred_locked(p->p_ucred, resource, diff_cred);
        else if (diff_cred < 0)
                racct_sub_cred_locked(p->p_ucred, resource, -diff_cred);

        return (0);
}

/*
 * Set allocation of 'resource' to 'amount' for process 'p'.
 * Return 0 if it's below limits, or errno, if it's not.
 *
 * Note that decreasing the allocation always returns 0,
 * even if it's above the limit.
 */
int
racct_set(struct proc *p, int resource, uint64_t amount)
{
        int error;

        if (!racct_enable)
                return (0);

        mtx_lock(&racct_lock);
        error = racct_set_locked(p, resource, amount);
        mtx_unlock(&racct_lock);
        return (error);
}
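
/*
 * Worked example (not part of the original source): for a decaying resource
 * that previously stood at 40 * 1000000 with a new sample of 10 * 1000000,
 * racct_set_locked() adjusts the process racct by -30 * 1000000, while the
 * credential containers, which racct_decay() has already scaled to 0.3 of
 * the old value, are adjusted by 10 - 0.3 * 40 = -2, i.e. -2 * 1000000.
 */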

static void
racct_set_force_locked(struct proc *p, int resource, uint64_t amount)
{
        int64_t old_amount, decayed_amount;
        int64_t diff_proc, diff_cred;

        ASSERT_RACCT_ENABLED();

        SDT_PROBE3(racct, kernel, rusage, set, p, resource, amount);

        /*
         * We need proc lock to dereference p->p_ucred.
         */
        PROC_LOCK_ASSERT(p, MA_OWNED);

        old_amount = p->p_racct->r_resources[resource];
        /*
         * The diffs may be negative.
         */
        diff_proc = amount - old_amount;
        if (RACCT_IS_DECAYING(resource)) {
                /*
                 * Resources in per-credential racct containers may decay.
                 * If this is the case, we need to calculate the difference
                 * between the new amount and the proportional value of the
                 * old amount that has decayed in the ucred racct containers.
                 */
                decayed_amount = old_amount * RACCT_DECAY_FACTOR / FSCALE;
                diff_cred = amount - decayed_amount;
        } else
                diff_cred = diff_proc;

        racct_adjust_resource(p->p_racct, resource, diff_proc);
        if (diff_cred > 0)
                racct_add_cred_locked(p->p_ucred, resource, diff_cred);
        else if (diff_cred < 0)
                racct_sub_cred_locked(p->p_ucred, resource, -diff_cred);
}

void
racct_set_force(struct proc *p, int resource, uint64_t amount)
{

        if (!racct_enable)
                return;

        mtx_lock(&racct_lock);
        racct_set_force_locked(p, resource, amount);
        mtx_unlock(&racct_lock);
}

/*
 * Returns amount of 'resource' the process 'p' can keep allocated.
 * Allocating more than that would be denied, unless the resource
 * is marked undeniable.  Amount of already allocated resource does
 * not matter.
 */
uint64_t
racct_get_limit(struct proc *p, int resource)
{

        if (!racct_enable)
                return (UINT64_MAX);

#ifdef RCTL
        return (rctl_get_limit(p, resource));
#else
        return (UINT64_MAX);
#endif
}

/*
 * Returns amount of 'resource' the process 'p' can keep allocated.
 * Allocating more than that would be denied, unless the resource
 * is marked undeniable.  Amount of already allocated resource does
 * matter.
 */
uint64_t
racct_get_available(struct proc *p, int resource)
{

        if (!racct_enable)
                return (UINT64_MAX);

#ifdef RCTL
        return (rctl_get_available(p, resource));
#else
        return (UINT64_MAX);
#endif
}

/*
 * Returns amount of the %cpu resource that process 'p' can add to its %cpu
 * utilization.  Adding more than that would lead to the process being
 * throttled.
 */
static int64_t
racct_pcpu_available(struct proc *p)
{

        ASSERT_RACCT_ENABLED();

#ifdef RCTL
        return (rctl_pcpu_available(p));
#else
        return (INT64_MAX);
#endif
}
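
/*
 * Illustrative note (not part of the original source): a non-positive
 * return from racct_pcpu_available() is what makes the second pass of
 * racctd() below call racct_proc_throttle() for the process.
 */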

/*
 * Decrease allocation of 'resource' by 'amount' for process 'p'.
 */
void
racct_sub(struct proc *p, int resource, uint64_t amount)
{

        if (!racct_enable)
                return;

        SDT_PROBE3(racct, kernel, rusage, sub, p, resource, amount);

        /*
         * We need proc lock to dereference p->p_ucred.
         */
        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT(RACCT_CAN_DROP(resource),
            ("%s: called for non-droppable resource %d", __func__, resource));

        mtx_lock(&racct_lock);
        KASSERT(amount <= p->p_racct->r_resources[resource],
            ("%s: freeing %ju of resource %d, which is more "
            "than allocated %jd for %s (pid %d)", __func__, amount, resource,
            (intmax_t)p->p_racct->r_resources[resource], p->p_comm, p->p_pid));

        racct_adjust_resource(p->p_racct, resource, -amount);
        racct_sub_cred_locked(p->p_ucred, resource, amount);
        mtx_unlock(&racct_lock);
}

static void
racct_sub_cred_locked(struct ucred *cred, int resource, uint64_t amount)
{
        struct prison *pr;

        ASSERT_RACCT_ENABLED();

        SDT_PROBE3(racct, kernel, rusage, sub__cred, cred, resource, amount);

#ifdef notyet
        KASSERT(RACCT_CAN_DROP(resource),
            ("%s: called for resource %d which can not drop", __func__,
            resource));
#endif

        racct_adjust_resource(cred->cr_ruidinfo->ui_racct, resource, -amount);
        for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent)
                racct_adjust_resource(pr->pr_prison_racct->prr_racct, resource,
                    -amount);
        racct_adjust_resource(cred->cr_loginclass->lc_racct, resource, -amount);
}

/*
 * Decrease allocation of 'resource' by 'amount' for credential 'cred'.
 */
void
racct_sub_cred(struct ucred *cred, int resource, uint64_t amount)
{

        if (!racct_enable)
                return;

        mtx_lock(&racct_lock);
        racct_sub_cred_locked(cred, resource, amount);
        mtx_unlock(&racct_lock);
}

/*
 * Inherit resource usage information from the parent process.
 */
int
racct_proc_fork(struct proc *parent, struct proc *child)
{
        int i, error = 0;

        if (!racct_enable)
                return (0);

        /*
         * Create racct for the child process.
         */
        racct_create(&child->p_racct);

        PROC_LOCK(parent);
        PROC_LOCK(child);
        mtx_lock(&racct_lock);

#ifdef RCTL
        error = rctl_proc_fork(parent, child);
        if (error != 0)
                goto out;
#endif

        /* Init process cpu time. */
        child->p_prev_runtime = 0;
        child->p_throttled = 0;

        /*
         * Inherit resource usage.
         */
        for (i = 0; i <= RACCT_MAX; i++) {
                if (parent->p_racct->r_resources[i] == 0 ||
                    !RACCT_IS_INHERITABLE(i))
                        continue;

                error = racct_set_locked(child, i,
                    parent->p_racct->r_resources[i]);
                if (error != 0)
                        goto out;
        }

        error = racct_add_locked(child, RACCT_NPROC, 1);
        error += racct_add_locked(child, RACCT_NTHR, 1);

out:
        mtx_unlock(&racct_lock);
        PROC_UNLOCK(child);
        PROC_UNLOCK(parent);

        if (error != 0)
                racct_proc_exit(child);

        return (error);
}

/*
 * Called at the end of fork1(), to handle rules that require the process
 * to be fully initialized.
 */
void
racct_proc_fork_done(struct proc *child)
{

#ifdef RCTL
        if (!racct_enable)
                return;

        PROC_LOCK(child);
        mtx_lock(&racct_lock);
        rctl_enforce(child, RACCT_NPROC, 0);
        rctl_enforce(child, RACCT_NTHR, 0);
        mtx_unlock(&racct_lock);
        PROC_UNLOCK(child);
#endif
}
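
/*
 * Illustrative note (not part of the original source): racct_proc_exit()
 * below folds the final CPU time and %cpu of the dying process into its
 * credential containers and then zeroes every reclaimable resource, so the
 * per-uid, per-login-class and per-jail counters do not leak.
 */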

void
racct_proc_exit(struct proc *p)
{
        int i;
        uint64_t runtime;
        struct timeval wallclock;
        uint64_t pct_estimate, pct;

        if (!racct_enable)
                return;

        PROC_LOCK(p);
        /*
         * We don't need to calculate rux, proc_reap() has already done this.
         */
        runtime = cputick2usec(p->p_rux.rux_runtime);
#ifdef notyet
        KASSERT(runtime >= p->p_prev_runtime, ("runtime < p_prev_runtime"));
#else
        if (runtime < p->p_prev_runtime)
                runtime = p->p_prev_runtime;
#endif
        microuptime(&wallclock);
        timevalsub(&wallclock, &p->p_stats->p_start);
        if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
                pct_estimate = (1000000 * runtime * 100) /
                    ((uint64_t)wallclock.tv_sec * 1000000 +
                    wallclock.tv_usec);
        } else
                pct_estimate = 0;
        pct = racct_getpcpu(p, pct_estimate);

        mtx_lock(&racct_lock);
        racct_set_locked(p, RACCT_CPU, runtime);
        racct_add_cred_locked(p->p_ucred, RACCT_PCTCPU, pct);

        for (i = 0; i <= RACCT_MAX; i++) {
                if (p->p_racct->r_resources[i] == 0)
                        continue;
                if (!RACCT_IS_RECLAIMABLE(i))
                        continue;
                racct_set_locked(p, i, 0);
        }

        mtx_unlock(&racct_lock);
        PROC_UNLOCK(p);

#ifdef RCTL
        rctl_racct_release(p->p_racct);
#endif
        racct_destroy(&p->p_racct);
}

/*
 * Called after credentials change, to move resource utilisation
 * between raccts.
 */
void
racct_proc_ucred_changed(struct proc *p, struct ucred *oldcred,
    struct ucred *newcred)
{
        struct uidinfo *olduip, *newuip;
        struct loginclass *oldlc, *newlc;
        struct prison *oldpr, *newpr, *pr;

        if (!racct_enable)
                return;

        PROC_LOCK_ASSERT(p, MA_NOTOWNED);

        newuip = newcred->cr_ruidinfo;
        olduip = oldcred->cr_ruidinfo;
        newlc = newcred->cr_loginclass;
        oldlc = oldcred->cr_loginclass;
        newpr = newcred->cr_prison;
        oldpr = oldcred->cr_prison;

        mtx_lock(&racct_lock);
        if (newuip != olduip) {
                racct_sub_racct(olduip->ui_racct, p->p_racct);
                racct_add_racct(newuip->ui_racct, p->p_racct);
        }
        if (newlc != oldlc) {
                racct_sub_racct(oldlc->lc_racct, p->p_racct);
                racct_add_racct(newlc->lc_racct, p->p_racct);
        }
        if (newpr != oldpr) {
                for (pr = oldpr; pr != NULL; pr = pr->pr_parent)
                        racct_sub_racct(pr->pr_prison_racct->prr_racct,
                            p->p_racct);
                for (pr = newpr; pr != NULL; pr = pr->pr_parent)
                        racct_add_racct(pr->pr_prison_racct->prr_racct,
                            p->p_racct);
        }
        mtx_unlock(&racct_lock);

#ifdef RCTL
        rctl_proc_ucred_changed(p, newcred);
#endif
}

void
racct_move(struct racct *dest, struct racct *src)
{

        ASSERT_RACCT_ENABLED();

        mtx_lock(&racct_lock);

        racct_add_racct(dest, src);
        racct_sub_racct(src, src);

        mtx_unlock(&racct_lock);
}

static void
racct_proc_throttle(struct proc *p)
{
        struct thread *td;
#ifdef SMP
        int cpuid;
#endif

        ASSERT_RACCT_ENABLED();
        PROC_LOCK_ASSERT(p, MA_OWNED);

        /*
         * Do not block kernel processes.  Also do not block processes with
         * low %cpu utilization to improve interactivity.
         */
        if (((p->p_flag & (P_SYSTEM | P_KTHREAD)) != 0) ||
            (p->p_racct->r_resources[RACCT_PCTCPU] <= pcpu_threshold))
                return;
        p->p_throttled = 1;

        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                switch (td->td_state) {
                case TDS_RUNQ:
                        /*
                         * If the thread is on the scheduler run-queue, we can
                         * not just remove it from there.  So we set the flag
                         * TDF_NEEDRESCHED for the thread, so that once it is
                         * running, it is taken off the cpu as soon as
                         * possible.
                         */
                        td->td_flags |= TDF_NEEDRESCHED;
                        break;
                case TDS_RUNNING:
                        /*
                         * If the thread is running, we request a context
                         * switch for it by setting the TDF_NEEDRESCHED flag.
                         */
                        td->td_flags |= TDF_NEEDRESCHED;
#ifdef SMP
                        cpuid = td->td_oncpu;
                        if ((cpuid != NOCPU) && (td != curthread))
                                ipi_cpu(cpuid, IPI_AST);
#endif
                        break;
                default:
                        break;
                }
                thread_unlock(td);
        }
}

static void
racct_proc_wakeup(struct proc *p)
{

        ASSERT_RACCT_ENABLED();

        PROC_LOCK_ASSERT(p, MA_OWNED);

        if (p->p_throttled) {
                p->p_throttled = 0;
                wakeup(p->p_racct);
        }
}

static void
racct_decay_resource(struct racct *racct, void *res, void *dummy)
{
        int resource;
        int64_t r_old, r_new;

        ASSERT_RACCT_ENABLED();

        resource = *(int *)res;
        r_old = racct->r_resources[resource];

        /* If there is nothing to decay, just exit. */
        if (r_old <= 0)
                return;

        mtx_lock(&racct_lock);
        r_new = r_old * RACCT_DECAY_FACTOR / FSCALE;
        racct->r_resources[resource] = r_new;
        mtx_unlock(&racct_lock);
}

static void
racct_decay(int resource)
{

        ASSERT_RACCT_ENABLED();

        ui_racct_foreach(racct_decay_resource, &resource, NULL);
        loginclass_racct_foreach(racct_decay_resource, &resource, NULL);
        prison_racct_foreach(racct_decay_resource, &resource, NULL);
}
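
/*
 * Illustrative note (not part of the original source): the racctd kernel
 * process below wakes up roughly once a second, decays RACCT_PCTCPU in the
 * per-credential containers, refreshes RACCT_CPU, RACCT_WALLCLOCK and
 * RACCT_PCTCPU for every process, and then makes a second pass to throttle
 * processes that have exhausted their %cpu allowance and to wake up
 * previously throttled ones.
 */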

static void
racctd(void)
{
        struct thread *td;
        struct proc *p;
        struct timeval wallclock;
        uint64_t runtime;
        uint64_t pct, pct_estimate;

        ASSERT_RACCT_ENABLED();

        for (;;) {
                racct_decay(RACCT_PCTCPU);

                sx_slock(&allproc_lock);

                LIST_FOREACH(p, &zombproc, p_list) {
                        PROC_LOCK(p);
                        racct_set(p, RACCT_PCTCPU, 0);
                        PROC_UNLOCK(p);
                }

                FOREACH_PROC_IN_SYSTEM(p) {
                        PROC_LOCK(p);
                        if (p->p_state != PRS_NORMAL) {
                                PROC_UNLOCK(p);
                                continue;
                        }

                        microuptime(&wallclock);
                        timevalsub(&wallclock, &p->p_stats->p_start);
                        PROC_STATLOCK(p);
                        FOREACH_THREAD_IN_PROC(p, td)
                                ruxagg(p, td);
                        runtime = cputick2usec(p->p_rux.rux_runtime);
                        PROC_STATUNLOCK(p);
#ifdef notyet
                        KASSERT(runtime >= p->p_prev_runtime,
                            ("runtime < p_prev_runtime"));
#else
                        if (runtime < p->p_prev_runtime)
                                runtime = p->p_prev_runtime;
#endif
                        p->p_prev_runtime = runtime;
                        if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
                                pct_estimate = (1000000 * runtime * 100) /
                                    ((uint64_t)wallclock.tv_sec * 1000000 +
                                    wallclock.tv_usec);
                        } else
                                pct_estimate = 0;
                        pct = racct_getpcpu(p, pct_estimate);
                        mtx_lock(&racct_lock);
                        racct_set_force_locked(p, RACCT_PCTCPU, pct);
                        racct_set_locked(p, RACCT_CPU, runtime);
                        racct_set_locked(p, RACCT_WALLCLOCK,
                            (uint64_t)wallclock.tv_sec * 1000000 +
                            wallclock.tv_usec);
                        mtx_unlock(&racct_lock);
                        PROC_UNLOCK(p);
                }

                /*
                 * To ensure that processes are throttled in a fair way, we
                 * need to iterate over all processes again and check the
                 * limits for %cpu resource only after ucred racct containers
                 * have been properly filled.
                 */
                FOREACH_PROC_IN_SYSTEM(p) {
                        PROC_LOCK(p);
                        if (p->p_state != PRS_NORMAL) {
                                PROC_UNLOCK(p);
                                continue;
                        }

                        if (racct_pcpu_available(p) <= 0)
                                racct_proc_throttle(p);
                        else if (p->p_throttled)
                                racct_proc_wakeup(p);
                        PROC_UNLOCK(p);
                }
                sx_sunlock(&allproc_lock);
                pause("-", hz);
        }
}

static struct kproc_desc racctd_kp = {
        "racctd",
        racctd,
        NULL
};

static void
racctd_init(void)
{
        if (!racct_enable)
                return;

        kproc_start(&racctd_kp);
}
SYSINIT(racctd, SI_SUB_RACCTD, SI_ORDER_FIRST, racctd_init, NULL);

static void
racct_init(void)
{
        if (!racct_enable)
                return;

        racct_zone = uma_zcreate("racct", sizeof(struct racct),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
        /*
         * XXX: Move this somewhere.
         */
        prison0.pr_prison_racct = prison_racct_find("0");
}
SYSINIT(racct, SI_SUB_RACCT, SI_ORDER_FIRST, racct_init, NULL);

#endif /* !RACCT */