/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>


static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */

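/*
 * getpriority() and setpriority() below operate on a set of processes
 * selected by "which" and "who": a single process (PRIO_PROCESS), a
 * process group (PRIO_PGRP) or all processes of one user (PRIO_USER).
 * A "who" of 0 names the calling process, process group or user.  For
 * example, a (hypothetical) getpriority(PRIO_USER, 0) call reports the
 * lowest, i.e. most favourable, nice value among all visible processes
 * owned by the caller's effective uid.
 */
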
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == uap->who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

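/*
 * setpriority() funnels through donice() below, which clamps the
 * requested value to [PRIO_MIN, PRIO_MAX] before applying it; e.g. a
 * (hypothetical) setpriority(PRIO_PROCESS, 0, 100) request is silently
 * clamped to PRIO_MAX rather than rejected.  Lowering the nice value
 * requires the PRIV_SCHED_SETPRIORITY privilege.
 */
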
/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		/* Only look up threads in the current process. */
		td1 = tdfind(uap->lwpid, curproc->p_pid);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idle priority, there is
		 * a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need
		 * (and the idleprio process can't run due to a CPU-bound
		 * normal process).  Fix me!  XXX
		 */
#if 0
		if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
		if (rtp.type != RTP_PRIO_NORMAL) {
#endif
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

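/*
 * Both rtprio interfaces share the rtp_to_pri()/pri_to_rtp()
 * conversions below, which map a class-relative rtprio (type, prio)
 * pair onto the global user priority range: PRI_MIN_REALTIME + prio
 * for RTP_PRIO_REALTIME, PRI_MIN_TIMESHARE + prio for RTP_PRIO_NORMAL
 * and PRI_MIN_IDLE + prio for RTP_PRIO_IDLE.
 */
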
/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid is specified; if one is,
		 * report the highest priority in the process.  There isn't
		 * much more we can do, as there is only room to return a
		 * single priority.
		 * Note: specifying our own pid is not the same as leaving
		 * it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idle priority, there is
		 * a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need
		 * (and the idleprio process can't run due to a CPU-bound
		 * normal process).  Fix me!  XXX
		 */
#if 0
		if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
		if (rtp.type != RTP_PRIO_NORMAL) {
#endif
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		/*
		 * If we are setting our own priority, set just our thread;
		 * if we are operating on another process, set all of its
		 * threads.  Specifying our own pid means the latter.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri;
	u_char	oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (curthread == td)
		sched_prio(curthread, td->td_user_pri); /* XXX dubious */
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}

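/*
 * pri_to_rtp() is the inverse of rtp_to_pri(): it subtracts the base
 * priority of the thread's class, so a (type, prio) pair installed via
 * rtp_to_pri() reads back unchanged as long as the class is one of the
 * three handled above.
 */
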
#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX it would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids, are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its CPU resource allocation.
	 * If it reaches the current limit, send it SIGXCPU and extend
	 * the limit; if it has reached the hard limit, kill it.
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_SUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset(&p->p_limco, hz, lim_cb, p);
}

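/*
 * lim_cb() reschedules itself once per second (hz ticks) for as long
 * as the process has a finite RLIMIT_CPU.  An (illustrative) process
 * with a soft limit of 10 and a hard limit of 60 seconds thus gets
 * SIGXCPU after 10 seconds of CPU time and again every 5 seconds of
 * additional CPU time, until it is killed at the 60 second hard limit.
 */
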
int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{
	struct plimit *newlim, *oldlim;
	struct proc *p;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	p = td->td_proc;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset(&p->p_limco, hz, lim_cb, p);
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If the stack limit is going
		 * up, make more accessible; if it is going down, make
		 * inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct rlimit *rlp;
};
#endif
/* ARGSUSED */
int
sys_getrlimit(struct thread *td, struct __getrlimit_args *uap)
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

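/*
 * An in-kernel consumer would adjust a limit through kern_setrlimit()
 * directly, e.g. (hypothetical caller):
 *
 *	struct rlimit rl;
 *
 *	rl.rlim_cur = rl.rlim_max = 1024;
 *	error = kern_setrlimit(td, RLIMIT_NOFILE, &rl);
 *
 * which performs the same privilege and bounds checks as the
 * setrlimit(2) path above.
 */
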
/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}

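/*
 * calcru1() below splits the total measured runtime across the user,
 * system and interrupt buckets in proportion to the statclock tick
 * counts.  With hypothetical counts ut = 2, st = 1, it = 1 and a
 * runtime of tu = 4000 usec, the process is charged
 * uu = 4000 * 2 / 4 = 2000 usec of user time and
 * su = 4000 * 1 / 4 = 1000 usec of system time.
 */
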
static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		uu = (tu * ut) / tt;
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		su = (tu * st) / tt;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct rusage *rusage;
};
#endif
int
sys_getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_SLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_SUNLOCK(p);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

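/*
 * Tick counts and raw runtime first accumulate in each thread
 * (td_uticks, td_sticks, td_iticks, td_incruntime); the ruxagg()
 * functions below fold them into both the thread's and the process'
 * rusage_ext and reset the per-thread counters, so each tick is
 * counted exactly once.
 */
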
/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_locked(struct rusage_ext *rux, struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(&p->p_rux, td);
	ruxagg_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
	thread_unlock(td);
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_SLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume that calcru() is executed only
 * after rufetch() has completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_SLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_SUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset(&p2->p_limco, hz, lim_cb, p2);
}

void
lim_free(struct plimit *limp)
{

	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

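/*
 * Like lim_max() above, the accessors below expect the process to be
 * locked, since lim_rlimit() asserts the proc lock; e.g. (hypothetical
 * caller):
 *
 *	PROC_LOCK(p);
 *	nfiles = lim_cur(p, RLIMIT_NOFILE);
 *	PROC_UNLOCK(p);
 */
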
/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be locked.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *old_uip, *uip;

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		rw_runlock(&uihashtbl_lock);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		racct_create(&uip->ui_racct);
		rw_wlock(&uihashtbl_lock);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			racct_destroy(&uip->ui_racct);
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			refcount_init(&uip->ui_ref, 0);
			uip->ui_uid = uid;
			mtx_init(&uip->ui_vmsize_mtx, "ui_vmsize", NULL,
			    MTX_DEF);
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	rw_unlock(&uihashtbl_lock);
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}

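/*
 * A uidinfo reference obtained with uifind() or uihold() is dropped
 * with uifree(), e.g. (illustrative):
 *
 *	uip = uifind(uid);
 *	...
 *	uifree(uip);
 */
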
/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in a need to free, bump the count
 *   back up, drop the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(struct uidinfo *uip)
{
	int old;

	/* Prepare for optimal case. */
	old = uip->ui_ref;
	if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
		return;

	/* Prepare for suboptimal case. */
	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref)) {
		racct_destroy(&uip->ui_racct);
		LIST_REMOVE(uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		if (uip->ui_sbsize != 0)
			printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			    uip->ui_uid, uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		if (uip->ui_vmsize != 0)
			printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
			    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
		mtx_destroy(&uip->ui_vmsize_mtx);
		free(uip, M_UIDINFO);
		return;
	}
	/*
	 * Someone added a reference between atomic_cmpset_int() and
	 * rw_wlock(&uihashtbl_lock).
	 */
	rw_wunlock(&uihashtbl_lock);
}

void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	rw_runlock(&uihashtbl_lock);
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_proccnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_proccnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_proccnt, (long)diff);
		if (uip->ui_proccnt < 0)
			printf("negative proccnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff;

	diff = to - *hiwat;
	if (diff > 0) {
		if (atomic_fetchadd_long(&uip->ui_sbsize, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_sbsize, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_sbsize, (long)diff);
		if (uip->ui_sbsize < 0)
			printf("negative sbsize for uid = %d\n", uip->ui_uid);
	}
	*hiwat = to;
	return (1);
}

/*
 * Change the count associated with number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_ptscnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_ptscnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_ptscnt, (long)diff);
		if (uip->ui_ptscnt < 0)
			printf("negative ptscnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}
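
/*
 * Note that the chg*() functions above return 1 on success and 0 when
 * the change would exceed 'max'.  A 'max' of 0 disables the limit for
 * chgproccnt() and chgptscnt(), while chgsbsize() always enforces it.
 */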