/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
getpriority(struct thread *td, struct getpriority_args *uap)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
setpriority(struct thread *td, struct setpriority_args *uap)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == uap->who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}
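/*
 * Illustrative userland usage (a sketch, not part of the kernel proper):
 * getpriority() can legitimately return a negative nice value through
 * td_retval, so callers must clear errno to tell that apart from failure:
 *
 *	errno = 0;
 *	prio = getpriority(PRIO_PROCESS, 0);
 *	if (prio == -1 && errno != 0)
 *		err(1, "getpriority");
 *	if (setpriority(PRIO_PROCESS, 0, prio + 1) == -1)
 *		err(1, "setpriority");
 */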
/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		/* Only look up thread in current process */
		td1 = tdfind(uap->lwpid, curproc->p_pid);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idle priority, there is
		 * a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need
		 * (and the idleprio process can't run due to a CPU-bound
		 * normal process).  Fix me!  XXX
		 */
#if 0
		if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
		if (rtp.type != RTP_PRIO_NORMAL) {
#endif
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
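/*
 * Illustrative userland usage of the rtprio() interface below (a sketch,
 * not part of the kernel proper): mark the calling process idle-priority,
 * then read the setting back:
 *
 *	struct rtprio rtp;
 *
 *	rtp.type = RTP_PRIO_IDLE;
 *	rtp.prio = RTP_PRIO_MAX;
 *	if (rtprio(RTP_SET, 0, &rtp) == -1)
 *		err(1, "rtprio");
 *	(void)rtprio(RTP_LOOKUP, 0, &rtp);
 */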
/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid is specified; if one is,
		 * report the highest priority in the process.  There isn't
		 * much more you can do as there is only room to return a
		 * single priority.
		 * Note: specifying our own pid is not the same as leaving
		 * it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idle priority, there is
		 * a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need
		 * (and the idleprio process can't run due to a CPU-bound
		 * normal process).  Fix me!  XXX
		 */
#if 0
		if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
		if (rtp.type != RTP_PRIO_NORMAL) {
#endif
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		/*
		 * If we are setting our own priority, set just our
		 * thread, but if we are doing another process, do all
		 * the threads in that process.  If we specify our own
		 * pid we do the latter.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri;
	u_char	oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (curthread == td)
		sched_prio(curthread, td->td_user_pri); /* XXX dubious */
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}
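/*
 * Note on the conversions above: rtp_to_pri() and pri_to_rtp() map between
 * the per-class (type, prio) pairs of struct rtprio and the kernel's global
 * priority space by adding or subtracting the base of the class's band.
 * For example, RTP_PRIO_REALTIME with prio 5 becomes the kernel priority
 * PRI_MIN_REALTIME + 5, and converting back subtracts the same base.
 */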
#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
setrlimit(struct thread *td, struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_SUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset(&p->p_limco, hz, lim_cb, p);
}
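/*
 * Example of the lim_cb() policy above, assuming a soft RLIMIT_CPU of 10
 * seconds and a hard limit of 60: after the process has consumed 10 CPU
 * seconds it receives SIGXCPU and the enforced limit is bumped to 15,
 * then 20, and so on in 5-second steps, until the hard limit is reached
 * and the process is killed outright.
 */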
int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{
	struct plimit *newlim, *oldlim;
	struct proc *p;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	p = td->td_proc;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset(&p->p_limco, hz, lim_cb, p);
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If the stack limit is going
		 * up, make more bytes accessible; if it is going down, make
		 * them inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
getrlimit(struct thread *td, struct __getrlimit_args *uap)
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}
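/*
 * Illustrative userland usage of the limit syscalls above (a sketch, not
 * part of the kernel proper): raise the soft file-descriptor limit to the
 * hard limit:
 *
 *	struct rlimit rl;
 *
 *	if (getrlimit(RLIMIT_NOFILE, &rl) == -1)
 *		err(1, "getrlimit");
 *	rl.rlim_cur = rl.rlim_max;
 *	if (setrlimit(RLIMIT_NOFILE, &rl) == -1)
 *		err(1, "setrlimit");
 */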
/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		p->p_rux.rux_runtime += u - PCPU_GET(switchtime);
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		uu = (tu * ut) / tt;
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		su = (tu * st) / tt;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_SLOCK(p);
		ruxagg(p, td);
		PROC_SUNLOCK(p);
		thread_lock(td);
		*rup = td->td_ru;
		calcru1(p, &td->td_rux, &rup->ru_utime, &rup->ru_stime);
		thread_unlock(td);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_locked(struct rusage_ext *rux, struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(&p->p_rux, td);
	ruxagg_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
	thread_unlock(td);
}
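/*
 * Summary of the aggregation flow above: per-thread runtime and tick
 * counts accumulate in td_incruntime/td_uticks/td_sticks/td_iticks until
 * ruxagg() folds them into both the process-wide p_rux and the thread's
 * own td_rux and zeroes the per-thread counters; calcru()/calcru1() then
 * apportion the total runtime into user and system time in proportion to
 * the tick counts.
 */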
/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_SLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume that calcru() is executed only after
 * rufetch() has completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_SLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_SUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset(&p2->p_limco, hz, lim_cb, p2);
}

void
lim_free(struct plimit *limp)
{

	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}
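/*
 * UIHASH() above simply masks the uid with uihash, the table size minus
 * one as set up by hashinit() in uihashinit(); e.g., with a 64-bucket
 * table, uid 1001 lands in bucket 1001 & 63 == 41.
 */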
/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be locked.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *old_uip, *uip;

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		rw_runlock(&uihashtbl_lock);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		racct_create(&uip->ui_racct);
		rw_wlock(&uihashtbl_lock);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			racct_destroy(&uip->ui_racct);
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			refcount_init(&uip->ui_ref, 0);
			uip->ui_uid = uid;
			mtx_init(&uip->ui_vmsize_mtx, "ui_vmsize", NULL,
			    MTX_DEF);
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	rw_unlock(&uihashtbl_lock);
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}
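/*
 * Typical uidinfo lifecycle (a sketch): a subsystem obtains a reference
 * with uifind(uid) (or an additional one with uihold()), charges resources
 * through chgproccnt()/chgsbsize()/chgptscnt() below, and drops the
 * reference with uifree() when done; the structure is only freed once the
 * last reference goes away.
 */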
/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(struct uidinfo *uip)
{
	int old;

	/* Prepare for optimal case. */
	old = uip->ui_ref;
	if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
		return;

	/* Prepare for suboptimal case. */
	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref)) {
		racct_destroy(&uip->ui_racct);
		LIST_REMOVE(uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		if (uip->ui_sbsize != 0)
			printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			    uip->ui_uid, uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		if (uip->ui_vmsize != 0)
			printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
			    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
		mtx_destroy(&uip->ui_vmsize_mtx);
		free(uip, M_UIDINFO);
		return;
	}
	/*
	 * Someone added a reference between atomic_cmpset_int() and
	 * rw_wlock(&uihashtbl_lock).
	 */
	rw_wunlock(&uihashtbl_lock);
}

void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	rw_runlock(&uihashtbl_lock);
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_proccnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_proccnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_proccnt, (long)diff);
		if (uip->ui_proccnt < 0)
			printf("negative proccnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff;

	diff = to - *hiwat;
	if (diff > 0) {
		if (atomic_fetchadd_long(&uip->ui_sbsize, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_sbsize, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_sbsize, (long)diff);
		if (uip->ui_sbsize < 0)
			printf("negative sbsize for uid = %d\n", uip->ui_uid);
	}
	*hiwat = to;
	return (1);
}

/*
 * Change the count associated with number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_ptscnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_ptscnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_ptscnt, (long)diff);
		if (uip->ui_ptscnt < 0)
			printf("negative ptscnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}
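/*
 * Illustrative use of the charge functions above (a sketch; the calling
 * context and error handling are hypothetical): refuse to create a
 * process when the per-uid limit would be exceeded, and undo the charge
 * on teardown:
 *
 *	if (!chgproccnt(uip, 1, lim_cur(p, RLIMIT_NPROC)))
 *		return (EAGAIN);
 *	...
 *	(void)chgproccnt(uip, -1, 0);
 */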