/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtxvar.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{

	return (kern_getpriority(td, uap->which, uap->who));
}

int
kern_getpriority(struct thread *td, int which, int who)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{

	return (kern_setpriority(td, uap->which, uap->who, uap->prio));
}

int
kern_setpriority(struct thread *td, int which, int who, int prio)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}
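/*
 * Illustrative userland sketch (an assumption for documentation purposes,
 * not part of this file): the functions above back the getpriority(2) and
 * setpriority(2) system calls.  Since -1 is a valid nice value, callers
 * must clear errno before the call to distinguish it from an error:
 *
 *	errno = 0;
 *	int nice = getpriority(PRIO_PROCESS, 0);	// current process
 *	if (nice == -1 && errno != 0)
 *		err(1, "getpriority");
 *	if (setpriority(PRIO_PROCESS, 0, nice + 1) != 0)
 *		err(1, "setpriority");
 */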
/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}

static int	unprivileged_idprio;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
    &unprivileged_idprio, 0,
    "Allow non-root users to set an idle priority (deprecated)");

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		td1 = tdfind(uap->lwpid, -1);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */

		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idleprio processes, there
		 * is a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need (and
		 * the idleprio process can't run due to a CPU-bound normal
		 * process).  Fix me!  XXX
		 *
		 * This problem is not only related to idleprio processes.
		 * A user level program can obtain a file lock and hold it
		 * indefinitely.  Additionally, without idleprio processes it
		 * is still conceivable that a program with low priority will
		 * never get to run.  In short, allowing this feature might
		 * make it easier to lock a resource indefinitely, but it is
		 * not the only thing that makes it possible.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
		    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
			break;
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0 &&
		    (error = priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
			break;
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
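/*
 * Illustrative userland sketch (an assumption, not part of this file):
 * marking the current thread idle-priority via rtprio_thread(2).  An
 * lwpid of 0 selects the calling thread, as handled above:
 *
 *	struct rtprio rtp;
 *
 *	rtp.type = RTP_PRIO_IDLE;
 *	rtp.prio = RTP_PRIO_MAX;		// least favorable
 *	if (rtprio_thread(RTP_SET, 0, &rtp) != 0)
 *		err(1, "rtprio_thread");
 *
 * Unless security.bsd.unprivileged_idprio is set, this requires privilege.
 */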
/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid is specified; if one is,
		 * report the highest priority in the process.  There isn't
		 * much more we can do, as there is only room to return a
		 * single priority.
		 * Note: specifying our own pid is not the same as leaving
		 * it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * See the comment in sys_rtprio_thread about idprio
		 * threads holding a lock.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
		    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
			break;
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0 &&
		    (error = priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
			break;

		/*
		 * If we are setting our own priority, set just our thread;
		 * if we are operating on another process, set all of the
		 * threads in that process.  Note that specifying our own
		 * pid (rather than zero) selects the latter behavior.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri, oldclass, oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	oldclass = td->td_pri_class;
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
	    td->td_pri_class != RTP_PRIO_NORMAL))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}
#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rl);

	/*
	 * XXX it would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids, are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_STATLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_STATUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit_proc(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
		    lim_cb, p, C_PREL(1));
}

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{

	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
}
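/*
 * Illustrative userland sketch (an assumption, not part of this file):
 * the CPU-time enforcement in lim_cb() above means a process that sets
 * RLIMIT_CPU receives SIGXCPU at the soft limit and is killed at the
 * hard limit:
 *
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };	// seconds
 *
 *	if (setrlimit(RLIMIT_CPU, &rl) != 0)
 *		err(1, "setrlimit");
 *
 * After 10 seconds of CPU time the process gets SIGXCPU (and lim_cb()
 * nudges the soft limit upward by 5 seconds); at 20 it is killed.
 */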
int
kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
    struct rlimit *limp)
{
	struct plimit *newlim, *oldlim;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	if (which == RLIMIT_STACK && limp->rlim_cur != RLIM_INFINITY)
		limp->rlim_cur += p->p_vmspace->vm_stkgap;

	oldssiz.rlim_cur = 0;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {
	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
			    lim_cb, p, C_PREL(1));
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UPDATE_COW(p);
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK &&
	    /*
	     * Skip calls from exec_new_vmspace(), done when stack is
	     * not mapped yet.
	     */
	    (td != curthread || (p->p_flag & P_INEXEC) == 0)) {
		/*
		 * The stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If the stack limit is going
		 * up, make more of it accessible; if it is going down, make
		 * the difference inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, 0,
			    VM_MAP_PROTECT_SET_PROT);
		}
	}

	return (0);
}
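/*
 * For reference, a sketch of the address arithmetic above for the usual
 * downward-growing stack: the accessible region is always the rlim_cur
 * bytes ending at sv_usrstack, so
 *
 *	[sv_usrstack - rlim_cur, sv_usrstack)		accessible
 *	[sv_usrstack - maxssiz, sv_usrstack - rlim_cur)	VM_PROT_NONE
 *
 * and raising or lowering the limit just re-protects the strip between
 * the old and new boundaries.
 */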
757 */ 758 if (limp->rlim_cur != oldssiz.rlim_cur) { 759 vm_offset_t addr; 760 vm_size_t size; 761 vm_prot_t prot; 762 763 if (limp->rlim_cur > oldssiz.rlim_cur) { 764 prot = p->p_sysent->sv_stackprot; 765 size = limp->rlim_cur - oldssiz.rlim_cur; 766 addr = p->p_sysent->sv_usrstack - 767 limp->rlim_cur; 768 } else { 769 prot = VM_PROT_NONE; 770 size = oldssiz.rlim_cur - limp->rlim_cur; 771 addr = p->p_sysent->sv_usrstack - 772 oldssiz.rlim_cur; 773 } 774 addr = trunc_page(addr); 775 size = round_page(size); 776 (void)vm_map_protect(&p->p_vmspace->vm_map, 777 addr, addr + size, prot, 0, 778 VM_MAP_PROTECT_SET_PROT); 779 } 780 } 781 782 return (0); 783 } 784 785 #ifndef _SYS_SYSPROTO_H_ 786 struct getrlimit_args { 787 u_int which; 788 struct rlimit *rlp; 789 }; 790 #endif 791 /* ARGSUSED */ 792 int 793 sys_getrlimit(struct thread *td, struct getrlimit_args *uap) 794 { 795 struct rlimit rlim; 796 int error; 797 798 if (uap->which >= RLIM_NLIMITS) 799 return (EINVAL); 800 lim_rlimit(td, uap->which, &rlim); 801 error = copyout(&rlim, uap->rlp, sizeof(struct rlimit)); 802 return (error); 803 } 804 805 /* 806 * Transform the running time and tick information for children of proc p 807 * into user and system time usage. 808 */ 809 void 810 calccru(struct proc *p, struct timeval *up, struct timeval *sp) 811 { 812 813 PROC_LOCK_ASSERT(p, MA_OWNED); 814 calcru1(p, &p->p_crux, up, sp); 815 } 816 817 /* 818 * Transform the running time and tick information in proc p into user 819 * and system time usage. If appropriate, include the current time slice 820 * on this CPU. 821 */ 822 void 823 calcru(struct proc *p, struct timeval *up, struct timeval *sp) 824 { 825 struct thread *td; 826 uint64_t runtime, u; 827 828 PROC_LOCK_ASSERT(p, MA_OWNED); 829 PROC_STATLOCK_ASSERT(p, MA_OWNED); 830 /* 831 * If we are getting stats for the current process, then add in the 832 * stats that this thread has accumulated in its current time slice. 833 * We reset the thread and CPU state as if we had performed a context 834 * switch right here. 835 */ 836 td = curthread; 837 if (td->td_proc == p) { 838 u = cpu_ticks(); 839 runtime = u - PCPU_GET(switchtime); 840 td->td_runtime += runtime; 841 td->td_incruntime += runtime; 842 PCPU_SET(switchtime, u); 843 } 844 /* Make sure the per-thread stats are current. */ 845 FOREACH_THREAD_IN_PROC(p, td) { 846 if (td->td_incruntime == 0) 847 continue; 848 ruxagg(p, td); 849 } 850 calcru1(p, &p->p_rux, up, sp); 851 } 852 853 /* Collect resource usage for a single thread. */ 854 void 855 rufetchtd(struct thread *td, struct rusage *ru) 856 { 857 struct proc *p; 858 uint64_t runtime, u; 859 860 p = td->td_proc; 861 PROC_STATLOCK_ASSERT(p, MA_OWNED); 862 THREAD_LOCK_ASSERT(td, MA_OWNED); 863 /* 864 * If we are getting stats for the current thread, then add in the 865 * stats that this thread has accumulated in its current time slice. 866 * We reset the thread and CPU state as if we had performed a context 867 * switch right here. 868 */ 869 if (td == curthread) { 870 u = cpu_ticks(); 871 runtime = u - PCPU_GET(switchtime); 872 td->td_runtime += runtime; 873 td->td_incruntime += runtime; 874 PCPU_SET(switchtime, u); 875 } 876 ruxagg_locked(p, td); 877 *ru = td->td_ru; 878 calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime); 879 } 880 881 /* XXX: the MI version is too slow to use: */ 882 #ifndef __HAVE_INLINE_FLSLL 883 #define flsll(x) (fls((x) >> 32) != 0 ? 
/* XXX: the MI version is too slow to use: */
#ifndef __HAVE_INLINE_FLSLL
#define	flsll(x)	(fls((x) >> 32) != 0 ? fls((x) >> 32) + 32 : fls(x))
#endif

static uint64_t
mul64_by_fraction(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t acc, bh, bl;
	int i, s, sa, sb;

	/*
	 * Calculate (a * b) / c accurately enough without overflowing.  c
	 * must be nonzero, and its top bit must be 0.  a or b must be
	 * <= c, and the implementation is tuned for b <= c.
	 *
	 * The comments about times are for use in calcru1() with units of
	 * microseconds for 'a' and stathz ticks at 128 Hz for b and c.
	 *
	 * Let n be the number of top zero bits in c.  Each iteration
	 * either returns, or reduces b by right shifting it by at least n.
	 * The number of iterations is at most 1 + 64 / n, and the error is
	 * at most the number of iterations.
	 *
	 * It is very unusual to need even 2 iterations.  Previous
	 * implementations overflowed essentially by returning early in the
	 * first iteration, with n = 38 giving overflow at 105+ hours and
	 * n = 32 giving overflow at 388+ days despite a more careful
	 * calculation.  388 days is a reasonable uptime, and the calculation
	 * needs to work for the uptime times the number of CPUs since 'a'
	 * is per-process.
	 */
	if (a >= (uint64_t)1 << 63)
		return (0);		/* Unsupported arg -- can't happen. */
	acc = 0;
	for (i = 0; i < 128; i++) {
		sa = flsll(a);
		sb = flsll(b);
		if (sa + sb <= 64)
			/* Up to 105 hours on first iteration. */
			return (acc + (a * b) / c);
		if (a >= c) {
			/*
			 * This reduction is based on a = q * c + r, with the
			 * remainder r < c.  'a' may be large to start, and
			 * moving bits from b into 'a' at the end of the loop
			 * sets the top bit of 'a', so the reduction makes
			 * significant progress.
			 */
			acc += (a / c) * b;
			a %= c;
			sa = flsll(a);
			if (sa + sb <= 64)
				/* Up to 388 days on first iteration. */
				return (acc + (a * b) / c);
		}

		/*
		 * This step writes a * b as a * ((bh << s) + bl) =
		 * a * (bh << s) + a * bl = (a << s) * bh + a * bl.  The 2
		 * additive terms are handled separately.  Splitting in
		 * this way is linear except for rounding errors.
		 *
		 * s = 64 - sa is the maximum such that a << s fits in 64
		 * bits.  Since a < c and c has at least 1 zero top bit,
		 * sa < 64 and s > 0.  Thus this step makes progress by
		 * reducing b (it increases 'a', but taking remainders on
		 * the next iteration completes the reduction).
		 *
		 * Finally, the choice for s is just what is needed to keep
		 * a * bl from overflowing, so we don't need complications
		 * like a recursive call mul64_by_fraction(a, bl, c) to
		 * handle the second additive term.
		 */
		s = 64 - sa;
		bh = b >> s;
		bl = b - (bh << s);
		acc += (a * bl) / c;
		a <<= s;
		b = bh;
	}
	return (0);		/* Algorithm failure -- can't happen. */
}
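/*
 * A worked example of the intent (numbers chosen for illustration):
 * with tu = 10^6 usec of runtime, ut = 96 user ticks and tt = 128 total
 * ticks, calcru1() wants uu = (10^6 * 96) / 128 = 750000 usec of user
 * time.  The direct products tu * ut and tu * st stay below 2^64 only
 * while tu <= 2^38 and tt <= 2^26 (the fast-path guard in calcru1()),
 * i.e. up to roughly 76 hours of runtime at stathz = 128; past that,
 * mul64_by_fraction() performs the same division piecewise without
 * overflowing.
 */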
static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	/* Subdivide tu.  Avoid overflow in the multiplications. */
	if (__predict_true(tu <= ((uint64_t)1 << 38) && tt <= (1 << 26))) {
		/* Up to 76 hours when stathz is 128. */
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	} else {
		uu = mul64_by_fraction(tu, ut, tt);
		su = mul64_by_fraction(tu, st, tt);
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}
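/*
 * For example: with rux_tu = 1000 usec recorded, a new reading of
 * tu = 998 satisfies tu + 3 > rux_tu and is absorbed as slop; with
 * rux_tu = 10^9, any tu above 100/101 of rux_tu (about 1% below)
 * passes the 101 * tu > 100 * rux_tu test.  Only larger regressions
 * trigger the "runtime went backwards" message above.
 */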
#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
sys_getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_STATLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_STATUNLOCK(p);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td)
{

	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg_locked(struct proc *p, struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);

	ruxagg_ext_locked(&p->p_rux, td);
	ruxagg_ext_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(p, td);
	thread_unlock(td);
}
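/*
 * Illustrative userland sketch (an assumption, not part of this file):
 * reading per-thread accounting through the RUSAGE_THREAD path above:
 *
 *	struct rusage ru;
 *
 *	if (getrusage(RUSAGE_THREAD, &ru) != 0)
 *		err(1, "getrusage");
 *	printf("user %jd.%06lds sys %jd.%06lds\n",
 *	    (intmax_t)ru.ru_utime.tv_sec, ru.ru_utime.tv_usec,
 *	    (intmax_t)ru.ru_stime.tv_sec, ru.ru_stime.tv_usec);
 */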
1167 */ 1168 void 1169 rufetch(struct proc *p, struct rusage *ru) 1170 { 1171 struct thread *td; 1172 1173 PROC_STATLOCK_ASSERT(p, MA_OWNED); 1174 1175 *ru = p->p_ru; 1176 if (p->p_numthreads > 0) { 1177 FOREACH_THREAD_IN_PROC(p, td) { 1178 ruxagg(p, td); 1179 rucollect(ru, &td->td_ru); 1180 } 1181 } 1182 } 1183 1184 /* 1185 * Atomically perform a rufetch and a calcru together. 1186 * Consumers, can safely assume the calcru is executed only once 1187 * rufetch is completed. 1188 */ 1189 void 1190 rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up, 1191 struct timeval *sp) 1192 { 1193 1194 PROC_STATLOCK(p); 1195 rufetch(p, ru); 1196 calcru(p, up, sp); 1197 PROC_STATUNLOCK(p); 1198 } 1199 1200 /* 1201 * Allocate a new resource limits structure and initialize its 1202 * reference count and mutex pointer. 1203 */ 1204 struct plimit * 1205 lim_alloc() 1206 { 1207 struct plimit *limp; 1208 1209 limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK); 1210 refcount_init(&limp->pl_refcnt, 1); 1211 return (limp); 1212 } 1213 1214 struct plimit * 1215 lim_hold(struct plimit *limp) 1216 { 1217 1218 refcount_acquire(&limp->pl_refcnt); 1219 return (limp); 1220 } 1221 1222 void 1223 lim_fork(struct proc *p1, struct proc *p2) 1224 { 1225 1226 PROC_LOCK_ASSERT(p1, MA_OWNED); 1227 PROC_LOCK_ASSERT(p2, MA_OWNED); 1228 1229 p2->p_limit = lim_hold(p1->p_limit); 1230 callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0); 1231 if (p1->p_cpulimit != RLIM_INFINITY) 1232 callout_reset_sbt(&p2->p_limco, SBT_1S, 0, 1233 lim_cb, p2, C_PREL(1)); 1234 } 1235 1236 void 1237 lim_free(struct plimit *limp) 1238 { 1239 1240 if (refcount_release(&limp->pl_refcnt)) 1241 free((void *)limp, M_PLIMIT); 1242 } 1243 1244 void 1245 lim_freen(struct plimit *limp, int n) 1246 { 1247 1248 if (refcount_releasen(&limp->pl_refcnt, n)) 1249 free((void *)limp, M_PLIMIT); 1250 } 1251 1252 /* 1253 * Make a copy of the plimit structure. 1254 * We share these structures copy-on-write after fork. 1255 */ 1256 void 1257 lim_copy(struct plimit *dst, struct plimit *src) 1258 { 1259 1260 KASSERT(dst->pl_refcnt <= 1, ("lim_copy to shared limit")); 1261 bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit)); 1262 } 1263 1264 /* 1265 * Return the hard limit for a particular system resource. The 1266 * which parameter specifies the index into the rlimit array. 1267 */ 1268 rlim_t 1269 lim_max(struct thread *td, int which) 1270 { 1271 struct rlimit rl; 1272 1273 lim_rlimit(td, which, &rl); 1274 return (rl.rlim_max); 1275 } 1276 1277 rlim_t 1278 lim_max_proc(struct proc *p, int which) 1279 { 1280 struct rlimit rl; 1281 1282 lim_rlimit_proc(p, which, &rl); 1283 return (rl.rlim_max); 1284 } 1285 1286 /* 1287 * Return the current (soft) limit for a particular system resource. 1288 * The which parameter which specifies the index into the rlimit array 1289 */ 1290 rlim_t 1291 (lim_cur)(struct thread *td, int which) 1292 { 1293 struct rlimit rl; 1294 1295 lim_rlimit(td, which, &rl); 1296 return (rl.rlim_cur); 1297 } 1298 1299 rlim_t 1300 lim_cur_proc(struct proc *p, int which) 1301 { 1302 struct rlimit rl; 1303 1304 lim_rlimit_proc(p, which, &rl); 1305 return (rl.rlim_cur); 1306 } 1307 1308 /* 1309 * Return a copy of the entire rlimit structure for the system limit 1310 * specified by 'which' in the rlimit structure pointed to by 'rlp'. 
1311 */ 1312 void 1313 lim_rlimit(struct thread *td, int which, struct rlimit *rlp) 1314 { 1315 struct proc *p = td->td_proc; 1316 1317 MPASS(td == curthread); 1318 KASSERT(which >= 0 && which < RLIM_NLIMITS, 1319 ("request for invalid resource limit")); 1320 *rlp = td->td_limit->pl_rlimit[which]; 1321 if (p->p_sysent->sv_fixlimit != NULL) 1322 p->p_sysent->sv_fixlimit(rlp, which); 1323 } 1324 1325 void 1326 lim_rlimit_proc(struct proc *p, int which, struct rlimit *rlp) 1327 { 1328 1329 PROC_LOCK_ASSERT(p, MA_OWNED); 1330 KASSERT(which >= 0 && which < RLIM_NLIMITS, 1331 ("request for invalid resource limit")); 1332 *rlp = p->p_limit->pl_rlimit[which]; 1333 if (p->p_sysent->sv_fixlimit != NULL) 1334 p->p_sysent->sv_fixlimit(rlp, which); 1335 } 1336 1337 void 1338 uihashinit() 1339 { 1340 1341 uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash); 1342 rw_init(&uihashtbl_lock, "uidinfo hash"); 1343 } 1344 1345 /* 1346 * Look up a uidinfo struct for the parameter uid. 1347 * uihashtbl_lock must be locked. 1348 * Increase refcount on uidinfo struct returned. 1349 */ 1350 static struct uidinfo * 1351 uilookup(uid_t uid) 1352 { 1353 struct uihashhead *uipp; 1354 struct uidinfo *uip; 1355 1356 rw_assert(&uihashtbl_lock, RA_LOCKED); 1357 uipp = UIHASH(uid); 1358 LIST_FOREACH(uip, uipp, ui_hash) 1359 if (uip->ui_uid == uid) { 1360 uihold(uip); 1361 break; 1362 } 1363 1364 return (uip); 1365 } 1366 1367 /* 1368 * Find or allocate a struct uidinfo for a particular uid. 1369 * Returns with uidinfo struct referenced. 1370 * uifree() should be called on a struct uidinfo when released. 1371 */ 1372 struct uidinfo * 1373 uifind(uid_t uid) 1374 { 1375 struct uidinfo *new_uip, *uip; 1376 struct ucred *cred; 1377 1378 cred = curthread->td_ucred; 1379 if (cred->cr_uidinfo->ui_uid == uid) { 1380 uip = cred->cr_uidinfo; 1381 uihold(uip); 1382 return (uip); 1383 } else if (cred->cr_ruidinfo->ui_uid == uid) { 1384 uip = cred->cr_ruidinfo; 1385 uihold(uip); 1386 return (uip); 1387 } 1388 1389 rw_rlock(&uihashtbl_lock); 1390 uip = uilookup(uid); 1391 rw_runlock(&uihashtbl_lock); 1392 if (uip != NULL) 1393 return (uip); 1394 1395 new_uip = malloc(sizeof(*new_uip), M_UIDINFO, M_WAITOK | M_ZERO); 1396 racct_create(&new_uip->ui_racct); 1397 refcount_init(&new_uip->ui_ref, 1); 1398 new_uip->ui_uid = uid; 1399 1400 rw_wlock(&uihashtbl_lock); 1401 /* 1402 * There's a chance someone created our uidinfo while we 1403 * were in malloc and not holding the lock, so we have to 1404 * make sure we don't insert a duplicate uidinfo. 1405 */ 1406 if ((uip = uilookup(uid)) == NULL) { 1407 LIST_INSERT_HEAD(UIHASH(uid), new_uip, ui_hash); 1408 rw_wunlock(&uihashtbl_lock); 1409 uip = new_uip; 1410 } else { 1411 rw_wunlock(&uihashtbl_lock); 1412 racct_destroy(&new_uip->ui_racct); 1413 free(new_uip, M_UIDINFO); 1414 } 1415 return (uip); 1416 } 1417 1418 /* 1419 * Place another refcount on a uidinfo struct. 1420 */ 1421 void 1422 uihold(struct uidinfo *uip) 1423 { 1424 1425 refcount_acquire(&uip->ui_ref); 1426 } 1427 1428 /*- 1429 * Since uidinfo structs have a long lifetime, we use an 1430 * opportunistic refcounting scheme to avoid locking the lookup hash 1431 * for each release. 1432 * 1433 * If the refcount hits 0, we need to free the structure, 1434 * which means we need to lock the hash. 1435 * Optimal case: 1436 * After locking the struct and lowering the refcount, if we find 1437 * that we don't need to free, simply unlock and return. 
/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(struct uidinfo *uip)
{

	if (refcount_release_if_not_last(&uip->ui_ref))
		return;

	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref) == 0) {
		rw_wunlock(&uihashtbl_lock);
		return;
	}

	racct_destroy(&uip->ui_racct);
	LIST_REMOVE(uip, ui_hash);
	rw_wunlock(&uihashtbl_lock);

	if (uip->ui_sbsize != 0)
		printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);
	if (uip->ui_vmsize != 0)
		printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
		    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
	free(uip, M_UIDINFO);
}

#ifdef RACCT
void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void (*pre)(void), void (*post)(void),
    void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	if (pre != NULL)
		(pre)();
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	if (post != NULL)
		(post)();
	rw_runlock(&uihashtbl_lock);
}
#endif

static inline int
chglimit(struct uidinfo *uip, long *limit, int diff, rlim_t max,
    const char *name)
{
	long new;

	/* Don't allow them to exceed max, but allow subtraction. */
	new = atomic_fetchadd_long(limit, (long)diff) + diff;
	if (diff > 0 && max != 0) {
		if (new < 0 || new > max) {
			atomic_subtract_long(limit, (long)diff);
			return (0);
		}
	} else if (new < 0)
		printf("negative %s for uid = %d\n", name, uip->ui_uid);
	return (1);
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_proccnt, diff, max, "proccnt"));
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff, rv;

	diff = to - *hiwat;
	if (diff > 0 && max == 0) {
		rv = 0;
	} else {
		rv = chglimit(uip, &uip->ui_sbsize, diff, max, "sbsize");
		if (rv != 0)
			*hiwat = to;
	}
	return (rv);
}

/*
 * Change the count associated with number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_ptscnt, diff, max, "ptscnt"));
}

int
chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_kqcnt, diff, max, "kqcnt"));
}

int
chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_umtxcnt, diff, max, "umtxcnt"));
}
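/*
 * Illustrative sketch of the charge/release convention used by callers
 * of the chg*() functions above (an assumption for documentation
 * purposes; the fork path is one real consumer of chgproccnt()):
 *
 *	if (!chgproccnt(uip, 1, lim_cur(td, RLIMIT_NPROC))) {
 *		// over the per-uid limit; fail the operation
 *		return (EAGAIN);
 *	}
 *	...
 *	(void)chgproccnt(uip, -1, 0);	// a max of 0 never fails a decrement
 */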