/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtxvar.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td);
/*
 * Resource controls and accounting.
 */
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{

	return (kern_getpriority(td, uap->which, uap->who));
}

int
kern_getpriority(struct thread *td, int which, int who)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{

	return (kern_setpriority(td, uap->which, uap->who, uap->prio));
}

int
kern_setpriority(struct thread *td, int which, int who, int prio)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}
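/*
 * Illustrative sketch (not part of the original source) of how the
 * which/who pairs above map to userland calls; the pid, nice value and
 * error handling are assumptions for the example only.
 *
 *	#include <sys/resource.h>
 *
 *	// Lowest (most favorable) nice among processes in pgrp 1234.
 *	// getpriority(2) can legitimately return -1, so errno must be
 *	// cleared first to distinguish an error.
 *	errno = 0;
 *	int low = getpriority(PRIO_PGRP, 1234);
 *	if (low == -1 && errno != 0)
 *		err(1, "getpriority");
 *
 *	// Renice every visible process owned by the caller's uid:
 *	if (setpriority(PRIO_USER, 0, 10) == -1)
 *		err(1, "setpriority");
 */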
"nice" for a (whole) process. 261 */ 262 static int 263 donice(struct thread *td, struct proc *p, int n) 264 { 265 int error; 266 267 PROC_LOCK_ASSERT(p, MA_OWNED); 268 if ((error = p_cansched(td, p))) 269 return (error); 270 if (n > PRIO_MAX) 271 n = PRIO_MAX; 272 if (n < PRIO_MIN) 273 n = PRIO_MIN; 274 if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0) 275 return (EACCES); 276 sched_nice(p, n); 277 return (0); 278 } 279 280 static int unprivileged_idprio; 281 SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW, 282 &unprivileged_idprio, 0, 283 "Allow non-root users to set an idle priority (deprecated)"); 284 285 /* 286 * Set realtime priority for LWP. 287 */ 288 #ifndef _SYS_SYSPROTO_H_ 289 struct rtprio_thread_args { 290 int function; 291 lwpid_t lwpid; 292 struct rtprio *rtp; 293 }; 294 #endif 295 int 296 sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap) 297 { 298 struct proc *p; 299 struct rtprio rtp; 300 struct thread *td1; 301 int cierror, error; 302 303 /* Perform copyin before acquiring locks if needed. */ 304 if (uap->function == RTP_SET) 305 cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio)); 306 else 307 cierror = 0; 308 309 if (uap->lwpid == 0 || uap->lwpid == td->td_tid) { 310 p = td->td_proc; 311 td1 = td; 312 PROC_LOCK(p); 313 } else { 314 td1 = tdfind(uap->lwpid, -1); 315 if (td1 == NULL) 316 return (ESRCH); 317 p = td1->td_proc; 318 } 319 320 switch (uap->function) { 321 case RTP_LOOKUP: 322 if ((error = p_cansee(td, p))) 323 break; 324 pri_to_rtp(td1, &rtp); 325 PROC_UNLOCK(p); 326 return (copyout(&rtp, uap->rtp, sizeof(struct rtprio))); 327 case RTP_SET: 328 if ((error = p_cansched(td, p)) || (error = cierror)) 329 break; 330 331 /* Disallow setting rtprio in most cases if not superuser. */ 332 333 /* 334 * Realtime priority has to be restricted for reasons which 335 * should be obvious. However, for idleprio processes, there is 336 * a potential for system deadlock if an idleprio process gains 337 * a lock on a resource that other processes need (and the 338 * idleprio process can't run due to a CPU-bound normal 339 * process). Fix me! XXX 340 * 341 * This problem is not only related to idleprio process. 342 * A user level program can obtain a file lock and hold it 343 * indefinitely. Additionally, without idleprio processes it is 344 * still conceivable that a program with low priority will never 345 * get to run. In short, allowing this feature might make it 346 * easier to lock a resource indefinitely, but it is not the 347 * only thing that makes it possible. 348 */ 349 if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME && 350 (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0) 351 break; 352 if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE && 353 unprivileged_idprio == 0 && 354 (error = priv_check(td, PRIV_SCHED_IDPRIO)) != 0) 355 break; 356 error = rtp_to_pri(&rtp, td1); 357 break; 358 default: 359 error = EINVAL; 360 break; 361 } 362 PROC_UNLOCK(p); 363 return (error); 364 } 365 366 /* 367 * Set realtime priority. 368 */ 369 #ifndef _SYS_SYSPROTO_H_ 370 struct rtprio_args { 371 int function; 372 pid_t pid; 373 struct rtprio *rtp; 374 }; 375 #endif 376 int 377 sys_rtprio(struct thread *td, struct rtprio_args *uap) 378 { 379 struct proc *p; 380 struct thread *tdp; 381 struct rtprio rtp; 382 int cierror, error; 383 384 /* Perform copyin before acquiring locks if needed. 
/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * See the comment in sys_rtprio_thread about idprio
		 * threads holding a lock.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
		    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
			break;
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0 &&
		    (error = priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
			break;

		/*
		 * If we are setting our own priority, set just our
		 * thread, but if we are doing another process, do all
		 * the threads in that process.  If we specify our own
		 * pid we do the latter.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
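/*
 * Illustrative sketch (not part of the original source): the
 * process-wide counterpart of the per-thread example above; the pid
 * and prio values are assumptions for the example only.
 *
 *	#include <sys/rtprio.h>
 *
 *	// Query the most favorable priority among pid 1234's threads:
 *	struct rtprio rtp;
 *	if (rtprio(RTP_LOOKUP, 1234, &rtp) == -1)
 *		err(1, "rtprio");
 *
 *	// Make every thread of the current process realtime, prio 5.
 *	// Passing getpid() rather than 0 hits the "do all the threads"
 *	// path noted in the comment above.
 *	rtp.type = RTP_PRIO_REALTIME;
 *	rtp.prio = 5;
 *	if (rtprio(RTP_SET, getpid(), &rtp) == -1)
 *		err(1, "rtprio");
 */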
int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri, oldclass, oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	oldclass = td->td_pri_class;
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
	    td->td_pri_class != RTP_PRIO_NORMAL))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}
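/*
 * Worked example (added commentary, not in the original source) of the
 * two conversions above: rtp_to_pri() offsets the user-supplied prio
 * by the base of the class's kernel priority range, and pri_to_rtp()
 * undoes it.  E.g. setting { RTP_PRIO_REALTIME, 5 } yields newpri =
 * PRI_MIN_REALTIME + 5; looking the thread up again subtracts
 * PRI_MIN_REALTIME and reports prio 5 in class RTP_PRIO_REALTIME.
 * Numerically smaller rtp.type values denote more favorable classes,
 * which is why the RTP_LOOKUP scan in sys_rtprio() above prefers the
 * lowest type, then the lowest prio within that type.
 */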
#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rl);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including the not
	 * unimportant ones of uids, are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_STATLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_STATUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit_proc(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
		    lim_cb, p, C_PREL(1));
}

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{

	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
}
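/*
 * Illustrative sketch (not part of the original source) of the
 * RLIMIT_CPU behavior implemented by lim_cb() above; the limit values
 * are assumptions for the example only.
 *
 *	#include <sys/resource.h>
 *
 *	// Soft limit 10s, hard limit 15s of CPU time.  Once the process
 *	// accumulates 10s of runtime it receives SIGXCPU, and roughly
 *	// every 5s thereafter (lim_cb bumps p_cpulimit by 5 each time);
 *	// on reaching the 15s hard limit it is killed.
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 15 };
 *	if (setrlimit(RLIMIT_CPU, &rl) == -1)
 *		err(1, "setrlimit");
 */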
int
kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
    struct rlimit *limp)
{
	struct plimit *newlim, *oldlim, *oldlim_td;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {
	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
			    lim_cb, p, C_PREL(1));
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UPDATE_COW(p);
	oldlim_td = NULL;
	if (td == curthread && PROC_COW_CHANGECOUNT(td, p) == 1) {
		oldlim_td = lim_cowsync();
		thread_cow_synced(td);
	}
	PROC_UNLOCK(p);
	if (oldlim_td != NULL) {
		MPASS(oldlim_td == oldlim);
		lim_freen(oldlim, 2);
	} else {
		lim_free(oldlim);
	}

	if (which == RLIMIT_STACK &&
	    /*
	     * Skip calls from exec_new_vmspace(), done when stack is
	     * not mapped yet.
	     */
	    (td != curthread || (p->p_flag & P_INEXEC) == 0)) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = round_page(p->p_vmspace->vm_stacktop) -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = round_page(p->p_vmspace->vm_stacktop) -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, 0,
			    VM_MAP_PROTECT_SET_PROT);
		}
	}

	return (0);
}
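/*
 * Worked example (added commentary, not in the original source) of the
 * RLIMIT_STACK adjustment above, with assumed sizes: if the soft limit
 * grows from 8 MB to 16 MB, then prot = sv_stackprot, size = 8 MB, and
 * addr = vm_stacktop - 16 MB, so the 8 MB immediately below the
 * previously accessible region becomes accessible.  Shrinking from
 * 16 MB to 8 MB instead protects [vm_stacktop - 16 MB,
 * vm_stacktop - 8 MB) with VM_PROT_NONE.  Either way only the delta is
 * reprotected, never the whole stack mapping.
 */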
#ifndef _SYS_SYSPROTO_H_
struct getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
sys_getrlimit(struct thread *td, struct getrlimit_args *uap)
{
	struct rlimit rlim;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rlim);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg_locked(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}
static uint64_t
mul64_by_fraction(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t acc, bh, bl;
	int i, s, sa, sb;

	/*
	 * Calculate (a * b) / c accurately enough without overflowing.  c
	 * must be nonzero, and its top bit must be 0.  a or b must be
	 * <= c, and the implementation is tuned for b <= c.
	 *
	 * The comments about times are for use in calcru1() with units of
	 * microseconds for 'a' and stathz ticks at 128 Hz for b and c.
	 *
	 * Let n be the number of top zero bits in c.  Each iteration
	 * either returns, or reduces b by right shifting it by at least n.
	 * The number of iterations is at most 1 + 64 / n, and the error is
	 * at most the number of iterations.
	 *
	 * It is very unusual to need even 2 iterations.  Previous
	 * implementations overflowed essentially by returning early in the
	 * first iteration, with n = 38 giving overflow at 105+ hours and
	 * n = 32 giving overflow at 388+ days despite a more careful
	 * calculation.  388 days is a reasonable uptime, and the calculation
	 * needs to work for the uptime times the number of CPUs since 'a'
	 * is per-process.
	 */
	if (a >= (uint64_t)1 << 63)
		return (0);		/* Unsupported arg -- can't happen. */
	acc = 0;
	for (i = 0; i < 128; i++) {
		sa = flsll(a);
		sb = flsll(b);
		if (sa + sb <= 64)
			/* Up to 105 hours on first iteration. */
			return (acc + (a * b) / c);
		if (a >= c) {
			/*
			 * This reduction is based on a = q * c + r, with the
			 * remainder r < c.  'a' may be large to start, and
			 * moving bits from b into 'a' at the end of the loop
			 * sets the top bit of 'a', so the reduction makes
			 * significant progress.
			 */
			acc += (a / c) * b;
			a %= c;
			sa = flsll(a);
			if (sa + sb <= 64)
				/* Up to 388 days on first iteration. */
				return (acc + (a * b) / c);
		}

		/*
		 * This step writes a * b as a * ((bh << s) + bl) =
		 * a * (bh << s) + a * bl = (a << s) * bh + a * bl.  The 2
		 * additive terms are handled separately.  Splitting in
		 * this way is linear except for rounding errors.
		 *
		 * s = 64 - sa is the maximum such that a << s fits in 64
		 * bits.  Since a < c and c has at least 1 zero top bit,
		 * sa < 64 and s > 0.  Thus this step makes progress by
		 * reducing b (it increases 'a', but taking remainders on
		 * the next iteration completes the reduction).
		 *
		 * Finally, the choice for s is just what is needed to keep
		 * a * bl from overflowing, so we don't need complications
		 * like a recursive call mul64_by_fraction(a, bl, c) to
		 * handle the second additive term.
		 */
		s = 64 - sa;
		bh = b >> s;
		bl = b - (bh << s);
		acc += (a * bl) / c;
		a <<= s;
		b = bh;
	}
	return (0);		/* Algorithm failure -- can't happen. */
}
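/*
 * Worked example (added commentary, not in the original source) with
 * small assumed numbers: mul64_by_fraction(tu, ut, tt) computes
 * tu * ut / tt, e.g. tu = 3,000,000 usec of runtime split over
 * ut = 200 user ticks out of tt = 300 total ticks yields 2,000,000
 * usec of user time.  The naive (tu * ut) / tt only overflows once
 * tu * ut no longer fits in 64 bits (flsll(tu) + flsll(tt) > 64),
 * which is when the splitting loop above earns its keep.
 */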
static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	/* Subdivide tu.  Avoid overflow in the multiplications. */
	if (__predict_true(tu <= ((uint64_t)1 << 38) && tt <= (1 << 26))) {
		/* Up to 76 hours when stathz is 128. */
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	} else {
		uu = mul64_by_fraction(tu, ut, tt);
		su = mul64_by_fraction(tu, st, tt);
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else if (vm_guest == VM_GUEST_NO) {	/* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
sys_getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_STATLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_STATUNLOCK(p);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}
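/*
 * A note on the loop above (added commentary, not in the original
 * source): struct rusage defines ru_first and ru_last as aliases for
 * the first and last of its contiguous long fields, so the pointer
 * walk sums every field from ru_ixrss through ru_nivcsw in one pass.
 * ru_maxrss is a high-water mark rather than an accumulator, which is
 * why it is handled separately with a max instead of an addition.
 */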
void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td)
{

	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg_locked(struct proc *p, struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);

	ruxagg_ext_locked(&p->p_rux, td);
	ruxagg_ext_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(p, td);
	thread_unlock(td);
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_STATLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume that calcru() is executed only
 * after rufetch() has completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_STATLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_STATUNLOCK(p);
}
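/*
 * Added commentary (not in the original source) on the accounting flow
 * implemented above: per-thread tick counts (td_uticks and friends)
 * accumulate while a thread runs, and ruxagg() folds them into both
 * the process-wide p_rux and the thread's own td_rux before zeroing
 * the per-thread counters.  rufetch() then combines p_ru with each
 * live thread's td_ru, and calcru1() converts the aggregated ticks
 * into the user/system timevals reported by getrusage(2).
 */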
/*
 * Allocate a new resource limits structure and initialize its
 * reference count.
 */
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

struct plimit *
lim_cowsync(void)
{
	struct thread *td;
	struct proc *p;
	struct plimit *oldlimit;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (td->td_limit == p->p_limit)
		return (NULL);

	oldlimit = td->td_limit;
	td->td_limit = lim_hold(p->p_limit);

	return (oldlimit);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	PROC_LOCK_ASSERT(p1, MA_OWNED);
	PROC_LOCK_ASSERT(p2, MA_OWNED);

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
		    lim_cb, p2, C_PREL(1));
}

void
lim_free(struct plimit *limp)
{

	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

void
lim_freen(struct plimit *limp, int n)
{

	if (refcount_releasen(&limp->pl_refcnt, n))
		free((void *)limp, M_PLIMIT);
}

void
limbatch_add(struct limbatch *lb, struct thread *td)
{
	struct plimit *limp;

	MPASS(td->td_limit != NULL);
	limp = td->td_limit;

	if (lb->limp != limp) {
		if (lb->count != 0) {
			lim_freen(lb->limp, lb->count);
			lb->count = 0;
		}
		lb->limp = limp;
	}

	lb->count++;
}

void
limbatch_final(struct limbatch *lb)
{

	MPASS(lb->count != 0);
	lim_freen(lb->limp, lb->count);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt <= 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct thread *td, int which)
{
	struct rlimit rl;

	lim_rlimit(td, which, &rl);
	return (rl.rlim_max);
}

rlim_t
lim_max_proc(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit_proc(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
(lim_cur)(struct thread *td, int which)
{
	struct rlimit rl;

	lim_rlimit(td, which, &rl);
	return (rl.rlim_cur);
}

rlim_t
lim_cur_proc(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit_proc(p, which, &rl);
	return (rl.rlim_cur);
}
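/*
 * Added commentary (not in the original source) on the plimit life
 * cycle implemented above: fork shares the parent's plimit via
 * lim_hold(), so limits behave copy-on-write.  kern_proc_setrlimit()
 * never modifies a shared structure in place; it lim_copy()s into a
 * freshly allocated plimit, swaps p_limit, and drops the old
 * reference, while threads catch up with the new pointer through
 * lim_cowsync().  The parenthesized (lim_cur) definition is the usual
 * C idiom for suppressing macro expansion, so the out-of-line function
 * is emitted even where a macro version of lim_cur may be provided by
 * the headers.
 */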
/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct thread *td, int which, struct rlimit *rlp)
{
	struct proc *p = td->td_proc;

	MPASS(td == curthread);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = td->td_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
lim_rlimit_proc(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be locked.
 * Increase refcount on uidinfo struct returned.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid) {
			uihold(uip);
			break;
		}

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Returns with uidinfo struct referenced.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *new_uip, *uip;
	struct ucred *cred;

	cred = curthread->td_ucred;
	if (cred->cr_uidinfo->ui_uid == uid) {
		uip = cred->cr_uidinfo;
		uihold(uip);
		return (uip);
	} else if (cred->cr_ruidinfo->ui_uid == uid) {
		uip = cred->cr_ruidinfo;
		uihold(uip);
		return (uip);
	}

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	rw_runlock(&uihashtbl_lock);
	if (uip != NULL)
		return (uip);

	new_uip = malloc(sizeof(*new_uip), M_UIDINFO, M_WAITOK | M_ZERO);
	racct_create(&new_uip->ui_racct);
	refcount_init(&new_uip->ui_ref, 1);
	new_uip->ui_uid = uid;

	rw_wlock(&uihashtbl_lock);
	/*
	 * There's a chance someone created our uidinfo while we
	 * were in malloc and not holding the lock, so we have to
	 * make sure we don't insert a duplicate uidinfo.
	 */
	if ((uip = uilookup(uid)) == NULL) {
		LIST_INSERT_HEAD(UIHASH(uid), new_uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		uip = new_uip;
	} else {
		rw_wunlock(&uihashtbl_lock);
		racct_destroy(&new_uip->ui_racct);
		free(new_uip, M_UIDINFO);
	}
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}
/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in need to free, bump the count
 *   back up, drop the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(struct uidinfo *uip)
{

	if (refcount_release_if_not_last(&uip->ui_ref))
		return;

	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref) == 0) {
		rw_wunlock(&uihashtbl_lock);
		return;
	}

	racct_destroy(&uip->ui_racct);
	LIST_REMOVE(uip, ui_hash);
	rw_wunlock(&uihashtbl_lock);

	if (uip->ui_sbsize != 0)
		printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);
	if (uip->ui_vmsize != 0)
		printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
		    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
	free(uip, M_UIDINFO);
}

#ifdef RACCT
void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void (*pre)(void), void (*post)(void),
    void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	if (pre != NULL)
		(pre)();
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	if (post != NULL)
		(post)();
	rw_runlock(&uihashtbl_lock);
}
#endif

static inline int
chglimit(struct uidinfo *uip, long *limit, int diff, rlim_t max,
    const char *name)
{
	long new;

	/* Don't allow them to exceed max, but allow subtraction. */
	new = atomic_fetchadd_long(limit, (long)diff) + diff;
	if (diff > 0 && max != 0) {
		if (new < 0 || new > max) {
			atomic_subtract_long(limit, (long)diff);
			return (0);
		}
	} else if (new < 0)
		printf("negative %s for uid = %d\n", name, uip->ui_uid);
	return (1);
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_proccnt, diff, max, "proccnt"));
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff, rv;

	diff = to - *hiwat;
	if (diff > 0 && max == 0) {
		rv = 0;
	} else {
		rv = chglimit(uip, &uip->ui_sbsize, diff, max, "sbsize");
		if (rv != 0)
			*hiwat = to;
	}
	return (rv);
}

/*
 * Change the count associated with number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_ptscnt, diff, max, "ptscnt"));
}

int
chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_kqcnt, diff, max, "kqcnt"));
}

int
chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_umtxcnt, diff, max, "umtxcnt"));
}
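/*
 * Illustrative sketch (not part of the original source) of how the
 * chg*() helpers above are consumed; the surrounding fork-path logic
 * is an assumption for the example only.
 *
 *	// In a fork-like path, charge the new process against the
 *	// user's per-uid process limit and back out on failure:
 *	if (chgproccnt(uip, 1, lim_cur(td, RLIMIT_NPROC)) == 0) {
 *		// Over the per-uid limit: undo the charge (max 0
 *		// means "don't enforce", so the decrement can't fail)
 *		// and report failure.
 *		(void)chgproccnt(uip, -1, 0);
 *		return (EAGAIN);
 *	}
 */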