/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtxvar.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */
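/*
 * (Usage sketch, userland view, for illustration only: getpriority()
 * returns the lowest, i.e. most favorable, nice value among the matched
 * processes, e.g. getpriority(PRIO_USER, 0) scans every visible process
 * owned by the caller's effective uid.  Since nice values may
 * legitimately be negative, callers must clear errno before the call to
 * distinguish an error return from a nice value of -1.)
 */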
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{

	return (kern_getpriority(td, uap->which, uap->who));
}

int
kern_getpriority(struct thread *td, int which, int who)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{

	return (kern_setpriority(td, uap->which, uap->who, uap->prio));
}

int
kern_setpriority(struct thread *td, int which, int who, int prio)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a (whole) process.
 */
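/*
 * (For illustration: with default security settings, an unprivileged
 * process running at nice 5 may raise itself to nice 10, i.e. become
 * less favorable, but moving back down to 0 fails with EACCES unless
 * the thread holds PRIV_SCHED_SETPRIORITY, since lowering the nice
 * value grants more CPU.)
 */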
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}

static int unprivileged_idprio;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
    &unprivileged_idprio, 0,
    "Allow non-root users to set an idle priority (deprecated)");

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		td1 = tdfind(uap->lwpid, -1);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */

		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idleprio processes, there
		 * is a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need (and
		 * the idleprio process can't run due to a CPU-bound normal
		 * process).  Fix me!  XXX
		 *
		 * This problem is not only related to idleprio processes.
		 * A user level program can obtain a file lock and hold it
		 * indefinitely.  Additionally, without idleprio processes it
		 * is still conceivable that a program with low priority will
		 * never get to run.  In short, allowing this feature might
		 * make it easier to lock a resource indefinitely, but it is
		 * not the only thing that makes it possible.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
		    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
			break;
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0 &&
		    (error = priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
			break;
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
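	/*
	 * (Background, for readers: copyin() may fault on the user page
	 * and sleep, which is not permitted while holding the process
	 * mutex acquired below, hence the copy is done up front.)
	 */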
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * See the comment in sys_rtprio_thread about idprio
		 * threads holding a lock.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
		    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
			break;
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0 &&
		    (error = priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
			break;

		/*
		 * If we are setting our own priority, set just our
		 * thread, but if we are doing another process,
		 * do all the threads on that process.  If we
		 * specify our own pid we do the latter.
		 */
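		/*
		 * (Illustration: rtprio(RTP_SET, 0, &rtp) changes only the
		 * calling thread, while rtprio(RTP_SET, getpid(), &rtp)
		 * walks and changes every thread of the process.)
		 */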
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri, oldclass, oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	oldclass = td->td_pri_class;
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
	    td->td_pri_class != RTP_PRIO_NORMAL))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rl);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
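	/*
	 * (E.g., a 64-bit RLIM_INFINITY comes back as 0x7fffffff here, so
	 * old binaries cannot tell "unlimited" from a 2 GB - 1 limit.)
	 */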
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_STATLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_STATUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit_proc(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
		    lim_cb, p, C_PREL(1));
}

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{

	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
}

int
kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
    struct rlimit *limp)
{
	struct plimit *newlim, *oldlim, *oldlim_td;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
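	/*
	 * (Example: rlim_t is signed, so a caller passing rlim_cur = -1
	 * lands in the check below and is promoted to RLIM_INFINITY
	 * rather than being rejected.)
	 */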
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {
	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
			    lim_cb, p, C_PREL(1));
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UPDATE_COW(p);
	oldlim_td = NULL;
	if (td == curthread && PROC_COW_CHANGECOUNT(td, p) == 1) {
		oldlim_td = lim_cowsync();
		thread_cow_synced(td);
	}
	PROC_UNLOCK(p);
	if (oldlim_td != NULL) {
		MPASS(oldlim_td == oldlim);
		lim_freen(oldlim, 2);
	} else {
		lim_free(oldlim);
	}

	if (which == RLIMIT_STACK &&
	    /*
	     * Skip calls from exec_new_vmspace(), done when stack is
	     * not mapped yet.
	     */
	    (td != curthread || (p->p_flag & P_INEXEC) == 0)) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
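		/*
		 * (For instance, raising the soft limit from 8 MB to 16 MB
		 * re-enables sv_stackprot on the page range just below the
		 * previously accessible region; shrinking it instead maps
		 * that delta VM_PROT_NONE.)
		 */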
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = round_page(p->p_vmspace->vm_stacktop) -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = round_page(p->p_vmspace->vm_stacktop) -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, 0,
			    VM_MAP_PROTECT_SET_PROT);
		}
	}

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
sys_getrlimit(struct thread *td, struct getrlimit_args *uap)
{
	struct rlimit rlim;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rlim);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg_locked(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}

static uint64_t
mul64_by_fraction(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t acc, bh, bl;
	int i, s, sa, sb;

	/*
	 * Calculate (a * b) / c accurately enough without overflowing.
	 * c must be nonzero, and its top bit must be 0.  a or b must be
	 * <= c, and the implementation is tuned for b <= c.
	 *
	 * The comments about times are for use in calcru1() with units of
	 * microseconds for 'a' and stathz ticks at 128 Hz for b and c.
	 *
	 * Let n be the number of top zero bits in c.  Each iteration
	 * either returns, or reduces b by right shifting it by at least n.
	 * The number of iterations is at most 1 + 64 / n, and the error is
	 * at most the number of iterations.
	 *
	 * It is very unusual to need even 2 iterations.  Previous
	 * implementations overflowed essentially by returning early in the
	 * first iteration, with n = 38 giving overflow at 105+ hours and
	 * n = 32 giving overflow at 388+ days despite a more careful
	 * calculation.  388 days is a reasonable uptime, and the calculation
	 * needs to work for the uptime times the number of CPUs since 'a'
	 * is per-process.
	 */
	if (a >= (uint64_t)1 << 63)
		return (0);		/* Unsupported arg -- can't happen. */
	acc = 0;
	for (i = 0; i < 128; i++) {
		sa = flsll(a);
		sb = flsll(b);
		if (sa + sb <= 64)
			/* Up to 105 hours on first iteration. */
			return (acc + (a * b) / c);
		if (a >= c) {
			/*
			 * This reduction is based on a = q * c + r, with the
			 * remainder r < c.  'a' may be large to start, and
			 * moving bits from b into 'a' at the end of the loop
			 * sets the top bit of 'a', so the reduction makes
			 * significant progress.
			 */
			acc += (a / c) * b;
			a %= c;
			sa = flsll(a);
			if (sa + sb <= 64)
				/* Up to 388 days on first iteration. */
				return (acc + (a * b) / c);
		}

		/*
		 * This step writes a * b as a * ((bh << s) + bl) =
		 * a * (bh << s) + a * bl = (a << s) * bh + a * bl.  The 2
		 * additive terms are handled separately.  Splitting in
		 * this way is linear except for rounding errors.
		 *
		 * s = 64 - sa is the maximum such that a << s fits in 64
		 * bits.  Since a < c and c has at least 1 zero top bit,
		 * sa < 64 and s > 0.  Thus this step makes progress by
		 * reducing b (it increases 'a', but taking remainders on
		 * the next iteration completes the reduction).
		 *
		 * Finally, the choice for s is just what is needed to keep
		 * a * bl from overflowing, so we don't need complications
		 * like a recursive call mul64_by_fraction(a, bl, c) to
		 * handle the second additive term.
		 */
		s = 64 - sa;
		bh = b >> s;
		bl = b - (bh << s);
		acc += (a * bl) / c;
		a <<= s;
		b = bh;
	}
	return (0);		/* Algorithm failure -- can't happen. */
}

static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	/* Subdivide tu.  Avoid overflow in the multiplications. */
	if (__predict_true(tu <= ((uint64_t)1 << 38) && tt <= (1 << 26))) {
		/* Up to 76 hours when stathz is 128. */
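		/*
		 * (Worked bound, for illustration: tu <= 2^38 usec is about
		 * 76.4 hours and tt <= 2^26 stathz ticks is about 6 days at
		 * 128 Hz, so tu * ut and tu * st stay within 64 bits for
		 * all practical values.)
		 */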
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	} else {
		uu = mul64_by_fraction(tu, ut, tt);
		su = mul64_by_fraction(tu, st, tt);
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else if (vm_guest == VM_GUEST_NO) {	/* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
sys_getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_STATLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_STATUNLOCK(p);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td)
{

	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg_locked(struct proc *p, struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);

	ruxagg_ext_locked(&p->p_rux, td);
	ruxagg_ext_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(p, td);
	thread_unlock(td);
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_STATLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume that calcru() is executed only once
 * rufetch() has completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_STATLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_STATUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
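/*
 * (Lifecycle sketch, for illustration: every struct plimit consumer
 * pairs a lim_hold() with a lim_free(); the storage is returned to
 * M_PLIMIT only when the last reference is released.)
 */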
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

struct plimit *
lim_cowsync(void)
{
	struct thread *td;
	struct proc *p;
	struct plimit *oldlimit;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (td->td_limit == p->p_limit)
		return (NULL);

	oldlimit = td->td_limit;
	td->td_limit = lim_hold(p->p_limit);

	return (oldlimit);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	PROC_LOCK_ASSERT(p1, MA_OWNED);
	PROC_LOCK_ASSERT(p2, MA_OWNED);

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
		    lim_cb, p2, C_PREL(1));
}

void
lim_free(struct plimit *limp)
{

	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

void
lim_freen(struct plimit *limp, int n)
{

	if (refcount_releasen(&limp->pl_refcnt, n))
		free((void *)limp, M_PLIMIT);
}

void
limbatch_add(struct limbatch *lb, struct thread *td)
{
	struct plimit *limp;

	MPASS(td->td_limit != NULL);
	limp = td->td_limit;

	if (lb->limp != limp) {
		if (lb->count != 0) {
			lim_freen(lb->limp, lb->count);
			lb->count = 0;
		}
		lb->limp = limp;
	}

	lb->count++;
}

void
limbatch_final(struct limbatch *lb)
{

	MPASS(lb->count != 0);
	lim_freen(lb->limp, lb->count);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt <= 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct thread *td, int which)
{
	struct rlimit rl;

	lim_rlimit(td, which, &rl);
	return (rl.rlim_max);
}

rlim_t
lim_max_proc(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit_proc(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
(lim_cur)(struct thread *td, int which)
{
	struct rlimit rl;

	lim_rlimit(td, which, &rl);
	return (rl.rlim_cur);
}

rlim_t
lim_cur_proc(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit_proc(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
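/*
 * (E.g., lim_cur(curthread, RLIMIT_NOFILE) reads the soft descriptor
 * limit without taking any lock: td_limit is a reference private to
 * the thread, resynchronized from p_limit at copy-on-write sync
 * points such as lim_cowsync().)
 */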
void
lim_rlimit(struct thread *td, int which, struct rlimit *rlp)
{
	struct proc *p = td->td_proc;

	MPASS(td == curthread);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = td->td_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
lim_rlimit_proc(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be locked.
 * Increase refcount on uidinfo struct returned.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid) {
			uihold(uip);
			break;
		}

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Returns with uidinfo struct referenced.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *new_uip, *uip;
	struct ucred *cred;

	cred = curthread->td_ucred;
	if (cred->cr_uidinfo->ui_uid == uid) {
		uip = cred->cr_uidinfo;
		uihold(uip);
		return (uip);
	} else if (cred->cr_ruidinfo->ui_uid == uid) {
		uip = cred->cr_ruidinfo;
		uihold(uip);
		return (uip);
	}

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	rw_runlock(&uihashtbl_lock);
	if (uip != NULL)
		return (uip);

	new_uip = malloc(sizeof(*new_uip), M_UIDINFO, M_WAITOK | M_ZERO);
	racct_create(&new_uip->ui_racct);
	refcount_init(&new_uip->ui_ref, 1);
	new_uip->ui_uid = uid;

	rw_wlock(&uihashtbl_lock);
	/*
	 * There's a chance someone created our uidinfo while we
	 * were in malloc and not holding the lock, so we have to
	 * make sure we don't insert a duplicate uidinfo.
	 */
	if ((uip = uilookup(uid)) == NULL) {
		LIST_INSERT_HEAD(UIHASH(uid), new_uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		uip = new_uip;
	} else {
		rw_wunlock(&uihashtbl_lock);
		racct_destroy(&new_uip->ui_racct);
		free(new_uip, M_UIDINFO);
	}
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
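/*
 * (Fast path, for illustration: refcount_release_if_not_last()
 * succeeds without touching uihashtbl_lock whenever at least one
 * other reference remains; only the final release pays for the hash
 * write lock.)
 */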
void
uifree(struct uidinfo *uip)
{

	if (refcount_release_if_not_last(&uip->ui_ref))
		return;

	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref) == 0) {
		rw_wunlock(&uihashtbl_lock);
		return;
	}

	racct_destroy(&uip->ui_racct);
	LIST_REMOVE(uip, ui_hash);
	rw_wunlock(&uihashtbl_lock);

	if (uip->ui_sbsize != 0)
		printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);
	if (uip->ui_vmsize != 0)
		printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
		    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
	free(uip, M_UIDINFO);
}

#ifdef RACCT
void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void (*pre)(void), void (*post)(void),
    void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	if (pre != NULL)
		(pre)();
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	if (post != NULL)
		(post)();
	rw_runlock(&uihashtbl_lock);
}
#endif

static inline int
chglimit(struct uidinfo *uip, long *limit, int diff, rlim_t max,
    const char *name)
{
	long new;

	/* Don't allow them to exceed max, but allow subtraction. */
	new = atomic_fetchadd_long(limit, (long)diff) + diff;
	if (diff > 0 && max != 0) {
		if (new < 0 || new > max) {
			atomic_subtract_long(limit, (long)diff);
			return (0);
		}
	} else if (new < 0)
		printf("negative %s for uid = %d\n", name, uip->ui_uid);
	return (1);
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_proccnt, diff, max, "proccnt"));
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff, rv;

	diff = to - *hiwat;
	if (diff > 0 && max == 0) {
		rv = 0;
	} else {
		rv = chglimit(uip, &uip->ui_sbsize, diff, max, "sbsize");
		if (rv != 0)
			*hiwat = to;
	}
	return (rv);
}

/*
 * Change the count associated with number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_ptscnt, diff, max, "ptscnt"));
}

int
chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_kqcnt, diff, max, "kqcnt"));
}

int
chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_umtxcnt, diff, max, "umtxcnt"));
}