/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD$
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sx.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static int donice(struct proc *curp, struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct mtx uihashtbl_mtx;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo *uilookup(uid_t uid);

/*
 * Resource controls and accounting.
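 * This file implements the getpriority/setpriority, rtprio and
 * [gs]etrlimit system calls, rusage accounting, and the per-uid
 * resource tracking (uidinfo) used to enforce per-user limits.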
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
/*
 * MPSAFE
 */
int
getpriority(td, uap)
	struct thread *td;
	register struct getpriority_args *uap;
{
	struct proc *curp = td->td_proc;
	register struct proc *p;
	register int low = PRIO_MAX + 1;
	int error = 0;

	mtx_lock(&Giant);

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_ksegrp->kg_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(curp, p) == 0)
				low = p->p_ksegrp.kg_nice /* XXXKSE */ ;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP: {
		register struct pgrp *pg;

		PGRPSESS_SLOCK();
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				PGRPSESS_SUNLOCK();
				break;
			}
		}
		PGRPSESS_SUNLOCK();
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(curp, p) &&
			    p->p_ksegrp.kg_nice /* XXXKSE */ < low)
				low = p->p_ksegrp.kg_nice /* XXXKSE */ ;
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;
	}

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list)
			if (!p_cansee(curp, p) &&
			    p->p_ucred->cr_uid == uap->who &&
			    p->p_ksegrp.kg_nice /* XXXKSE */ < low)
				low = p->p_ksegrp.kg_nice /* XXXKSE */ ;
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
setpriority(td, uap)
	struct thread *td;
	register struct setpriority_args *uap;
{
	struct proc *curp = td->td_proc;
	register struct proc *p;
	int found = 0, error = 0;

	mtx_lock(&Giant);

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			error = donice(curp, curp, uap->prio);
		else {
			p = pfind(uap->who);
			if (p == 0)
				break;
			if (p_cansee(curp, p) == 0)
				error = donice(curp, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP: {
		register struct pgrp *pg;

		PGRPSESS_SLOCK();
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				PGRPSESS_SUNLOCK();
				break;
			}
		}
		PGRPSESS_SUNLOCK();
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(curp, p)) {
				error = donice(curp, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;
	}

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			if (p->p_ucred->cr_uid == uap->who &&
			    !p_cansee(curp, p)) {
				error = donice(curp, p, uap->prio);
				found++;
			}
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	mtx_unlock(&Giant);
	return (error);
}

static int
donice(curp, chgp, n)
	register struct proc *curp, *chgp;
	register int n;
{
	int error;

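	/* Check that curp is allowed to change chgp's scheduling parameters. */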
	if ((error = p_cansched(curp, chgp)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < chgp->p_ksegrp.kg_nice /* XXXKSE */ &&
	    suser_xxx(curp->p_ucred, NULL, 0))
		return (EACCES);
	chgp->p_ksegrp.kg_nice /* XXXKSE */ = n;
	(void)resetpriority(&chgp->p_ksegrp); /* XXXKSE */
	return (0);
}

/* rtprio system call */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif

/*
 * Set realtime priority
 */

/*
 * MPSAFE
 */
/* ARGSUSED */
int
rtprio(td, uap)
	struct thread *td;
	register struct rtprio_args *uap;
{
	struct proc *curp = td->td_proc;
	register struct proc *p;
	struct rtprio rtp;
	int error;

	mtx_lock(&Giant);

	if (uap->pid == 0) {
		p = curp;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
	}

	if (p == NULL) {
		error = ESRCH;
		goto done2;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(curp, p)))
			break;
		mtx_lock_spin(&sched_lock);
		pri_to_rtp(&p->p_ksegrp /* XXXKSE */ , &rtp);
		mtx_unlock_spin(&sched_lock);
		error = copyout(&rtp, uap->rtp, sizeof(struct rtprio));
		break;
	case RTP_SET:
		if ((error = p_cansched(curp, p)) ||
		    (error = copyin(uap->rtp, &rtp, sizeof(struct rtprio))))
			break;
		/* disallow setting rtprio in most cases if not superuser */
		if (suser_xxx(curp->p_ucred, NULL, 0) != 0) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/* can't set realtime priority */
			/*
			 * Realtime priority has to be restricted for reasons
			 * which should be obvious.  However, for idle priority,
			 * there is a potential for system deadlock if an
			 * idleprio process gains a lock on a resource that
			 * other processes need (and the idleprio process can't
			 * run due to a CPU-bound normal process).  Fix me!
			 * XXX
			 */
#if 0
			if (RTP_PRIO_IS_REALTIME(rtp.type))
#endif
			if (rtp.type != RTP_PRIO_NORMAL) {
				error = EPERM;
				break;
			}
		}
		mtx_lock_spin(&sched_lock);
		error = rtp_to_pri(&rtp, &p->p_ksegrp);
		mtx_unlock_spin(&sched_lock);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
{

	if (rtp->prio > RTP_PRIO_MAX)
		return (EINVAL);
	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}
	kg->kg_pri_class = rtp->type;
	if (curthread->td_ksegrp == kg) {
		curthread->td_base_pri = kg->kg_user_pri;
		curthread->td_priority = kg->kg_user_pri;	/* XXX dubious */
	}
	return (0);
}

void
pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
{

	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = kg->kg_pri_class;
}

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
osetrlimit(td, uap)
	struct thread *td;
	register struct osetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error =
	    copyin((caddr_t)uap->rlp, (caddr_t)&olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	mtx_lock(&Giant);
	error = dosetrlimit(td, uap->which, &lim);
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
ogetrlimit(td, uap)
	struct thread *td;
	register struct ogetrlimit_args *uap;
{
	struct proc *p = td->td_proc;
	struct orlimit olim;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	mtx_lock(&Giant);
	olim.rlim_cur = p->p_rlimit[uap->which].rlim_cur;
	if (olim.rlim_cur == -1)
		olim.rlim_cur = 0x7fffffff;
	olim.rlim_max = p->p_rlimit[uap->which].rlim_max;
	if (olim.rlim_max == -1)
		olim.rlim_max = 0x7fffffff;
	error = copyout((caddr_t)&olim, (caddr_t)uap->rlp, sizeof(olim));
	mtx_unlock(&Giant);
	return (error);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
setrlimit(td, uap)
	struct thread *td;
	register struct __setrlimit_args *uap;
{
	struct rlimit alim;
	int error;

	if ((error =
	    copyin((caddr_t)uap->rlp, (caddr_t)&alim, sizeof (struct rlimit))))
		return (error);
	mtx_lock(&Giant);
	error = dosetrlimit(td, uap->which, &alim);
	mtx_unlock(&Giant);
	return (error);
}

int
dosetrlimit(td, which, limp)
	struct thread *td;
	u_int which;
	struct rlimit *limp;
{
	struct proc *p = td->td_proc;
	register struct rlimit *alimp;
	int error;

	GIANT_REQUIRED;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);
	alimp = &p->p_rlimit[which];

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser_xxx(0, p, PRISON_ROOT)))
			return (error);
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	if (p->p_limit->p_refcnt > 1 &&
	    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		p->p_limit->p_refcnt--;
		p->p_limit = limcopy(p->p_limit);
		alimp = &p->p_rlimit[which];
	}

	switch (which) {

	case RLIMIT_CPU:
		if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
			p->p_limit->p_cpulimit = RLIM_INFINITY;
		else
			p->p_limit->p_cpulimit =
			    (rlim_t)1000000 * limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != alimp->rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_ALL;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = USRSTACK - limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = USRSTACK - alimp->rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr+size, prot, FALSE);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	*alimp = *limp;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
getrlimit(td, uap)
	struct thread *td;
	register struct __getrlimit_args *uap;
{
	int error;
	struct proc *p = td->td_proc;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	mtx_lock(&Giant);
	error = copyout((caddr_t)&p->p_rlimit[uap->which], (caddr_t)uap->rlp,
	    sizeof (struct rlimit));
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
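 * The total run time is apportioned among the three categories in
 * proportion to the statclock ticks each KSE has accumulated in user,
 * system and interrupt mode.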
 */
void
calcru(p, up, sp, ip)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
	struct timeval *ip;
{
	/* {user, system, interrupt, total} {ticks, usec}; previous tu: */
	u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;
	u_int64_t uut = 0, sut = 0, iut = 0;
	int s;
	struct timeval tv;
	struct bintime bt;
	struct kse *ke;
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	/* XXX: why spl-protect ?  worst case is an off-by-one report */

	FOREACH_KSEGRP_IN_PROC(p, kg) {
		/* we could accumulate per ksegrp and per process here */
		FOREACH_KSE_IN_GROUP(kg, ke) {
			s = splstatclock();
			ut = ke->ke_uticks;
			st = ke->ke_sticks;
			it = ke->ke_iticks;
			splx(s);

			tt = ut + st + it;
			if (tt == 0) {
				st = 1;
				tt = 1;
			}

			if (ke == curthread->td_kse) {
				/*
				 * Adjust for the current time slice.  This is
				 * actually fairly important since the error
				 * here is on the order of a time quantum,
				 * which is much greater than the sampling
				 * error.
				 * XXXKSE use a different test due to threads
				 * on other processors also being 'current'.
				 */

				binuptime(&bt);
				bintime_sub(&bt, PCPU_PTR(switchtime));
				bintime_add(&bt, &p->p_runtime);
			} else {
				bt = p->p_runtime;
			}
			bintime2timeval(&bt, &tv);
			tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
			ptu = ke->ke_uu + ke->ke_su + ke->ke_iu;
			if (tu < ptu || (int64_t)tu < 0) {
				/* XXX no %qd in kernel.  Truncate. */
				printf("calcru: negative time of %ld usec for pid %d (%s)\n",
				    (long)tu, p->p_pid, p->p_comm);
				tu = ptu;
			}

			/* Subdivide tu. */
			uu = (tu * ut) / tt;
			su = (tu * st) / tt;
			iu = tu - uu - su;

			/*
			 * Enforce monotonicity.
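			 * The values reported for this KSE must never drop
			 * below the previously reported ke_uu, ke_su and
			 * ke_iu, or a later calcru() call could appear to
			 * report time running backwards.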
			 */
			if (uu < ke->ke_uu || su < ke->ke_su || iu < ke->ke_iu) {
				if (uu < ke->ke_uu)
					uu = ke->ke_uu;
				else if (uu + ke->ke_su + ke->ke_iu > tu)
					uu = tu - ke->ke_su - ke->ke_iu;
				if (st == 0)
					su = ke->ke_su;
				else {
					su = ((tu - uu) * st) / (st + it);
					if (su < ke->ke_su)
						su = ke->ke_su;
					else if (uu + su + ke->ke_iu > tu)
						su = tu - uu - ke->ke_iu;
				}
				KASSERT(uu + su + ke->ke_iu <= tu,
				    ("calcru: monotonisation botch 1"));
				iu = tu - uu - su;
				KASSERT(iu >= ke->ke_iu,
				    ("calcru: monotonisation botch 2"));
			}
			ke->ke_uu = uu;
			ke->ke_su = su;
			ke->ke_iu = iu;
			uut += uu;
			sut += su;
			iut += iu;

		} /* end kse loop */
	} /* end kseg loop */
	up->tv_sec = uut / 1000000;
	up->tv_usec = uut % 1000000;
	sp->tv_sec = sut / 1000000;
	sp->tv_usec = sut % 1000000;
	if (ip != NULL) {
		ip->tv_sec = iut / 1000000;
		ip->tv_usec = iut % 1000000;
	}
}

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
getrusage(td, uap)
	register struct thread *td;
	register struct getrusage_args *uap;
{
	struct proc *p = td->td_proc;
	register struct rusage *rup;
	int error = 0;

	mtx_lock(&Giant);

	switch (uap->who) {
	case RUSAGE_SELF:
		rup = &p->p_stats->p_ru;
		mtx_lock_spin(&sched_lock);
		calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
		mtx_unlock_spin(&sched_lock);
		break;

	case RUSAGE_CHILDREN:
		rup = &p->p_stats->p_cru;
		break;

	default:
		rup = NULL;
		error = EINVAL;
		break;
	}
	mtx_unlock(&Giant);
	if (error == 0) {
		error = copyout((caddr_t)rup, (caddr_t)uap->rusage,
		    sizeof (struct rusage));
	}
	return (error);
}

void
ruadd(ru, ru2)
	register struct rusage *ru, *ru2;
{
	register long *ip, *ip2;
	register int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
limcopy(lim)
	struct plimit *lim;
{
	register struct plimit *copy;

	MALLOC(copy, struct plimit *, sizeof(struct plimit),
	    M_SUBPROC, M_WAITOK);
	bcopy(lim->pl_rlimit, copy->pl_rlimit, sizeof(struct plimit));
	copy->p_lflags = 0;
	copy->p_refcnt = 1;
	return (copy);
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit()
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	mtx_init(&uihashtbl_mtx, "uidinfo hash", MTX_DEF);
}

/*
 * lookup a uidinfo struct for the parameter uid.
 * uihashtbl_mtx must be locked.
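 * Returns NULL if no uidinfo exists for the given uid.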
 */
static struct uidinfo *
uilookup(uid)
	uid_t uid;
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	mtx_assert(&uihashtbl_mtx, MA_OWNED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid)
	uid_t uid;
{
	struct uidinfo *uip;

	mtx_lock(&uihashtbl_mtx);
	uip = uilookup(uid);
	if (uip == NULL) {
		struct uidinfo *old_uip;

		mtx_unlock(&uihashtbl_mtx);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		mtx_lock(&uihashtbl_mtx);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			uip->ui_mtxp = mtx_pool_alloc();
			uip->ui_uid = uid;
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	mtx_unlock(&uihashtbl_mtx);
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(uip)
	struct uidinfo *uip;
{

	UIDINFO_LOCK(uip);
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in a need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(uip)
	struct uidinfo *uip;
{

	/* Prepare for optimal case. */
	UIDINFO_LOCK(uip);

	if (--uip->ui_ref != 0) {
		UIDINFO_UNLOCK(uip);
		return;
	}

	/* Prepare for suboptimal case. */
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
	mtx_lock(&uihashtbl_mtx);
	UIDINFO_LOCK(uip);

	/*
	 * We must subtract one from the count again because we backed out
	 * our initial subtraction before dropping the lock.
	 * Since another thread may have added a reference after we dropped
	 * the initial lock we have to test for zero again.
	 */
	if (--uip->ui_ref == 0) {
		LIST_REMOVE(uip, ui_hash);
		mtx_unlock(&uihashtbl_mtx);
		if (uip->ui_sbsize != 0)
			/* XXX no %qd in kernel.  Truncate. */
			printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			    uip->ui_uid, (long)uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		UIDINFO_UNLOCK(uip);
		FREE(uip, M_UIDINFO);
		return;
	}

	mtx_unlock(&uihashtbl_mtx);
	UIDINFO_UNLOCK(uip);
}

/*
 * Change the count associated with number of processes
 * a given user is using.
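 * Return 0 if the change would push the count past 'max', 1 otherwise.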
 * When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(uip, diff, max)
	struct uidinfo *uip;
	int diff;
	int max;
{

	UIDINFO_LOCK(uip);
	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_proccnt += diff;
	if (uip->ui_proccnt < 0)
		printf("negative proccnt for uid = %d\n", uip->ui_uid);
	UIDINFO_UNLOCK(uip);
	return (1);
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(uip, hiwat, to, max)
	struct uidinfo *uip;
	u_long *hiwat;
	u_long to;
	rlim_t max;
{
	rlim_t new;
	int s;

	s = splnet();
	UIDINFO_LOCK(uip);
	new = uip->ui_sbsize + to - *hiwat;
	/* don't allow them to exceed max, but allow subtraction */
	if (to > *hiwat && new > max) {
		splx(s);
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_sbsize = new;
	*hiwat = to;
	if (uip->ui_sbsize < 0)
		printf("negative sbsize for uid = %d\n", uip->ui_uid);
	splx(s);
	UIDINFO_UNLOCK(uip);
	return (1);
}