1 /*- 2 * Copyright (c) 1982, 1986, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)kern_resource.c 8.5 (Berkeley) 1/21/94 39 * $FreeBSD$ 40 */ 41 42 #include "opt_compat.h" 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/sysproto.h> 47 #include <sys/file.h> 48 #include <sys/kernel.h> 49 #include <sys/lock.h> 50 #include <sys/malloc.h> 51 #include <sys/mutex.h> 52 #include <sys/proc.h> 53 #include <sys/resourcevar.h> 54 #include <sys/sched.h> 55 #include <sys/sx.h> 56 #include <sys/sysent.h> 57 #include <sys/time.h> 58 59 #include <vm/vm.h> 60 #include <vm/vm_param.h> 61 #include <vm/pmap.h> 62 #include <vm/vm_map.h> 63 64 static int donice(struct thread *td, struct proc *chgp, int n); 65 66 static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures"); 67 #define UIHASH(uid) (&uihashtbl[(uid) & uihash]) 68 static struct mtx uihashtbl_mtx; 69 static LIST_HEAD(uihashhead, uidinfo) *uihashtbl; 70 static u_long uihash; /* size of hash table - 1 */ 71 72 static struct uidinfo *uilookup(uid_t uid); 73 74 /* 75 * Resource controls and accounting. 76 */ 77 78 #ifndef _SYS_SYSPROTO_H_ 79 struct getpriority_args { 80 int which; 81 int who; 82 }; 83 #endif 84 /* 85 * MPSAFE 86 */ 87 int 88 getpriority(td, uap) 89 struct thread *td; 90 register struct getpriority_args *uap; 91 { 92 struct proc *p; 93 int low = PRIO_MAX + 1; 94 int error = 0; 95 struct ksegrp *kg; 96 97 mtx_lock(&Giant); 98 99 switch (uap->which) { 100 case PRIO_PROCESS: 101 if (uap->who == 0) 102 low = td->td_ksegrp->kg_nice; 103 else { 104 p = pfind(uap->who); 105 if (p == NULL) 106 break; 107 if (p_cansee(td, p) == 0) { 108 FOREACH_KSEGRP_IN_PROC(p, kg) { 109 if (kg->kg_nice < low) 110 low = kg->kg_nice; 111 } 112 } 113 PROC_UNLOCK(p); 114 } 115 break; 116 117 case PRIO_PGRP: { 118 register struct pgrp *pg; 119 120 sx_slock(&proctree_lock); 121 if (uap->who == 0) { 122 pg = td->td_proc->p_pgrp; 123 PGRP_LOCK(pg); 124 } else { 125 pg = pgfind(uap->who); 126 if (pg == NULL) { 127 sx_sunlock(&proctree_lock); 128 break; 129 } 130 } 131 sx_sunlock(&proctree_lock); 132 LIST_FOREACH(p, &pg->pg_members, p_pglist) { 133 PROC_LOCK(p); 134 if (!p_cansee(td, p)) { 135 FOREACH_KSEGRP_IN_PROC(p, kg) { 136 if (kg->kg_nice < low) 137 low = kg->kg_nice; 138 } 139 } 140 PROC_UNLOCK(p); 141 } 142 PGRP_UNLOCK(pg); 143 break; 144 } 145 146 case PRIO_USER: 147 if (uap->who == 0) 148 uap->who = td->td_ucred->cr_uid; 149 sx_slock(&allproc_lock); 150 LIST_FOREACH(p, &allproc, p_list) { 151 PROC_LOCK(p); 152 if (!p_cansee(td, p) && 153 p->p_ucred->cr_uid == uap->who) { 154 FOREACH_KSEGRP_IN_PROC(p, kg) { 155 if (kg->kg_nice < low) 156 low = kg->kg_nice; 157 } 158 } 159 PROC_UNLOCK(p); 160 } 161 sx_sunlock(&allproc_lock); 162 break; 163 164 default: 165 error = EINVAL; 166 break; 167 } 168 if (low == PRIO_MAX + 1 && error == 0) 169 error = ESRCH; 170 td->td_retval[0] = low; 171 mtx_unlock(&Giant); 172 return (error); 173 } 174 175 #ifndef _SYS_SYSPROTO_H_ 176 struct setpriority_args { 177 int which; 178 int who; 179 int prio; 180 }; 181 #endif 182 /* 183 * MPSAFE 184 */ 185 /* ARGSUSED */ 186 int 187 setpriority(td, uap) 188 struct thread *td; 189 register struct setpriority_args *uap; 190 { 191 struct proc *curp = td->td_proc; 192 register struct proc *p; 193 int found = 0, error = 0; 194 195 mtx_lock(&Giant); 196 197 switch (uap->which) { 198 case PRIO_PROCESS: 199 if (uap->who == 0) { 200 PROC_LOCK(curp); 201 error = donice(td, curp, uap->prio); 202 PROC_UNLOCK(curp); 203 } else { 204 p = pfind(uap->who); 205 if (p == 0) 206 break; 207 if (p_cansee(td, p) == 0) 208 error = donice(td, p, uap->prio); 209 PROC_UNLOCK(p); 210 } 211 found++; 212 break; 213 214 case PRIO_PGRP: { 215 register struct pgrp *pg; 216 217 sx_slock(&proctree_lock); 218 if (uap->who == 0) { 219 pg = curp->p_pgrp; 220 PGRP_LOCK(pg); 221 } else { 222 pg = pgfind(uap->who); 223 if (pg == NULL) { 224 sx_sunlock(&proctree_lock); 225 break; 226 } 227 } 228 sx_sunlock(&proctree_lock); 229 LIST_FOREACH(p, &pg->pg_members, p_pglist) { 230 PROC_LOCK(p); 231 if (!p_cansee(td, p)) { 232 error = donice(td, p, uap->prio); 233 found++; 234 } 235 PROC_UNLOCK(p); 236 } 237 PGRP_UNLOCK(pg); 238 break; 239 } 240 241 case PRIO_USER: 242 if (uap->who == 0) 243 uap->who = td->td_ucred->cr_uid; 244 sx_slock(&allproc_lock); 245 FOREACH_PROC_IN_SYSTEM(p) { 246 PROC_LOCK(p); 247 if (p->p_ucred->cr_uid == uap->who && 248 !p_cansee(td, p)) { 249 error = donice(td, p, uap->prio); 250 found++; 251 } 252 PROC_UNLOCK(p); 253 } 254 sx_sunlock(&allproc_lock); 255 break; 256 257 default: 258 error = EINVAL; 259 break; 260 } 261 if (found == 0 && error == 0) 262 error = ESRCH; 263 mtx_unlock(&Giant); 264 return (error); 265 } 266 267 /* 268 * Set "nice" for a process. Doesn't really understand threaded processes well 269 * but does try. Has the unfortunate side effect of making all the NICE 270 * values for a process's ksegrps the same.. This suggests that 271 * NICE valuse should be stored as a process nice and deltas for the ksegrps. 272 * (but not yet). 273 */ 274 static int 275 donice(struct thread *td, struct proc *p, int n) 276 { 277 int error; 278 int low = PRIO_MAX + 1; 279 struct ksegrp *kg; 280 281 PROC_LOCK_ASSERT(p, MA_OWNED); 282 if ((error = p_cansched(td, p))) 283 return (error); 284 if (n > PRIO_MAX) 285 n = PRIO_MAX; 286 if (n < PRIO_MIN) 287 n = PRIO_MIN; 288 /* 289 * Only allow nicing if to more than the lowest nice. 290 * e.g. nices of 4,3,2 allow nice to 3 but not 1 291 */ 292 FOREACH_KSEGRP_IN_PROC(p, kg) { 293 if (kg->kg_nice < low) 294 low = kg->kg_nice; 295 } 296 if (n < low && suser(td)) 297 return (EACCES); 298 FOREACH_KSEGRP_IN_PROC(p, kg) { 299 sched_nice(kg, n); 300 } 301 return (0); 302 } 303 304 /* rtprio system call */ 305 #ifndef _SYS_SYSPROTO_H_ 306 struct rtprio_args { 307 int function; 308 pid_t pid; 309 struct rtprio *rtp; 310 }; 311 #endif 312 313 /* 314 * Set realtime priority 315 */ 316 317 /* 318 * MPSAFE 319 */ 320 /* ARGSUSED */ 321 int 322 rtprio(td, uap) 323 struct thread *td; 324 register struct rtprio_args *uap; 325 { 326 struct proc *curp = td->td_proc; 327 register struct proc *p; 328 struct rtprio rtp; 329 int error, cierror = 0; 330 331 /* Perform copyin before acquiring locks if needed. */ 332 if (uap->function == RTP_SET) 333 cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio)); 334 335 if (uap->pid == 0) { 336 p = curp; 337 PROC_LOCK(p); 338 } else { 339 p = pfind(uap->pid); 340 if (p == NULL) 341 return (ESRCH); 342 } 343 344 switch (uap->function) { 345 case RTP_LOOKUP: 346 if ((error = p_cansee(td, p))) 347 break; 348 mtx_lock_spin(&sched_lock); 349 pri_to_rtp(FIRST_KSEGRP_IN_PROC(p), &rtp); 350 mtx_unlock_spin(&sched_lock); 351 PROC_UNLOCK(p); 352 return (copyout(&rtp, uap->rtp, sizeof(struct rtprio))); 353 case RTP_SET: 354 if ((error = p_cansched(td, p)) || (error = cierror)) 355 break; 356 /* disallow setting rtprio in most cases if not superuser */ 357 if (suser(td) != 0) { 358 /* can't set someone else's */ 359 if (uap->pid) { 360 error = EPERM; 361 break; 362 } 363 /* can't set realtime priority */ 364 /* 365 * Realtime priority has to be restricted for reasons which should be 366 * obvious. However, for idle priority, there is a potential for 367 * system deadlock if an idleprio process gains a lock on a resource 368 * that other processes need (and the idleprio process can't run 369 * due to a CPU-bound normal process). Fix me! XXX 370 */ 371 #if 0 372 if (RTP_PRIO_IS_REALTIME(rtp.type)) 373 #endif 374 if (rtp.type != RTP_PRIO_NORMAL) { 375 error = EPERM; 376 break; 377 } 378 } 379 mtx_lock_spin(&sched_lock); 380 error = rtp_to_pri(&rtp, FIRST_KSEGRP_IN_PROC(p)); 381 mtx_unlock_spin(&sched_lock); 382 break; 383 default: 384 error = EINVAL; 385 break; 386 } 387 PROC_UNLOCK(p); 388 return (error); 389 } 390 391 int 392 rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg) 393 { 394 395 if (rtp->prio > RTP_PRIO_MAX) 396 return (EINVAL); 397 switch (RTP_PRIO_BASE(rtp->type)) { 398 case RTP_PRIO_REALTIME: 399 kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio; 400 break; 401 case RTP_PRIO_NORMAL: 402 kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio; 403 break; 404 case RTP_PRIO_IDLE: 405 kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio; 406 break; 407 default: 408 return (EINVAL); 409 } 410 kg->kg_pri_class = rtp->type; 411 if (curthread->td_ksegrp == kg) { 412 curthread->td_base_pri = kg->kg_user_pri; 413 curthread->td_priority = kg->kg_user_pri; /* XXX dubious */ 414 } 415 return (0); 416 } 417 418 void 419 pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp) 420 { 421 422 switch (PRI_BASE(kg->kg_pri_class)) { 423 case PRI_REALTIME: 424 rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME; 425 break; 426 case PRI_TIMESHARE: 427 rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE; 428 break; 429 case PRI_IDLE: 430 rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE; 431 break; 432 default: 433 break; 434 } 435 rtp->type = kg->kg_pri_class; 436 } 437 438 #if defined(COMPAT_43) || defined(COMPAT_SUNOS) 439 #ifndef _SYS_SYSPROTO_H_ 440 struct osetrlimit_args { 441 u_int which; 442 struct orlimit *rlp; 443 }; 444 #endif 445 /* 446 * MPSAFE 447 */ 448 /* ARGSUSED */ 449 int 450 osetrlimit(td, uap) 451 struct thread *td; 452 register struct osetrlimit_args *uap; 453 { 454 struct orlimit olim; 455 struct rlimit lim; 456 int error; 457 458 if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit)))) 459 return (error); 460 lim.rlim_cur = olim.rlim_cur; 461 lim.rlim_max = olim.rlim_max; 462 mtx_lock(&Giant); 463 error = dosetrlimit(td, uap->which, &lim); 464 mtx_unlock(&Giant); 465 return (error); 466 } 467 468 #ifndef _SYS_SYSPROTO_H_ 469 struct ogetrlimit_args { 470 u_int which; 471 struct orlimit *rlp; 472 }; 473 #endif 474 /* 475 * MPSAFE 476 */ 477 /* ARGSUSED */ 478 int 479 ogetrlimit(td, uap) 480 struct thread *td; 481 register struct ogetrlimit_args *uap; 482 { 483 struct proc *p = td->td_proc; 484 struct orlimit olim; 485 int error; 486 487 if (uap->which >= RLIM_NLIMITS) 488 return (EINVAL); 489 mtx_lock(&Giant); 490 olim.rlim_cur = p->p_rlimit[uap->which].rlim_cur; 491 if (olim.rlim_cur == -1) 492 olim.rlim_cur = 0x7fffffff; 493 olim.rlim_max = p->p_rlimit[uap->which].rlim_max; 494 if (olim.rlim_max == -1) 495 olim.rlim_max = 0x7fffffff; 496 error = copyout(&olim, uap->rlp, sizeof(olim)); 497 mtx_unlock(&Giant); 498 return (error); 499 } 500 #endif /* COMPAT_43 || COMPAT_SUNOS */ 501 502 #ifndef _SYS_SYSPROTO_H_ 503 struct __setrlimit_args { 504 u_int which; 505 struct rlimit *rlp; 506 }; 507 #endif 508 /* 509 * MPSAFE 510 */ 511 /* ARGSUSED */ 512 int 513 setrlimit(td, uap) 514 struct thread *td; 515 register struct __setrlimit_args *uap; 516 { 517 struct rlimit alim; 518 int error; 519 520 if ((error = copyin(uap->rlp, &alim, sizeof (struct rlimit)))) 521 return (error); 522 mtx_lock(&Giant); 523 error = dosetrlimit(td, uap->which, &alim); 524 mtx_unlock(&Giant); 525 return (error); 526 } 527 528 int 529 dosetrlimit(td, which, limp) 530 struct thread *td; 531 u_int which; 532 struct rlimit *limp; 533 { 534 struct proc *p = td->td_proc; 535 register struct rlimit *alimp; 536 int error; 537 538 GIANT_REQUIRED; 539 540 if (which >= RLIM_NLIMITS) 541 return (EINVAL); 542 alimp = &p->p_rlimit[which]; 543 544 /* 545 * Preserve historical bugs by treating negative limits as unsigned. 546 */ 547 if (limp->rlim_cur < 0) 548 limp->rlim_cur = RLIM_INFINITY; 549 if (limp->rlim_max < 0) 550 limp->rlim_max = RLIM_INFINITY; 551 552 if (limp->rlim_cur > alimp->rlim_max || 553 limp->rlim_max > alimp->rlim_max) 554 if ((error = suser_cred(td->td_ucred, PRISON_ROOT))) 555 return (error); 556 if (limp->rlim_cur > limp->rlim_max) 557 limp->rlim_cur = limp->rlim_max; 558 if (p->p_limit->p_refcnt > 1 && 559 (p->p_limit->p_lflags & PL_SHAREMOD) == 0) { 560 p->p_limit->p_refcnt--; 561 p->p_limit = limcopy(p->p_limit); 562 alimp = &p->p_rlimit[which]; 563 } 564 565 switch (which) { 566 567 case RLIMIT_CPU: 568 mtx_lock_spin(&sched_lock); 569 p->p_cpulimit = limp->rlim_cur; 570 mtx_unlock_spin(&sched_lock); 571 break; 572 case RLIMIT_DATA: 573 if (limp->rlim_cur > maxdsiz) 574 limp->rlim_cur = maxdsiz; 575 if (limp->rlim_max > maxdsiz) 576 limp->rlim_max = maxdsiz; 577 break; 578 579 case RLIMIT_STACK: 580 if (limp->rlim_cur > maxssiz) 581 limp->rlim_cur = maxssiz; 582 if (limp->rlim_max > maxssiz) 583 limp->rlim_max = maxssiz; 584 /* 585 * Stack is allocated to the max at exec time with only 586 * "rlim_cur" bytes accessible. If stack limit is going 587 * up make more accessible, if going down make inaccessible. 588 */ 589 if (limp->rlim_cur != alimp->rlim_cur) { 590 vm_offset_t addr; 591 vm_size_t size; 592 vm_prot_t prot; 593 594 if (limp->rlim_cur > alimp->rlim_cur) { 595 prot = p->p_sysent->sv_stackprot; 596 size = limp->rlim_cur - alimp->rlim_cur; 597 addr = p->p_sysent->sv_usrstack - 598 limp->rlim_cur; 599 } else { 600 prot = VM_PROT_NONE; 601 size = alimp->rlim_cur - limp->rlim_cur; 602 addr = p->p_sysent->sv_usrstack - 603 alimp->rlim_cur; 604 } 605 addr = trunc_page(addr); 606 size = round_page(size); 607 (void) vm_map_protect(&p->p_vmspace->vm_map, 608 addr, addr+size, prot, FALSE); 609 } 610 break; 611 612 case RLIMIT_NOFILE: 613 if (limp->rlim_cur > maxfilesperproc) 614 limp->rlim_cur = maxfilesperproc; 615 if (limp->rlim_max > maxfilesperproc) 616 limp->rlim_max = maxfilesperproc; 617 break; 618 619 case RLIMIT_NPROC: 620 if (limp->rlim_cur > maxprocperuid) 621 limp->rlim_cur = maxprocperuid; 622 if (limp->rlim_max > maxprocperuid) 623 limp->rlim_max = maxprocperuid; 624 if (limp->rlim_cur < 1) 625 limp->rlim_cur = 1; 626 if (limp->rlim_max < 1) 627 limp->rlim_max = 1; 628 break; 629 } 630 *alimp = *limp; 631 return (0); 632 } 633 634 #ifndef _SYS_SYSPROTO_H_ 635 struct __getrlimit_args { 636 u_int which; 637 struct rlimit *rlp; 638 }; 639 #endif 640 /* 641 * MPSAFE 642 */ 643 /* ARGSUSED */ 644 int 645 getrlimit(td, uap) 646 struct thread *td; 647 register struct __getrlimit_args *uap; 648 { 649 int error; 650 struct proc *p = td->td_proc; 651 652 if (uap->which >= RLIM_NLIMITS) 653 return (EINVAL); 654 mtx_lock(&Giant); 655 error = copyout(&p->p_rlimit[uap->which], uap->rlp, 656 sizeof (struct rlimit)); 657 mtx_unlock(&Giant); 658 return(error); 659 } 660 661 /* 662 * Transform the running time and tick information in proc p into user, 663 * system, and interrupt time usage. 664 */ 665 void 666 calcru(p, up, sp, ip) 667 struct proc *p; 668 struct timeval *up; 669 struct timeval *sp; 670 struct timeval *ip; 671 { 672 /* {user, system, interrupt, total} {ticks, usec}; previous tu: */ 673 u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu; 674 struct timeval tv; 675 struct bintime bt; 676 677 mtx_assert(&sched_lock, MA_OWNED); 678 /* XXX: why spl-protect ? worst case is an off-by-one report */ 679 680 ut = p->p_uticks; 681 st = p->p_sticks; 682 it = p->p_iticks; 683 684 tt = ut + st + it; 685 if (tt == 0) { 686 st = 1; 687 tt = 1; 688 } 689 690 if (curthread->td_proc == p) { 691 /* 692 * Adjust for the current time slice. This is actually fairly 693 * important since the error here is on the order of a time 694 * quantum, which is much greater than the sampling error. 695 * XXXKSE use a different test due to threads on other 696 * processors also being 'current'. 697 */ 698 699 binuptime(&bt); 700 bintime_sub(&bt, PCPU_PTR(switchtime)); 701 bintime_add(&bt, &p->p_runtime); 702 } else { 703 bt = p->p_runtime; 704 } 705 bintime2timeval(&bt, &tv); 706 tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec; 707 ptu = p->p_uu + p->p_su + p->p_iu; 708 if (tu < ptu || (int64_t)tu < 0) { 709 /* XXX no %qd in kernel. Truncate. */ 710 printf("calcru: negative time of %ld usec for pid %d (%s)\n", 711 (long)tu, p->p_pid, p->p_comm); 712 tu = ptu; 713 } 714 715 /* Subdivide tu. */ 716 uu = (tu * ut) / tt; 717 su = (tu * st) / tt; 718 iu = tu - uu - su; 719 720 /* Enforce monotonicity. */ 721 if (uu < p->p_uu || su < p->p_su || iu < p->p_iu) { 722 if (uu < p->p_uu) 723 uu = p->p_uu; 724 else if (uu + p->p_su + p->p_iu > tu) 725 uu = tu - p->p_su - p->p_iu; 726 if (st == 0) 727 su = p->p_su; 728 else { 729 su = ((tu - uu) * st) / (st + it); 730 if (su < p->p_su) 731 su = p->p_su; 732 else if (uu + su + p->p_iu > tu) 733 su = tu - uu - p->p_iu; 734 } 735 KASSERT(uu + su + p->p_iu <= tu, 736 ("calcru: monotonisation botch 1")); 737 iu = tu - uu - su; 738 KASSERT(iu >= p->p_iu, 739 ("calcru: monotonisation botch 2")); 740 } 741 p->p_uu = uu; 742 p->p_su = su; 743 p->p_iu = iu; 744 745 up->tv_sec = uu / 1000000; 746 up->tv_usec = uu % 1000000; 747 sp->tv_sec = su / 1000000; 748 sp->tv_usec = su % 1000000; 749 if (ip != NULL) { 750 ip->tv_sec = iu / 1000000; 751 ip->tv_usec = iu % 1000000; 752 } 753 } 754 755 #ifndef _SYS_SYSPROTO_H_ 756 struct getrusage_args { 757 int who; 758 struct rusage *rusage; 759 }; 760 #endif 761 /* 762 * MPSAFE 763 */ 764 /* ARGSUSED */ 765 int 766 getrusage(td, uap) 767 register struct thread *td; 768 register struct getrusage_args *uap; 769 { 770 struct proc *p = td->td_proc; 771 register struct rusage *rup; 772 int error = 0; 773 774 mtx_lock(&Giant); 775 776 switch (uap->who) { 777 case RUSAGE_SELF: 778 rup = &p->p_stats->p_ru; 779 mtx_lock_spin(&sched_lock); 780 calcru(p, &rup->ru_utime, &rup->ru_stime, NULL); 781 mtx_unlock_spin(&sched_lock); 782 break; 783 784 case RUSAGE_CHILDREN: 785 rup = &p->p_stats->p_cru; 786 break; 787 788 default: 789 rup = NULL; 790 error = EINVAL; 791 break; 792 } 793 mtx_unlock(&Giant); 794 if (error == 0) { 795 error = copyout(rup, uap->rusage, sizeof (struct rusage)); 796 } 797 return(error); 798 } 799 800 void 801 ruadd(ru, ru2) 802 register struct rusage *ru, *ru2; 803 { 804 register long *ip, *ip2; 805 register int i; 806 807 timevaladd(&ru->ru_utime, &ru2->ru_utime); 808 timevaladd(&ru->ru_stime, &ru2->ru_stime); 809 if (ru->ru_maxrss < ru2->ru_maxrss) 810 ru->ru_maxrss = ru2->ru_maxrss; 811 ip = &ru->ru_first; ip2 = &ru2->ru_first; 812 for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--) 813 *ip++ += *ip2++; 814 } 815 816 /* 817 * Make a copy of the plimit structure. 818 * We share these structures copy-on-write after fork, 819 * and copy when a limit is changed. 820 */ 821 struct plimit * 822 limcopy(lim) 823 struct plimit *lim; 824 { 825 register struct plimit *copy; 826 827 MALLOC(copy, struct plimit *, sizeof(struct plimit), 828 M_SUBPROC, 0); 829 bcopy(lim->pl_rlimit, copy->pl_rlimit, sizeof(struct plimit)); 830 copy->p_lflags = 0; 831 copy->p_refcnt = 1; 832 return (copy); 833 } 834 835 /* 836 * Find the uidinfo structure for a uid. This structure is used to 837 * track the total resource consumption (process count, socket buffer 838 * size, etc.) for the uid and impose limits. 839 */ 840 void 841 uihashinit() 842 { 843 844 uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash); 845 mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF); 846 } 847 848 /* 849 * lookup a uidinfo struct for the parameter uid. 850 * uihashtbl_mtx must be locked. 851 */ 852 static struct uidinfo * 853 uilookup(uid) 854 uid_t uid; 855 { 856 struct uihashhead *uipp; 857 struct uidinfo *uip; 858 859 mtx_assert(&uihashtbl_mtx, MA_OWNED); 860 uipp = UIHASH(uid); 861 LIST_FOREACH(uip, uipp, ui_hash) 862 if (uip->ui_uid == uid) 863 break; 864 865 return (uip); 866 } 867 868 /* 869 * Find or allocate a struct uidinfo for a particular uid. 870 * Increase refcount on uidinfo struct returned. 871 * uifree() should be called on a struct uidinfo when released. 872 */ 873 struct uidinfo * 874 uifind(uid) 875 uid_t uid; 876 { 877 struct uidinfo *uip; 878 879 mtx_lock(&uihashtbl_mtx); 880 uip = uilookup(uid); 881 if (uip == NULL) { 882 struct uidinfo *old_uip; 883 884 mtx_unlock(&uihashtbl_mtx); 885 uip = malloc(sizeof(*uip), M_UIDINFO, M_ZERO); 886 mtx_lock(&uihashtbl_mtx); 887 /* 888 * There's a chance someone created our uidinfo while we 889 * were in malloc and not holding the lock, so we have to 890 * make sure we don't insert a duplicate uidinfo 891 */ 892 if ((old_uip = uilookup(uid)) != NULL) { 893 /* someone else beat us to it */ 894 free(uip, M_UIDINFO); 895 uip = old_uip; 896 } else { 897 uip->ui_mtxp = mtx_pool_alloc(); 898 uip->ui_uid = uid; 899 LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash); 900 } 901 } 902 uihold(uip); 903 mtx_unlock(&uihashtbl_mtx); 904 return (uip); 905 } 906 907 /* 908 * Place another refcount on a uidinfo struct. 909 */ 910 void 911 uihold(uip) 912 struct uidinfo *uip; 913 { 914 915 UIDINFO_LOCK(uip); 916 uip->ui_ref++; 917 UIDINFO_UNLOCK(uip); 918 } 919 920 /*- 921 * Since uidinfo structs have a long lifetime, we use an 922 * opportunistic refcounting scheme to avoid locking the lookup hash 923 * for each release. 924 * 925 * If the refcount hits 0, we need to free the structure, 926 * which means we need to lock the hash. 927 * Optimal case: 928 * After locking the struct and lowering the refcount, if we find 929 * that we don't need to free, simply unlock and return. 930 * Suboptimal case: 931 * If refcount lowering results in need to free, bump the count 932 * back up, loose the lock and aquire the locks in the proper 933 * order to try again. 934 */ 935 void 936 uifree(uip) 937 struct uidinfo *uip; 938 { 939 940 /* Prepare for optimal case. */ 941 UIDINFO_LOCK(uip); 942 943 if (--uip->ui_ref != 0) { 944 UIDINFO_UNLOCK(uip); 945 return; 946 } 947 948 /* Prepare for suboptimal case. */ 949 uip->ui_ref++; 950 UIDINFO_UNLOCK(uip); 951 mtx_lock(&uihashtbl_mtx); 952 UIDINFO_LOCK(uip); 953 954 /* 955 * We must subtract one from the count again because we backed out 956 * our initial subtraction before dropping the lock. 957 * Since another thread may have added a reference after we dropped the 958 * initial lock we have to test for zero again. 959 */ 960 if (--uip->ui_ref == 0) { 961 LIST_REMOVE(uip, ui_hash); 962 mtx_unlock(&uihashtbl_mtx); 963 if (uip->ui_sbsize != 0) 964 /* XXX no %qd in kernel. Truncate. */ 965 printf("freeing uidinfo: uid = %d, sbsize = %ld\n", 966 uip->ui_uid, (long)uip->ui_sbsize); 967 if (uip->ui_proccnt != 0) 968 printf("freeing uidinfo: uid = %d, proccnt = %ld\n", 969 uip->ui_uid, uip->ui_proccnt); 970 UIDINFO_UNLOCK(uip); 971 FREE(uip, M_UIDINFO); 972 return; 973 } 974 975 mtx_unlock(&uihashtbl_mtx); 976 UIDINFO_UNLOCK(uip); 977 } 978 979 /* 980 * Change the count associated with number of processes 981 * a given user is using. When 'max' is 0, don't enforce a limit 982 */ 983 int 984 chgproccnt(uip, diff, max) 985 struct uidinfo *uip; 986 int diff; 987 int max; 988 { 989 990 UIDINFO_LOCK(uip); 991 /* don't allow them to exceed max, but allow subtraction */ 992 if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) { 993 UIDINFO_UNLOCK(uip); 994 return (0); 995 } 996 uip->ui_proccnt += diff; 997 if (uip->ui_proccnt < 0) 998 printf("negative proccnt for uid = %d\n", uip->ui_uid); 999 UIDINFO_UNLOCK(uip); 1000 return (1); 1001 } 1002 1003 /* 1004 * Change the total socket buffer size a user has used. 1005 */ 1006 int 1007 chgsbsize(uip, hiwat, to, max) 1008 struct uidinfo *uip; 1009 u_int *hiwat; 1010 u_int to; 1011 rlim_t max; 1012 { 1013 rlim_t new; 1014 int s; 1015 1016 s = splnet(); 1017 UIDINFO_LOCK(uip); 1018 new = uip->ui_sbsize + to - *hiwat; 1019 /* don't allow them to exceed max, but allow subtraction */ 1020 if (to > *hiwat && new > max) { 1021 splx(s); 1022 UIDINFO_UNLOCK(uip); 1023 return (0); 1024 } 1025 uip->ui_sbsize = new; 1026 *hiwat = to; 1027 if (uip->ui_sbsize < 0) 1028 printf("negative sbsize for uid = %d\n", uip->ui_uid); 1029 splx(s); 1030 UIDINFO_UNLOCK(uip); 1031 return (1); 1032 } 1033