xref: /freebsd/sys/kern/kern_resource.c (revision f856af0466c076beef4ea9b15d088e1119a945b8)
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct mtx uihashtbl_mtx;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);

/*
 * Resource controls and accounting.
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
/*
 * MPSAFE
 */
int
getpriority(struct thread *td, struct getpriority_args *uap)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			if (!p_cansee(td, p) &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
/*
 * MPSAFE
 */
int
setpriority(struct thread *td, struct setpriority_args *uap)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_ucred->cr_uid == uap->who &&
			    !p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

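/*
 * Illustrative only (a hedged userland sketch, not compiled into the
 * kernel): how the getpriority(2)/setpriority(2) pair above is driven
 * from user space.  Since getpriority(2) may legitimately return -1,
 * errno must be cleared beforehand and checked afterwards.
 */
#if 0
#include <sys/resource.h>
#include <errno.h>
#include <stdio.h>

int
main(void)
{
	int prio;

	errno = 0;
	prio = getpriority(PRIO_PROCESS, 0);	/* who == 0: this process */
	if (prio == -1 && errno != 0) {
		perror("getpriority");
		return (1);
	}
	/* Raising nice always succeeds; lowering it requires privilege. */
	if (setpriority(PRIO_PROCESS, 0, prio + 1) == -1)
		perror("setpriority");
	return (0);
}
#endif
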
/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	mtx_lock_spin(&sched_lock);
	sched_nice(p, n);
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Set realtime priority for LWP.
 *
 * MPSAFE
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif

int
rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *curp;
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	curp = td->td_proc;
	/*
	 * Though lwpid is unique, only the current process is supported
	 * since there is no efficient way to look up an LWP yet.
	 */
	p = curp;
	PROC_LOCK(p);

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		mtx_lock_spin(&sched_lock);
		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
			td1 = td;
		else
			td1 = thread_find(p, uap->lwpid);
		if (td1 != NULL)
			pri_to_rtp(td1, &rtp);
		else
			error = ESRCH;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		if (suser(td) != 0) {
			/* Can't set realtime priority. */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious.  However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process).  Fix me!  XXX
 */
#if 0
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
			if (rtp.type != RTP_PRIO_NORMAL) {
#endif
				error = EPERM;
				break;
			}
		}

		mtx_lock_spin(&sched_lock);
		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
			td1 = td;
		else
			td1 = thread_find(p, uap->lwpid);
		if (td1 != NULL)
			error = rtp_to_pri(&rtp, td1);
		else
			error = ESRCH;
		mtx_unlock_spin(&sched_lock);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Set realtime priority.
 *
 * MPSAFE
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif

int
rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *curp;
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	curp = td->td_proc;
	if (uap->pid == 0) {
		p = curp;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		mtx_lock_spin(&sched_lock);
		/*
		 * Return OUR priority if no pid is specified; if one is,
		 * report the highest priority in the process.  There isn't
		 * much more you can do as there is only room to return a
		 * single priority.
		 * XXXKSE: maybe we need a new interface to report the
		 * priorities of multiple system scope threads.
		 * Note: specifying our own pid is not the same as leaving
		 * it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		if (priv_check(td, PRIV_SCHED_RTPRIO) != 0) {
			/* Can't set someone else's priority. */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/* Can't set realtime priority. */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious.  However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process).  Fix me!  XXX
 */
#if 0
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
			if (rtp.type != RTP_PRIO_NORMAL) {
#endif
				error = EPERM;
				break;
			}
		}

		/*
		 * If we are setting our own priority, set just our
		 * thread; but if we are operating on another process,
		 * do all the threads in that process.  Specifying our
		 * own pid counts as the latter.
		 */
		mtx_lock_spin(&sched_lock);
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		mtx_unlock_spin(&sched_lock);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri;

	mtx_assert(&sched_lock, MA_OWNED);
	if (rtp->prio > RTP_PRIO_MAX)
		return (EINVAL);
	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}
	sched_class(td, rtp->type);	/* XXX fix */
	sched_user_prio(td, newpri);
	if (curthread == td)
		sched_prio(curthread, td->td_user_pri); /* XXX dubious */
	return (0);
}

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	mtx_assert(&sched_lock, MA_OWNED);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
}

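/*
 * Illustrative only (a hedged userland sketch, not compiled into the
 * kernel): driving the class/priority mapping above via rtprio(2).
 * rtp_to_pri() and pri_to_rtp() are inverses within a class, so the
 * lookup after the set should report the values we stored.
 */
#if 0
#include <sys/types.h>
#include <sys/rtprio.h>
#include <stdio.h>

int
main(void)
{
	struct rtprio rtp;

	rtp.type = RTP_PRIO_IDLE;
	rtp.prio = RTP_PRIO_MAX;		/* least favorable idle prio */
	if (rtprio(RTP_SET, 0, &rtp) == -1)	/* pid 0: current process */
		perror("rtprio(RTP_SET)");

	if (rtprio(RTP_LOOKUP, 0, &rtp) == -1)
		perror("rtprio(RTP_LOOKUP)");
	else
		printf("class %d, prio %d\n", rtp.type, rtp.prio);
	return (0);
}
#endif
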
#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX it would be more correct to convert only RLIM_INFINITY to
	 * the old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including some
	 * important ones involving uids, are even more broken than what
	 * we do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
int
setrlimit(struct thread *td, struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{
	struct plimit *newlim, *oldlim;
	struct proc *p;
	struct rlimit *alimp;
	rlim_t oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz = 0;
	p = td->td_proc;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check_cred(td->td_ucred,
		    PRIV_PROC_SETRLIMIT, SUSER_ALLOWJAIL))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		mtx_lock_spin(&sched_lock);
		p->p_cpulimit = limp->rlim_cur;
		mtx_unlock_spin(&sched_lock);
		break;

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = alimp->rlim_cur;
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * The stack is allocated to the max at exec time with
		 * only "rlim_cur" bytes accessible.  If the stack limit
		 * is going up, make more bytes accessible; if it is
		 * going down, make the newly excluded bytes inaccessible.
		 */
		if (limp->rlim_cur != oldssiz) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack - oldssiz;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	/*
	 * The data size limit may need to be changed to a value
	 * that makes sense for the 32-bit binary.
	 */
	if (p->p_sysent->sv_fixlimits != NULL)
		p->p_sysent->sv_fixlimits(p);
	return (0);
}

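/*
 * Worked example of the RLIMIT_STACK adjustment above (addresses and
 * sizes invented for illustration): with sv_usrstack at 0xc0000000 and
 * rlim_cur raised from 8MB to 16MB, the grow branch computes
 * addr = 0xc0000000 - 16MB and size = 8MB, re-enabling access to the
 * pages just below the previously accessible stack; shrinking back
 * re-protects that same range VM_PROT_NONE.  A hedged userland sketch
 * of driving this path:
 */
#if 0
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_STACK, &rl) == -1) {
		perror("getrlimit");
		return (1);
	}
	rl.rlim_cur = rl.rlim_max;	/* grow soft limit to the hard limit */
	if (setrlimit(RLIMIT_STACK, &rl) == -1)
		perror("setrlimit");
	return (0);
}
#endif
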
#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
getrlimit(struct thread *td, struct __getrlimit_args *uap)
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct rusage_ext rux;
	struct thread *td;
	uint64_t u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_lock_spin(&sched_lock);

	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (curthread->td_proc == p) {
		td = curthread;
		u = cpu_ticks();
		p->p_rux.rux_runtime += u - PCPU_GET(switchtime);
		PCPU_SET(switchtime, u);
		p->p_rux.rux_uticks += td->td_uticks;
		td->td_uticks = 0;
		p->p_rux.rux_iticks += td->td_iticks;
		td->td_iticks = 0;
		p->p_rux.rux_sticks += td->td_sticks;
		td->td_sticks = 0;
	}
	/* Work on a copy of p_rux so we can let go of sched_lock. */
	rux = p->p_rux;
	mtx_unlock_spin(&sched_lock);
	calcru1(p, &rux, up, sp);
	/* Update the result from the p_rux copy. */
	p->p_rux.rux_uu = rux.rux_uu;
	p->p_rux.rux_su = rux.rux_su;
	p->p_rux.rux_tu = rux.rux_tu;
}

static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero. */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert. /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		uu = (tu * ut) / tt;
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		su = (tu * st) / tt;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}

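/*
 * Worked example of the bucketing above (tick counts invented for
 * illustration): with ut = 600, st = 300, it = 100, tt = 1000 ticks
 * and tu = 2000000 usec of measured runtime, the split is
 * uu = (2000000 * 600) / 1000 = 1200000 usec of user time and
 * su = (2000000 * 300) / 1000 = 600000 usec of system time; interrupt
 * time is the remainder.  Because uu and su are clamped to never drop
 * below rux_uu/rux_su, repeated calls stay monotonic even when the
 * tick ratios shift between samples.
 */
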
#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
/*
 * MPSAFE
 */
int
getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {

	case RUSAGE_SELF:
		*rup = p->p_stats->p_ru;
		calcru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	default:
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_UNLOCK(p);
	return (0);
}

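/*
 * Illustrative only (a hedged userland sketch, not compiled into the
 * kernel): the user-space view of the two cases handled above.
 */
#if 0
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) == -1) {
		perror("getrusage");
		return (1);
	}
	printf("user %ld.%06lds, system %ld.%06lds\n",
	    (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	    (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
	return (0);
}
#endif
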
void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{
	long *ip, *ip2;
	int i;

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count.
 */
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

void
lim_free(struct plimit *limp)
{

	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

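/*
 * Illustrative only (a hedged sketch of the copy-on-write pattern these
 * helpers implement, mirroring kern_setrlimit() above): fork() shares
 * the parent's plimit by reference, and a writer replaces the shared
 * structure with a private copy under the process lock.  "newproc" and
 * "parent" are hypothetical names for this sketch.
 */
#if 0
	/* Sharing side (fork): take a reference instead of copying. */
	newproc->p_limit = lim_hold(parent->p_limit);

	/* Writing side: allocate, copy, modify, swap, release the old. */
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	lim_copy(newlim, oldlim);
	newlim->pl_rlimit[which] = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);
#endif
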
/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF);
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_mtx must be locked.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	mtx_assert(&uihashtbl_mtx, MA_OWNED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase the refcount on the returned uidinfo struct.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *old_uip, *uip;

	mtx_lock(&uihashtbl_mtx);
	uip = uilookup(uid);
	if (uip == NULL) {
		mtx_unlock(&uihashtbl_mtx);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		mtx_lock(&uihashtbl_mtx);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			uip->ui_mtxp = mtx_pool_alloc(mtxpool_sleep);
			uip->ui_uid = uid;
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	mtx_unlock(&uihashtbl_mtx);
	return (uip);
}

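/*
 * Illustrative only (a hedged sketch of a typical uidinfo lifetime):
 * every uifind() returns a referenced structure that must eventually
 * be dropped with uifree(); additional holders take their own
 * reference with uihold().
 */
#if 0
	struct uidinfo *uip;

	uip = uifind(cred->cr_uid);		/* reference acquired */
	if (!chgproccnt(uip, 1, lim_cur(p, RLIMIT_NPROC))) {
		uifree(uip);			/* over the per-uid limit */
		return (EAGAIN);
	}
	/* ... when the process goes away ... */
	(void)chgproccnt(uip, -1, 0);		/* max of 0: never enforced */
	uifree(uip);				/* reference released */
#endif
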
/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	UIDINFO_LOCK(uip);
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in a need to free, bump the count
 *   back up, drop the struct lock and acquire the locks in the
 *   proper order to try again.
 */
void
uifree(struct uidinfo *uip)
{

	/* Prepare for optimal case. */
	UIDINFO_LOCK(uip);

	if (--uip->ui_ref != 0) {
		UIDINFO_UNLOCK(uip);
		return;
	}

	/* Prepare for suboptimal case. */
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
	mtx_lock(&uihashtbl_mtx);
	UIDINFO_LOCK(uip);

	/*
	 * We must subtract one from the count again because we backed out
	 * our initial subtraction before dropping the lock.
	 * Since another thread may have added a reference after we dropped
	 * the initial lock, we have to test for zero again.
	 */
	if (--uip->ui_ref == 0) {
		LIST_REMOVE(uip, ui_hash);
		mtx_unlock(&uihashtbl_mtx);
		if (uip->ui_sbsize != 0)
			printf("freeing uidinfo: uid = %d, sbsize = %jd\n",
			    uip->ui_uid, (intmax_t)uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		UIDINFO_UNLOCK(uip);
		free(uip, M_UIDINFO);
		return;
	}

	mtx_unlock(&uihashtbl_mtx);
	UIDINFO_UNLOCK(uip);
}

/*
 * Change the count of processes a given user is running.  When 'max'
 * is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{

	UIDINFO_LOCK(uip);
	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_proccnt += diff;
	if (uip->ui_proccnt < 0)
		printf("negative proccnt for uid = %d\n", uip->ui_uid);
	UIDINFO_UNLOCK(uip);
	return (1);
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	rlim_t new;

	UIDINFO_LOCK(uip);
	new = uip->ui_sbsize + to - *hiwat;
	/* Don't allow them to exceed max, but allow subtraction. */
	if (to > *hiwat && new > max) {
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_sbsize = new;
	UIDINFO_UNLOCK(uip);
	*hiwat = to;
	if (new < 0)
		printf("negative sbsize for uid = %d\n", uip->ui_uid);
	return (1);
}

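/*
 * Illustrative only (a hedged sketch of how a socket buffer reservation
 * might use chgsbsize(), in the spirit of sbreserve()): the old
 * high-water mark is replaced by the hypothetical 'newsize' and the
 * per-uid total is adjusted by the difference, refusing growth past
 * the sbsize resource limit.
 */
#if 0
	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, newsize,
	    lim_cur(td->td_proc, RLIMIT_SBSIZE)))
		return (ENOBUFS);	/* would exceed the per-uid cap */
#endif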