xref: /freebsd/sys/kern/kern_resource.c (revision f0a75d274af375d15b97b830966b99a02b7db911)
1 /*-
2  * Copyright (c) 1982, 1986, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_compat.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/sysproto.h>
45 #include <sys/file.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/mutex.h>
50 #include <sys/priv.h>
51 #include <sys/proc.h>
52 #include <sys/refcount.h>
53 #include <sys/resourcevar.h>
54 #include <sys/sched.h>
55 #include <sys/sx.h>
56 #include <sys/syscallsubr.h>
57 #include <sys/sysent.h>
58 #include <sys/time.h>
59 
60 #include <vm/vm.h>
61 #include <vm/vm_param.h>
62 #include <vm/pmap.h>
63 #include <vm/vm_map.h>
64 
65 
66 static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
67 static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
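/*
 * Map a uid to its hash chain.  Since uihash is the table size minus
 * one and hashinit() sizes the table as a power of two, this reduces
 * to masking off the uid's low-order bits.
 */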
68 #define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
69 static struct mtx uihashtbl_mtx;
70 static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
71 static u_long uihash;		/* size of hash table - 1 */
72 
73 static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
74 		    struct timeval *up, struct timeval *sp);
75 static int	donice(struct thread *td, struct proc *chgp, int n);
76 static struct uidinfo *uilookup(uid_t uid);
77 
78 /*
79  * Resource controls and accounting.
80  */
81 #ifndef _SYS_SYSPROTO_H_
82 struct getpriority_args {
83 	int	which;
84 	int	who;
85 };
86 #endif
87 int
88 getpriority(td, uap)
89 	struct thread *td;
90 	register struct getpriority_args *uap;
91 {
92 	struct proc *p;
93 	struct pgrp *pg;
94 	int error, low;
95 
96 	error = 0;
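	/* PRIO_MAX + 1 is an impossible nice value, used as a "none found" sentinel. */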
97 	low = PRIO_MAX + 1;
98 	switch (uap->which) {
99 
100 	case PRIO_PROCESS:
101 		if (uap->who == 0)
102 			low = td->td_proc->p_nice;
103 		else {
104 			p = pfind(uap->who);
105 			if (p == NULL)
106 				break;
107 			if (p_cansee(td, p) == 0)
108 				low = p->p_nice;
109 			PROC_UNLOCK(p);
110 		}
111 		break;
112 
113 	case PRIO_PGRP:
114 		sx_slock(&proctree_lock);
115 		if (uap->who == 0) {
116 			pg = td->td_proc->p_pgrp;
117 			PGRP_LOCK(pg);
118 		} else {
119 			pg = pgfind(uap->who);
120 			if (pg == NULL) {
121 				sx_sunlock(&proctree_lock);
122 				break;
123 			}
124 		}
125 		sx_sunlock(&proctree_lock);
126 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
127 			PROC_LOCK(p);
128 			if (!p_cansee(td, p)) {
129 				if (p->p_nice < low)
130 					low = p->p_nice;
131 			}
132 			PROC_UNLOCK(p);
133 		}
134 		PGRP_UNLOCK(pg);
135 		break;
136 
137 	case PRIO_USER:
138 		if (uap->who == 0)
139 			uap->who = td->td_ucred->cr_uid;
140 		sx_slock(&allproc_lock);
141 		FOREACH_PROC_IN_SYSTEM(p) {
142 			/* Do not bother to check PRS_NEW processes */
143 			if (p->p_state == PRS_NEW)
144 				continue;
145 			PROC_LOCK(p);
146 			if (!p_cansee(td, p) &&
147 			    p->p_ucred->cr_uid == uap->who) {
148 				if (p->p_nice < low)
149 					low = p->p_nice;
150 			}
151 			PROC_UNLOCK(p);
152 		}
153 		sx_sunlock(&allproc_lock);
154 		break;
155 
156 	default:
157 		error = EINVAL;
158 		break;
159 	}
160 	if (low == PRIO_MAX + 1 && error == 0)
161 		error = ESRCH;
162 	td->td_retval[0] = low;
163 	return (error);
164 }
165 
166 #ifndef _SYS_SYSPROTO_H_
167 struct setpriority_args {
168 	int	which;
169 	int	who;
170 	int	prio;
171 };
172 #endif
173 int
174 setpriority(td, uap)
175 	struct thread *td;
176 	struct setpriority_args *uap;
177 {
178 	struct proc *curp, *p;
179 	struct pgrp *pg;
180 	int found = 0, error = 0;
181 
182 	curp = td->td_proc;
183 	switch (uap->which) {
184 	case PRIO_PROCESS:
185 		if (uap->who == 0) {
186 			PROC_LOCK(curp);
187 			error = donice(td, curp, uap->prio);
188 			PROC_UNLOCK(curp);
189 		} else {
190 			p = pfind(uap->who);
191 			if (p == NULL)
192 				break;
193 			if (p_cansee(td, p) == 0)
194 				error = donice(td, p, uap->prio);
195 			PROC_UNLOCK(p);
196 		}
197 		found++;
198 		break;
199 
200 	case PRIO_PGRP:
201 		sx_slock(&proctree_lock);
202 		if (uap->who == 0) {
203 			pg = curp->p_pgrp;
204 			PGRP_LOCK(pg);
205 		} else {
206 			pg = pgfind(uap->who);
207 			if (pg == NULL) {
208 				sx_sunlock(&proctree_lock);
209 				break;
210 			}
211 		}
212 		sx_sunlock(&proctree_lock);
213 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
214 			PROC_LOCK(p);
215 			if (!p_cansee(td, p)) {
216 				error = donice(td, p, uap->prio);
217 				found++;
218 			}
219 			PROC_UNLOCK(p);
220 		}
221 		PGRP_UNLOCK(pg);
222 		break;
223 
224 	case PRIO_USER:
225 		if (uap->who == 0)
226 			uap->who = td->td_ucred->cr_uid;
227 		sx_slock(&allproc_lock);
228 		FOREACH_PROC_IN_SYSTEM(p) {
229 			PROC_LOCK(p);
230 			if (p->p_ucred->cr_uid == uap->who &&
231 			    !p_cansee(td, p)) {
232 				error = donice(td, p, uap->prio);
233 				found++;
234 			}
235 			PROC_UNLOCK(p);
236 		}
237 		sx_sunlock(&allproc_lock);
238 		break;
239 
240 	default:
241 		error = EINVAL;
242 		break;
243 	}
244 	if (found == 0 && error == 0)
245 		error = ESRCH;
246 	return (error);
247 }
248 
249 /*
250  * Set "nice" for a (whole) process.
251  */
252 static int
253 donice(struct thread *td, struct proc *p, int n)
254 {
255 	int error;
256 
257 	PROC_LOCK_ASSERT(p, MA_OWNED);
258 	if ((error = p_cansched(td, p)))
259 		return (error);
260 	if (n > PRIO_MAX)
261 		n = PRIO_MAX;
262 	if (n < PRIO_MIN)
263 		n = PRIO_MIN;
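	/* Lowering nice (i.e., raising priority) requires privilege. */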
264 	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
265 		return (EACCES);
266 	mtx_lock_spin(&sched_lock);
267 	sched_nice(p, n);
268 	mtx_unlock_spin(&sched_lock);
269 	return (0);
270 }
271 
272 /*
273  * Set realtime priority for LWP.
274  */
275 #ifndef _SYS_SYSPROTO_H_
276 struct rtprio_thread_args {
277 	int		function;
278 	lwpid_t		lwpid;
279 	struct rtprio	*rtp;
280 };
281 #endif
282 int
283 rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
284 {
285 	struct proc *curp;
286 	struct proc *p;
287 	struct rtprio rtp;
288 	struct thread *td1;
289 	int cierror, error;
290 
291 	/* Perform copyin before acquiring locks if needed. */
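	/* Any copyin error is deferred; it is reported only for RTP_SET, after p_cansched(). */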
292 	if (uap->function == RTP_SET)
293 		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
294 	else
295 		cierror = 0;
296 
297 	curp = td->td_proc;
298 	/*
299 	 * Though the lwpid is unique, only the current process is supported,
300 	 * since there is no efficient way to look up an LWP yet.
301 	 */
302 	p = curp;
303 	PROC_LOCK(p);
304 
305 	switch (uap->function) {
306 	case RTP_LOOKUP:
307 		if ((error = p_cansee(td, p)))
308 			break;
309 		mtx_lock_spin(&sched_lock);
310 		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
311 			td1 = td;
312 		else
313 			td1 = thread_find(p, uap->lwpid);
314 		if (td1 != NULL)
315 			pri_to_rtp(td1, &rtp);
316 		else
317 			error = ESRCH;
318 		mtx_unlock_spin(&sched_lock);
319 		PROC_UNLOCK(p);
320 		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
321 	case RTP_SET:
322 		if ((error = p_cansched(td, p)) || (error = cierror))
323 			break;
324 
325 		/* Disallow setting rtprio in most cases if not superuser. */
326 		if (priv_check(td, PRIV_SCHED_RTPRIO) != 0) {
327 			/* can't set realtime priority */
328 /*
329  * Realtime priority has to be restricted for reasons which should be
330  * obvious.  However, for idle priority, there is a potential for
331  * system deadlock if an idleprio process gains a lock on a resource
332  * that other processes need (and the idleprio process can't run
333  * due to a CPU-bound normal process).  Fix me!  XXX
334  */
335 #if 0
336 			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
337 #else
338 			if (rtp.type != RTP_PRIO_NORMAL) {
339 #endif
340 				error = EPERM;
341 				break;
342 			}
343 		}
344 
345 		mtx_lock_spin(&sched_lock);
346 		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
347 			td1 = td;
348 		else
349 			td1 = thread_find(p, uap->lwpid);
350 		if (td1 != NULL)
351 			error = rtp_to_pri(&rtp, td1);
352 		else
353 			error = ESRCH;
354 		mtx_unlock_spin(&sched_lock);
355 		break;
356 	default:
357 		error = EINVAL;
358 		break;
359 	}
360 	PROC_UNLOCK(p);
361 	return (error);
362 }
363 
364 /*
365  * Set realtime priority.
366  */
367 #ifndef _SYS_SYSPROTO_H_
368 struct rtprio_args {
369 	int		function;
370 	pid_t		pid;
371 	struct rtprio	*rtp;
372 };
373 #endif
374 int
375 rtprio(td, uap)
376 	struct thread *td;		/* curthread */
377 	register struct rtprio_args *uap;
378 {
379 	struct proc *curp;
380 	struct proc *p;
381 	struct thread *tdp;
382 	struct rtprio rtp;
383 	int cierror, error;
384 
385 	/* Perform copyin before acquiring locks if needed. */
386 	if (uap->function == RTP_SET)
387 		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
388 	else
389 		cierror = 0;
390 
391 	curp = td->td_proc;
392 	if (uap->pid == 0) {
393 		p = curp;
394 		PROC_LOCK(p);
395 	} else {
396 		p = pfind(uap->pid);
397 		if (p == NULL)
398 			return (ESRCH);
399 	}
400 
401 	switch (uap->function) {
402 	case RTP_LOOKUP:
403 		if ((error = p_cansee(td, p)))
404 			break;
405 		mtx_lock_spin(&sched_lock);
406 		/*
407 		 * If no pid is specified, return OUR priority;
408 		 * otherwise, report the highest priority found in
409 		 * the process.  There isn't much more you can do, as
410 		 * there is only room to return a single priority.
411 		 * XXXKSE: maybe need a new interface to report
412 		 * priorities of multiple system scope threads.
413 		 * Note: specifying our own pid is not the same
414 		 * as leaving it zero.
415 		 */
416 		if (uap->pid == 0) {
417 			pri_to_rtp(td, &rtp);
418 		} else {
419 			struct rtprio rtp2;
420 
421 			rtp.type = RTP_PRIO_IDLE;
422 			rtp.prio = RTP_PRIO_MAX;
423 			FOREACH_THREAD_IN_PROC(p, tdp) {
424 				pri_to_rtp(tdp, &rtp2);
425 				if (rtp2.type <  rtp.type ||
426 				    (rtp2.type == rtp.type &&
427 				    rtp2.prio < rtp.prio)) {
428 					rtp.type = rtp2.type;
429 					rtp.prio = rtp2.prio;
430 				}
431 			}
432 		}
433 		mtx_unlock_spin(&sched_lock);
434 		PROC_UNLOCK(p);
435 		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
436 	case RTP_SET:
437 		if ((error = p_cansched(td, p)) || (error = cierror))
438 			break;
439 
440 		/* Disallow setting rtprio in most cases if not superuser. */
441 		if (priv_check(td, PRIV_SCHED_RTPRIO) != 0) {
442 			/* can't set someone else's */
443 			if (uap->pid) {
444 				error = EPERM;
445 				break;
446 			}
447 			/* can't set realtime priority */
448 /*
449  * Realtime priority has to be restricted for reasons which should be
450  * obvious.  However, for idle priority, there is a potential for
451  * system deadlock if an idleprio process gains a lock on a resource
452  * that other processes need (and the idleprio process can't run
453  * due to a CPU-bound normal process).  Fix me!  XXX
454  */
455 #if 0
456 			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
457 #else
458 			if (rtp.type != RTP_PRIO_NORMAL) {
459 #endif
460 				error = EPERM;
461 				break;
462 			}
463 		}
464 
465 		/*
466 		 * If we are setting our own priority, set just our
467 		 * thread; but if we are operating on another process,
468 		 * set all the threads in that process.  Specifying
469 		 * our own pid explicitly selects the latter behavior.
470 		 */
471 		mtx_lock_spin(&sched_lock);
472 		if (uap->pid == 0) {
473 			error = rtp_to_pri(&rtp, td);
474 		} else {
475 			FOREACH_THREAD_IN_PROC(p, td) {
476 				if ((error = rtp_to_pri(&rtp, td)) != 0)
477 					break;
478 			}
479 		}
480 		mtx_unlock_spin(&sched_lock);
481 		break;
482 	default:
483 		error = EINVAL;
484 		break;
485 	}
486 	PROC_UNLOCK(p);
487 	return (error);
488 }
489 
490 int
491 rtp_to_pri(struct rtprio *rtp, struct thread *td)
492 {
493 	u_char	newpri;
494 
495 	mtx_assert(&sched_lock, MA_OWNED);
496 	if (rtp->prio > RTP_PRIO_MAX)
497 		return (EINVAL);
498 	switch (RTP_PRIO_BASE(rtp->type)) {
499 	case RTP_PRIO_REALTIME:
500 		newpri = PRI_MIN_REALTIME + rtp->prio;
501 		break;
502 	case RTP_PRIO_NORMAL:
503 		newpri = PRI_MIN_TIMESHARE + rtp->prio;
504 		break;
505 	case RTP_PRIO_IDLE:
506 		newpri = PRI_MIN_IDLE + rtp->prio;
507 		break;
508 	default:
509 		return (EINVAL);
510 	}
511 	sched_class(td, rtp->type);	/* XXX fix */
512 	sched_user_prio(td, newpri);
513 	if (curthread == td)
514 		sched_prio(curthread, td->td_user_pri); /* XXX dubious */
515 	return (0);
516 }
517 
518 void
519 pri_to_rtp(struct thread *td, struct rtprio *rtp)
520 {
521 
522 	mtx_assert(&sched_lock, MA_OWNED);
523 	switch (PRI_BASE(td->td_pri_class)) {
524 	case PRI_REALTIME:
525 		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
526 		break;
527 	case PRI_TIMESHARE:
528 		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
529 		break;
530 	case PRI_IDLE:
531 		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
532 		break;
533 	default:
534 		break;
535 	}
536 	rtp->type = td->td_pri_class;
537 }
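
/*
 * Example of the mapping implemented by the pair of functions above:
 * an rtprio of { RTP_PRIO_REALTIME, 5 } corresponds to kernel priority
 * PRI_MIN_REALTIME + 5, and pri_to_rtp() recovers the { type, prio }
 * pair from td_pri_class and td_base_user_pri.
 */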
538 
539 #if defined(COMPAT_43)
540 #ifndef _SYS_SYSPROTO_H_
541 struct osetrlimit_args {
542 	u_int	which;
543 	struct	orlimit *rlp;
544 };
545 #endif
546 int
547 osetrlimit(td, uap)
548 	struct thread *td;
549 	register struct osetrlimit_args *uap;
550 {
551 	struct orlimit olim;
552 	struct rlimit lim;
553 	int error;
554 
555 	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
556 		return (error);
557 	lim.rlim_cur = olim.rlim_cur;
558 	lim.rlim_max = olim.rlim_max;
559 	error = kern_setrlimit(td, uap->which, &lim);
560 	return (error);
561 }
562 
563 #ifndef _SYS_SYSPROTO_H_
564 struct ogetrlimit_args {
565 	u_int	which;
566 	struct	orlimit *rlp;
567 };
568 #endif
569 int
570 ogetrlimit(td, uap)
571 	struct thread *td;
572 	register struct ogetrlimit_args *uap;
573 {
574 	struct orlimit olim;
575 	struct rlimit rl;
576 	struct proc *p;
577 	int error;
578 
579 	if (uap->which >= RLIM_NLIMITS)
580 		return (EINVAL);
581 	p = td->td_proc;
582 	PROC_LOCK(p);
583 	lim_rlimit(p, uap->which, &rl);
584 	PROC_UNLOCK(p);
585 
586 	/*
587 	 * XXX would be more correct to convert only RLIM_INFINITY to the
588 	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
589 	 * values.  Most 64->32 and 32->16 conversions, including
590 	 * important ones such as those of uids, are even more broken than what we
591 	 * do here (they blindly truncate).  We don't do this correctly
592 	 * here since we have little experience with EOVERFLOW yet.
593 	 * Elsewhere, getuid() can't fail...
594 	 */
595 	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
596 	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
597 	error = copyout(&olim, uap->rlp, sizeof(olim));
598 	return (error);
599 }
600 #endif /* COMPAT_43 */
601 
602 #ifndef _SYS_SYSPROTO_H_
603 struct __setrlimit_args {
604 	u_int	which;
605 	struct	rlimit *rlp;
606 };
607 #endif
608 int
609 setrlimit(td, uap)
610 	struct thread *td;
611 	register struct __setrlimit_args *uap;
612 {
613 	struct rlimit alim;
614 	int error;
615 
616 	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
617 		return (error);
618 	error = kern_setrlimit(td, uap->which, &alim);
619 	return (error);
620 }
621 
622 int
623 kern_setrlimit(td, which, limp)
624 	struct thread *td;
625 	u_int which;
626 	struct rlimit *limp;
627 {
628 	struct plimit *newlim, *oldlim;
629 	struct proc *p;
630 	register struct rlimit *alimp;
631 	rlim_t oldssiz;
632 	int error;
633 
634 	if (which >= RLIM_NLIMITS)
635 		return (EINVAL);
636 
637 	/*
638 	 * Preserve historical bugs by treating negative limits as unsigned.
639 	 */
640 	if (limp->rlim_cur < 0)
641 		limp->rlim_cur = RLIM_INFINITY;
642 	if (limp->rlim_max < 0)
643 		limp->rlim_max = RLIM_INFINITY;
644 
645 	oldssiz = 0;
646 	p = td->td_proc;
647 	newlim = lim_alloc();
648 	PROC_LOCK(p);
649 	oldlim = p->p_limit;
650 	alimp = &oldlim->pl_rlimit[which];
651 	if (limp->rlim_cur > alimp->rlim_max ||
652 	    limp->rlim_max > alimp->rlim_max)
653 		if ((error = priv_check_cred(td->td_ucred,
654 		    PRIV_PROC_SETRLIMIT, SUSER_ALLOWJAIL))) {
655 			PROC_UNLOCK(p);
656 			lim_free(newlim);
657 			return (error);
658 		}
659 	if (limp->rlim_cur > limp->rlim_max)
660 		limp->rlim_cur = limp->rlim_max;
661 	lim_copy(newlim, oldlim);
662 	alimp = &newlim->pl_rlimit[which];
663 
664 	switch (which) {
665 
666 	case RLIMIT_CPU:
667 		mtx_lock_spin(&sched_lock);
668 		p->p_cpulimit = limp->rlim_cur;
669 		mtx_unlock_spin(&sched_lock);
670 		break;
671 	case RLIMIT_DATA:
672 		if (limp->rlim_cur > maxdsiz)
673 			limp->rlim_cur = maxdsiz;
674 		if (limp->rlim_max > maxdsiz)
675 			limp->rlim_max = maxdsiz;
676 		break;
677 
678 	case RLIMIT_STACK:
679 		if (limp->rlim_cur > maxssiz)
680 			limp->rlim_cur = maxssiz;
681 		if (limp->rlim_max > maxssiz)
682 			limp->rlim_max = maxssiz;
683 		oldssiz = alimp->rlim_cur;
684 		break;
685 
686 	case RLIMIT_NOFILE:
687 		if (limp->rlim_cur > maxfilesperproc)
688 			limp->rlim_cur = maxfilesperproc;
689 		if (limp->rlim_max > maxfilesperproc)
690 			limp->rlim_max = maxfilesperproc;
691 		break;
692 
693 	case RLIMIT_NPROC:
694 		if (limp->rlim_cur > maxprocperuid)
695 			limp->rlim_cur = maxprocperuid;
696 		if (limp->rlim_max > maxprocperuid)
697 			limp->rlim_max = maxprocperuid;
698 		if (limp->rlim_cur < 1)
699 			limp->rlim_cur = 1;
700 		if (limp->rlim_max < 1)
701 			limp->rlim_max = 1;
702 		break;
703 	}
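	/* Install the new value and publish the updated plimit while still holding the proc lock. */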
704 	*alimp = *limp;
705 	p->p_limit = newlim;
706 	PROC_UNLOCK(p);
707 	lim_free(oldlim);
708 
709 	if (which == RLIMIT_STACK) {
710 		/*
711 		 * Stack is allocated to the max at exec time with only
712 		 * "rlim_cur" bytes accessible.  If stack limit is going
713 		 * up make more accessible, if going down make inaccessible.
714 		 */
715 		if (limp->rlim_cur != oldssiz) {
716 			vm_offset_t addr;
717 			vm_size_t size;
718 			vm_prot_t prot;
719 
720 			if (limp->rlim_cur > oldssiz) {
721 				prot = p->p_sysent->sv_stackprot;
722 				size = limp->rlim_cur - oldssiz;
723 				addr = p->p_sysent->sv_usrstack -
724 				    limp->rlim_cur;
725 			} else {
726 				prot = VM_PROT_NONE;
727 				size = oldssiz - limp->rlim_cur;
728 				addr = p->p_sysent->sv_usrstack - oldssiz;
729 			}
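			/* Round the affected range out to page boundaries. */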
730 			addr = trunc_page(addr);
731 			size = round_page(size);
732 			(void)vm_map_protect(&p->p_vmspace->vm_map,
733 			    addr, addr + size, prot, FALSE);
734 		}
735 	}
736 
737 	/*
738 	 * The data size limit may need to be changed to a value
739 	 * that makes sense for the 32-bit binary.
740 	 */
741 	if (p->p_sysent->sv_fixlimits != NULL)
742 		p->p_sysent->sv_fixlimits(p);
743 	return (0);
744 }
745 
746 #ifndef _SYS_SYSPROTO_H_
747 struct __getrlimit_args {
748 	u_int	which;
749 	struct	rlimit *rlp;
750 };
751 #endif
752 /* ARGSUSED */
753 int
754 getrlimit(td, uap)
755 	struct thread *td;
756 	register struct __getrlimit_args *uap;
757 {
758 	struct rlimit rlim;
759 	struct proc *p;
760 	int error;
761 
762 	if (uap->which >= RLIM_NLIMITS)
763 		return (EINVAL);
764 	p = td->td_proc;
765 	PROC_LOCK(p);
766 	lim_rlimit(p, uap->which, &rlim);
767 	PROC_UNLOCK(p);
768 	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
769 	return (error);
770 }
771 
772 /*
773  * Transform the running time and tick information for children of proc p
774  * into user and system time usage.
775  */
776 void
777 calccru(p, up, sp)
778 	struct proc *p;
779 	struct timeval *up;
780 	struct timeval *sp;
781 {
782 
783 	PROC_LOCK_ASSERT(p, MA_OWNED);
784 	calcru1(p, &p->p_crux, up, sp);
785 }
786 
787 /*
788  * Transform the running time and tick information in proc p into user
789  * and system time usage.  If appropriate, include the current time slice
790  * on this CPU.
791  */
792 void
793 calcru(struct proc *p, struct timeval *up, struct timeval *sp)
794 {
795 	struct rusage_ext rux;
796 	struct thread *td;
797 	uint64_t u;
798 
799 	PROC_LOCK_ASSERT(p, MA_OWNED);
800 	mtx_assert(&sched_lock, MA_NOTOWNED);
801 	mtx_lock_spin(&sched_lock);
802 
803 	/*
804 	 * If we are getting stats for the current process, then add in the
805 	 * stats that this thread has accumulated in its current time slice.
806 	 * We reset the thread and CPU state as if we had performed a context
807 	 * switch right here.
808 	 */
809 	if (curthread->td_proc == p) {
810 		td = curthread;
811 		u = cpu_ticks();
812 		p->p_rux.rux_runtime += u - PCPU_GET(switchtime);
813 		PCPU_SET(switchtime, u);
814 		p->p_rux.rux_uticks += td->td_uticks;
815 		td->td_uticks = 0;
816 		p->p_rux.rux_iticks += td->td_iticks;
817 		td->td_iticks = 0;
818 		p->p_rux.rux_sticks += td->td_sticks;
819 		td->td_sticks = 0;
820 	}
821 	/* Work on a copy of p_rux so we can let go of sched_lock */
822 	rux = p->p_rux;
823 	mtx_unlock_spin(&sched_lock);
824 	calcru1(p, &rux, up, sp);
825 	/* Update the result from the p_rux copy */
826 	p->p_rux.rux_uu = rux.rux_uu;
827 	p->p_rux.rux_su = rux.rux_su;
828 	p->p_rux.rux_tu = rux.rux_tu;
829 }
830 
831 static void
832 calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
833     struct timeval *sp)
834 {
835 	/* {user, system, interrupt, total} {ticks, usec}: */
836 	uint64_t ut, uu, st, su, it, tt, tu;
837 
838 	ut = ruxp->rux_uticks;
839 	st = ruxp->rux_sticks;
840 	it = ruxp->rux_iticks;
841 	tt = ut + st + it;
842 	if (tt == 0) {
843 		/* Avoid divide by zero */
844 		st = 1;
845 		tt = 1;
846 	}
847 	tu = cputick2usec(ruxp->rux_runtime);
848 	if ((int64_t)tu < 0) {
849 		/* XXX: this should be an assert /phk */
850 		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
851 		    (intmax_t)tu, p->p_pid, p->p_comm);
852 		tu = ruxp->rux_tu;
853 	}
854 
855 	if (tu >= ruxp->rux_tu) {
856 		/*
857 		 * The normal case, time increased.
858 		 * Enforce monotonicity of bucketed numbers.
859 		 */
860 		uu = (tu * ut) / tt;
861 		if (uu < ruxp->rux_uu)
862 			uu = ruxp->rux_uu;
863 		su = (tu * st) / tt;
864 		if (su < ruxp->rux_su)
865 			su = ruxp->rux_su;
866 	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
867 		/*
868 		 * When we calibrate the cputicker, it is not uncommon to
869 		 * see the presumably fixed frequency increase slightly over
870 		 * time as a result of thermal stabilization and NTP
871 		 * discipline (of the reference clock).  We therefore ignore
872 		 * a bit of backwards slop because we expect to catch up
873 		 * shortly.  We use a 3 microsecond limit to catch low
874 		 * counts and a 1% limit for high counts.
875 		 */
876 		uu = ruxp->rux_uu;
877 		su = ruxp->rux_su;
878 		tu = ruxp->rux_tu;
879 	} else { /* tu < ruxp->rux_tu */
880 		/*
881 		 * What likely happened here is that a laptop, which ran at
882 		 * a reduced clock frequency at boot, kicked into high gear.
883 		 * The wisdom of spamming this message in that case is
884 		 * dubious, but it might also be indicative of something
885 		 * serious, so let's keep it and hope laptops can be made
886 		 * more truthful about their CPU speed via ACPI.
887 		 */
888 		printf("calcru: runtime went backwards from %ju usec "
889 		    "to %ju usec for pid %d (%s)\n",
890 		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
891 		    p->p_pid, p->p_comm);
892 		uu = (tu * ut) / tt;
893 		su = (tu * st) / tt;
894 	}
895 
896 	ruxp->rux_uu = uu;
897 	ruxp->rux_su = su;
898 	ruxp->rux_tu = tu;
899 
900 	up->tv_sec = uu / 1000000;
901 	up->tv_usec = uu % 1000000;
902 	sp->tv_sec = su / 1000000;
903 	sp->tv_usec = su % 1000000;
904 }
905 
906 #ifndef _SYS_SYSPROTO_H_
907 struct getrusage_args {
908 	int	who;
909 	struct	rusage *rusage;
910 };
911 #endif
912 int
913 getrusage(td, uap)
914 	register struct thread *td;
915 	register struct getrusage_args *uap;
916 {
917 	struct rusage ru;
918 	int error;
919 
920 	error = kern_getrusage(td, uap->who, &ru);
921 	if (error == 0)
922 		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
923 	return (error);
924 }
925 
926 int
927 kern_getrusage(td, who, rup)
928 	struct thread *td;
929 	int who;
930 	struct rusage *rup;
931 {
932 	struct proc *p;
933 
934 	p = td->td_proc;
935 	PROC_LOCK(p);
936 	switch (who) {
937 
938 	case RUSAGE_SELF:
939 		*rup = p->p_stats->p_ru;
940 		calcru(p, &rup->ru_utime, &rup->ru_stime);
941 		break;
942 
943 	case RUSAGE_CHILDREN:
944 		*rup = p->p_stats->p_cru;
945 		calccru(p, &rup->ru_utime, &rup->ru_stime);
946 		break;
947 
948 	default:
949 		PROC_UNLOCK(p);
950 		return (EINVAL);
951 	}
952 	PROC_UNLOCK(p);
953 	return (0);
954 }
955 
956 void
957 ruadd(ru, rux, ru2, rux2)
958 	struct rusage *ru;
959 	struct rusage_ext *rux;
960 	struct rusage *ru2;
961 	struct rusage_ext *rux2;
962 {
963 	register long *ip, *ip2;
964 	register int i;
965 
966 	rux->rux_runtime += rux2->rux_runtime;
967 	rux->rux_uticks += rux2->rux_uticks;
968 	rux->rux_sticks += rux2->rux_sticks;
969 	rux->rux_iticks += rux2->rux_iticks;
970 	rux->rux_uu += rux2->rux_uu;
971 	rux->rux_su += rux2->rux_su;
972 	rux->rux_tu += rux2->rux_tu;
973 	if (ru->ru_maxrss < ru2->ru_maxrss)
974 		ru->ru_maxrss = ru2->ru_maxrss;
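	/*
	 * The remaining rusage fields are longs laid out contiguously
	 * between ru_first and ru_last; sum them pairwise.
	 */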
975 	ip = &ru->ru_first;
976 	ip2 = &ru2->ru_first;
977 	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
978 		*ip++ += *ip2++;
979 }
980 
981 /*
982  * Allocate a new resource limits structure and initialize its
983  * reference count and mutex pointer.
984  */
985 struct plimit *
986 lim_alloc()
987 {
988 	struct plimit *limp;
989 
990 	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
991 	refcount_init(&limp->pl_refcnt, 1);
992 	return (limp);
993 }
994 
995 struct plimit *
996 lim_hold(limp)
997 	struct plimit *limp;
998 {
999 
1000 	refcount_acquire(&limp->pl_refcnt);
1001 	return (limp);
1002 }
1003 
1004 void
1005 lim_free(limp)
1006 	struct plimit *limp;
1007 {
1008 
1009 	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
1010 	if (refcount_release(&limp->pl_refcnt))
1011 		free((void *)limp, M_PLIMIT);
1012 }
1013 
1014 /*
1015  * Make a copy of the plimit structure.
1016  * We share these structures copy-on-write after fork.
1017  */
1018 void
1019 lim_copy(dst, src)
1020 	struct plimit *dst, *src;
1021 {
1022 
1023 	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
1024 	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
1025 }
1026 
1027 /*
1028  * Return the hard limit for a particular system resource.  The
1029  * which parameter specifies the index into the rlimit array.
1030  */
1031 rlim_t
1032 lim_max(struct proc *p, int which)
1033 {
1034 	struct rlimit rl;
1035 
1036 	lim_rlimit(p, which, &rl);
1037 	return (rl.rlim_max);
1038 }
1039 
1040 /*
1041  * Return the current (soft) limit for a particular system resource.
1042  * The which parameter specifies the index into the rlimit array.
1043  */
1044 rlim_t
1045 lim_cur(struct proc *p, int which)
1046 {
1047 	struct rlimit rl;
1048 
1049 	lim_rlimit(p, which, &rl);
1050 	return (rl.rlim_cur);
1051 }
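
/*
 * Typical usage (a sketch, not taken from this file): read a soft
 * limit with the proc lock held, as lim_rlimit() asserts:
 *
 *	PROC_LOCK(p);
 *	maxfiles = lim_cur(p, RLIMIT_NOFILE);
 *	PROC_UNLOCK(p);
 *
 * where 'maxfiles' is a caller-provided rlim_t.
 */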
1052 
1053 /*
1054  * Return a copy of the entire rlimit structure for the system limit
1055  * specified by 'which' in the rlimit structure pointed to by 'rlp'.
1056  */
1057 void
1058 lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
1059 {
1060 
1061 	PROC_LOCK_ASSERT(p, MA_OWNED);
1062 	KASSERT(which >= 0 && which < RLIM_NLIMITS,
1063 	    ("request for invalid resource limit"));
1064 	*rlp = p->p_limit->pl_rlimit[which];
1065 }
1066 
1067 /*
1068  * Find the uidinfo structure for a uid.  This structure is used to
1069  * track the total resource consumption (process count, socket buffer
1070  * size, etc.) for the uid and impose limits.
1071  */
1072 void
1073 uihashinit()
1074 {
1075 
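	/*
	 * Size the table at maxproc/16 chains; hashinit() rounds this to a
	 * power of two and stores the chain mask through &uihash.
	 */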
1076 	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
1077 	mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF);
1078 }
1079 
1080 /*
1081  * Look up a uidinfo struct for the given uid.
1082  * uihashtbl_mtx must be locked.
1083  */
1084 static struct uidinfo *
1085 uilookup(uid)
1086 	uid_t uid;
1087 {
1088 	struct uihashhead *uipp;
1089 	struct uidinfo *uip;
1090 
1091 	mtx_assert(&uihashtbl_mtx, MA_OWNED);
1092 	uipp = UIHASH(uid);
1093 	LIST_FOREACH(uip, uipp, ui_hash)
1094 		if (uip->ui_uid == uid)
1095 			break;
1096 
1097 	return (uip);
1098 }
1099 
1100 /*
1101  * Find or allocate a struct uidinfo for a particular uid.
1102  * Increase refcount on uidinfo struct returned.
1103  * uifree() should be called on a struct uidinfo when released.
1104  */
1105 struct uidinfo *
1106 uifind(uid)
1107 	uid_t uid;
1108 {
1109 	struct uidinfo *old_uip, *uip;
1110 
1111 	mtx_lock(&uihashtbl_mtx);
1112 	uip = uilookup(uid);
1113 	if (uip == NULL) {
1114 		mtx_unlock(&uihashtbl_mtx);
1115 		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
1116 		mtx_lock(&uihashtbl_mtx);
1117 		/*
1118 		 * There's a chance someone created our uidinfo while we
1119 		 * were in malloc and not holding the lock, so we have to
1120 		 * make sure we don't insert a duplicate uidinfo.
1121 		 */
1122 		if ((old_uip = uilookup(uid)) != NULL) {
1123 			/* Someone else beat us to it. */
1124 			free(uip, M_UIDINFO);
1125 			uip = old_uip;
1126 		} else {
1127 			uip->ui_mtxp = mtx_pool_alloc(mtxpool_sleep);
1128 			uip->ui_uid = uid;
1129 			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
1130 		}
1131 	}
1132 	uihold(uip);
1133 	mtx_unlock(&uihashtbl_mtx);
1134 	return (uip);
1135 }
1136 
1137 /*
1138  * Place another refcount on a uidinfo struct.
1139  */
1140 void
1141 uihold(uip)
1142 	struct uidinfo *uip;
1143 {
1144 
1145 	UIDINFO_LOCK(uip);
1146 	uip->ui_ref++;
1147 	UIDINFO_UNLOCK(uip);
1148 }
1149 
1150 /*-
1151  * Since uidinfo structs have a long lifetime, we use an
1152  * opportunistic refcounting scheme to avoid locking the lookup hash
1153  * for each release.
1154  *
1155  * If the refcount hits 0, we need to free the structure,
1156  * which means we need to lock the hash.
1157  * Optimal case:
1158  *   After locking the struct and lowering the refcount, if we find
1159  *   that we don't need to free, simply unlock and return.
1160  * Suboptimal case:
1161  *   If lowering the refcount results in a need to free, bump the count
1162  *   back up, drop the lock, and acquire the locks in the proper
1163  *   order to try again.
1164  */
1165 void
1166 uifree(uip)
1167 	struct uidinfo *uip;
1168 {
1169 
1170 	/* Prepare for optimal case. */
1171 	UIDINFO_LOCK(uip);
1172 
1173 	if (--uip->ui_ref != 0) {
1174 		UIDINFO_UNLOCK(uip);
1175 		return;
1176 	}
1177 
1178 	/* Prepare for suboptimal case. */
1179 	uip->ui_ref++;
1180 	UIDINFO_UNLOCK(uip);
1181 	mtx_lock(&uihashtbl_mtx);
1182 	UIDINFO_LOCK(uip);
1183 
1184 	/*
1185 	 * We must subtract one from the count again because we backed out
1186 	 * our initial subtraction before dropping the lock.
1187 	 * Since another thread may have added a reference after we dropped the
1188 	 * initial lock, we have to test for zero again.
1189 	 */
1190 	if (--uip->ui_ref == 0) {
1191 		LIST_REMOVE(uip, ui_hash);
1192 		mtx_unlock(&uihashtbl_mtx);
1193 		if (uip->ui_sbsize != 0)
1194 			printf("freeing uidinfo: uid = %d, sbsize = %jd\n",
1195 			    uip->ui_uid, (intmax_t)uip->ui_sbsize);
1196 		if (uip->ui_proccnt != 0)
1197 			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
1198 			    uip->ui_uid, uip->ui_proccnt);
1199 		UIDINFO_UNLOCK(uip);
1200 		free(uip, M_UIDINFO);
1201 		return;
1202 	}
1203 
1204 	mtx_unlock(&uihashtbl_mtx);
1205 	UIDINFO_UNLOCK(uip);
1206 }
1207 
1208 /*
1209  * Change the count of processes a given user is using.  When
1210  * 'max' is 0, don't enforce a limit.
1211  */
1212 int
1213 chgproccnt(uip, diff, max)
1214 	struct	uidinfo	*uip;
1215 	int	diff;
1216 	int	max;
1217 {
1218 
1219 	UIDINFO_LOCK(uip);
1220 	/* Don't allow them to exceed max, but allow subtraction. */
1221 	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
1222 		UIDINFO_UNLOCK(uip);
1223 		return (0);
1224 	}
1225 	uip->ui_proccnt += diff;
1226 	if (uip->ui_proccnt < 0)
1227 		printf("negative proccnt for uid = %d\n", uip->ui_uid);
1228 	UIDINFO_UNLOCK(uip);
1229 	return (1);
1230 }
1231 
1232 /*
1233  * Change the total socket buffer size a user has used.
1234  */
1235 int
1236 chgsbsize(uip, hiwat, to, max)
1237 	struct	uidinfo	*uip;
1238 	u_int  *hiwat;
1239 	u_int	to;
1240 	rlim_t	max;
1241 {
1242 	rlim_t new;
1243 
1244 	UIDINFO_LOCK(uip);
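	/* 'to' is the new high-water mark; charge only the delta from the old '*hiwat'. */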
1245 	new = uip->ui_sbsize + to - *hiwat;
1246 	/* Don't allow them to exceed max, but allow subtraction. */
1247 	if (to > *hiwat && new > max) {
1248 		UIDINFO_UNLOCK(uip);
1249 		return (0);
1250 	}
1251 	uip->ui_sbsize = new;
1252 	UIDINFO_UNLOCK(uip);
1253 	*hiwat = to;
1254 	if (new < 0)
1255 		printf("negative sbsize for uid = %d\n", uip->ui_uid);
1256 	return (1);
1257 }
1258