xref: /freebsd/sys/kern/kern_resource.c (revision 646a7fea0c8a60ce2795ffc1bdf58e0fd0f7d624)
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

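/*
 * Illustrative sketch (userland, not part of this file) of how the
 * syscall above and its setpriority(2) counterpart below are reached.
 * Note that getpriority(2) can legitimately return -1, so errno must
 * be cleared beforehand to detect failure:
 *
 *	#include <sys/resource.h>
 *	#include <err.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	int prio = getpriority(PRIO_PROCESS, 0);
 *	if (prio == -1 && errno != 0)
 *		err(1, "getpriority");
 *	if (setpriority(PRIO_PROCESS, 0, prio + 1) == -1)
 *		err(1, "setpriority");
 */
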
#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == uap->who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}

static int unprivileged_idprio;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
    &unprivileged_idprio, 0, "Allow non-root users to set an idle priority");

/*
 * Set realtime priority for an LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		/* Only look up threads in the current process. */
		td1 = tdfind(uap->lwpid, curproc->p_pid);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */

		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idleprio processes, there
		 * is a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need (and
		 * the idleprio process can't run due to a CPU-bound normal
		 * process).  Fix me!  XXX
		 *
		 * This problem is not limited to idleprio processes.  A
		 * user-level program can obtain a file lock and hold it
		 * indefinitely.  Additionally, without idleprio processes it
		 * is still conceivable that a program with low priority will
		 * never get to run.  In short, allowing this feature might
		 * make it easier to lock a resource indefinitely, but it is
		 * not the only thing that makes it possible.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0)) {
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

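/*
 * Illustrative sketch (userland, not part of this file): marking the
 * calling thread idle-priority via rtprio_thread(2).  This succeeds
 * for unprivileged callers only when the security.bsd.unprivileged_idprio
 * sysctl above is non-zero or the caller holds PRIV_SCHED_RTPRIO:
 *
 *	#include <sys/rtprio.h>
 *	#include <err.h>
 *
 *	struct rtprio rtp = { .type = RTP_PRIO_IDLE, .prio = RTP_PRIO_MAX };
 *	if (rtprio_thread(RTP_SET, 0, &rtp) == -1)
 *		err(1, "rtprio_thread");
 */
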
/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid is specified; if one is,
		 * report the highest priority in the process.  There isn't
		 * much more we can do, as there is only room to return a
		 * single priority.
		 * Note: specifying our own pid is not the same as leaving
		 * it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * See the comment in sys_rtprio_thread about idprio
		 * threads holding a lock.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    !unprivileged_idprio)) {
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		/*
		 * If we are setting our own priority, set just our thread;
		 * if we are setting the priority of another process, set
		 * every thread in that process.  Specifying our own pid
		 * (rather than zero) takes the latter path.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

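/*
 * Map an rtprio request onto the kernel's global priority space and
 * apply it to the given thread, propagating the change to any
 * priority-inheriting umtx lock the thread holds.  Returns EINVAL
 * for an out-of-range class or priority.
 */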
int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri;
	u_char	oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (td == curthread ||
	    td->td_priority == oldpri || td->td_user_pri <= PRI_MAX_REALTIME))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}

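/*
 * Translate a thread's base user priority back into the
 * class-relative rtprio form consumed by rtprio(2) and
 * rtprio_thread(2); the inverse of rtp_to_pri() above.
 */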
void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX it would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including the not
	 * unimportant ones of uids, are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

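/*
 * Callout handler for the per-process CPU-time limit, rearmed once
 * per second (hz ticks) for as long as the process has a finite
 * RLIMIT_CPU and has not started exiting.
 */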
static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its CPU resource allocation.
	 * If it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_SUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset(&p->p_limco, hz, lim_cb, p);
}

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{

	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
}

int
kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
    struct rlimit *limp)
{
	struct plimit *newlim, *oldlim;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset(&p->p_limco, hz, lim_cb, p);
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If the stack limit is going
		 * up, make more bytes accessible; if it is going down, make
		 * the newly out-of-range bytes inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	return (0);
}

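/*
 * Illustrative sketch (userland, not part of this file): raising the
 * soft file-descriptor limit to the hard limit, the most common
 * setrlimit(2) pattern.  The clamping to maxfilesperproc happens in
 * kern_proc_setrlimit() above:
 *
 *	#include <sys/resource.h>
 *	#include <err.h>
 *
 *	struct rlimit rl;
 *	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
 *		rl.rlim_cur = rl.rlim_max;
 *		if (setrlimit(RLIMIT_NOFILE, &rl) == -1)
 *			err(1, "setrlimit");
 *	}
 */
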
#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
sys_getrlimit(struct thread *td, struct __getrlimit_args *uap)
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}

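/*
 * Apportion the microseconds of runtime in *ruxp between user and
 * system time according to the ratio of statclock ticks observed in
 * each state.  For example, 600 user ticks and 400 system ticks
 * against 10 seconds of measured runtime yield 6 s of user and 4 s
 * of system time.  The stored per-bucket totals are kept monotonic
 * even when the cputicker output regresses slightly.
 */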
static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		uu = (tu * ut) / tt;
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		su = (tu * st) / tt;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
sys_getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_SLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_SUNLOCK(p);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_locked(struct rusage_ext *rux, struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

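/*
 * Fold a thread's pending tick and runtime counts into both the
 * process-wide and per-thread rusage_ext totals, then zero the
 * thread's counters so they are never accounted twice.
 */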
void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(&p->p_rux, td);
	ruxagg_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
	thread_unlock(td);
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_SLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume that calcru is executed only once the
 * rufetch has completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_SLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_SUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count.
 */
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	PROC_LOCK_ASSERT(p1, MA_OWNED);
	PROC_LOCK_ASSERT(p2, MA_OWNED);

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset(&p2->p_limco, hz, lim_cb, p2);
}

void
lim_free(struct plimit *limp)
{

	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

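/*
 * Initialize the uid-to-uidinfo hash table, sized from maxproc, and
 * the rwlock protecting it.  Must run before the first uifind().
 */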
void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be held.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *old_uip, *uip;

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		rw_runlock(&uihashtbl_lock);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		racct_create(&uip->ui_racct);
		rw_wlock(&uihashtbl_lock);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			racct_destroy(&uip->ui_racct);
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			refcount_init(&uip->ui_ref, 0);
			uip->ui_uid = uid;
			mtx_init(&uip->ui_vmsize_mtx, "ui_vmsize", NULL,
			    MTX_DEF);
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	rw_unlock(&uihashtbl_lock);
	return (uip);
}

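/*
 * Illustrative sketch of the canonical uifind()/uifree() pairing,
 * e.g. as used by the credential code when a uid changes.  Every
 * uifind() must be balanced by exactly one uifree():
 *
 *	struct uidinfo *uip;
 *
 *	uip = uifind(uid);
 *	... charge resources against uip ...
 *	uifree(uip);
 */
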
/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After lowering the refcount, if we find that we don't need to
 *   free, simply return.
 * Suboptimal case:
 *   If the reference being released may be the last one, take the
 *   hash lock first, then release the reference; if it really was
 *   the last one, free the structure while holding the lock.
 */
void
uifree(struct uidinfo *uip)
{
	int old;

	/* Prepare for optimal case. */
	old = uip->ui_ref;
	if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
		return;

	/* Prepare for suboptimal case. */
	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref)) {
		racct_destroy(&uip->ui_racct);
		LIST_REMOVE(uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		if (uip->ui_sbsize != 0)
			printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			    uip->ui_uid, uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		if (uip->ui_vmsize != 0)
			printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
			    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
		mtx_destroy(&uip->ui_vmsize_mtx);
		free(uip, M_UIDINFO);
		return;
	}
	/*
	 * Someone added a reference between atomic_cmpset_int() and
	 * rw_wlock(&uihashtbl_lock).
	 */
	rw_wunlock(&uihashtbl_lock);
}

void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	rw_runlock(&uihashtbl_lock);
}

/*
 * Change the count associated with the number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_proccnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_proccnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_proccnt, (long)diff);
		if (uip->ui_proccnt < 0)
			printf("negative proccnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}

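/*
 * chgproccnt() above and the chg*() functions below share a lock-free
 * pattern: optimistically add with atomic_fetchadd_long(), and if that
 * pushed the count past the limit, back the addition out and report
 * failure.  Concurrent callers may transiently observe a count above
 * 'max', which is tolerated for these advisory limits.
 */
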
/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff;

	diff = to - *hiwat;
	if (diff > 0) {
		if (atomic_fetchadd_long(&uip->ui_sbsize, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_sbsize, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_sbsize, (long)diff);
		if (uip->ui_sbsize < 0)
			printf("negative sbsize for uid = %d\n", uip->ui_uid);
	}
	*hiwat = to;
	return (1);
}

/*
 * Change the count associated with the number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_ptscnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_ptscnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_ptscnt, (long)diff);
		if (uip->ui_ptscnt < 0)
			printf("negative ptscnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}
1432