/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtxvar.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
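/*
 * Per-uid accounting lives in a hash table of uidinfo structures,
 * indexed by masking the uid with uihash (the table size minus one,
 * a power of two) and protected by uihashtbl_lock.
 */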
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{

	return (kern_getpriority(td, uap->which, uap->who));
}

int
kern_getpriority(struct thread *td, int which, int who)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{

	return (kern_setpriority(td, uap->which, uap->who, uap->prio));
}

int
kern_setpriority(struct thread *td, int which, int who, int prio)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}

static int unprivileged_idprio;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
    &unprivileged_idprio, 0,
    "Allow non-root users to set an idle priority (deprecated)");

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		td1 = tdfind(uap->lwpid, -1);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */

		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idleprio processes, there
		 * is a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need (and
		 * the idleprio process can't run due to a CPU-bound normal
		 * process).  Fix me!  XXX
		 *
		 * This problem is not only related to idleprio processes.
		 * A user level program can obtain a file lock and hold it
		 * indefinitely.  Additionally, without idleprio processes it
		 * is still conceivable that a program with low priority will
		 * never get to run.  In short, allowing this feature might
		 * make it easier to lock a resource indefinitely, but it is
		 * not the only thing that makes it possible.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
		    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
			break;
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0 &&
		    (error = priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
			break;
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * See the comment in sys_rtprio_thread about idprio
		 * threads holding a lock.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
		    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
			break;
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0 &&
		    (error = priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
			break;

		/*
		 * If we are setting our own priority, set just our
		 * thread but if we are doing another process,
		 * do all the threads on that process.  If we
		 * specify our own pid we do the latter.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

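/*
 * Convert an rtprio request into a scheduler priority class and level
 * and apply it to the given thread, propagating the change to any
 * priority-inheritance umtx chain the thread is blocked on.
 */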
int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri, oldclass, oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	oldclass = td->td_pri_class;
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
	    td->td_pri_class != RTP_PRIO_NORMAL))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}

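/*
 * Report the given thread's base priority class and level in rtprio form.
 */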
void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rl);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_STATLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_STATUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit_proc(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
		    lim_cb, p, C_PREL(1));
}

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{

	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
}

int
kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
    struct rlimit *limp)
{
	struct plimit *newlim, *oldlim, *oldlim_td;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {
	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
			    lim_cb, p, C_PREL(1));
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UPDATE_COW(p);
	oldlim_td = NULL;
	if (td == curthread && PROC_COW_CHANGECOUNT(td, p) == 1) {
		oldlim_td = lim_cowsync();
		thread_cow_synced(td);
	}
	PROC_UNLOCK(p);
	if (oldlim_td != NULL) {
		MPASS(oldlim_td == oldlim);
		lim_freen(oldlim, 2);
	} else {
		lim_free(oldlim);
	}

	if (which == RLIMIT_STACK &&
	    /*
	     * Skip calls from exec_new_vmspace(), done when stack is
	     * not mapped yet.
	     */
	    (td != curthread || (p->p_flag & P_INEXEC) == 0)) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = round_page(p->p_vmspace->vm_stacktop) -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = round_page(p->p_vmspace->vm_stacktop) -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, 0,
			    VM_MAP_PROTECT_SET_PROT);
		}
	}

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
sys_getrlimit(struct thread *td, struct getrlimit_args *uap)
{
	struct rlimit rlim;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rlim);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

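/*
 * Fetch the current consumption of the resource tracked by limit 'which'
 * for process p.  ENXIO means the usage is not meaningfully trackable
 * (e.g. RLIMIT_FSIZE and RLIMIT_CORE, or no vmspace to query).
 */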
static int
getrlimitusage_one(struct proc *p, u_int which, int flags, rlim_t *res)
{
	struct thread *td;
	struct uidinfo *ui;
	struct vmspace *vm;
	uid_t uid;
	int error;

	error = 0;
	PROC_LOCK(p);
	uid = (flags & GETRLIMITUSAGE_EUID) == 0 ? p->p_ucred->cr_ruid :
	    p->p_ucred->cr_uid;
	PROC_UNLOCK(p);

	ui = uifind(uid);
	vm = vmspace_acquire_ref(p);

	switch (which) {
	case RLIMIT_CPU:
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		FOREACH_THREAD_IN_PROC(p, td)
			ruxagg(p, td);
		*res = p->p_rux.rux_runtime;
		PROC_STATUNLOCK(p);
		PROC_UNLOCK(p);
		*res /= cpu_tickrate();
		break;
	case RLIMIT_FSIZE:
		error = ENXIO;
		break;
	case RLIMIT_DATA:
		if (vm == NULL)
			error = ENXIO;
		else
			*res = vm->vm_dsize * PAGE_SIZE;
		break;
	case RLIMIT_STACK:
		if (vm == NULL)
			error = ENXIO;
		else
			*res = vm->vm_ssize * PAGE_SIZE;
		break;
	case RLIMIT_CORE:
		error = ENXIO;
		break;
	case RLIMIT_RSS:
		if (vm == NULL)
			error = ENXIO;
		else
			*res = vmspace_resident_count(vm) * PAGE_SIZE;
		break;
	case RLIMIT_MEMLOCK:
		if (vm == NULL)
			error = ENXIO;
		else
			*res = pmap_wired_count(vmspace_pmap(vm)) * PAGE_SIZE;
		break;
	case RLIMIT_NPROC:
		*res = ui->ui_proccnt;
		break;
	case RLIMIT_NOFILE:
		*res = proc_nfiles(p);
		break;
	case RLIMIT_SBSIZE:
		*res = ui->ui_sbsize;
		break;
	case RLIMIT_VMEM:
		if (vm == NULL)
			error = ENXIO;
		else
			*res = vm->vm_map.size;
		break;
	case RLIMIT_NPTS:
		*res = ui->ui_ptscnt;
		break;
	case RLIMIT_SWAP:
		*res = ui->ui_vmsize;
		break;
	case RLIMIT_KQUEUES:
		*res = ui->ui_kqcnt;
		break;
	case RLIMIT_UMTXP:
		*res = ui->ui_umtxcnt;
		break;
	case RLIMIT_PIPEBUF:
		*res = ui->ui_pipecnt;
		break;
	default:
		error = EINVAL;
		break;
	}

	vmspace_free(vm);
	uifree(ui);
	return (error);
}

int
sys_getrlimitusage(struct thread *td, struct getrlimitusage_args *uap)
{
	rlim_t res;
	int error;

	if ((uap->flags & ~(GETRLIMITUSAGE_EUID)) != 0)
		return (EINVAL);
	error = getrlimitusage_one(curproc, uap->which, uap->flags, &res);
	if (error == 0)
		error = copyout(&res, uap->res, sizeof(res));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg_locked(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}

static uint64_t
mul64_by_fraction(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t acc, bh, bl;
	int i, s, sa, sb;

	/*
	 * Calculate (a * b) / c accurately enough without overflowing.  c
	 * must be nonzero, and its top bit must be 0.  a or b must be
	 * <= c, and the implementation is tuned for b <= c.
	 *
	 * The comments about times are for use in calcru1() with units of
	 * microseconds for 'a' and stathz ticks at 128 Hz for b and c.
	 *
	 * Let n be the number of top zero bits in c.  Each iteration
	 * either returns, or reduces b by right shifting it by at least n.
	 * The number of iterations is at most 1 + 64 / n, and the error is
	 * at most the number of iterations.
	 *
	 * It is very unusual to need even 2 iterations.  Previous
	 * implementations overflowed essentially by returning early in the
	 * first iteration, with n = 38 giving overflow at 105+ hours and
	 * n = 32 giving overflow at 388+ days despite a more careful
	 * calculation.  388 days is a reasonable uptime, and the calculation
	 * needs to work for the uptime times the number of CPUs since 'a'
	 * is per-process.
	 */
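	/*
	 * A worked example with hypothetical numbers: for a = 10^9 usec
	 * of runtime split over b = 5000 user ticks out of c = 12800
	 * total ticks, flsll(a) + flsll(b) = 30 + 13 <= 64, so the loop
	 * below returns (a * b) / c = 390625000 usec on the first pass
	 * with no reduction steps needed.
	 */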
	if (a >= (uint64_t)1 << 63)
		return (0);		/* Unsupported arg -- can't happen. */
	acc = 0;
	for (i = 0; i < 128; i++) {
		sa = flsll(a);
		sb = flsll(b);
		if (sa + sb <= 64)
			/* Up to 105 hours on first iteration. */
			return (acc + (a * b) / c);
		if (a >= c) {
			/*
			 * This reduction is based on a = q * c + r, with the
			 * remainder r < c.  'a' may be large to start, and
			 * moving bits from b into 'a' at the end of the loop
			 * sets the top bit of 'a', so the reduction makes
			 * significant progress.
			 */
			acc += (a / c) * b;
			a %= c;
			sa = flsll(a);
			if (sa + sb <= 64)
				/* Up to 388 days on first iteration. */
				return (acc + (a * b) / c);
		}

		/*
		 * This step writes a * b as a * ((bh << s) + bl) =
		 * a * (bh << s) + a * bl = (a << s) * bh + a * bl.  The 2
		 * additive terms are handled separately.  Splitting in
		 * this way is linear except for rounding errors.
		 *
		 * s = 64 - sa is the maximum such that a << s fits in 64
		 * bits.  Since a < c and c has at least 1 zero top bit,
		 * sa < 64 and s > 0.  Thus this step makes progress by
		 * reducing b (it increases 'a', but taking remainders on
		 * the next iteration completes the reduction).
		 *
		 * Finally, the choice for s is just what is needed to keep
		 * a * bl from overflowing, so we don't need complications
		 * like a recursive call mul64_by_fraction(a, bl, c) to
		 * handle the second additive term.
		 */
		s = 64 - sa;
		bh = b >> s;
		bl = b - (bh << s);
		acc += (a * bl) / c;
		a <<= s;
		b = bh;
	}
	return (0);		/* Algorithm failure -- can't happen. */
}

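/*
 * Split the total runtime in ruxp between user and system time in
 * proportion to the observed user/system/interrupt tick counts, and
 * store the result back into ruxp so that repeated calls stay monotonic.
 */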
static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	/* Subdivide tu.  Avoid overflow in the multiplications. */
	if (__predict_true(tu <= ((uint64_t)1 << 38) && tt <= (1 << 26))) {
		/* Up to 76 hours when stathz is 128. */
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	} else {
		uu = mul64_by_fraction(tu, ut, tt);
		su = mul64_by_fraction(tu, st, tt);
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else if (vm_guest == VM_GUEST_NO) {	/* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
sys_getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_STATLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_STATUNLOCK(p);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td)
{

	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg_locked(struct proc *p, struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);

	ruxagg_ext_locked(&p->p_rux, td);
	ruxagg_ext_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(p, td);
	thread_unlock(td);
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_STATLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume the calcru is executed only once
 * rufetch is completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_STATLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_STATUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

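/*
 * Synchronize this thread's cached plimit pointer with the process
 * limit after a copy-on-write update.  Returns the stale reference,
 * if any, for the caller to release without holding the proc lock.
 */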
struct plimit *
lim_cowsync(void)
{
	struct thread *td;
	struct proc *p;
	struct plimit *oldlimit;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (td->td_limit == p->p_limit)
		return (NULL);

	oldlimit = td->td_limit;
	td->td_limit = lim_hold(p->p_limit);

	return (oldlimit);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	PROC_LOCK_ASSERT(p1, MA_OWNED);
	PROC_LOCK_ASSERT(p2, MA_OWNED);

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
		    lim_cb, p2, C_PREL(1));
}

void
lim_free(struct plimit *limp)
{

	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

void
lim_freen(struct plimit *limp, int n)
{

	if (refcount_releasen(&limp->pl_refcnt, n))
		free((void *)limp, M_PLIMIT);
}

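/*
 * Batch plimit reference releases: runs of threads sharing the same
 * plimit are counted and released with a single atomic operation in
 * lim_freen() instead of one release per thread.
 */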
void
limbatch_add(struct limbatch *lb, struct thread *td)
{
	struct plimit *limp;

	MPASS(td->td_limit != NULL);
	limp = td->td_limit;

	if (lb->limp != limp) {
		if (lb->count != 0) {
			lim_freen(lb->limp, lb->count);
			lb->count = 0;
		}
		lb->limp = limp;
	}

	lb->count++;
}

void
limbatch_final(struct limbatch *lb)
{

	MPASS(lb->count != 0);
	lim_freen(lb->limp, lb->count);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt <= 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct thread *td, int which)
{
	struct rlimit rl;

	lim_rlimit(td, which, &rl);
	return (rl.rlim_max);
}

rlim_t
lim_max_proc(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit_proc(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
(lim_cur)(struct thread *td, int which)
{
	struct rlimit rl;

	lim_rlimit(td, which, &rl);
	return (rl.rlim_cur);
}

rlim_t
lim_cur_proc(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit_proc(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct thread *td, int which, struct rlimit *rlp)
{
	struct proc *p = td->td_proc;

	MPASS(td == curthread);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = td->td_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
lim_rlimit_proc(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be locked.
 * Increase refcount on uidinfo struct returned.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid) {
			uihold(uip);
			break;
		}

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Returns with uidinfo struct referenced.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *new_uip, *uip;
	struct ucred *cred;

	cred = curthread->td_ucred;
	if (cred->cr_uidinfo->ui_uid == uid) {
		uip = cred->cr_uidinfo;
		uihold(uip);
		return (uip);
	} else if (cred->cr_ruidinfo->ui_uid == uid) {
		uip = cred->cr_ruidinfo;
		uihold(uip);
		return (uip);
	}

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	rw_runlock(&uihashtbl_lock);
	if (uip != NULL)
		return (uip);

	new_uip = malloc(sizeof(*new_uip), M_UIDINFO, M_WAITOK | M_ZERO);
	racct_create(&new_uip->ui_racct);
	refcount_init(&new_uip->ui_ref, 1);
	new_uip->ui_uid = uid;

	rw_wlock(&uihashtbl_lock);
	/*
	 * There's a chance someone created our uidinfo while we
	 * were in malloc and not holding the lock, so we have to
	 * make sure we don't insert a duplicate uidinfo.
	 */
	if ((uip = uilookup(uid)) == NULL) {
		LIST_INSERT_HEAD(UIHASH(uid), new_uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		uip = new_uip;
	} else {
		rw_wunlock(&uihashtbl_lock);
		racct_destroy(&new_uip->ui_racct);
		free(new_uip, M_UIDINFO);
	}
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(struct uidinfo *uip)
{

	if (refcount_release_if_not_last(&uip->ui_ref))
		return;

	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref) == 0) {
		rw_wunlock(&uihashtbl_lock);
		return;
	}

	racct_destroy(&uip->ui_racct);
	LIST_REMOVE(uip, ui_hash);
	rw_wunlock(&uihashtbl_lock);

	if (uip->ui_sbsize != 0)
		printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);
	if (uip->ui_vmsize != 0)
		printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
		    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
	if (uip->ui_ptscnt != 0)
		printf("freeing uidinfo: uid = %d, ptscnt = %ld\n",
		    uip->ui_uid, uip->ui_ptscnt);
	if (uip->ui_kqcnt != 0)
		printf("freeing uidinfo: uid = %d, kqcnt = %ld\n",
		    uip->ui_uid, uip->ui_kqcnt);
	if (uip->ui_umtxcnt != 0)
		printf("freeing uidinfo: uid = %d, umtxcnt = %ld\n",
		    uip->ui_uid, uip->ui_umtxcnt);
	if (uip->ui_pipecnt != 0)
		printf("freeing uidinfo: uid = %d, pipecnt = %ld\n",
		    uip->ui_uid, uip->ui_pipecnt);
	free(uip, M_UIDINFO);
}

#ifdef RACCT
void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void (*pre)(void), void (*post)(void),
    void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	if (pre != NULL)
		(pre)();
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	if (post != NULL)
		(post)();
	rw_runlock(&uihashtbl_lock);
}
#endif

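/*
 * Adjust the per-uid counter '*limit' by 'diff'.  An increase past
 * 'max' (when max is nonzero) is backed out and reported as failure;
 * decreases always succeed, with a console warning if the counter
 * goes negative.
 */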
static inline int
chglimit(struct uidinfo *uip, long *limit, int diff, rlim_t max,
    const char *name)
{
	long new;

	/* Don't allow them to exceed max, but allow subtraction. */
	new = atomic_fetchadd_long(limit, (long)diff) + diff;
	if (diff > 0 && max != 0) {
		if (new < 0 || new > max) {
			atomic_subtract_long(limit, (long)diff);
			return (0);
		}
	} else if (new < 0)
		printf("negative %s for uid = %d\n", name, uip->ui_uid);
	return (1);
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_proccnt, diff, max, "proccnt"));
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff, rv;

	diff = to - *hiwat;
	if (diff > 0 && max == 0) {
		rv = 0;
	} else {
		rv = chglimit(uip, &uip->ui_sbsize, diff, max, "sbsize");
		if (rv != 0)
			*hiwat = to;
	}
	return (rv);
}

/*
 * Change the count associated with number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_ptscnt, diff, max, "ptscnt"));
}

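/*
 * Per-uid counters for kqueues, umtx shared locks, and pipe buffer
 * usage; the same 'max == 0 means unlimited' convention as
 * chgproccnt() applies.
 */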
int
chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_kqcnt, diff, max, "kqcnt"));
}

int
chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_umtxcnt, diff, max, "umtxcnt"));
}

int
chgpipecnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_pipecnt, diff, max, "pipecnt"));
}

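/*
 * Sysctl handler backing kern.proc.rlimit_usage: with one name element
 * (the pid) it reports usage for every resource limit of that process,
 * with two it reports the single limit named by the second element;
 * untrackable resources read as -1.
 */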
static int
sysctl_kern_proc_rlimit_usage(SYSCTL_HANDLER_ARGS)
{
	rlim_t resval[RLIM_NLIMITS];
	struct proc *p;
	size_t len;
	int error, *name, i;

	name = (int *)arg1;
	if ((u_int)arg2 != 1 && (u_int)arg2 != 2)
		return (EINVAL);
	if (req->newptr != NULL)
		return (EINVAL);

	error = pget((pid_t)name[0], PGET_WANTREAD, &p);
	if (error != 0)
		return (error);

	if ((u_int)arg2 == 1) {
		len = sizeof(resval);
		memset(resval, 0, sizeof(resval));
		for (i = 0; i < RLIM_NLIMITS; i++) {
			error = getrlimitusage_one(p, (unsigned)i, 0,
			    &resval[i]);
			if (error == ENXIO) {
				resval[i] = -1;
				error = 0;
			} else if (error != 0) {
				break;
			}
		}
	} else {
		len = sizeof(resval[0]);
		error = getrlimitusage_one(p, (unsigned)name[1], 0,
		    &resval[0]);
		if (error == ENXIO) {
			resval[0] = -1;
			error = 0;
		}
	}
	if (error == 0)
		error = SYSCTL_OUT(req, resval, len);
	PRELE(p);
	return (error);
}
static SYSCTL_NODE(_kern_proc, KERN_PROC_RLIMIT_USAGE, rlimit_usage,
    CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE,
    sysctl_kern_proc_rlimit_usage,
    "Process limited resources usage info");