xref: /titanic_41/usr/src/cmd/rcap/rcapd/rcapd_main.c (revision 8eea8e29cc4374d1ee24c25a07f45af132db3499)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * rcapd is a long-running daemon enforcing project-based resource caps (see
31  * rcapd(1M)).  Each instance of a process aggregate (project or, generically,
32  * "collection") may have a memory cap.  A single thread monitors the resource
33  * utilization of capped collections, enforces caps when they are exceeded (and
34  * other conditions are met), and incorporates changes in configuration or
35  * caps.  Each of these actions occurs not more frequently than the rate
36  * specified with rcapadm(1M).
37  */
38 
39 #include <sys/priocntl.h>
40 #include <sys/proc.h>
41 #include <sys/resource.h>
42 #include <sys/sysinfo.h>
43 #include <sys/stat.h>
44 #include <sys/sysmacros.h>
45 #include <sys/time.h>
46 #include <sys/types.h>
47 #include <dirent.h>
48 #include <errno.h>
49 #include <fcntl.h>
50 #include <kstat.h>
51 #include <libintl.h>
52 #include <limits.h>
53 #include <locale.h>
54 #include <priv.h>
55 #include <signal.h>
56 #include <stdarg.h>
57 #include <stdio.h>
58 #include <stdlib.h>
59 #include <strings.h>
60 #include <time.h>
61 #include <unistd.h>
62 #include <zone.h>
63 #include <assert.h>
64 #include "rcapd.h"
65 #include "rcapd_mapping.h"
66 #include "rcapd_rfd.h"
67 #include "rcapd_stat.h"
68 #include "utils.h"
69 
70 #define	POSITIVE_MIN(x, y) \
71 	(((x) <= 0) ? (y) : ((y) <= 0) ? (x) : MIN(x, y))
72 #define	NEXT_EVENT_TIME(base, seconds) \
73 	(((int)seconds > 0) ? (base + (hrtime_t)seconds * (hrtime_t)NANOSEC) \
74 	: (hrtime_t)0)
75 #define	NEXT_REPORT_EVENT_TIME(base, seconds) \
76 	((rcfg.rcfg_stat_file[0] != 0) ?  \
77 	    NEXT_EVENT_TIME(gethrtime(), seconds) : (hrtime_t)0)
78 #define	EVENT_TIME(time, eventtime) \
79 	(((time) > (eventtime)) && (eventtime) != 0)
80 #define	STAT_TEMPLATE_SUFFIX	".XXXXXX"	/* suffix of mkstemp() arg */
81 #define	DAEMON_UID		1		/* uid to use */
82 
83 typedef struct soft_scan_arg {
84 	uint64_t ssa_sum_excess;
85 	int64_t ssa_scan_goal;
86 } soft_scan_arg_t;
87 
88 static int debug_mode = 0;		/* debug mode flag */
89 static pid_t rcapd_pid;			/* rcapd's pid to ensure it's not */
90 					/* scanned */
91 static kstat_ctl_t *kctl;		/* kstat chain */
92 static uint64_t new_sp = 0, old_sp = 0;	/* measure delta in page scan count */
93 static int enforce_caps = 0;		/* cap enforcement flag, dependent on */
94 					/* enforce_soft_caps and */
95 					/* global_scanner_running */
96 static int enforce_soft_caps = 0;	/* soft cap enforcement flag, */
97 					/* depending on memory pressure */
98 static int memory_pressure = 0;		/* physical memory utilization (%) */
99 static int memory_pressure_sample = 0;	/* count of samples */
100 static int global_scanner_running = 0;	/* global scanning flag, to avoid */
101 					/* interference with kernel's page */
102 					/* scanner */
103 static hrtime_t next_report;		/* time of next report */
104 static int termination_signal = 0;	/* terminating signal */
105 
106 rcfg_t rcfg;
107 
108 /*
109  * Flags.
110  */
111 static int ever_ran;
112 int should_run;
113 static int should_reconfigure;
114 
115 static int verify_statistics(void);
116 static int update_statistics(void);
117 
118 /*
119  * Checks if a process is marked 'system'.  Returns zero only when it is not.
120  */
121 static int
122 proc_issystem(pid_t pid)
123 {
124 	char pc_clname[PC_CLNMSZ];
125 
126 	if (priocntl(P_PID, pid, PC_GETXPARMS, NULL, PC_KY_CLNAME, pc_clname,
127 	    PC_KY_NULL) != -1) {
128 		return (strcmp(pc_clname, "SYS") == 0);
129 	} else {
130 		debug("cannot get class-specific scheduling parameters; "
131 		    "assuming system process");
132 		return (-1);
133 	}
134 }
135 
/*
 * fname is the process name, for debugging messages, and unscannable is a flag
 * indicating whether the process should be scanned.
 *
 * Marks the process as seen in the collection identified by colid, creating a
 * new tracking record at the head of the collection's process list if this is
 * the first time it has been observed.
 */
static void
lprocess_insert_mark(pid_t pid, id_t colid, char *fname, int unscannable)
{
	lcollection_t *lcol;
	lprocess_t *lproc;

	/* A process in an untracked collection is of no interest. */
	if ((lcol = lcollection_find(colid)) == NULL)
		return;

	/*
	 * If the process is already being tracked, update the unscannable flag,
	 * as determined by the caller, from the process's psinfo.
	 */
	lproc = lcol->lcol_lprocess;
	while (lproc != NULL) {
		if (lproc->lpc_pid == pid) {
			lproc->lpc_mark = 1;
			/*
			 * Scannability is only ever revoked here, never
			 * restored.
			 */
			if (unscannable != 0 && lproc->lpc_unscannable == 0) {
				debug("process %d: became unscannable\n",
				    (int)lproc->lpc_pid);
				lproc->lpc_unscannable = 1;
			}
			return;
		}
		lproc = lproc->lpc_next;
	}

	/*
	 * We've fallen off the list without finding our current process;
	 * insert it at the list head.
	 */
	if ((lproc = malloc(sizeof (*lproc))) == NULL)
		debug("insufficient memory to track new process %d", (int)pid);
	else {
		(void) bzero(lproc, sizeof (*lproc));
		lproc->lpc_pid = pid;
		lproc->lpc_mark = 1;
		lproc->lpc_collection = lcol;
		/* No cached /proc descriptors yet. */
		lproc->lpc_psinfo_fd = -1;
		lproc->lpc_pgdata_fd = -1;
		lproc->lpc_xmap_fd = -1;

		/*
		 * If the caller didn't flag this process as unscannable
		 * already, do some more checking.
		 */
		lproc->lpc_unscannable = unscannable || proc_issystem(pid);

#ifdef DEBUG
		/*
		 * Verify the sanity of lprocess.  It should not contain the
		 * process we are about to prepend.
		 */
		if (lcollection_member(lcol, lproc)) {
			lprocess_t *cur = lcol->lcol_lprocess;
			debug("The collection %lld already has these members, "
			    "including me, %d!\n", (long long)lcol->lcol_id,
			    (int)lproc->lpc_pid);
			while (cur != NULL) {
				debug("\t%d\n", (int)cur->lpc_pid);
				cur = cur->lpc_next;
			}
			info(gettext("process already on lprocess\n"));
			abort();
		}
#endif /* DEBUG */
		/* Prepend to the collection's doubly-linked process list. */
		lproc->lpc_next = lcol->lcol_lprocess;
		if (lproc->lpc_next != NULL)
			lproc->lpc_next->lpc_prev = lproc;
		lproc->lpc_prev = NULL;
		lcol->lcol_lprocess = lproc;

		debug("tracking %d %d %s%s\n", (int)colid, (int)pid, fname,
		    (lproc->lpc_unscannable != 0) ? " (not scannable)" : "");
		lcol->lcol_stat.lcols_proc_in++;
	}
}
217 
218 static int
219 list_walk_process_cb(lcollection_t *lcol, void *arg)
220 {
221 	int (*cb)(lcollection_t *, lprocess_t *) =
222 	    (int(*)(lcollection_t *, lprocess_t *))arg;
223 	lprocess_t *member;
224 	lprocess_t *next;
225 
226 	member = lcol->lcol_lprocess;
227 	while (member != NULL) {
228 		pid_t pid = member->lpc_pid;
229 		next = member->lpc_next;
230 
231 		debug_high("list_walk_all lpc %d\n", (int)pid);
232 		if (cb(lcol, member) != 0) {
233 			debug_high("list_walk_all aborted at lpc %d\n",
234 			    (int)pid);
235 			return (1);
236 		}
237 		member = next;
238 	}
239 
240 	return (0);
241 }
242 
/*
 * Invoke the given callback for each process in each collection.  Callbacks
 * are allowed to change the linkage of the process on which they act.
 */
static void
list_walk_all(int (*cb)(lcollection_t *, lprocess_t *))
{
	/* list_walk_process_cb applies cb to each member of one collection. */
	list_walk_collection(list_walk_process_cb, (void *)cb);
}
252 
253 static void
254 revoke_psinfo(rfd_t *rfd)
255 {
256 	lprocess_t *lpc = (lprocess_t *)rfd->rfd_data;
257 
258 	if (lpc != NULL) {
259 		debug("revoking psinfo fd for process %d\n", (int)lpc->lpc_pid);
260 		ASSERT(lpc->lpc_psinfo_fd != -1);
261 		lpc->lpc_psinfo_fd = -1;
262 	} else
263 		debug("revoking psinfo fd for unknown process\n");
264 }
265 
/*
 * Retrieve a process's psinfo via an already-opened or new file descriptor.
 * The supplied descriptor will be closed on failure.  An optional callback
 * will be invoked with the last descriptor tried, and a supplied callback
 * argument, as its arguments, such that the new descriptor may be cached, or
 * an old one may be invalidated.  If the result of the callback is zero, the
 * caller is to assume responsibility for the file descriptor, to close it
 * with rfd_close().
 *
 * On failure, a nonzero value is returned.
 */
int
get_psinfo(pid_t pid, psinfo_t *psinfo, int cached_fd,
    int(*fd_update_cb)(void *, int), void *arg, lprocess_t *lpc)
{
	int fd;
	int can_try_uncached;

	/* A cached descriptor requires a callback to manage its ownership. */
	ASSERT(!(cached_fd > 0 && fd_update_cb == NULL));

	do {
		if (cached_fd >= 0) {
			/* First attempt: the caller's cached descriptor. */
			fd = cached_fd;
			can_try_uncached = 1;
			debug_high("%d/psinfo, trying cached fd %d\n",
			    (int)pid, fd);
		} else {
			char pathbuf[PROC_PATH_MAX];

			can_try_uncached = 0;
			(void) snprintf(pathbuf, sizeof (pathbuf),
			    "/proc/%d/psinfo", (int)pid);
			if ((fd = rfd_open(pathbuf, 1, RFD_PSINFO,
			    revoke_psinfo, lpc, O_RDONLY, 0000)) < 0) {
				debug("cannot open %s", pathbuf);
				break;
			} else
				debug_high("opened %s, fd %d\n", pathbuf, fd);
		}

		/*
		 * The pr_pid check guards against a stale cached descriptor
		 * whose process has exited; in that case fall through and
		 * retry with a freshly opened descriptor.
		 */
		if (pread(fd, psinfo, sizeof (*psinfo), 0) ==
		    sizeof (*psinfo) && psinfo->pr_pid == pid)
			break;
		else {
			debug_high("closed fd %d\n", fd);
			if (rfd_close(fd) != 0)
				debug("could not close fd %d", fd);
			fd = cached_fd = -1;
		}
	} while (can_try_uncached == 1);

	/*
	 * Offer the final descriptor to the callback; if there is no callback
	 * or it declines ownership (nonzero return), close it here.
	 */
	if (fd_update_cb == NULL || fd_update_cb(arg, fd) != 0)
		if (fd >= 0) {
			debug_high("closed %s fd %d\n", fd_update_cb == NULL ?
			    "uncached" : "cached", fd);
			if (rfd_close(fd) != 0)
				debug("could not close fd %d", fd);
		}

	debug_high("get_psinfo ret %d, fd %d, %s\n", ((fd >= 0) ? 0 : -1), fd,
	    fd_update_cb != NULL ? "cached" : "uncached");
	return ((fd >= 0) ? 0 : -1);
}
329 
/*
 * Retrieve the collection membership of all processes in our zone, and update
 * the psinfo of those non-system, non-zombie ones in collections.
 */
static void
proc_cb(const pid_t pid)
{
	/* Our zone ID is cached across calls; it cannot change at runtime. */
	static zoneid_t ours = (zoneid_t)-1;
	psinfo_t psinfo;

	if (ours == (zoneid_t)-1)
		ours = getzoneid();

	/*
	 * pr_nlwp == 0 flags the process unscannable (per the header comment,
	 * these are zombies); processes in other zones are skipped entirely.
	 */
	if (get_psinfo(pid, &psinfo, -1, NULL, NULL, NULL) == 0 &&
	    psinfo.pr_zoneid == ours)
		lprocess_insert_mark(psinfo.pr_pid, rc_getidbypsinfo(&psinfo),
		    psinfo.pr_psargs, psinfo.pr_nlwp == 0);
}
348 
349 /*
350  * Cache the process' psinfo fd, taking responsibility for freeing it.
351  */
352 int
353 lprocess_update_psinfo_fd_cb(void *arg, int fd)
354 {
355 	lprocess_t *lpc = arg;
356 
357 	lpc->lpc_psinfo_fd = fd;
358 	return (0);
359 }
360 
361 /*
362  * Update the RSS of processes in monitored collections.
363  */
364 /*ARGSUSED*/
365 static int
366 mem_sample_cb(lcollection_t *lcol, lprocess_t *lpc)
367 {
368 	psinfo_t psinfo;
369 
370 	if (get_psinfo(lpc->lpc_pid, &psinfo, lpc->lpc_psinfo_fd,
371 	    lprocess_update_psinfo_fd_cb, lpc, lpc) == 0) {
372 		lpc->lpc_rss = psinfo.pr_rssize;
373 		lpc->lpc_size = psinfo.pr_size;
374 	} else {
375 		if (errno == ENOENT)
376 			debug("process %d finished\n", (int)lpc->lpc_pid);
377 		else
378 			debug("process %d: cannot read psinfo",
379 			    (int)lpc->lpc_pid);
380 		lprocess_free(lpc);
381 	}
382 
383 	return (0);
384 }
385 
/*
 * Sample the collection RSS, updating the collection's statistics with the
 * results.
 */
/*ARGSUSED*/
static int
rss_sample_col_cb(lcollection_t *lcol, void *arg)
{
	int64_t excess;
	uint64_t rss;

	/*
	 * If updating statistics for a new interval, reset the affected
	 * counters.
	 */
	if (lcol->lcol_stat_invalidate != 0) {
		lcol->lcol_stat_old = lcol->lcol_stat;
		/*
		 * (int64_t)-1 stored in the unsigned min field acts as
		 * "larger than any sample", so the first sample wins.
		 */
		lcol->lcol_stat.lcols_min_rss = (int64_t)-1;
		lcol->lcol_stat.lcols_max_rss = 0;
		lcol->lcol_stat_invalidate = 0;
	}

	lcol->lcol_stat.lcols_rss_sample++;
	excess = lcol->lcol_rss - lcol->lcol_rss_cap;
	rss = lcol->lcol_rss;
	/* Sum RSS over cap-exceeding samples separately from all samples. */
	if (excess > 0)
		lcol->lcol_stat.lcols_rss_act_sum += rss;
	lcol->lcol_stat.lcols_rss_sum += rss;

	/* Track the interval's minimum and maximum observed RSS. */
	if (lcol->lcol_stat.lcols_min_rss > rss)
		lcol->lcol_stat.lcols_min_rss = rss;
	if (lcol->lcol_stat.lcols_max_rss < rss)
		lcol->lcol_stat.lcols_max_rss = rss;

	return (0);
}
422 
423 /*
424  * Open /proc and walk entries.
425  */
426 static void
427 proc_walk_all(void (*cb)(const pid_t))
428 {
429 	DIR *pdir;
430 	struct dirent *dirent;
431 	pid_t pid;
432 
433 	(void) rfd_reserve(1);
434 	if ((pdir = opendir("/proc")) == NULL)
435 		die(gettext("couldn't open /proc!"));
436 
437 	while ((dirent = readdir(pdir)) != NULL) {
438 		if (strcmp(".", dirent->d_name) == 0 ||
439 		    strcmp("..", dirent->d_name) == 0)
440 			continue;
441 		pid = atoi(dirent->d_name);
442 		ASSERT(pid != 0 || strcmp(dirent->d_name, "0") == 0);
443 		if (pid == rcapd_pid)
444 			continue;
445 		else
446 			cb(pid);
447 	}
448 	(void) closedir(pdir);
449 }
450 
/*
 * Memory update callback.  Accumulate one process's sampled RSS and image
 * size into its collection's totals.
 */
static int
memory_all_cb(lcollection_t *lcol, lprocess_t *lpc)
{
	debug_high("%s %s, pid %d: rss += %llu/%llu\n", rcfg.rcfg_mode_name,
	    lcol->lcol_name, (int)lpc->lpc_pid,
	    (unsigned long long)lpc->lpc_rss,
	    (unsigned long long)lpc->lpc_size);
	/* A process's resident set can never exceed its image size. */
	ASSERT(lpc->lpc_rss <= lpc->lpc_size);
	lcol->lcol_rss += lpc->lpc_rss;
	lcol->lcol_image_size += lpc->lpc_size;

	return (0);
}
467 
468 /*
469  * Clear unmarked callback.
470  */
471 /*ARGSUSED*/
472 static int
473 sweep_process_cb(lcollection_t *lcol, lprocess_t *lpc)
474 {
475 	if (lpc->lpc_mark) {
476 		lpc->lpc_mark = 0;
477 	} else {
478 		debug("process %d finished\n", (int)lpc->lpc_pid);
479 		lprocess_free(lpc);
480 	}
481 
482 	return (0);
483 }
484 
485 /*
486  * Memory clear callback.
487  */
488 /*ARGSUSED*/
489 static int
490 collection_zero_mem_cb(lcollection_t *lcol, void *arg)
491 {
492 	lcol->lcol_rss = 0;
493 	lcol->lcol_image_size = 0;
494 
495 	return (0);
496 }
497 
/*
 * Print, for debugging purposes, a collection's recently-sampled RSS and
 * excess.
 */
/*ARGSUSED*/
static int
excess_print_cb(lcollection_t *lcol, void *arg)
{
	/* Negative excess means the collection is under its cap. */
	int64_t excess = lcol->lcol_rss - lcol->lcol_rss_cap;

	debug("%s %s rss/cap: %llu/%llu, excess = %lld kB\n",
	    rcfg.rcfg_mode_name, lcol->lcol_name,
	    (unsigned long long)lcol->lcol_rss,
	    (unsigned long long)lcol->lcol_rss_cap,
	    (long long)excess);

	return (0);
}
516 
517 /*
518  * Scan those collections which have exceeded their caps.
519  */
520 /*ARGSUSED*/
521 static int
522 scan_cb(lcollection_t *lcol, void *arg)
523 {
524 	int64_t excess;
525 
526 	if ((excess = lcol->lcol_rss - lcol->lcol_rss_cap) > 0) {
527 		scan(lcol, excess);
528 		lcol->lcol_stat.lcols_scan++;
529 	}
530 
531 	return (0);
532 }
533 
/*
 * Do a soft scan of those collections which have excesses.  A soft scan is one
 * in which the cap enforcement pressure is taken into account.  The difference
 * between the utilized physical memory and the cap enforcement pressure will
 * be scanned-for, and each collection will be scanned proportionally by their
 * present excesses.
 */
static int
soft_scan_cb(lcollection_t *lcol, void *a)
{
	int64_t excess;
	soft_scan_arg_t *arg = a;

	if ((excess = lcol->lcol_rss - lcol->lcol_rss_cap) > 0) {
		/*
		 * NOTE(review): the division assumes ssa_sum_excess is
		 * nonzero whenever any collection has a positive excess,
		 * which holds when the sum was computed from the same RSS
		 * samples (see sum_excess_cb) -- confirm callers keep the
		 * two passes consistent.
		 */
		debug("col %lld excess %lld scan_goal %lld sum_excess %llu, "
		    "scanning %lld\n", (long long)lcol->lcol_id,
		    (long long)excess, (long long)arg->ssa_scan_goal,
		    (unsigned long long)arg->ssa_sum_excess,
		    (long long)(excess * arg->ssa_scan_goal /
		    arg->ssa_sum_excess));

		/* Scan this collection's proportional share of the goal. */
		scan(lcol, (int64_t)(excess * arg->ssa_scan_goal /
		    arg->ssa_sum_excess));
		lcol->lcol_stat.lcols_scan++;
	}

	return (0);
}
562 
/*
 * When a scan could happen, but caps aren't enforced tick the
 * lcols_unenforced_cap counter.
 */
/*ARGSUSED*/
static int
unenforced_cap_cb(lcollection_t *lcol, void *arg)
{
	lcol->lcol_stat.lcols_unenforced_cap++;

	return (0);
}
575 
576 /*
577  * Update the count of physically installed memory.
578  */
579 static void
580 update_phys_total(void)
581 {
582 	uint64_t old_phys_total;
583 
584 	old_phys_total = phys_total;
585 	phys_total = (uint64_t)sysconf(_SC_PHYS_PAGES) * sysconf(_SC_PAGESIZE)
586 	    / 1024;
587 	if (phys_total != old_phys_total)
588 		debug("physical memory%s: %lluM\n", (old_phys_total == 0 ?
589 		    "" : " adjusted"), (unsigned long long)(phys_total / 1024));
590 }
591 
/*
 * Unlink a process from its collection, updating relevant statistics, and
 * freeing its associated memory.
 */
void
lprocess_free(lprocess_t *lpc)
{
	pid_t pid;

	lpc->lpc_collection->lcol_stat.lcols_proc_out++;

	/* Unlink from the collection's doubly-linked process list. */
	if (lpc->lpc_prev != NULL)
		lpc->lpc_prev->lpc_next = lpc->lpc_next;
	if (lpc->lpc_next != NULL)
		lpc->lpc_next->lpc_prev = lpc->lpc_prev;
	/*
	 * NOTE(review): lpc_next != lpc can only be false for a self-linked
	 * node, which this list should never contain; the expression
	 * otherwise just promotes the successor (or NULL) to list head.
	 */
	if (lpc->lpc_collection->lcol_lprocess == lpc)
		lpc->lpc_collection->lcol_lprocess = (lpc->lpc_next !=
		    lpc ? lpc->lpc_next : NULL);
	lpc->lpc_next = lpc->lpc_prev = NULL;

	/* Free cached page data and mapping structures. */
	if (lpc->lpc_prpageheader != NULL)
		free(lpc->lpc_prpageheader);
	if (lpc->lpc_xmap != NULL)
		free(lpc->lpc_xmap);
	/* Close any cached /proc descriptors via the revocable-fd layer. */
	if (lpc->lpc_psinfo_fd >= 0) {
		if (rfd_close(lpc->lpc_psinfo_fd) != 0)
			debug("could not close %d lpc_psinfo_fd %d",
			    (int)lpc->lpc_pid, lpc->lpc_psinfo_fd);
		lpc->lpc_psinfo_fd = -1;
	}
	if (lpc->lpc_pgdata_fd >= 0) {
		if (rfd_close(lpc->lpc_pgdata_fd) != 0)
			debug("could not close %d lpc_pgdata_fd %d",
			    (int)lpc->lpc_pid, lpc->lpc_pgdata_fd);
		lpc->lpc_pgdata_fd = -1;
	}
	if (lpc->lpc_xmap_fd >= 0) {
		if (rfd_close(lpc->lpc_xmap_fd) != 0)
			debug("could not close %d lpc_xmap_fd %d",
			    (int)lpc->lpc_pid, lpc->lpc_xmap_fd);
		lpc->lpc_xmap_fd = -1;
	}
	if (lpc->lpc_ignore != NULL)
		lmapping_free(&lpc->lpc_ignore);
	/* Save the pid for the trace message after the struct is freed. */
	pid = lpc->lpc_pid;
	free(lpc);
	debug_high("process %d freed\n", (int)pid);
}
640 
/*
 * Collection clear callback.  Clears the mark used by the mark-and-sweep
 * reconfiguration pass (see reconfigure()).
 */
/*ARGSUSED*/
static int
collection_clear_cb(lcollection_t *lcol, void *arg)
{
	lcol->lcol_mark = 0;

	return (0);
}
652 
/*
 * Respond to a terminating signal by setting a termination flag.
 */
/*ARGSUSED*/
static void
terminate_signal(int signal)
{
	/* Record only the first terminating signal received. */
	if (termination_signal == 0)
		termination_signal = signal;
	/* The main loop polls this flag to exit cleanly. */
	should_run = 0;
}
664 
/*
 * Handle any synchronous or asynchronous signals that would ordinarily cause a
 * process to abort.
 */
/*ARGSUSED*/
static void
abort_signal(int signal)
{
	/*
	 * Allow the scanner to make a last-ditch effort to resume any stopped
	 * processes.
	 */
	scan_abort();
	abort();
}
680 
681 /*
682  * Clean up collections which have been removed due to configuration.  Unlink
683  * the collection from lcollection and free it.
684  */
685 /*ARGSUSED*/
686 static int
687 collection_sweep_cb(lcollection_t *lcol, void *arg)
688 {
689 	if (lcol->lcol_mark == 0) {
690 		debug("freeing %s %s\n", rcfg.rcfg_mode_name, lcol->lcol_name);
691 		lcollection_free(lcol);
692 	}
693 
694 	return (0);
695 }
696 
/*
 * Set those variables which depend on the global configuration.
 */
static void
finish_configuration(void)
{
	/*
	 * Warn that any lnode (or non-project) mode specification (by an SRM
	 * 1.3 configuration file, for example) is ignored.
	 */
	if (strcmp(rcfg.rcfg_mode_name, "project") != 0) {
		warn(gettext("%s mode specification ignored -- using project"
		    " mode\n"), rcfg.rcfg_mode_name);
		/* Force project mode regardless of what was configured. */
		rcfg.rcfg_mode_name = "project";
		rcfg.rcfg_mode = rctype_project;
	}

	lcollection_set_type(rcfg.rcfg_mode);
}
716 
/*
 * Cause the configuration file to be reread and applied.
 */
static void
reread_configuration_file(void)
{
	rcfg_t rcfg_new;
	struct stat st;

	/* Skip the reread if the file has not been modified since last time. */
	if (stat(rcfg.rcfg_filename, &st) == 0 && st.st_mtime ==
	    rcfg.rcfg_last_modification)
		return;

	if (rcfg_read(rcfg.rcfg_filename, rcfg.rcfg_fd, &rcfg_new,
	    update_statistics) != 0)
		/* Keep running with the previous configuration. */
		warn(gettext("can't reread configuration"));
	else {
		/*
		 * The configuration file has been read.  Remove existing
		 * collections in case there is a change in collection type.
		 */
		if (rcfg.rcfg_mode != rcfg_new.rcfg_mode) {
			list_walk_collection(collection_clear_cb, NULL);
			list_walk_collection(collection_sweep_cb, NULL);
		}

		/*
		 * Make the newly-read configuration the global one, and update
		 * any variables that depend on it.
		 */
		rcfg = rcfg_new;
		finish_configuration();
	}
}
751 
/*
 * Reread the configuration file, then examine changes, additions, and
 * deletions to cap definitions.
 */
static void
reconfigure(void)
{
	debug("reconfigure...\n");

	/*
	 * Reread the configuration data.
	 */
	reread_configuration_file();

	/*
	 * Walk the lcollection, marking active collections so inactive ones
	 * can be freed.
	 */
	list_walk_collection(collection_clear_cb, NULL);
	lcollection_update(LCU_ACTIVE_ONLY); /* mark */
	list_walk_collection(collection_sweep_cb, NULL);
}
774 
/*
 * Respond to SIGHUP by triggering the rereading the configuration file and cap
 * definitions.
 */
/*ARGSUSED*/
static void
sighup(int signal)
{
	/* Deferred: the main loop performs the actual reconfiguration. */
	should_reconfigure = 1;
}
785 
/*
 * Print, for debugging purposes, each collection's interval statistics.
 */
/*ARGSUSED*/
static int
simple_report_collection_cb(lcollection_t *lcol, void *arg)
{
/*
 * DELTA yields the change in a counter over the current interval; VALID
 * yields its current value.  Both report 0 while lcol_stat_invalidate is
 * set, i.e. before the interval's first sample has been taken.
 */
#define	DELTA(field) \
	(unsigned long long)(lcol->lcol_stat_invalidate ? 0 : \
	    (lcol->lcol_stat.field - lcol->lcol_stat_old.field))
#define	VALID(field) \
	(unsigned long long)(lcol->lcol_stat_invalidate ? 0 : \
	    lcol->lcol_stat.field)

	debug("%s %s status: succeeded/attempted (k): %llu/%llu, "
	    "ineffective/scans/unenforced/samplings:  %llu/%llu/%llu/%llu, RSS "
	    "min/max (k): %llu/%llu, cap %llu kB, processes/thpt: %llu/%llu, "
	    "%llu scans over %llu ms\n", rcfg.rcfg_mode_name, lcol->lcol_name,
	    DELTA(lcols_pg_eff), DELTA(lcols_pg_att),
	    DELTA(lcols_scan_ineffective), DELTA(lcols_scan),
	    DELTA(lcols_unenforced_cap), DELTA(lcols_rss_sample),
	    VALID(lcols_min_rss), VALID(lcols_max_rss),
	    (unsigned long long)lcol->lcol_rss_cap,
	    (unsigned long long)(lcol->lcol_stat.lcols_proc_in -
	    lcol->lcol_stat.lcols_proc_out), DELTA(lcols_proc_out),
	    DELTA(lcols_scan_count), DELTA(lcols_scan_time_complete) / (NANOSEC
	    / MILLISEC));

#undef DELTA
#undef VALID

	return (0);
}
819 
/*
 * Record each collection's interval statistics in the statistics file.
 * arg carries the destination file descriptor, cast through intptr_t.
 */
static int
report_collection_cb(lcollection_t *lcol, void *arg)
{
	lcollection_report_t dc;
	int fd = (intptr_t)arg;

	/*
	 * Copy the relevant fields to the collection's record.
	 */
	bzero(&dc, sizeof (dc));
	dc.lcol_id = lcol->lcol_id;
	(void) strcpy(dc.lcol_name, lcol->lcol_name);
	dc.lcol_rss = lcol->lcol_rss;
	dc.lcol_image_size = lcol->lcol_image_size;
	dc.lcol_rss_cap = lcol->lcol_rss_cap;
	dc.lcol_stat = lcol->lcol_stat;

	if (write(fd, &dc, sizeof (dc)) == sizeof (dc)) {
		/*
		 * Set a flag to indicate that the exported interval snapshot
		 * values should be reset at the next sample.
		 */
		lcol->lcol_stat_invalidate = 1;
	} else {
		/* A short or failed write; leave the interval stats intact. */
		debug("can't write %s %s statistics", rcfg.rcfg_mode_name,
		    lcol->lcol_name);
	}

	return (0);
}
853 
854 /*
855  * Determine the count of pages scanned by the global page scanner, obtained
856  * from the cpu_stat:*::scan kstats.  Return zero on success.
857  */
858 static int
859 get_globally_scanned_pages(uint64_t *scannedp)
860 {
861 	kstat_t *ksp;
862 	uint64_t scanned = 0;
863 
864 	if (kstat_chain_update(kctl) == -1) {
865 		warn(gettext("can't update kstat chain"));
866 		return (0);
867 	}
868 
869 	for (ksp = kctl->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
870 		if (strcmp(ksp->ks_module, "cpu_stat") == 0) {
871 			if (kstat_read(kctl, ksp, NULL) != -1) {
872 				scanned += ((cpu_stat_t *)
873 				    ksp->ks_data)->cpu_vminfo.scan;
874 			} else
875 				return (-1);
876 		}
877 	}
878 
879 	*scannedp = scanned;
880 	return (0);
881 }
882 
883 /*
884  * Update the shared statistics file with each collection's current statistics.
885  * Return zero on success.
886  */
887 static int
888 update_statistics(void)
889 {
890 	int fd, res;
891 	static char template[LINELEN];
892 
893 	/*
894 	 * Create a temporary file.
895 	 */
896 	if (sizeof (template) < (strlen(rcfg.rcfg_stat_file) +
897 	    strlen(STAT_TEMPLATE_SUFFIX) + 1)) {
898 		debug("temporary file template size too small\n");
899 		return (-1);
900 	}
901 	(void) strcpy(template, rcfg.rcfg_stat_file);
902 	(void) strcat(template, STAT_TEMPLATE_SUFFIX);
903 	(void) rfd_reserve(1);
904 	fd = mkstemp(template);
905 
906 	/*
907 	 * Write the header and per-collection statistics.
908 	 */
909 	if (fd >= 0) {
910 		rcapd_stat_hdr_t rs;
911 
912 		rs.rs_pid = rcapd_pid;
913 		rs.rs_time = gethrtime();
914 		ASSERT(sizeof (rs.rs_mode) > strlen(rcfg.rcfg_mode_name));
915 		(void) strcpy(rs.rs_mode, rcfg.rcfg_mode_name);
916 		rs.rs_pressure_cur = memory_pressure;
917 		rs.rs_pressure_cap = rcfg.rcfg_memory_cap_enforcement_pressure;
918 		rs.rs_pressure_sample = memory_pressure_sample;
919 
920 		if (fchmod(fd, 0644) == 0 && write(fd, &rs, sizeof (rs)) ==
921 		    sizeof (rs)) {
922 			list_walk_collection(report_collection_cb,
923 				(void *)(intptr_t)fd);
924 			/*
925 			 * Replace the existing statistics file with this new
926 			 * one.
927 			 */
928 			res = rename(template, rcfg.rcfg_stat_file);
929 		} else
930 			res = -1;
931 		(void) close(fd);
932 	} else
933 		res = -1;
934 
935 	return (res);
936 }
937 
938 /*
939  * Verify the statistics file can be created and written to, and die if an
940  * existing file may be in use by another rcapd.
941  */
942 static int
943 verify_statistics(void)
944 {
945 	pid_t pid;
946 
947 	/*
948 	 * Warn if another instance of rcapd might be active.
949 	 */
950 	(void) rfd_reserve(1);
951 	pid = stat_get_rcapd_pid(rcfg.rcfg_stat_file);
952 	if (pid != rcapd_pid && pid != -1)
953 		die(gettext("%s exists; rcapd may already be active\n"),
954 		    rcfg.rcfg_stat_file);
955 
956 	return (update_statistics());
957 }
958 
959 static int
960 sum_excess_cb(lcollection_t *lcol, void *arg)
961 {
962 	uint64_t *sum_excess = arg;
963 
964 	*sum_excess += MAX((int64_t)0, (int64_t)(lcol->lcol_rss -
965 	    lcol->lcol_rss_cap));
966 	return (0);
967 }
968 
/*
 * Print the daemon's usage synopsis.
 */
static void
rcapd_usage(void)
{
	info(gettext("usage: rcapd [-d]\n"));
}
974 
975 void
976 check_update_statistics(void)
977 {
978 	hrtime_t now = gethrtime();
979 
980 	if (EVENT_TIME(now, next_report)) {
981 		debug("updating statistics...\n");
982 		list_walk_collection(simple_report_collection_cb, NULL);
983 		if (update_statistics() != 0)
984 			debug("couldn't update statistics");
985 		next_report = NEXT_REPORT_EVENT_TIME(now,
986 		    rcfg.rcfg_report_interval);
987 	}
988 }
989 
990 static void
991 verify_and_set_privileges(void)
992 {
993 	priv_set_t *required =
994 	    priv_str_to_set("zone,sys_resource,proc_owner", ",", NULL);
995 
996 	/*
997 	 * Ensure the required privileges, suitable for controlling processes,
998 	 * are possessed.
999 	 */
1000 	if (setppriv(PRIV_SET, PRIV_PERMITTED, required) != 0 || setppriv(
1001 	    PRIV_SET, PRIV_EFFECTIVE, required) != 0)
1002 		die(gettext("can't set requisite privileges"));
1003 
1004 	/*
1005 	 * Ensure access to /var/run/daemon.
1006 	 */
1007 	if (setreuid(DAEMON_UID, DAEMON_UID) != 0)
1008 		die(gettext("cannot become user daemon"));
1009 
1010 	priv_freeset(required);
1011 }
1012 
1013 int
1014 main(int argc, char *argv[])
1015 {
1016 	int res;
1017 	int should_fork = 1;	/* fork flag */
1018 	hrtime_t now;		/* current time */
1019 	hrtime_t next;		/* time of next event */
1020 	int sig;		/* signal iteration */
1021 	struct rlimit rl;
1022 	hrtime_t next_proc_walk;	/* time of next /proc scan */
1023 	hrtime_t next_configuration;	/* time of next configuration */
1024 	hrtime_t next_rss_sample;	/* (latest) time of next RSS sample */
1025 	int old_enforce_caps;		/* track changes in enforcement */
1026 					/* conditions */
1027 	soft_scan_arg_t arg;
1028 
1029 	(void) set_message_priority(RCM_INFO);
1030 	(void) setprogname("rcapd");
1031 	rcapd_pid = getpid();
1032 	(void) chdir("/");
1033 	should_run = 1;
1034 	ever_ran = 0;
1035 
1036 	(void) setlocale(LC_ALL, "");
1037 	(void) textdomain(TEXT_DOMAIN);
1038 
1039 	/*
1040 	 * Parse command-line options.
1041 	 */
1042 	while ((res = getopt(argc, argv, "dF")) > 0)
1043 		switch (res) {
1044 		case 'd':
1045 			should_fork = 0;
1046 			if (debug_mode == 0) {
1047 				debug_mode = 1;
1048 				(void) set_message_priority(RCM_DEBUG);
1049 			} else
1050 				(void) set_message_priority(RCM_DEBUG_HIGH);
1051 			break;
1052 		case 'F':
1053 			should_fork = 0;
1054 			break;
1055 		default:
1056 			rcapd_usage();
1057 			return (E_USAGE);
1058 			/*NOTREACHED*/
1059 		}
1060 
1061 	/*
1062 	 * If not debugging, fork and continue operating, changing the
1063 	 * destination of messages to syslog().
1064 	 */
1065 	if (should_fork == 1) {
1066 		pid_t child;
1067 		debug("forking\n");
1068 		child = fork();
1069 		if (child == -1)
1070 			die(gettext("cannot fork"));
1071 		if (child > 0)
1072 			return (0);
1073 		else {
1074 			rcapd_pid = getpid();
1075 			(void) set_message_destination(RCD_SYSLOG);
1076 			(void) fclose(stdin);
1077 			(void) fclose(stdout);
1078 			(void) fclose(stderr);
1079 		}
1080 		/*
		 * Start a new session and detach from the controlling tty.
1082 		 */
1083 		if (setsid() == (pid_t)-1)
1084 			debug(gettext("setsid() failed; cannot detach from "
1085 			    "terminal"));
1086 	}
1087 
1088 	/*
1089 	 * Read the configuration file.
1090 	 */
1091 	if (rcfg_read(RCAPD_DEFAULT_CONF_FILE, -1, &rcfg, verify_statistics)
1092 	    != 0)
1093 		die(gettext("invalid configuration: %s"),
1094 		    RCAPD_DEFAULT_CONF_FILE);
1095 	finish_configuration();
1096 	should_reconfigure = 0;
1097 
1098 	/*
1099 	 * Check that required privileges are possessed.
1100 	 */
1101 	verify_and_set_privileges();
1102 
1103 	now = next_report = next_proc_walk = next_rss_sample = gethrtime();
1104 	next_configuration = NEXT_EVENT_TIME(gethrtime(),
1105 	    rcfg.rcfg_reconfiguration_interval);
1106 
1107 	if (rcfg.rcfg_memory_cap_enforcement_pressure == 0) {
1108 		/*
1109 		 * Always enforce caps when strict caps are used.
1110 		 */
1111 		enforce_caps = 1;
1112 	}
1113 
1114 	/*
1115 	 * Open the kstat chain.
1116 	 */
1117 	kctl = kstat_open();
1118 	if (kctl == NULL)
1119 		die(gettext("can't open kstats"));
1120 
1121 	/*
1122 	 * Set RLIMIT_NOFILE as high as practical, so roughly 10K processes can
1123 	 * be effectively managed without revoking descriptors (at 3 per
1124 	 * process).
1125 	 */
1126 	rl.rlim_cur = 32 * 1024;
1127 	rl.rlim_max = 32 * 1024;
1128 	if (setrlimit(RLIMIT_NOFILE, &rl) != 0 &&
1129 	    getrlimit(RLIMIT_NOFILE, &rl) == 0) {
1130 		rl.rlim_cur = rl.rlim_max;
1131 		(void) setrlimit(RLIMIT_NOFILE, &rl);
1132 	}
1133 	if (getrlimit(RLIMIT_NOFILE, &rl) == 0)
1134 		debug("fd limit: %lu\n", rl.rlim_cur);
1135 	else
1136 		debug("fd limit: unknown\n");
1137 
1138 	/*
1139 	 * Handle those signals whose (default) exit disposition
1140 	 * prevents rcapd from finishing scanning before terminating.
1141 	 */
1142 	(void) sigset(SIGINT, terminate_signal);
1143 	(void) sigset(SIGQUIT, abort_signal);
1144 	(void) sigset(SIGILL, abort_signal);
1145 	(void) sigset(SIGEMT, abort_signal);
1146 	(void) sigset(SIGFPE, abort_signal);
1147 	(void) sigset(SIGBUS, abort_signal);
1148 	(void) sigset(SIGSEGV, abort_signal);
1149 	(void) sigset(SIGSYS, abort_signal);
1150 	(void) sigset(SIGPIPE, terminate_signal);
1151 	(void) sigset(SIGALRM, terminate_signal);
1152 	(void) sigset(SIGTERM, terminate_signal);
1153 	(void) sigset(SIGUSR1, terminate_signal);
1154 	(void) sigset(SIGUSR2, terminate_signal);
1155 	(void) sigset(SIGPOLL, terminate_signal);
1156 	(void) sigset(SIGVTALRM, terminate_signal);
1157 	(void) sigset(SIGXCPU, abort_signal);
1158 	(void) sigset(SIGXFSZ, abort_signal);
1159 	for (sig = SIGRTMIN; sig <= SIGRTMAX; sig++)
1160 		(void) sigset(sig, terminate_signal);
1161 
1162 	/*
1163 	 * Install a signal handler for reconfiguration processing.
1164 	 */
1165 	(void) sigset(SIGHUP, sighup);
1166 
1167 	/*
1168 	 * Determine which process collections to cap.
1169 	 */
1170 	lcollection_update(LCU_COMPLETE);
1171 
1172 	/*
1173 	 * Loop forever, monitoring collections' resident set sizes and
1174 	 * enforcing their caps.  Look for changes in caps and process
1175 	 * membership, as well as responding to requests to reread the
1176 	 * configuration.  Update per-collection statistics periodically.
1177 	 */
1178 	while (should_run != 0) {
1179 		struct timespec ts;
1180 
1181 		/*
1182 		 * Announce that rcapd is starting.
1183 		 */
1184 		if (ever_ran == 0) {
1185 			info(gettext("starting\n"));
1186 			ever_ran = 1;
1187 		}
1188 
1189 		/*
1190 		 * Update the process list once every proc_walk_interval.  The
1191 		 * condition of global memory pressure is also checked at the
1192 		 * same frequency, if strict caps are in use.
1193 		 */
1194 		now = gethrtime();
1195 
1196 		/*
1197 		 * Detect configuration and cap changes at every
1198 		 * reconfiguration_interval, or when SIGHUP has been received.
1199 		 */
1200 		if (EVENT_TIME(now, next_configuration) ||
1201 		    should_reconfigure == 1) {
1202 			reconfigure();
1203 			next_configuration = NEXT_EVENT_TIME(now,
1204 			    rcfg.rcfg_reconfiguration_interval);
1205 
1206 			/*
1207 			 * Reset each event time to the shorter of the
1208 			 * previous and new intervals.
1209 			 */
1210 			if (next_report == 0 &&
1211 			    rcfg.rcfg_report_interval > 0)
1212 				next_report = now;
1213 			else
1214 				next_report = POSITIVE_MIN(next_report,
1215 				    NEXT_REPORT_EVENT_TIME(now,
1216 				    rcfg.rcfg_report_interval));
1217 			if (next_proc_walk == 0 &&
1218 			    rcfg.rcfg_proc_walk_interval > 0)
1219 				next_proc_walk = now;
1220 			else
1221 				next_proc_walk = POSITIVE_MIN(next_proc_walk,
1222 				    NEXT_EVENT_TIME(now,
1223 				    rcfg.rcfg_proc_walk_interval));
1224 			if (next_rss_sample == 0 &&
1225 			    rcfg.rcfg_rss_sample_interval > 0)
1226 				next_rss_sample = now;
1227 			else
1228 				next_rss_sample = POSITIVE_MIN(next_rss_sample,
1229 				    NEXT_EVENT_TIME(now,
1230 				    rcfg.rcfg_rss_sample_interval));
1231 
1232 			should_reconfigure = 0;
1233 			continue;
1234 		}
1235 
1236 		if (EVENT_TIME(now, next_proc_walk)) {
1237 			debug("scanning process list...\n");
1238 			proc_walk_all(proc_cb); /* mark */
1239 			list_walk_all(sweep_process_cb);
1240 			next_proc_walk = NEXT_EVENT_TIME(now,
1241 			    rcfg.rcfg_proc_walk_interval);
1242 		}
1243 
1244 		if (EVENT_TIME(now, next_rss_sample)) {
1245 			/*
1246 			 * Check for changes to the amount of installed
1247 			 * physical memory, to compute the current memory
1248 			 * pressure.
1249 			 */
1250 			update_phys_total();
1251 
1252 			/*
1253 			 * If soft caps are in use, determine if global memory
1254 			 * pressure exceeds the configured maximum above which
1255 			 * soft caps are enforced.
1256 			 */
1257 			memory_pressure = 100 -
1258 			    (int)((sysconf(_SC_AVPHYS_PAGES) *
1259 			    (sysconf(_SC_PAGESIZE) / 1024)) * 100.0 /
1260 			    phys_total);
1261 			memory_pressure_sample++;
1262 			if (rcfg.rcfg_memory_cap_enforcement_pressure > 0) {
1263 				if (memory_pressure >
1264 				    rcfg.rcfg_memory_cap_enforcement_pressure) {
1265 					if (enforce_soft_caps == 0) {
1266 						debug("memory pressure %d%%\n",
1267 						    memory_pressure);
1268 						enforce_soft_caps = 1;
1269 					}
1270 				} else {
1271 					if (enforce_soft_caps == 1)
1272 						enforce_soft_caps = 0;
1273 				}
1274 			}
1275 
1276 			/*
1277 			 * Determine if the global page scanner is running,
1278 			 * during which no memory caps should be enforced, to
1279 			 * prevent interference with the global page scanner.
1280 			 */
1281 			if (get_globally_scanned_pages(&new_sp) == 0) {
1282 				if (old_sp == 0)
1283 					/*EMPTY*/
1284 					;
1285 				else if ((new_sp - old_sp) > 0) {
1286 					if (global_scanner_running == 0) {
1287 						debug("global memory pressure "
1288 						    "detected (%llu pages "
1289 						    "scanned since last "
1290 						    "interval)\n",
1291 						    (unsigned long long)
1292 						    (new_sp - old_sp));
1293 						global_scanner_running = 1;
1294 					}
1295 				} else if (global_scanner_running == 1) {
1296 					debug("global memory pressure "
1297 					    "relieved\n");
1298 					global_scanner_running = 0;
1299 				}
1300 				old_sp = new_sp;
1301 			} else {
1302 				warn(gettext("kstat_read() failed"));
1303 				new_sp = old_sp;
1304 			}
1305 
1306 			/*
1307 			 * Cap enforcement is determined by the previous two
1308 			 * conditions.
1309 			 */
1310 			old_enforce_caps = enforce_caps;
1311 			enforce_caps =
1312 			    (rcfg.rcfg_memory_cap_enforcement_pressure ==
1313 			    0 || enforce_soft_caps == 1) &&
1314 			    !global_scanner_running;
1315 			if (old_enforce_caps != enforce_caps)
1316 				debug("%senforcing caps\n", enforce_caps == 0 ?
1317 				    "not " : "");
1318 
1319 			/*
1320 			 * Sample collections' member processes' RSSes and
1321 			 * recompute collections' excess.
1322 			 */
1323 			list_walk_all(mem_sample_cb);
1324 			list_walk_collection(collection_zero_mem_cb, NULL);
1325 			list_walk_all(memory_all_cb);
1326 			list_walk_collection(rss_sample_col_cb, NULL);
1327 			if (rcfg.rcfg_memory_cap_enforcement_pressure > 0)
1328 				debug("memory pressure %d%%\n",
1329 				    memory_pressure);
1330 			list_walk_collection(excess_print_cb, NULL);
1331 
1332 			/*
1333 			 * If soft caps are in use, determine the size of the
1334 			 * portion from each collection to scan for.
1335 			 */
1336 			if (enforce_soft_caps == 1) {
1337 				/*
1338 				 * Compute the sum of the collections'
1339 				 * excesses, which will be the denominator.
1340 				 */
1341 				arg.ssa_sum_excess = 0;
1342 				list_walk_collection(sum_excess_cb,
1343 				    &arg.ssa_sum_excess);
1344 
1345 				/*
1346 				 * Compute the quantity of memory (in
1347 				 * kilobytes) above the cap enforcement
1348 				 * pressure.  Set the scan goal to that
1349 				 * quantity (or at most the excess).
1350 				 */
1351 				arg.ssa_scan_goal = MIN((
1352 				    sysconf(_SC_PHYS_PAGES) * (100 -
1353 				    rcfg.rcfg_memory_cap_enforcement_pressure)
1354 				    / 100 - sysconf(_SC_AVPHYS_PAGES)) *
1355 				    (sysconf(_SC_PAGESIZE) / 1024),
1356 				    arg.ssa_sum_excess);
1357 			}
1358 
1359 			/*
1360 			 * Victimize offending collections.
1361 			 */
1362 			if (enforce_caps == 1 && ((enforce_soft_caps == 1 &&
1363 			    arg.ssa_scan_goal > 0 && arg.ssa_sum_excess > 0) ||
1364 			    (enforce_soft_caps == 0)))
1365 				if (enforce_soft_caps == 1) {
1366 					debug("scan goal is %lldKB\n",
1367 					    (long long)arg.ssa_scan_goal);
1368 					list_walk_collection(soft_scan_cb,
1369 					    &arg);
1370 				} else
1371 					list_walk_collection(scan_cb, NULL);
1372 			else
1373 				list_walk_collection(unenforced_cap_cb, NULL);
1374 
1375 			next_rss_sample = NEXT_EVENT_TIME(now,
1376 			    rcfg.rcfg_rss_sample_interval);
1377 		}
1378 
1379 		/*
1380 		 * Update the statistics file, if it's time.
1381 		 */
1382 		check_update_statistics();
1383 
1384 		/*
1385 		 * Sleep for some time before repeating.
1386 		 */
1387 		now = gethrtime();
1388 		next = next_configuration;
1389 		next = POSITIVE_MIN(next, next_proc_walk);
1390 		next = POSITIVE_MIN(next, next_report);
1391 		next = POSITIVE_MIN(next, next_rss_sample);
1392 		if (next > now && should_run != 0) {
1393 			debug("sleeping %-4.2f seconds\n", (float)(next -
1394 			    now) / (float)NANOSEC);
1395 			hrt2ts(next - now, &ts);
1396 			(void) nanosleep(&ts, NULL);
1397 		}
1398 	}
1399 	if (termination_signal != 0)
1400 		debug("exiting due to signal %d\n", termination_signal);
1401 	if (ever_ran != 0)
1402 		info(gettext("exiting\n"));
1403 
1404 	/*
1405 	 * Unlink the statistics file before exiting.
1406 	 */
1407 	if (rcfg.rcfg_stat_file[0] != 0)
1408 		(void) unlink(rcfg.rcfg_stat_file);
1409 
1410 	return (E_SUCCESS);
1411 }
1412