xref: /freebsd/sys/kern/tty_info.c (revision 6829dae12bb055451fa467da4589c43bd03b1e64)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2002 Networks Associates Technologies, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed for the FreeBSD Project by
 * ThinkSec AS and NAI Labs, the Security Research Division of Network
 * Associates, Inc.  under DARPA/SPAWAR contract N66001-01-C-8035
 * ("CBOSS"), as part of the DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/param.h>
#include <sys/cons.h>
#include <sys/kdb.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/tty.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

/*
 * Returns 1 if p2 is "better" than p1
 *
 * The algorithm for picking the "interesting" process is thus:
 *
 *	1) Only foreground processes are eligible - implied.
 *	2) Runnable processes are favored over anything else.  The runner
 *	   with the highest cpu utilization is picked (p_estcpu).  Ties are
 *	   broken by picking the highest pid.
 *	3) The sleeper with the shortest sleep time is next.  With ties,
 *	   we pick out just "short-term" sleepers (P_SINTR == 0).
 *	4) Further ties are broken by picking the highest pid.
 */
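
/*
 * For example, if the foreground process group holds a pipeline where one
 * process is computing and another is blocked waiting on it, the computing
 * (runnable) process is the one whose state is reported on ^T.
 */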

#define TESTAB(a, b)    ((a)<<1 | (b))
#define ONLYA   2
#define ONLYB   1
#define BOTH    3

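/*
 * TESTAB() packs two boolean conditions into a two-bit value: TESTAB(1, 0)
 * is ONLYA, TESTAB(0, 1) is ONLYB, TESTAB(1, 1) is BOTH, and TESTAB(0, 0)
 * is zero, which matches none of the cases below.
 */

/*
 * Sum the per-thread %cpu estimates of a process and report whether any of
 * its threads is currently runnable.
 */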
static int
proc_sum(struct proc *p, fixpt_t *estcpup)
{
	struct thread *td;
	int estcpu;
	int val;

	val = 0;
	estcpu = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (TD_ON_RUNQ(td) ||
		    TD_IS_RUNNING(td))
			val = 1;
		estcpu += sched_pctcpu(td);
		thread_unlock(td);
	}
	*estcpup = estcpu;

	return (val);
}

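/*
 * Returns 1 if td2 is "better" (more interesting) than td, applying the
 * same criteria as the process-level comparison to the threads within the
 * chosen process.
 */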
static int
thread_compare(struct thread *td, struct thread *td2)
{
	int runa, runb;
	int slpa, slpb;
	fixpt_t esta, estb;

	if (td == NULL)
		return (1);

	/*
	 * Fetch running stats, pctcpu usage, and the interruptible flag.
	 */
	thread_lock(td);
	runa = TD_IS_RUNNING(td) | TD_ON_RUNQ(td);
	slpa = td->td_flags & TDF_SINTR;
	esta = sched_pctcpu(td);
	thread_unlock(td);
	thread_lock(td2);
	runb = TD_IS_RUNNING(td2) | TD_ON_RUNQ(td2);
	estb = sched_pctcpu(td2);
	slpb = td2->td_flags & TDF_SINTR;
	thread_unlock(td2);
	/*
	 * See if at least one of them is runnable.
	 */
	switch (TESTAB(runa, runb)) {
	case ONLYA:
		return (0);
	case ONLYB:
		return (1);
	case BOTH:
		break;
	}
	/*
	 * Favor the one with the highest recent CPU utilization.
	 */
	if (estb > esta)
		return (1);
	if (esta > estb)
		return (0);
	/*
	 * Favor the one sleeping in a non-interruptible sleep.
	 */
	switch (TESTAB(slpa, slpb)) {
	case ONLYA:
		return (0);
	case ONLYB:
		return (1);
	case BOTH:
		break;
	}

	return (td < td2);	/* Arbitrary tie-breaker. */
}

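/*
 * Returns 1 if p2 is "better" than p1, per the selection criteria described
 * at the top of this file.
 */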
static int
proc_compare(struct proc *p1, struct proc *p2)
{
	int runa, runb;
	fixpt_t esta, estb;

	if (p1 == NULL)
		return (1);

	/*
	 * Fetch various stats about these processes.  After we drop the
	 * lock the information could be stale but the race is unimportant.
	 */
	PROC_LOCK(p1);
	runa = proc_sum(p1, &esta);
	PROC_UNLOCK(p1);
	PROC_LOCK(p2);
	runb = proc_sum(p2, &estb);
	PROC_UNLOCK(p2);

	/*
	 * See if at least one of them is runnable.
	 */
	switch (TESTAB(runa, runb)) {
	case ONLYA:
		return (0);
	case ONLYB:
		return (1);
	case BOTH:
		break;
	}
	/*
	 * Favor the one with the highest recent CPU utilization.
	 */
	if (estb > esta)
		return (1);
	if (esta > estb)
		return (0);
	/*
	 * Weed out zombies.
	 */
	switch (TESTAB(p1->p_state == PRS_ZOMBIE, p2->p_state == PRS_ZOMBIE)) {
	case ONLYA:
		return (1);
	case ONLYB:
		return (0);
	case BOTH:
		break;
	}

	return (p2->p_pid > p1->p_pid);		/* tie - return highest pid */
}

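/*
 * sbuf drain callback for tty_info(): while in the debugger the buffered
 * text goes straight to the console; otherwise it is written to the tty
 * itself.  Returns the number of characters drained, or -ENXIO if the tty
 * cannot accept output.
 */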
static int
sbuf_tty_drain(void *a, const char *d, int len)
{
	struct tty *tp;
	int rc;

	tp = a;

	if (kdb_active) {
		cnputsn(d, len);
		return (len);
	}
	if (tp != NULL && panicstr == NULL) {
		rc = tty_putstrn(tp, d, len);
		if (rc != 0)
			return (-ENXIO);
		return (len);
	}
	return (-ENXIO);
}

#ifdef STACK
static bool tty_info_kstacks = false;
SYSCTL_BOOL(_kern, OID_AUTO, tty_info_kstacks, CTLFLAG_RWTUN,
    &tty_info_kstacks, 0,
    "Enable printing kernel stack(9) traces on ^T (tty info)");
#endif

/*
 * Report on state of foreground process group.
 */
void
tty_info(struct tty *tp)
{
	struct timeval rtime, utime, stime;
#ifdef STACK
	struct stack stack;
	int sterr;
#endif
	struct proc *p, *ppick;
	struct thread *td, *tdpick;
	const char *stateprefix, *state;
	struct sbuf sb;
	long rss;
	int load, pctcpu;
	pid_t pid;
	char comm[MAXCOMLEN + 1];
	struct rusage ru;

	tty_lock_assert(tp, MA_OWNED);

	if (tty_checkoutq(tp) == 0)
		return;

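	/*
	 * Assemble the report in the tty's preallocated print buffer and
	 * flush it out through sbuf_tty_drain().
	 */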
	(void)sbuf_new(&sb, tp->t_prbuf, tp->t_prbufsz, SBUF_FIXEDLEN);
	sbuf_set_drain(&sb, sbuf_tty_drain, tp);

	/*
	 * Print the load average.  averunnable.ldavg[] is fixed point
	 * (scaled by FSCALE), so convert it to hundredths, rounding to
	 * nearest, for the "%d.%02d" format below.
	 */
	load = (averunnable.ldavg[0] * 100 + FSCALE / 2) >> FSHIFT;
	sbuf_printf(&sb, "%sload: %d.%02d ", tp->t_column == 0 ? "" : "\n",
	    load / 100, load % 100);

	if (tp->t_session == NULL) {
		sbuf_printf(&sb, "not a controlling terminal\n");
		goto out;
	}
	if (tp->t_pgrp == NULL) {
		sbuf_printf(&sb, "no foreground process group\n");
		goto out;
	}
	PGRP_LOCK(tp->t_pgrp);
	if (LIST_EMPTY(&tp->t_pgrp->pg_members)) {
		PGRP_UNLOCK(tp->t_pgrp);
		sbuf_printf(&sb, "empty foreground process group\n");
		goto out;
	}

	/*
	 * Pick the most interesting process and copy some of its
	 * state for printing later.  This operation could rely on stale
	 * data as we can't hold the proc slock or thread locks over the
	 * whole list. However, we're guaranteed not to reference an exited
	 * thread or proc since we hold the tty locked.
	 */
	p = NULL;
	LIST_FOREACH(ppick, &tp->t_pgrp->pg_members, p_pglist)
		if (proc_compare(p, ppick))
			p = ppick;

	PROC_LOCK(p);
	PGRP_UNLOCK(tp->t_pgrp);
	td = NULL;
	FOREACH_THREAD_IN_PROC(p, tdpick)
		if (thread_compare(td, tdpick))
			td = tdpick;
	stateprefix = "";
	thread_lock(td);
	if (TD_IS_RUNNING(td))
		state = "running";
	else if (TD_ON_RUNQ(td) || TD_CAN_RUN(td))
		state = "runnable";
	else if (TD_IS_SLEEPING(td)) {
		/* XXX: If we're sleeping, are we ever not in a queue? */
		if (TD_ON_SLEEPQ(td))
			state = td->td_wmesg;
		else
			state = "sleeping without queue";
	} else if (TD_ON_LOCK(td)) {
		state = td->td_lockname;
		stateprefix = "*";
	} else if (TD_IS_SUSPENDED(td))
		state = "suspended";
	else if (TD_AWAITING_INTR(td))
		state = "intrwait";
	else if (p->p_state == PRS_ZOMBIE)
		state = "zombie";
	else
		state = "unknown";
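	/*
	 * sched_pctcpu() reports a fixed point fraction of one CPU (scaled
	 * by FSCALE); convert it to hundredths of a percent, rounding to
	 * nearest, for the "%d%%" output below.
	 */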
	pctcpu = (sched_pctcpu(td) * 10000 + FSCALE / 2) >> FSHIFT;
#ifdef STACK
	if (tty_info_kstacks) {
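		/*
		 * Capture the thread's kernel stack while its thread lock is
		 * still held; running or swapped-out threads need the
		 * _running variant of stack_save.
		 */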
		stack_zero(&stack);
		if (TD_IS_SWAPPED(td) || TD_IS_RUNNING(td))
			sterr = stack_save_td_running(&stack, td);
		else {
			stack_save_td(&stack, td);
			sterr = 0;
		}
	}
#endif
	thread_unlock(td);
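	/*
	 * Embryonic and zombie processes have no valid vmspace, so report a
	 * zero resident set size for them.
	 */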
	if (p->p_state == PRS_NEW || p->p_state == PRS_ZOMBIE)
		rss = 0;
	else
		rss = pgtok(vmspace_resident_count(p->p_vmspace));
	microuptime(&rtime);
	timevalsub(&rtime, &p->p_stats->p_start);
	rufetchcalc(p, &ru, &utime, &stime);
	pid = p->p_pid;
	strlcpy(comm, p->p_comm, sizeof comm);
	PROC_UNLOCK(p);

	/* Print command, pid, state, rtime, utime, stime, %cpu, and rss. */
	sbuf_printf(&sb,
	    " cmd: %s %d [%s%s] %ld.%02ldr %ld.%02ldu %ld.%02lds %d%% %ldk\n",
	    comm, pid, stateprefix, state,
	    (long)rtime.tv_sec, rtime.tv_usec / 10000,
	    (long)utime.tv_sec, utime.tv_usec / 10000,
	    (long)stime.tv_sec, stime.tv_usec / 10000,
	    pctcpu / 100, rss);

#ifdef STACK
	if (tty_info_kstacks && sterr == 0)
		stack_sbuf_print_flags(&sb, &stack, M_NOWAIT);
#endif

out:
	sbuf_finish(&sb);
	sbuf_delete(&sb);
}