xref: /titanic_44/usr/src/cmd/truss/fcall.c (revision 749f21d359d8fbd020c974a1a5227316221bfc9c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #define	_SYSCALL32
30 
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <unistd.h>
34 #include <ctype.h>
35 #include <string.h>
36 #include <memory.h>
37 #include <errno.h>
38 #include <sys/types.h>
39 #include <sys/stack.h>
40 #include <signal.h>
41 #include <limits.h>
42 #include <sys/isa_defs.h>
43 #include <proc_service.h>
44 #include <dlfcn.h>
45 #include <fnmatch.h>
46 #include <libproc.h>
47 #include "ramdata.h"
48 #include "systable.h"
49 #include "print.h"
50 #include "proto.h"
51 #include "htbl.h"
52 
53 /*
54  * Functions supporting library function call tracing.
55  */
56 
57 typedef struct {
58 	prmap_t	*pmap;
59 	int	nmap;
60 } ph_map_t;
61 
62 /*
63  * static functions in this file.
64  */
65 void function_entry(private_t *, struct bkpt *, struct callstack *);
66 void function_return(private_t *, struct callstack *);
67 int object_iter(void *, const prmap_t *, const char *);
68 int symbol_iter(void *, const GElf_Sym *, const char *);
69 uintptr_t get_return_address(uintptr_t *);
70 int get_arguments(long *argp);
71 uintptr_t previous_fp(uintptr_t, uintptr_t *);
72 int lwp_stack_traps(void *cd, const lwpstatus_t *Lsp);
73 int thr_stack_traps(const td_thrhandle_t *Thp, void *cd);
74 struct bkpt *create_bkpt(uintptr_t, int, int);
75 void set_deferred_breakpoints(void);
76 
77 #define	DEF_MAXCALL	16	/* initial value of Stk->maxcall */
78 
79 #define	FAULT_ADDR	((uintptr_t)(0-8))
80 
81 #define	HASHSZ	2048
82 #define	bpt_hash(addr)	((((addr) >> 13) ^ ((addr) >> 2)) & 0x7ff)
83 
/*
 * Create the libc_db thread agent for the traced process, once only.
 * On success, enable TD_CREATE event reporting and plant a breakpoint
 * at the event address so we hear about every new thread; that
 * breakpoint is marked BPT_TD_CREATE so it can be recognized later.
 */
static void
setup_thread_agent(void)
{
	struct bkpt *Bp;
	td_notify_t notify;
	td_thr_events_t events;

	if (Thr_agent != NULL)	/* only once */
		return;
	if (td_init() != TD_OK || td_ta_new(Proc, &Thr_agent) != TD_OK)
		Thr_agent = NULL;	/* not (yet) a threaded process */
	else {
		td_event_emptyset(&events);
		td_event_addset(&events, TD_CREATE);
		/* all four steps must succeed before we set the flag */
		if (td_ta_event_addr(Thr_agent, TD_CREATE, &notify) == TD_OK &&
		    notify.type == NOTIFY_BPT &&
		    td_ta_set_event(Thr_agent, &events) == TD_OK &&
		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
			Bp->flags |= BPT_TD_CREATE;
	}
}
105 
106 /*
107  * Establishment of breakpoints on traced library functions.
108  */
void
establish_breakpoints(void)
{
	/* nothing to do unless function-call tracing was requested */
	if (Dynpat == NULL)
		return;

	/* allocate the breakpoint hash table */
	if (bpt_hashtable == NULL) {
		bpt_hashtable = my_malloc(HASHSZ * sizeof (struct bkpt *),
			NULL);
		(void) memset(bpt_hashtable, 0,
			HASHSZ * sizeof (struct bkpt *));
	}

	/*
	 * Set special rtld_db event breakpoints, first time only.
	 * These let us notice preinit/postinit and dlopen()/dlclose()
	 * activity in the dynamic linker.
	 */
	if (Rdb_agent == NULL &&
	    (Rdb_agent = Prd_agent(Proc)) != NULL) {
		rd_notify_t notify;
		struct bkpt *Bp;

		(void) rd_event_enable(Rdb_agent, 1);
		if (rd_event_addr(Rdb_agent, RD_PREINIT, &notify) == RD_OK &&
		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
			Bp->flags |= BPT_PREINIT;
		if (rd_event_addr(Rdb_agent, RD_POSTINIT, &notify) == RD_OK &&
		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
			Bp->flags |= BPT_POSTINIT;
		if (rd_event_addr(Rdb_agent, RD_DLACTIVITY, &notify) == RD_OK &&
		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
			Bp->flags |= BPT_DLACTIVITY;
	}

	/*
	 * Set special thread event breakpoint, first time libc is seen.
	 */
	if (Thr_agent == NULL)
		setup_thread_agent();

	/*
	 * Tell libproc to update its mappings.
	 */
	Pupdate_maps(Proc);

	/*
	 * Iterate over the shared objects, creating breakpoints.
	 */
	(void) Pobject_iter(Proc, object_iter, NULL);

	/*
	 * Now actually set all the breakpoints we just created.
	 */
	set_deferred_breakpoints();
}
164 
165 /*
166  * Initial establishment of stacks in a newly-grabbed process.
167  * establish_breakpoints() has already been called.
168  */
169 void
170 establish_stacks(void)
171 {
172 	const pstatus_t *Psp = Pstatus(Proc);
173 	char mapfile[64];
174 	int mapfd;
175 	struct stat statb;
176 	prmap_t *Pmap = NULL;
177 	int nmap = 0;
178 	ph_map_t ph_map;
179 
180 	(void) sprintf(mapfile, "/proc/%d/rmap", (int)Psp->pr_pid);
181 	if ((mapfd = open(mapfile, O_RDONLY)) < 0 ||
182 	    fstat(mapfd, &statb) != 0 ||
183 	    statb.st_size < sizeof (prmap_t) ||
184 	    (Pmap = my_malloc(statb.st_size, NULL)) == NULL ||
185 	    (nmap = pread(mapfd, Pmap, statb.st_size, 0L)) <= 0 ||
186 	    (nmap /= sizeof (prmap_t)) == 0) {
187 		if (Pmap != NULL)
188 			free(Pmap);
189 		Pmap = NULL;
190 		nmap = 0;
191 	}
192 	if (mapfd >= 0)
193 		(void) close(mapfd);
194 
195 	/*
196 	 * Iterate over lwps, establishing stacks.
197 	 */
198 	ph_map.pmap = Pmap;
199 	ph_map.nmap = nmap;
200 	(void) Plwp_iter(Proc, lwp_stack_traps, &ph_map);
201 	if (Pmap != NULL)
202 		free(Pmap);
203 
204 	if (Thr_agent == NULL)
205 		return;
206 
207 	/*
208 	 * Iterate over unbound threads, establishing stacks.
209 	 */
210 	(void) td_ta_thr_iter(Thr_agent, thr_stack_traps, NULL,
211 		TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
212 		TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
213 }
214 
/*
 * Walk the symbols of the named object, letting symbol_iter() create
 * breakpoints for those matching the patterns in Dyp.
 * An empty prt_name identifies the a.out itself (see object_iter()).
 */
void
do_symbol_iter(const char *object_name, struct dynpat *Dyp)
{
	if (*Dyp->Dp->prt_name == '\0')
		object_name = PR_OBJ_EXEC;

	/*
	 * Always search the dynamic symbol table.
	 */
	(void) Psymbol_iter(Proc, object_name,
		PR_DYNSYM, BIND_WEAK|BIND_GLOBAL|TYPE_FUNC,
		symbol_iter, Dyp);

	/*
	 * Search the static symbol table if this is the
	 * executable file or if we are being asked to
	 * report internal calls within the library.
	 */
	if (object_name == PR_OBJ_EXEC || Dyp->internal)
		(void) Psymbol_iter(Proc, object_name,
			PR_SYMTAB, BIND_ANY|TYPE_FUNC,
			symbol_iter, Dyp);
}
238 
/*
 * Pobject_iter() callback, one call per mapped object.
 * Records each executable object in the Dyn list (first time seen) and,
 * for every dynlib pattern that matches the object's name, creates
 * breakpoints on its matching function symbols.
 * Returns 0 to continue iterating, non-zero to stop on interrupt.
 */
/* ARGSUSED */
int
object_iter(void *cd, const prmap_t *pmp, const char *object_name)
{
	char name[100];
	struct dynpat *Dyp;
	struct dynlib *Dp;
	const char *str;
	char *s;
	int i;

	/* only read-only executable mappings are of interest */
	if ((pmp->pr_mflags & MA_WRITE) || !(pmp->pr_mflags & MA_EXEC))
		return (0);

	/*
	 * Set special thread event breakpoint, first time libc is seen.
	 * NOTE(review): assumes object_name is non-NULL for executable
	 * mappings — confirm against Pobject_iter()'s contract.
	 */
	if (Thr_agent == NULL && strstr(object_name, "/libc.so.") != NULL)
		setup_thread_agent();

	/* have we seen this object before? */
	for (Dp = Dyn; Dp != NULL; Dp = Dp->next)
		if (strcmp(object_name, Dp->lib_name) == 0 ||
		    (strcmp(Dp->lib_name, "a.out") == 0 &&
		    strcmp(pmp->pr_mapname, "a.out") == 0))
			break;

	if (Dp == NULL) {
		/* first sighting: build a new dynlib entry */
		Dp = my_malloc(sizeof (struct dynlib), NULL);
		(void) memset(Dp, 0, sizeof (struct dynlib));
		if (strcmp(pmp->pr_mapname, "a.out") == 0) {
			/* the executable prints with no "lib:" prefix */
			Dp->lib_name = strdup(pmp->pr_mapname);
			Dp->match_name = strdup(pmp->pr_mapname);
			Dp->prt_name = strdup("");
		} else {
			Dp->lib_name = strdup(object_name);
			/* basename of the object's path */
			if ((str = strrchr(object_name, '/')) != NULL)
				str++;
			else
				str = object_name;
			/* reserve 2 bytes: one for ':' below, one for NUL */
			(void) strncpy(name, str, sizeof (name) - 2);
			name[sizeof (name) - 2] = '\0';
			/* strip the ".so" suffix for pattern matching */
			if ((s = strstr(name, ".so")) != NULL)
				*s = '\0';
			Dp->match_name = strdup(name);
			(void) strcat(name, ":");
			Dp->prt_name = strdup(name);
		}
		Dp->next = Dyn;
		Dyn = Dp;
	}

	if (Dp->built ||
	    (not_consist && strcmp(Dp->prt_name, "ld:") != 0))	/* kludge */
		return (0);

	if (hflag && not_consist)
		(void) fprintf(stderr, "not_consist is TRUE, building %s\n",
			Dp->lib_name);

	Dp->base = pmp->pr_vaddr;
	Dp->size = pmp->pr_size;

	/*
	 * For every dynlib pattern that matches this library's name,
	 * iterate through all of the library's symbols looking for
	 * matching symbol name patterns.
	 */
	for (Dyp = Dynpat; Dyp != NULL; Dyp = Dyp->next) {
		if (interrupt|sigusr1)
			break;
		for (i = 0; i < Dyp->nlibpat; i++) {
			if (interrupt|sigusr1)
				break;
			if (fnmatch(Dyp->libpat[i], Dp->match_name, 0) != 0)
				continue;	/* no match */

			/*
			 * Require an exact match for the executable (a.out)
			 * and for the dynamic linker (ld.so.1).
			 */
			if ((strcmp(Dp->match_name, "a.out") == 0 ||
			    strcmp(Dp->match_name, "ld") == 0) &&
			    strcmp(Dyp->libpat[i], Dp->match_name) != 0)
				continue;

			/*
			 * Set Dyp->Dp to Dp so symbol_iter() can use it.
			 */
			Dyp->Dp = Dp;
			do_symbol_iter(object_name, Dyp);
			Dyp->Dp = NULL;
		}
	}

	Dp->built = TRUE;
	return (interrupt | sigusr1);
}
336 
337 /*
338  * Search for an existing breakpoint at the 'pc' location.
339  */
340 struct bkpt *
341 get_bkpt(uintptr_t pc)
342 {
343 	struct bkpt *Bp;
344 
345 	for (Bp = bpt_hashtable[bpt_hash(pc)]; Bp != NULL; Bp = Bp->next)
346 		if (pc == Bp->addr)
347 			break;
348 
349 	return (Bp);
350 }
351 
352 /*
353  * Create a breakpoint at 'pc', if one is not there already.
354  * 'ret' is true when creating a function return breakpoint, in which case
355  * fail and return NULL if the breakpoint would be created in writeable data.
356  * If 'set' it true, set the breakpoint in the process now.
357  */
struct bkpt *
create_bkpt(uintptr_t pc, int ret, int set)
{
	uint_t hix = bpt_hash(pc);
	struct bkpt *Bp;
	const prmap_t *pmp;

	/* return the existing breakpoint, if any */
	for (Bp = bpt_hashtable[hix]; Bp != NULL; Bp = Bp->next)
		if (pc == Bp->addr)
			return (Bp);

	/*
	 * Don't set return breakpoints on writeable data
	 * or on any space other than executable text.
	 * Don't set breakpoints in the child of a vfork()
	 * because that would modify the parent's address space.
	 */
	if (is_vfork_child ||
	    (ret &&
	    ((pmp = Paddr_to_text_map(Proc, pc)) == NULL ||
	    !(pmp->pr_mflags & MA_EXEC) ||
	    (pmp->pr_mflags & MA_WRITE))))
		return (NULL);

	/* create a new unnamed breakpoint */
	Bp = my_malloc(sizeof (struct bkpt), NULL);
	Bp->sym_name = NULL;
	Bp->dyn = NULL;
	Bp->addr = pc;
	Bp->instr = 0;
	Bp->flags = 0;
	/* if not set now, set_deferred_breakpoints() will do it later */
	if (set && Psetbkpt(Proc, Bp->addr, &Bp->instr) == 0)
		Bp->flags |= BPT_ACTIVE;
	/* insert at the head of the hash chain */
	Bp->next = bpt_hashtable[hix];
	bpt_hashtable[hix] = Bp;

	return (Bp);
}
396 
397 /*
398  * Set all breakpoints that haven't been set yet.
399  * Deactivate all breakpoints from modules that are not present any more.
400  */
401 void
402 set_deferred_breakpoints(void)
403 {
404 	struct bkpt *Bp;
405 	int i;
406 
407 	if (is_vfork_child)
408 		return;
409 
410 	for (i = 0; i < HASHSZ; i++) {
411 		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
412 			if (!(Bp->flags & BPT_ACTIVE)) {
413 				if (!(Bp->flags & BPT_EXCLUDE) &&
414 				    Psetbkpt(Proc, Bp->addr, &Bp->instr) == 0)
415 					Bp->flags |= BPT_ACTIVE;
416 			} else if (Paddr_to_text_map(Proc, Bp->addr) == NULL) {
417 				Bp->flags &= ~BPT_ACTIVE;
418 			}
419 		}
420 	}
421 }
422 
/*
 * Psymbol_iter() callback, one call per symbol of the current object.
 * Creates a named breakpoint at the symbol's address when the symbol
 * matches one of the patterns in the dynpat passed via 'cd'.
 * Returns 0 to continue iterating, non-zero to stop on interrupt.
 */
int
symbol_iter(void *cd, const GElf_Sym *sym, const char *sym_name)
{
	struct dynpat *Dyp = cd;
	struct dynlib *Dp = Dyp->Dp;	/* set by object_iter() */
	uintptr_t pc = sym->st_value;
	struct bkpt *Bp;
	int i;

	/* ignore any undefined symbols */
	if (sym->st_shndx == SHN_UNDEF)
		return (0);

	/*
	 * Arbitrarily omit "_start" from the executable.
	 * (Avoid indentation before main().)
	 */
	if (*Dp->prt_name == '\0' && strcmp(sym_name, "_start") == 0)
		return (0);

	/*
	 * Arbitrarily omit "_rt_boot" from the dynamic linker.
	 * (Avoid indentation before main().)
	 */
	if (strcmp(Dp->match_name, "ld") == 0 &&
	    strcmp(sym_name, "_rt_boot") == 0)
		return (0);

	/*
	 * Arbitrarily omit any symbols whose name starts with '.'.
	 * Apparantly putting a breakpoint on .umul causes a
	 * fatal error in libthread (%y is not restored correctly
	 * when a single step is taken).  Looks like a /proc bug.
	 */
	if (*sym_name == '.')
		return (0);

	/*
	 * For each pattern in the array of symbol patterns,
	 * if the pattern matches the symbol name, then
	 * create a breakpoint at the function in question.
	 */
	for (i = 0; i < Dyp->nsympat; i++) {
		if (interrupt|sigusr1)
			break;
		if (fnmatch(Dyp->sympat[i], sym_name, 0) != 0)
			continue;

		if ((Bp = create_bkpt(pc, 0, 0)) == NULL)	/* can't fail */
			return (0);

		/*
		 * New breakpoints receive a name now.
		 * For existing breakpoints, prefer the subset name if possible,
		 * else prefer the shorter name.
		 */
		if (Bp->sym_name == NULL) {
			Bp->sym_name = strdup(sym_name);
		} else if (strstr(Bp->sym_name, sym_name) != NULL ||
		    strlen(Bp->sym_name) > strlen(sym_name)) {
			free(Bp->sym_name);
			Bp->sym_name = strdup(sym_name);
		}
		Bp->dyn = Dp;
		Bp->flags |= Dyp->flag;
		if (Dyp->exclude)
			Bp->flags |= BPT_EXCLUDE;
		else if (Dyp->internal || *Dp->prt_name == '\0')
			Bp->flags |= BPT_INTERNAL;
		/* first matching pattern wins; stop here */
		return (0);
	}

	return (interrupt | sigusr1);
}
497 
498 /* For debugging only ---- */
499 void
500 report_htable_stats(void)
501 {
502 	const pstatus_t *Psp = Pstatus(Proc);
503 	struct callstack *Stk;
504 	struct bkpt *Bp;
505 	uint_t Min = 1000000;
506 	uint_t Max = 0;
507 	uint_t Avg = 0;
508 	uint_t Total = 0;
509 	uint_t i, j;
510 	uint_t bucket[HASHSZ];
511 
512 	if (Dynpat == NULL || !hflag)
513 		return;
514 
515 	hflag = FALSE;
516 	(void) memset(bucket, 0, sizeof (bucket));
517 
518 	for (i = 0; i < HASHSZ; i++) {
519 		j = 0;
520 		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next)
521 			j++;
522 		if (j < Min)
523 			Min = j;
524 		if (j > Max)
525 			Max = j;
526 		if (j < HASHSZ)
527 			bucket[j]++;
528 		Total += j;
529 	}
530 	Avg = (Total + HASHSZ / 2) / HASHSZ;
531 	(void) fprintf(stderr, "truss hash table statistics --------\n");
532 	(void) fprintf(stderr, "    Total = %u\n", Total);
533 	(void) fprintf(stderr, "      Min = %u\n", Min);
534 	(void) fprintf(stderr, "      Max = %u\n", Max);
535 	(void) fprintf(stderr, "      Avg = %u\n", Avg);
536 	for (i = 0; i < HASHSZ; i++)
537 		if (bucket[i])
538 			(void) fprintf(stderr, "    %3u buckets of size %d\n",
539 				bucket[i], i);
540 
541 	(void) fprintf(stderr, "truss-detected stacks --------\n");
542 	for (Stk = callstack; Stk != NULL; Stk = Stk->next) {
543 		(void) fprintf(stderr,
544 			"    base = 0x%.8lx  end = 0x%.8lx  size = %ld\n",
545 			(ulong_t)Stk->stkbase,
546 			(ulong_t)Stk->stkend,
547 			(ulong_t)(Stk->stkend - Stk->stkbase));
548 	}
549 	(void) fprintf(stderr, "primary unix stack --------\n");
550 	(void) fprintf(stderr,
551 		"    base = 0x%.8lx  end = 0x%.8lx  size = %ld\n",
552 		(ulong_t)Psp->pr_stkbase,
553 		(ulong_t)(Psp->pr_stkbase + Psp->pr_stksize),
554 		(ulong_t)Psp->pr_stksize);
555 	(void) fprintf(stderr, "nthr_create = %u\n", nthr_create);
556 }
557 
/*
 * Record the stack containing this lwp's stack pointer, if it is not
 * already known.  Tries, in order: the primary stack, the alternate
 * signal stack, the thread library's idea of the stack, and finally
 * the raw /proc rmap entries passed in by the caller.  If none match,
 * the tentatively-allocated callstack entry is unlinked and freed.
 */
void
make_lwp_stack(const lwpstatus_t *Lsp, prmap_t *Pmap, int nmap)
{
	const pstatus_t *Psp = Pstatus(Proc);
	uintptr_t sp = Lsp->pr_reg[R_SP];
	id_t lwpid = Lsp->pr_lwpid;
	struct callstack *Stk;
	td_thrhandle_t th;
	td_thrinfo_t thrinfo;

	/* a 32-bit victim's %sp must be truncated to 32 bits */
	if (data_model != PR_MODEL_LP64)
		sp = (uint32_t)sp;

	/* check to see if we already have this stack */
	if (sp == 0)
		return;
	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
		if (sp >= Stk->stkbase && sp < Stk->stkend)
			return;

	/* tentatively link a new callstack entry at the head of the list */
	Stk = my_malloc(sizeof (struct callstack), NULL);
	Stk->next = callstack;
	callstack = Stk;
	nstack++;
	Stk->tref = 0;
	Stk->tid = 0;
	Stk->nthr_create = 0;
	Stk->ncall = 0;
	Stk->maxcall = DEF_MAXCALL;
	Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack), NULL);

	/* primary stack */
	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
		Stk->stkbase = Psp->pr_stkbase;
		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
		return;
	}

	/* alternate stack */
	if ((Lsp->pr_altstack.ss_flags & SS_ONSTACK) &&
	    sp >= (uintptr_t)Lsp->pr_altstack.ss_sp &&
	    sp < (uintptr_t)Lsp->pr_altstack.ss_sp
	    + Lsp->pr_altstack.ss_size) {
		Stk->stkbase = (uintptr_t)Lsp->pr_altstack.ss_sp;
		Stk->stkend = Stk->stkbase + Lsp->pr_altstack.ss_size;
		return;
	}

	/* thread stacks? */
	if (Thr_agent != NULL &&
	    td_ta_map_lwp2thr(Thr_agent, lwpid, &th) == TD_OK &&
	    td_thr_get_info(&th, &thrinfo) == TD_OK &&
	    sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
	    sp < (uintptr_t)thrinfo.ti_stkbase) {
		/* The bloody fools got this backwards! */
		/* ti_stkbase is actually the HIGH end of the stack */
		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
		return;
	}

	/* last chance -- try the raw memory map */
	for (; nmap; nmap--, Pmap++) {
		if (sp >= Pmap->pr_vaddr &&
		    sp < Pmap->pr_vaddr + Pmap->pr_size) {
			Stk->stkbase = Pmap->pr_vaddr;
			Stk->stkend = Pmap->pr_vaddr + Pmap->pr_size;
			return;
		}
	}

	/* nothing matched: undo the tentative insertion */
	callstack = Stk->next;
	nstack--;
	free(Stk->stack);
	free(Stk);
}
633 
/*
 * Record the stack containing this (unbound) thread's stack pointer,
 * if it is not already known.  Like make_lwp_stack() but driven from
 * the thread library's register set; only the primary stack and the
 * thread-library-reported stack are considered.
 */
void
make_thr_stack(const td_thrhandle_t *Thp, prgregset_t reg)
{
	const pstatus_t *Psp = Pstatus(Proc);
	td_thrinfo_t thrinfo;
	uintptr_t sp = reg[R_SP];
	struct callstack *Stk;

	/* a 32-bit victim's %sp must be truncated to 32 bits */
	if (data_model != PR_MODEL_LP64)
		sp = (uint32_t)sp;

	/* check to see if we already have this stack */
	if (sp == 0)
		return;
	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
		if (sp >= Stk->stkbase && sp < Stk->stkend)
			return;

	/* tentatively link a new callstack entry at the head of the list */
	Stk = my_malloc(sizeof (struct callstack), NULL);
	Stk->next = callstack;
	callstack = Stk;
	nstack++;
	Stk->tref = 0;
	Stk->tid = 0;
	Stk->nthr_create = 0;
	Stk->ncall = 0;
	Stk->maxcall = DEF_MAXCALL;
	Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack), NULL);

	/* primary stack */
	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
		Stk->stkbase = Psp->pr_stkbase;
		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
		return;
	}

	if (td_thr_get_info(Thp, &thrinfo) == TD_OK &&
	    sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
	    sp < (uintptr_t)thrinfo.ti_stkbase) {
		/* The bloody fools got this backwards! */
		/* ti_stkbase is actually the HIGH end of the stack */
		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
		return;
	}

	/* nothing matched: undo the tentative insertion */
	callstack = Stk->next;
	nstack--;
	free(Stk->stack);
	free(Stk);
}
684 
685 struct callstack *
686 find_lwp_stack(uintptr_t sp)
687 {
688 	const pstatus_t *Psp = Pstatus(Proc);
689 	char mapfile[64];
690 	int mapfd;
691 	struct stat statb;
692 	prmap_t *Pmap = NULL;
693 	prmap_t *pmap = NULL;
694 	int nmap = 0;
695 	struct callstack *Stk = NULL;
696 
697 	/*
698 	 * Get the address space map.
699 	 */
700 	(void) sprintf(mapfile, "/proc/%d/rmap", (int)Psp->pr_pid);
701 	if ((mapfd = open(mapfile, O_RDONLY)) < 0 ||
702 	    fstat(mapfd, &statb) != 0 ||
703 	    statb.st_size < sizeof (prmap_t) ||
704 	    (Pmap = my_malloc(statb.st_size, NULL)) == NULL ||
705 	    (nmap = pread(mapfd, Pmap, statb.st_size, 0L)) <= 0 ||
706 	    (nmap /= sizeof (prmap_t)) == 0) {
707 		if (Pmap != NULL)
708 			free(Pmap);
709 		if (mapfd >= 0)
710 			(void) close(mapfd);
711 		return (NULL);
712 	}
713 	(void) close(mapfd);
714 
715 	for (pmap = Pmap; nmap--; pmap++) {
716 		if (sp >= pmap->pr_vaddr &&
717 		    sp < pmap->pr_vaddr + pmap->pr_size) {
718 			Stk = my_malloc(sizeof (struct callstack), NULL);
719 			Stk->next = callstack;
720 			callstack = Stk;
721 			nstack++;
722 			Stk->stkbase = pmap->pr_vaddr;
723 			Stk->stkend = pmap->pr_vaddr + pmap->pr_size;
724 			Stk->tref = 0;
725 			Stk->tid = 0;
726 			Stk->nthr_create = 0;
727 			Stk->ncall = 0;
728 			Stk->maxcall = DEF_MAXCALL;
729 			Stk->stack = my_malloc(
730 				DEF_MAXCALL * sizeof (*Stk->stack), NULL);
731 			break;
732 		}
733 	}
734 
735 	free(Pmap);
736 	return (Stk);
737 }
738 
/*
 * Create a callstack entry for the stack containing 'sp'.
 * Checks the primary stack, the alternate signal stack, and the thread
 * library's stack bounds, falling back to find_lwp_stack() for
 * non-threaded processes.  Returns NULL (with a diagnostic under -h)
 * if the stack cannot be identified.
 */
struct callstack *
find_stack(uintptr_t sp)
{
	const pstatus_t *Psp = Pstatus(Proc);
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;
	id_t lwpid = Lsp->pr_lwpid;
	/* thread-pointer register, used only for diagnostics and Stk->tref */
#if defined(__sparc)
	prgreg_t tref = Lsp->pr_reg[R_G7];
#elif defined(__amd64)
	prgreg_t tref = Lsp->pr_reg[REG_FS];
#elif defined(__i386)
	prgreg_t tref = Lsp->pr_reg[GS];
#endif
	struct callstack *Stk = NULL;
	td_thrhandle_t th;
	td_thrinfo_t thrinfo;
	td_err_e error;

	/* primary stack */
	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
		Stk = my_malloc(sizeof (struct callstack), NULL);
		Stk->next = callstack;
		callstack = Stk;
		nstack++;
		Stk->stkbase = Psp->pr_stkbase;
		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
		Stk->ncall = 0;
		Stk->maxcall = DEF_MAXCALL;
		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
			NULL);
		return (Stk);
	}

	/* alternate stack */
	if ((Lsp->pr_altstack.ss_flags & SS_ONSTACK) &&
	    sp >= (uintptr_t)Lsp->pr_altstack.ss_sp &&
	    sp < (uintptr_t)Lsp->pr_altstack.ss_sp
	    + Lsp->pr_altstack.ss_size) {
		Stk = my_malloc(sizeof (struct callstack), NULL);
		Stk->next = callstack;
		callstack = Stk;
		nstack++;
		Stk->stkbase = (uintptr_t)Lsp->pr_altstack.ss_sp;
		Stk->stkend = Stk->stkbase + Lsp->pr_altstack.ss_size;
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
		Stk->ncall = 0;
		Stk->maxcall = DEF_MAXCALL;
		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
			NULL);
		return (Stk);
	}

	/* not a threaded process: fall back to the raw address-space map */
	if (Thr_agent == NULL)
		return (find_lwp_stack(sp));

	/* thread stacks? */
	if ((error = td_ta_map_lwp2thr(Thr_agent, lwpid, &th)) != TD_OK) {
		if (hflag)
			(void) fprintf(stderr,
				"cannot get thread handle for "
				"lwp#%d, error=%d, tref=0x%.8lx\n",
				(int)lwpid, error, (long)tref);
		return (NULL);
	}

	if ((error = td_thr_get_info(&th, &thrinfo)) != TD_OK) {
		if (hflag)
			(void) fprintf(stderr,
				"cannot get thread info for "
				"lwp#%d, error=%d, tref=0x%.8lx\n",
				(int)lwpid, error, (long)tref);
		return (NULL);
	}

	if (sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
	    sp < (uintptr_t)thrinfo.ti_stkbase) {
		Stk = my_malloc(sizeof (struct callstack), NULL);
		Stk->next = callstack;
		callstack = Stk;
		nstack++;
		/* The bloody fools got this backwards! */
		/* ti_stkbase is actually the HIGH end of the stack */
		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
		Stk->tref = tref;
		Stk->tid = thrinfo.ti_tid;
		Stk->nthr_create = nthr_create;
		Stk->ncall = 0;
		Stk->maxcall = DEF_MAXCALL;
		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
			NULL);
		return (Stk);
	}

	/* stack bounds failure -- complain bitterly */
	if (hflag) {
		(void) fprintf(stderr,
			"sp not within thread stack: "
			"sp=0x%.8lx stkbase=0x%.8lx stkend=0x%.8lx\n",
			(ulong_t)sp,
			/* The bloody fools got this backwards! */
			(ulong_t)thrinfo.ti_stkbase - thrinfo.ti_stksize,
			(ulong_t)thrinfo.ti_stkbase);
	}

	return (NULL);
}
851 
/*
 * Fill in the thread-id information (tref, tid, nthr_create) for the
 * given callstack, consulting the thread agent.  On any failure the
 * fields are zeroed so the stack is treated as unthreaded.
 */
void
get_tid(struct callstack *Stk)
{
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;
	id_t lwpid = Lsp->pr_lwpid;
	/* thread-pointer register for this architecture / data model */
#if defined(__sparc)
	prgreg_t tref = Lsp->pr_reg[R_G7];
#elif defined(__amd64)
	prgreg_t tref = (data_model == PR_MODEL_LP64) ?
	    Lsp->pr_reg[REG_FS] : Lsp->pr_reg[REG_GS];
#elif defined(__i386)
	prgreg_t tref = Lsp->pr_reg[GS];
#endif
	td_thrhandle_t th;
	td_thrinfo_t thrinfo;
	td_err_e error;

	if (Thr_agent == NULL) {
		/* not a threaded process */
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
		return;
	}

	/*
	 * Shortcut here --
	 * If we have a matching tref and no new threads have
	 * been created since the last time we encountered this
	 * stack, then we don't have to go through the overhead
	 * of calling td_ta_map_lwp2thr() to get the thread-id.
	 */
	if (tref == Stk->tref && Stk->nthr_create == nthr_create)
		return;

	if ((error = td_ta_map_lwp2thr(Thr_agent, lwpid, &th)) != TD_OK) {
		if (hflag)
			(void) fprintf(stderr,
				"cannot get thread handle for "
				"lwp#%d, error=%d, tref=0x%.8lx\n",
				(int)lwpid, error, (long)tref);
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
	} else if ((error = td_thr_get_info(&th, &thrinfo)) != TD_OK) {
		if (hflag)
			(void) fprintf(stderr,
				"cannot get thread info for "
				"lwp#%d, error=%d, tref=0x%.8lx\n",
				(int)lwpid, error, (long)tref);
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
	} else {
		Stk->tref = tref;
		Stk->tid = thrinfo.ti_tid;
		Stk->nthr_create = nthr_create;
	}
}
911 
/*
 * Return the callstack that contains 'sp', growing an existing stack,
 * or (if 'makeid') creating a new one via find_stack().  Guarantees
 * room for at least one more call frame in Stk->stack on success.
 * Returns NULL if 'sp' is unreadable or no stack can be identified.
 */
struct callstack *
callstack_info(uintptr_t sp, uintptr_t fp, int makeid)
{
	struct callstack *Stk;
	uintptr_t trash;

	/* 'sp' must point at readable memory in the victim */
	if (sp == 0 ||
	    Pread(Proc, &trash, sizeof (trash), sp) != sizeof (trash))
		return (NULL);

	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
		if (sp >= Stk->stkbase && sp < Stk->stkend)
			break;

	/*
	 * If we didn't find the stack, do it the hard way.
	 * Walk the frame-pointer chain upward looking for a frame
	 * that lands inside a known stack; if found, the known stack
	 * has grown downward to include 'sp'.
	 */
	if (Stk == NULL) {
		uintptr_t stkbase = sp;
		uintptr_t stkend;
		uint_t minsize;

		/* minimum stack-frame size for this architecture */
#if defined(i386) || defined(__amd64)
#ifdef _LP64
		if (data_model == PR_MODEL_LP64)
			minsize = 2 * sizeof (uintptr_t);	/* fp + pc */
		else
#endif
			minsize = 2 * sizeof (uint32_t);
#else
#ifdef _LP64
		if (data_model != PR_MODEL_LP64)
			minsize = SA32(MINFRAME32);
		else
			minsize = SA64(MINFRAME64);
#else
		minsize = SA(MINFRAME);
#endif
#endif	/* i386 */
		stkend = sp + minsize;

		while (Stk == NULL && fp != 0 && fp >= sp) {
			stkend = fp + minsize;
			for (Stk = callstack; Stk != NULL; Stk = Stk->next)
				if ((fp >= Stk->stkbase && fp < Stk->stkend) ||
				    (stkend > Stk->stkbase &&
				    stkend <= Stk->stkend))
					break;
			if (Stk == NULL)
				fp = previous_fp(fp, NULL);
		}

		if (Stk != NULL)	/* the stack grew */
			Stk->stkbase = stkbase;
	}

	if (Stk == NULL && makeid)	/* new stack */
		Stk = find_stack(sp);

	if (Stk == NULL)
		return (NULL);

	/*
	 * Ensure that there is room for at least one more entry.
	 */
	if (Stk->ncall == Stk->maxcall) {
		Stk->maxcall *= 2;
		Stk->stack = my_realloc(Stk->stack,
		    Stk->maxcall * sizeof (*Stk->stack), NULL);
	}

	if (makeid)
		get_tid(Stk);

	return (Stk);
}
988 
989 /*
990  * Reset the breakpoint information (called on successful exec()).
991  */
void
reset_breakpoints(void)
{
	struct dynlib *Dp;
	struct bkpt *Bp;
	struct callstack *Stk;
	int i;

	/* nothing to do unless function-call tracing was requested */
	if (Dynpat == NULL)
		return;

	/* destroy all previous dynamic library information */
	while ((Dp = Dyn) != NULL) {
		Dyn = Dp->next;
		free(Dp->lib_name);
		free(Dp->match_name);
		free(Dp->prt_name);
		free(Dp);
	}

	/* destroy all previous breakpoint trap information */
	if (bpt_hashtable != NULL) {
		for (i = 0; i < HASHSZ; i++) {
			while ((Bp = bpt_hashtable[i]) != NULL) {
				bpt_hashtable[i] = Bp->next;
				if (Bp->sym_name)
					free(Bp->sym_name);
				free(Bp);
			}
		}
	}

	/* destroy all the callstack information */
	while ((Stk = callstack) != NULL) {
		callstack = Stk->next;
		free(Stk->stack);
		free(Stk);
	}

	/* we are not a multi-threaded process anymore */
	if (Thr_agent != NULL)
		(void) td_ta_delete(Thr_agent);
	Thr_agent = NULL;

	/* tell libproc to clear out its mapping information */
	Preset_maps(Proc);
	Rdb_agent = NULL;

	/* Reestablish the symbols from the executable */
	(void) establish_breakpoints();
}
1043 
1044 /*
1045  * Clear breakpoints from the process (called before Prelease()).
1046  * Don't actually destroy the breakpoint table;
1047  * threads currently fielding breakpoints will need it.
1048  */
void
clear_breakpoints(void)
{
	struct bkpt *Bp;
	int i;

	/* nothing to do unless function-call tracing was requested */
	if (Dynpat == NULL)
		return;

	/*
	 * Change all breakpoint traps back to normal instructions.
	 * We attempt to remove a breakpoint from every address which
	 * may have ever contained a breakpoint to protect our victims.
	 */
	report_htable_stats();	/* report stats first */
	for (i = 0; i < HASHSZ; i++) {
		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
			if (Bp->flags & BPT_ACTIVE)
				(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
			Bp->flags &= ~BPT_ACTIVE;
		}
	}

	/* disable thread event reporting and drop the agent */
	if (Thr_agent != NULL) {
		td_thr_events_t events;

		td_event_emptyset(&events);
		(void) td_ta_set_event(Thr_agent, &events);
		(void) td_ta_delete(Thr_agent);
	}
	Thr_agent = NULL;
}
1081 
1082 /*
1083  * Reestablish the breakpoint traps in the process.
1084  * Called after resuming from a vfork() in the parent.
1085  */
1086 void
1087 reestablish_traps(void)
1088 {
1089 	struct bkpt *Bp;
1090 	ulong_t instr;
1091 	int i;
1092 
1093 	if (Dynpat == NULL || is_vfork_child)
1094 		return;
1095 
1096 	for (i = 0; i < HASHSZ; i++) {
1097 		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
1098 			if ((Bp->flags & BPT_ACTIVE) &&
1099 			    Psetbkpt(Proc, Bp->addr, &instr) != 0)
1100 				Bp->flags &= ~BPT_ACTIVE;
1101 		}
1102 	}
1103 }
1104 
1105 void
1106 show_function_call(private_t *pri,
1107 	struct callstack *Stk, struct dynlib *Dp, struct bkpt *Bp)
1108 {
1109 	long arg[8];
1110 	int narg;
1111 	int i;
1112 
1113 	narg = get_arguments(arg);
1114 	make_pname(pri, (Stk != NULL)? Stk->tid : 0);
1115 	putpname(pri);
1116 	timestamp(pri);
1117 	if (Stk != NULL) {
1118 		for (i = 1; i < Stk->ncall; i++) {
1119 			(void) fputc(' ', stdout);
1120 			(void) fputc(' ', stdout);
1121 		}
1122 	}
1123 	(void) printf("-> %s%s(", Dp->prt_name, Bp->sym_name);
1124 	for (i = 0; i < narg; i++) {
1125 		(void) printf("0x%lx", arg[i]);
1126 		if (i < narg-1) {
1127 			(void) fputc(',', stdout);
1128 			(void) fputc(' ', stdout);
1129 		}
1130 	}
1131 	(void) printf(")\n");
1132 	Flush();
1133 }
1134 
1135 /* ARGSUSED */
1136 void
1137 show_function_return(private_t *pri, long rval, int stret,
1138 	struct callstack *Stk, struct dynlib *Dp, struct bkpt *Bp)
1139 {
1140 	int i;
1141 
1142 	make_pname(pri, Stk->tid);
1143 	putpname(pri);
1144 	timestamp(pri);
1145 	for (i = 0; i < Stk->ncall; i++) {
1146 		(void) fputc(' ', stdout);
1147 		(void) fputc(' ', stdout);
1148 	}
1149 	(void) printf("<- %s%s() = ", Dp->prt_name, Bp->sym_name);
1150 	if (stret) {
1151 		(void) printf("struct return\n");
1152 	} else if (data_model == PR_MODEL_LP64) {
1153 		if (rval >= (64 * 1024) || -rval >= (64 * 1024))
1154 			(void) printf("0x%lx\n", rval);
1155 		else
1156 			(void) printf("%ld\n", rval);
1157 	} else {
1158 		int rval32 = (int)rval;
1159 		if (rval32 >= (64 * 1024) || -rval32 >= (64 * 1024))
1160 			(void) printf("0x%x\n", rval32);
1161 		else
1162 			(void) printf("%d\n", rval32);
1163 	}
1164 	Flush();
1165 }
1166 
1167 /*
1168  * Called to deal with function-call tracing.
1169  * Return 0 on normal success, 1 to indicate a BPT_HANG success,
1170  * and -1 on failure (not tracing functions or unknown breakpoint).
1171  */
int
function_trace(private_t *pri, int first, int clear, int dotrace)
{
	struct ps_lwphandle *Lwp = pri->Lwp;
	const lwpstatus_t *Lsp = pri->lwpstat;
	uintptr_t pc = Lsp->pr_reg[R_PC];
	uintptr_t sp = Lsp->pr_reg[R_SP];
	uintptr_t fp = Lsp->pr_reg[R_FP];
	struct bkpt *Bp;
	struct dynlib *Dp;
	struct callstack *Stk;
	ulong_t instr;
	int active;
	int rval = 0;

	if (Dynpat == NULL)
		return (-1);	/* we are not tracing function calls */

	/* a 32-bit victim's registers carry only 32 significant bits */
	if (data_model != PR_MODEL_LP64) {
		pc = (uint32_t)pc;
		sp = (uint32_t)sp;
		fp = (uint32_t)fp;
	}

	if ((Bp = get_bkpt(pc)) == NULL) {
		if (hflag)
			(void) fprintf(stderr,
				"function_trace(): "
				"cannot find breakpoint for pc: 0x%.8lx\n",
				(ulong_t)pc);
		return (-1);	/* unknown breakpoint */
	}

	/* handle rtld_db event breakpoints (rtld link-map activity) */
	if ((Bp->flags & (BPT_PREINIT|BPT_POSTINIT|BPT_DLACTIVITY)) && !clear) {
		rd_event_msg_t event_msg;

		if (hflag) {
			if (Bp->flags & BPT_PREINIT)
				(void) fprintf(stderr, "function_trace(): "
					"RD_PREINIT breakpoint\n");
			if (Bp->flags & BPT_POSTINIT)
				(void) fprintf(stderr, "function_trace(): "
					"RD_POSTINIT breakpoint\n");
			if (Bp->flags & BPT_DLACTIVITY)
				(void) fprintf(stderr, "function_trace(): "
					"RD_DLACTIVITY breakpoint\n");
		}
		if (rd_event_getmsg(Rdb_agent, &event_msg) == RD_OK) {
			if (event_msg.type == RD_DLACTIVITY) {
				/* link maps are stable again; rescan them */
				if (event_msg.u.state == RD_CONSISTENT)
					establish_breakpoints();
				if (event_msg.u.state == RD_ADD) {
					if (hflag)
						(void) fprintf(stderr,
							"RD_DLACTIVITY/RD_ADD "
							"state reached\n");
					not_consist = TRUE;	/* kludge */
					establish_breakpoints();
					not_consist = FALSE;
				}
			}
			if (hflag) {
				/* name the event/state for debug output */
				const char *et;
				char buf[32];

				switch (event_msg.type) {
				case RD_NONE:
					et = "RD_NONE";
					break;
				case RD_PREINIT:
					et = "RD_PREINIT";
					break;
				case RD_POSTINIT:
					et = "RD_POSTINIT";
					break;
				case RD_DLACTIVITY:
					et = "RD_DLACTIVITY";
					break;
				default:
					(void) sprintf(buf, "0x%x",
						event_msg.type);
					et = buf;
					break;
				}
				(void) fprintf(stderr,
					"event_msg.type = %s ", et);
				switch (event_msg.u.state) {
				case RD_NOSTATE:
					et = "RD_NOSTATE";
					break;
				case RD_CONSISTENT:
					et = "RD_CONSISTENT";
					break;
				case RD_ADD:
					et = "RD_ADD";
					break;
				case RD_DELETE:
					et = "RD_DELETE";
					break;
				default:
					(void) sprintf(buf, "0x%x",
						event_msg.u.state);
					et = buf;
					break;
				}
				(void) fprintf(stderr,
					"event_msg.u.state = %s\n", et);
			}
		}
	}

	/* count thread-creation events; the message itself is uninteresting */
	if ((Bp->flags & BPT_TD_CREATE) && !clear) {
		nthr_create++;
		if (hflag)
			(void) fprintf(stderr, "function_trace(): "
				"BPT_TD_CREATE breakpoint\n");
		/* we don't care about the event message */
	}

	Dp = Bp->dyn;

	if (dotrace) {
		if ((Stk = callstack_info(sp, fp, 1)) == NULL) {
			/* no stack info; report the call with no callstack */
			if (Dp != NULL && !clear) {
				if (cflag) {
					add_fcall(fcall_tbl, Dp->prt_name,
					    Bp->sym_name, (unsigned long)1);
				}
				else
					show_function_call(pri, NULL, Dp, Bp);
				if ((Bp->flags & BPT_HANG) && !first)
					rval = 1;
			}
		} else if (!clear) {
			if (Dp != NULL) {
				/* named breakpoint: a function entry */
				function_entry(pri, Bp, Stk);
				if ((Bp->flags & BPT_HANG) && !first)
					rval = 1;
			} else {
				/* unnamed breakpoint: a function return */
				function_return(pri, Stk);
			}
		}
	}

	/*
	 * Single-step the traced instruction. Since it's possible that
	 * another thread has deactivated this breakpoint, we indicate
	 * that we have reactivated it by virtue of executing it.
	 *
	 * To avoid a deadlock with some other thread in the process
	 * performing a fork() or a thr_suspend() operation, we must
	 * drop and later reacquire truss_lock.  Some fancy dancing here.
	 */
	active = (Bp->flags & BPT_ACTIVE);
	Bp->flags |= BPT_ACTIVE;
	instr = Bp->instr;
	(void) mutex_unlock(&truss_lock);
	(void) Lxecbkpt(Lwp, instr);
	(void) mutex_lock(&truss_lock);

	if (rval || clear) {	/* leave process stopped and abandoned */
#if defined(__i386)
		/*
		 * Leave it stopped in a state that a stack trace is reasonable.
		 */
		/* XX64 needs to be updated for amd64 & gcc */
		if (rval && instr == 0x55) {	/* pushl %ebp */
			/* step it over the movl %esp,%ebp */
			(void) mutex_unlock(&truss_lock);
			(void) Lsetrun(Lwp, 0, PRCFAULT|PRSTEP);
			/* we're wrapping up; wait one second at most */
			(void) Lwait(Lwp, MILLISEC);
			(void) mutex_lock(&truss_lock);
		}
#endif
		if (get_bkpt(pc) != Bp)
			abend("function_trace: lost breakpoint", NULL);
		(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
		Bp->flags &= ~BPT_ACTIVE;
		(void) mutex_unlock(&truss_lock);
		(void) Lsetrun(Lwp, 0, PRCFAULT|PRSTOP);
		/* we're wrapping up; wait one second at most */
		(void) Lwait(Lwp, MILLISEC);
		(void) mutex_lock(&truss_lock);
	} else {
		if (get_bkpt(pc) != Bp)
			abend("function_trace: lost breakpoint", NULL);
		/*
		 * Remove the trap if it was not active before the step or
		 * some other thread deactivated it while truss_lock was
		 * dropped.
		 */
		if (!active || !(Bp->flags & BPT_ACTIVE)) {
			(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
			Bp->flags &= ~BPT_ACTIVE;
		}
	}
	return (rval);
}
1366 
/*
 * Process entry to a traced function: trim the recorded callstack,
 * plant a breakpoint at the return address, and report (or, with
 * -c, count) the call.
 */
void
function_entry(private_t *pri, struct bkpt *Bp, struct callstack *Stk)
{
	const lwpstatus_t *Lsp = pri->lwpstat;
	uintptr_t sp = Lsp->pr_reg[R_SP];
	uintptr_t rpc = get_return_address(&sp);
	struct dynlib *Dp = Bp->dyn;
	int oldframe = FALSE;
	int i;

#ifdef _LP64
	if (data_model != PR_MODEL_LP64) {
		sp = (uint32_t)sp;
		rpc = (uint32_t)rpc;
	}
#endif

	/*
	 * If the sp is not within the stack bounds, forget it.
	 * If the symbol's 'internal' flag is false,
	 * don't report internal calls within the library.
	 */
	if (!(sp >= Stk->stkbase && sp < Stk->stkend) ||
	    (!(Bp->flags & BPT_INTERNAL) &&
	    rpc >= Dp->base && rpc < Dp->base + Dp->size))
		return;

	/* discard recorded frames that this call's sp shows we returned past */
	for (i = 0; i < Stk->ncall; i++) {
		if (sp >= Stk->stack[i].sp) {
			Stk->ncall = i;
			if (sp == Stk->stack[i].sp)
				oldframe = TRUE;
			break;
		}
	}

	/*
	 * Breakpoints for function returns are set here
	 * If we're counting function calls, there is no need to set
	 * a breakpoint upon return
	 */

	if (!oldframe && !cflag) {
		(void) create_bkpt(rpc, 1, 1); /* may or may not be set */
		Stk->stack[Stk->ncall].sp = sp;	/* record it anyway */
		Stk->stack[Stk->ncall].pc = rpc;
		Stk->stack[Stk->ncall].fcn = Bp;
	}
	Stk->ncall++;
	if (cflag) {
		add_fcall(fcall_tbl, Dp->prt_name, Bp->sym_name,
		    (unsigned long)1);
	} else {
		show_function_call(pri, Stk, Dp, Bp);
	}
}
1423 
1424 /*
1425  * We are here because we hit an unnamed breakpoint.
1426  * Attempt to match this up with a return pc on the stack
1427  * and report the function return.
1428  */
void
function_return(private_t *pri, struct callstack *Stk)
{
	const lwpstatus_t *Lsp = pri->lwpstat;
	uintptr_t sp = Lsp->pr_reg[R_SP];
	uintptr_t fp = Lsp->pr_reg[R_FP];
	int i;

#ifdef _LP64
	if (data_model != PR_MODEL_LP64) {
		sp = (uint32_t)sp;
		fp = (uint32_t)fp;
	}
#endif

	/* insist on a minimal [sp, fp) window for the range test below */
	if (fp < sp + 8)
		fp = sp + 8;

	/* find the most recent recorded frame whose sp falls in [sp, fp) */
	for (i = Stk->ncall - 1; i >= 0; i--) {
		if (sp <= Stk->stack[i].sp && fp > Stk->stack[i].sp) {
			Stk->ncall = i;
			break;
		}
	}

#if defined(i386) || defined(__amd64)
	if (i < 0) {
		/* probably __mul64() or friends -- try harder */
		int j;
		/* retry with sp backed up one 32-bit word at a time */
		for (j = 0; i < 0 && j < 8; j++) {	/* up to 8 args */
			sp -= 4;
			for (i = Stk->ncall - 1; i >= 0; i--) {
				if (sp <= Stk->stack[i].sp &&
				    fp > Stk->stack[i].sp) {
					Stk->ncall = i;
					break;
				}
			}
		}
	}
#endif

	/* report the return value unless we are only counting calls */
	if ((i >= 0) && (!cflag)) {
		show_function_return(pri, Lsp->pr_reg[R_R0], 0,
			Stk, Stk->stack[i].fcn->dyn, Stk->stack[i].fcn);
	}
}
1476 
/*
 * FPADJUST is added to a frame's return-%sp to recover the stack
 * pointer as it was at the time of the call (see trap_one_stack()).
 * Presumably zero on sparc because of register windows, else the
 * width of the pushed return address -- TODO confirm.
 */
#if defined(__sparc)
#define	FPADJUST	0
#elif defined(__amd64)
#define	FPADJUST	8
#elif defined(__i386)
#define	FPADJUST	4
#endif
1484 
/*
 * Walk one stack (registers in reg), then plant function-return
 * breakpoints for every traced function found active on it.
 */
void
trap_one_stack(prgregset_t reg)
{
	struct dynlib *Dp;
	struct bkpt *Bp;
	struct callstack *Stk;
	GElf_Sym sym;
	char sym_name[32];
	uintptr_t sp = reg[R_SP];
	uintptr_t pc = reg[R_PC];
	uintptr_t fp;
	uintptr_t rpc;
	uint_t nframe = 0;
	uint_t maxframe = 8;
	struct {
		uintptr_t sp;		/* %sp within called function */
		uintptr_t pc;		/* %pc within called function */
		uintptr_t rsp;		/* the return sp */
		uintptr_t rpc;		/* the return pc */
	} *frame = my_malloc(maxframe * sizeof (*frame), NULL);

	/*
	 * Gather stack frames bottom to top.
	 */
	while (sp != 0) {
		fp = sp;	/* remember highest non-null sp */
		frame[nframe].sp = sp;
		frame[nframe].pc = pc;
		sp = previous_fp(sp, &pc);
		frame[nframe].rsp = sp;
		frame[nframe].rpc = pc;
		if (++nframe == maxframe) {
			/* grow the frame array as needed */
			maxframe *= 2;
			frame = my_realloc(frame, maxframe * sizeof (*frame),
				NULL);
		}
	}

	/*
	 * Scan for function return breakpoints top to bottom.
	 */
	while (nframe--) {
		/* lookup the called function in the symbol tables */
		if (Plookup_by_addr(Proc, frame[nframe].pc, sym_name,
		    sizeof (sym_name), &sym) != 0)
			continue;

		pc = sym.st_value;	/* entry point of the function */
		rpc = frame[nframe].rpc;	/* caller's return pc */

		/* lookup the function in the breakpoint table */
		if ((Bp = get_bkpt(pc)) == NULL || (Dp = Bp->dyn) == NULL)
			continue;

		/* skip calls internal to the library unless BPT_INTERNAL */
		if (!(Bp->flags & BPT_INTERNAL) &&
		    rpc >= Dp->base && rpc < Dp->base + Dp->size)
			continue;

		sp = frame[nframe].rsp + FPADJUST;  /* %sp at time of call */
		if ((Stk = callstack_info(sp, fp, 0)) == NULL)
			continue;	/* can't happen? */

		/* plant a return breakpoint and record the frame */
		if (create_bkpt(rpc, 1, 1) != NULL) {
			Stk->stack[Stk->ncall].sp = sp;
			Stk->stack[Stk->ncall].pc = rpc;
			Stk->stack[Stk->ncall].fcn = Bp;
			Stk->ncall++;
		}
	}

	free(frame);
}
1557 
1558 int
1559 lwp_stack_traps(void *cd, const lwpstatus_t *Lsp)
1560 {
1561 	ph_map_t *ph_map = (ph_map_t *)cd;
1562 	prgregset_t reg;
1563 
1564 	(void) memcpy(reg, Lsp->pr_reg, sizeof (prgregset_t));
1565 	make_lwp_stack(Lsp, ph_map->pmap, ph_map->nmap);
1566 	trap_one_stack(reg);
1567 
1568 	return (interrupt | sigusr1);
1569 }
1570 
1571 /* ARGSUSED */
1572 int
1573 thr_stack_traps(const td_thrhandle_t *Thp, void *cd)
1574 {
1575 	prgregset_t reg;
1576 
1577 	/*
1578 	 * We have already dealt with all the lwps.
1579 	 * We only care about unbound threads here (TD_PARTIALREG).
1580 	 */
1581 	if (td_thr_getgregs(Thp, reg) != TD_PARTIALREG)
1582 		return (0);
1583 
1584 	make_thr_stack(Thp, reg);
1585 	trap_one_stack(reg);
1586 
1587 	return (interrupt | sigusr1);
1588 }
1589 
1590 #if defined(__sparc)
1591 
/*
 * Walk one frame up a sparc stack: read the register save area
 * (struct rwindow) at sp to recover the saved frame pointer and
 * return pc.  Returns the previous fp (0 if no valid frame can be
 * read); if rpc is non-NULL, *rpc receives the return pc.
 */
uintptr_t
previous_fp(uintptr_t sp, uintptr_t *rpc)
{
	uintptr_t fp = 0;
	uintptr_t pc = 0;

#ifdef _LP64
	if (data_model == PR_MODEL_LP64) {
		struct rwindow64 rwin;
		/* 64-bit sparc stack pointers are offset by STACK_BIAS */
		if (Pread(Proc, &rwin, sizeof (rwin), sp + STACK_BIAS)
		    == sizeof (rwin)) {
			fp = (uintptr_t)rwin.rw_fp;
			pc = (uintptr_t)rwin.rw_rtn;
		}
		/* sanity check: the previous frame must itself be readable */
		if (fp != 0 &&
		    Pread(Proc, &rwin, sizeof (rwin), fp + STACK_BIAS)
		    != sizeof (rwin))
			fp = pc = 0;
	} else {
		struct rwindow32 rwin;
#else	/* _LP64 */
		struct rwindow rwin;
#endif	/* _LP64 */
		if (Pread(Proc, &rwin, sizeof (rwin), sp) == sizeof (rwin)) {
			fp = (uint32_t)rwin.rw_fp;
			pc = (uint32_t)rwin.rw_rtn;
		}
		/* sanity check: the previous frame must itself be readable */
		if (fp != 0 &&
		    Pread(Proc, &rwin, sizeof (rwin), fp) != sizeof (rwin))
			fp = pc = 0;
#ifdef _LP64
	}
#endif
	if (rpc)
		*rpc = pc;
	return (fp);
}
1629 
1630 /* ARGSUSED */
1631 uintptr_t
1632 get_return_address(uintptr_t *psp)
1633 {
1634 	instr_t inst;
1635 	private_t *pri = get_private();
1636 	const lwpstatus_t *Lsp = pri->lwpstat;
1637 	uintptr_t rpc;
1638 
1639 	rpc = (uintptr_t)Lsp->pr_reg[R_O7] + 8;
1640 	if (data_model != PR_MODEL_LP64)
1641 		rpc = (uint32_t)rpc;
1642 
1643 	/* check for structure return (bletch!) */
1644 	if (Pread(Proc, &inst, sizeof (inst), rpc) == sizeof (inst) &&
1645 	    inst < 0x1000)
1646 		rpc += sizeof (instr_t);
1647 
1648 	return (rpc);
1649 }
1650 
1651 int
1652 get_arguments(long *argp)
1653 {
1654 	private_t *pri = get_private();
1655 	const lwpstatus_t *Lsp = pri->lwpstat;
1656 	int i;
1657 
1658 	if (data_model != PR_MODEL_LP64)
1659 		for (i = 0; i < 4; i++)
1660 			argp[i] = (uint_t)Lsp->pr_reg[R_O0+i];
1661 	else
1662 		for (i = 0; i < 4; i++)
1663 			argp[i] = (long)Lsp->pr_reg[R_O0+i];
1664 	return (4);
1665 }
1666 
1667 #endif	/* __sparc */
1668 
1669 #if defined(__i386) || defined(__amd64)
1670 
1671 uintptr_t
1672 previous_fp(uintptr_t fp, uintptr_t *rpc)
1673 {
1674 	uintptr_t frame[2];
1675 	uintptr_t trash[2];
1676 
1677 	if (Pread(Proc, frame, sizeof (frame), fp) != sizeof (frame) ||
1678 	    (frame[0] != 0 &&
1679 	    Pread(Proc, trash, sizeof (trash), frame[0]) != sizeof (trash)))
1680 		frame[0] = frame[1] = 0;
1681 
1682 	if (rpc)
1683 		*rpc = frame[1];
1684 	return (frame[0]);
1685 }
1686 
1687 #endif
1688 
1689 #if defined(__amd64) || defined(__i386)
1690 
1691 /*
1692  * Examine the instruction at the return location of a function call
1693  * and return the byte count by which the stack is adjusted on return.
 * If the instruction at the return location is an addl, as expected,
1695  * then adjust the return pc by the size of that instruction so that
1696  * we will place the return breakpoint on the following instruction.
1697  * This allows programs that interrogate their own stacks and record
1698  * function calls and arguments to work correctly even while we interfere.
1699  * Return the count on success, -1 on failure.
1700  */
int
return_count32(uint32_t *ppc)
{
	uintptr_t pc = *ppc;
	struct bkpt *Bp;
	int count;
	uchar_t instr[6];	/* instruction at pc */

	if ((count = Pread(Proc, instr, sizeof (instr), pc)) < 0)
		return (-1);

	/* find the replaced instruction at pc (if any) */
	if ((Bp = get_bkpt(pc)) != NULL && (Bp->flags & BPT_ACTIVE))
		instr[0] = (uchar_t)Bp->instr;

	/* a short read is tolerable only for the 3-byte (0x83) form */
	if (count != sizeof (instr) &&
	    (count < 3 || instr[0] != 0x83))
		return (-1);

	/*
	 * A bit of disassembly of the instruction is required here.
	 */
	if (instr[1] != 0xc4) {	/* not an addl mumble,%esp instruction */
		count = 0;
	} else if (instr[0] == 0x81) {	/* count is a longword */
		count = instr[2]+(instr[3]<<8)+(instr[4]<<16)+(instr[5]<<24);
		*ppc += 6;	/* skip the 6-byte addl */
	} else if (instr[0] == 0x83) {	/* count is a byte */
		count = instr[2];
		*ppc += 3;	/* skip the 3-byte addl */
	} else {		/* not an addl instruction */
		count = 0;
	}

	return (count);
}
1737 
1738 uintptr_t
1739 get_return_address32(uintptr_t *psp)
1740 {
1741 	uint32_t sp = *psp;
1742 	uint32_t rpc;
1743 	int count;
1744 
1745 	*psp += 4;	/* account for popping the stack on return */
1746 	if (Pread(Proc, &rpc, sizeof (rpc), sp) != sizeof (rpc))
1747 		return (0);
1748 	if ((count = return_count32(&rpc)) < 0)
1749 		count = 0;
1750 	*psp += count;		/* expected sp on return */
1751 	return (rpc);
1752 }
1753 
/*
 * Read the return address of the current function call from the
 * stack at *psp.  For a 32-bit victim this also advances *psp past
 * the return pc and any pushed arguments (see get_return_address32()).
 */
uintptr_t
get_return_address(uintptr_t *psp)
{
#ifdef _LP64
	uintptr_t rpc;
	uintptr_t sp = *psp;

	if (data_model == PR_MODEL_LP64) {
		if (Pread(Proc, &rpc, sizeof (rpc), sp) != sizeof (rpc))
			return (0);
		/*
		 * Ignore arguments pushed on the stack.  See comments in
		 * get_arguments().
		 */
		return (rpc);
	} else
#endif
		return (get_return_address32(psp));
}
1773 
1774 
1775 int
1776 get_arguments32(long *argp)
1777 {
1778 	private_t *pri = get_private();
1779 	const lwpstatus_t *Lsp = pri->lwpstat;
1780 	uint32_t frame[5];	/* return pc + 4 args */
1781 	int narg;
1782 	int count;
1783 	int i;
1784 
1785 	narg = Pread(Proc, frame, sizeof (frame),
1786 		(uintptr_t)Lsp->pr_reg[R_SP]);
1787 	narg -= sizeof (greg32_t);
1788 	if (narg <= 0)
1789 		return (0);
1790 	narg /= sizeof (greg32_t); /* no more than 4 */
1791 
1792 	/*
1793 	 * Given the return PC, determine the number of arguments.
1794 	 */
1795 	if ((count = return_count32(&frame[0])) < 0)
1796 		narg = 0;
1797 	else {
1798 		count /= sizeof (greg32_t);
1799 		if (narg > count)
1800 			narg = count;
1801 	}
1802 
1803 	for (i = 0; i < narg; i++)
1804 		argp[i] = (long)frame[i+1];
1805 
1806 	return (narg);
1807 }
1808 
/*
 * Fetch the arguments of the current function call.
 * Returns the number of arguments stored into argp[].
 */
int
get_arguments(long *argp)
{
#ifdef _LP64
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;

	if (data_model == PR_MODEL_LP64) {
		/*
		 * On amd64, we do not know how many arguments are passed to
		 * each function.  While it may be possible to detect if we
		 * have more than 6 arguments, it is of marginal value.
		 * Instead, assume that we always have 6 arguments, which are
		 * passed via registers.
		 */
		argp[0] = Lsp->pr_reg[REG_RDI];
		argp[1] = Lsp->pr_reg[REG_RSI];
		argp[2] = Lsp->pr_reg[REG_RDX];
		argp[3] = Lsp->pr_reg[REG_RCX];
		argp[4] = Lsp->pr_reg[REG_R8];
		argp[5] = Lsp->pr_reg[REG_R9];
		return (6);
	} else
#endif
		return (get_arguments32(argp));
}
1835 
1836 #endif	/* __amd64 || __i386 */
1837