xref: /titanic_41/usr/src/cmd/truss/fcall.c (revision 1c42de6d020629af774dd9e9fc81be3f3ed9398e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #define	_SYSCALL32
30 
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <unistd.h>
34 #include <ctype.h>
35 #include <string.h>
36 #include <memory.h>
37 #include <errno.h>
38 #include <sys/types.h>
39 #include <sys/stack.h>
40 #include <signal.h>
41 #include <limits.h>
42 #include <sys/isa_defs.h>
43 #include <proc_service.h>
44 #include <dlfcn.h>
45 #include <fnmatch.h>
46 #include <libproc.h>
47 #include "ramdata.h"
48 #include "systable.h"
49 #include "print.h"
50 #include "proto.h"
51 #include "htbl.h"
52 
53 /*
54  * Functions supporting library function call tracing.
55  */
56 
57 typedef struct {
58 	prmap_t	*pmap;
59 	int	nmap;
60 } ph_map_t;
61 
62 /*
63  * static functions in this file.
64  */
65 void function_entry(private_t *, struct bkpt *, struct callstack *);
66 void function_return(private_t *, struct callstack *);
67 int object_iter(void *, const prmap_t *, const char *);
68 int object_present(void *, const prmap_t *, const char *);
69 int symbol_iter(void *, const GElf_Sym *, const char *);
70 uintptr_t get_return_address(uintptr_t *);
71 int get_arguments(long *argp);
72 uintptr_t previous_fp(uintptr_t, uintptr_t *);
73 int lwp_stack_traps(void *cd, const lwpstatus_t *Lsp);
74 int thr_stack_traps(const td_thrhandle_t *Thp, void *cd);
75 struct bkpt *create_bkpt(uintptr_t, int, int);
76 void set_deferred_breakpoints(void);
77 
78 #define	DEF_MAXCALL	16	/* initial value of Stk->maxcall */
79 
80 #define	FAULT_ADDR	((uintptr_t)(0-8))
81 
82 #define	HASHSZ	2048
83 #define	bpt_hash(addr)	((((addr) >> 13) ^ ((addr) >> 2)) & 0x7ff)
84 
85 static void
86 setup_thread_agent(void)
87 {
88 	struct bkpt *Bp;
89 	td_notify_t notify;
90 	td_thr_events_t events;
91 
92 	if (Thr_agent != NULL)	/* only once */
93 		return;
94 	if (td_init() != TD_OK || td_ta_new(Proc, &Thr_agent) != TD_OK)
95 		Thr_agent = NULL;
96 	else {
97 		td_event_emptyset(&events);
98 		td_event_addset(&events, TD_CREATE);
99 		if (td_ta_event_addr(Thr_agent, TD_CREATE, &notify) == TD_OK &&
100 		    notify.type == NOTIFY_BPT &&
101 		    td_ta_set_event(Thr_agent, &events) == TD_OK &&
102 		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
103 			Bp->flags |= BPT_TD_CREATE;
104 	}
105 }
106 
107 /*
108  * Delete all breakpoints in the range [base .. base+size)
109  * from the breakpoint hash table.
110  */
111 static void
112 delete_breakpoints(uintptr_t base, size_t size)
113 {
114 	struct bkpt **Bpp;
115 	struct bkpt *Bp;
116 	int i;
117 
118 	if (bpt_hashtable == NULL)
119 		return;
120 	for (i = 0; i < HASHSZ; i++) {
121 		Bpp = &bpt_hashtable[i];
122 		while ((Bp = *Bpp) != NULL) {
123 			if (Bp->addr < base || Bp->addr >= base + size) {
124 				Bpp = &Bp->next;
125 				continue;
126 			}
127 			*Bpp = Bp->next;
128 			if (Bp->sym_name)
129 				free(Bp->sym_name);
130 			free(Bp);
131 		}
132 	}
133 }
134 
135 /*
136  * Establishment of breakpoints on traced library functions.
137  */
138 void
139 establish_breakpoints(void)
140 {
141 	if (Dynpat == NULL)
142 		return;
143 
144 	/* allocate the breakpoint hash table */
145 	if (bpt_hashtable == NULL) {
146 		bpt_hashtable = my_malloc(HASHSZ * sizeof (struct bkpt *),
147 			NULL);
148 		(void) memset(bpt_hashtable, 0,
149 			HASHSZ * sizeof (struct bkpt *));
150 	}
151 
152 	/*
153 	 * Set special rtld_db event breakpoints, first time only.
154 	 */
155 	if (Rdb_agent == NULL &&
156 	    (Rdb_agent = Prd_agent(Proc)) != NULL) {
157 		rd_notify_t notify;
158 		struct bkpt *Bp;
159 
160 		(void) rd_event_enable(Rdb_agent, 1);
161 		if (rd_event_addr(Rdb_agent, RD_PREINIT, &notify) == RD_OK &&
162 		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
163 			Bp->flags |= BPT_PREINIT;
164 		if (rd_event_addr(Rdb_agent, RD_POSTINIT, &notify) == RD_OK &&
165 		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
166 			Bp->flags |= BPT_POSTINIT;
167 		if (rd_event_addr(Rdb_agent, RD_DLACTIVITY, &notify) == RD_OK &&
168 		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
169 			Bp->flags |= BPT_DLACTIVITY;
170 	}
171 
172 	/*
173 	 * Set special thread event breakpoint, first time libc is seen.
174 	 */
175 	if (Thr_agent == NULL)
176 		setup_thread_agent();
177 
178 	/*
179 	 * Tell libproc to update its mappings.
180 	 */
181 	Pupdate_maps(Proc);
182 
183 	/*
184 	 * If rtld_db told us a library was being deleted,
185 	 * first mark all of the dynlibs as not present, then
186 	 * iterate over the shared objects, marking as present only
187 	 * those that really are present, and finally delete
188 	 * all of the not-present dynlibs.
189 	 */
190 	if (delete_library) {
191 		struct dynlib **Dpp;
192 		struct dynlib *Dp;
193 
194 		for (Dp = Dyn; Dp != NULL; Dp = Dp->next)
195 			Dp->present = FALSE;
196 		(void) Pobject_iter(Proc, object_present, NULL);
197 		Dpp = &Dyn;
198 		while ((Dp = *Dpp) != NULL) {
199 			if (Dp->present) {
200 				Dpp = &Dp->next;
201 				continue;
202 			}
203 			delete_breakpoints(Dp->base, Dp->size);
204 			*Dpp = Dp->next;
205 			free(Dp->lib_name);
206 			free(Dp->match_name);
207 			free(Dp->prt_name);
208 			free(Dp);
209 		}
210 		delete_library = FALSE;
211 	}
212 
213 	/*
214 	 * Iterate over the shared objects, creating breakpoints.
215 	 */
216 	(void) Pobject_iter(Proc, object_iter, NULL);
217 
218 	/*
219 	 * Now actually set all the breakpoints we just created.
220 	 */
221 	set_deferred_breakpoints();
222 }
223 
224 /*
225  * Initial establishment of stacks in a newly-grabbed process.
226  * establish_breakpoints() has already been called.
227  */
228 void
229 establish_stacks(void)
230 {
231 	const pstatus_t *Psp = Pstatus(Proc);
232 	char mapfile[64];
233 	int mapfd;
234 	struct stat statb;
235 	prmap_t *Pmap = NULL;
236 	int nmap = 0;
237 	ph_map_t ph_map;
238 
239 	(void) sprintf(mapfile, "/proc/%d/rmap", (int)Psp->pr_pid);
240 	if ((mapfd = open(mapfile, O_RDONLY)) < 0 ||
241 	    fstat(mapfd, &statb) != 0 ||
242 	    statb.st_size < sizeof (prmap_t) ||
243 	    (Pmap = my_malloc(statb.st_size, NULL)) == NULL ||
244 	    (nmap = pread(mapfd, Pmap, statb.st_size, 0L)) <= 0 ||
245 	    (nmap /= sizeof (prmap_t)) == 0) {
246 		if (Pmap != NULL)
247 			free(Pmap);
248 		Pmap = NULL;
249 		nmap = 0;
250 	}
251 	if (mapfd >= 0)
252 		(void) close(mapfd);
253 
254 	/*
255 	 * Iterate over lwps, establishing stacks.
256 	 */
257 	ph_map.pmap = Pmap;
258 	ph_map.nmap = nmap;
259 	(void) Plwp_iter(Proc, lwp_stack_traps, &ph_map);
260 	if (Pmap != NULL)
261 		free(Pmap);
262 
263 	if (Thr_agent == NULL)
264 		return;
265 
266 	/*
267 	 * Iterate over unbound threads, establishing stacks.
268 	 */
269 	(void) td_ta_thr_iter(Thr_agent, thr_stack_traps, NULL,
270 		TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
271 		TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
272 }
273 
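/*
 * Iterate over the function symbols of the named object, letting
 * symbol_iter() create breakpoints for those that match the patterns
 * in the given dynpat entry.
 */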
274 void
275 do_symbol_iter(const char *object_name, struct dynpat *Dyp)
276 {
277 	if (*Dyp->Dp->prt_name == '\0')
278 		object_name = PR_OBJ_EXEC;
279 
280 	/*
281 	 * Always search the dynamic symbol table.
282 	 */
283 	(void) Psymbol_iter(Proc, object_name,
284 		PR_DYNSYM, BIND_WEAK|BIND_GLOBAL|TYPE_FUNC,
285 		symbol_iter, Dyp);
286 
287 	/*
288 	 * Search the static symbol table if this is the
289 	 * executable file or if we are being asked to
290 	 * report internal calls within the library.
291 	 */
292 	if (object_name == PR_OBJ_EXEC || Dyp->internal)
293 		(void) Psymbol_iter(Proc, object_name,
294 			PR_SYMTAB, BIND_ANY|TYPE_FUNC,
295 			symbol_iter, Dyp);
296 }
297 
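/*
 * Pobject_iter() callback: create a dynlib entry for each executable
 * mapping, if one does not already exist, and set breakpoints on the
 * symbols matching the traced library/function patterns.
 */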
298 /* ARGSUSED */
299 int
300 object_iter(void *cd, const prmap_t *pmp, const char *object_name)
301 {
302 	char name[100];
303 	struct dynpat *Dyp;
304 	struct dynlib *Dp;
305 	const char *str;
306 	char *s;
307 	int i;
308 
309 	if ((pmp->pr_mflags & MA_WRITE) || !(pmp->pr_mflags & MA_EXEC))
310 		return (0);
311 
312 	/*
313 	 * Set special thread event breakpoint, first time libc is seen.
314 	 */
315 	if (Thr_agent == NULL && strstr(object_name, "/libc.so.") != NULL)
316 		setup_thread_agent();
317 
318 	for (Dp = Dyn; Dp != NULL; Dp = Dp->next)
319 		if (strcmp(object_name, Dp->lib_name) == 0 ||
320 		    (strcmp(Dp->lib_name, "a.out") == 0 &&
321 		    strcmp(pmp->pr_mapname, "a.out") == 0))
322 			break;
323 
324 	if (Dp == NULL) {
325 		Dp = my_malloc(sizeof (struct dynlib), NULL);
326 		(void) memset(Dp, 0, sizeof (struct dynlib));
327 		if (strcmp(pmp->pr_mapname, "a.out") == 0) {
328 			Dp->lib_name = strdup(pmp->pr_mapname);
329 			Dp->match_name = strdup(pmp->pr_mapname);
330 			Dp->prt_name = strdup("");
331 		} else {
332 			Dp->lib_name = strdup(object_name);
333 			if ((str = strrchr(object_name, '/')) != NULL)
334 				str++;
335 			else
336 				str = object_name;
337 			(void) strncpy(name, str, sizeof (name) - 2);
338 			name[sizeof (name) - 2] = '\0';
339 			if ((s = strstr(name, ".so")) != NULL)
340 				*s = '\0';
341 			Dp->match_name = strdup(name);
342 			(void) strcat(name, ":");
343 			Dp->prt_name = strdup(name);
344 		}
345 		Dp->next = Dyn;
346 		Dyn = Dp;
347 	}
348 
349 	if (Dp->built ||
350 	    (not_consist && strcmp(Dp->prt_name, "ld:") != 0))	/* kludge */
351 		return (0);
352 
353 	if (hflag && not_consist)
354 		(void) fprintf(stderr, "not_consist is TRUE, building %s\n",
355 			Dp->lib_name);
356 
357 	Dp->base = pmp->pr_vaddr;
358 	Dp->size = pmp->pr_size;
359 
360 	/*
361 	 * For every dynlib pattern that matches this library's name,
362 	 * iterate through all of the library's symbols looking for
363 	 * matching symbol name patterns.
364 	 */
365 	for (Dyp = Dynpat; Dyp != NULL; Dyp = Dyp->next) {
366 		if (interrupt|sigusr1)
367 			break;
368 		for (i = 0; i < Dyp->nlibpat; i++) {
369 			if (interrupt|sigusr1)
370 				break;
371 			if (fnmatch(Dyp->libpat[i], Dp->match_name, 0) != 0)
372 				continue;	/* no match */
373 
374 			/*
375 			 * Require an exact match for the executable (a.out)
376 			 * and for the dynamic linker (ld.so.1).
377 			 */
378 			if ((strcmp(Dp->match_name, "a.out") == 0 ||
379 			    strcmp(Dp->match_name, "ld") == 0) &&
380 			    strcmp(Dyp->libpat[i], Dp->match_name) != 0)
381 				continue;
382 
383 			/*
384 			 * Set Dyp->Dp to Dp so symbol_iter() can use it.
385 			 */
386 			Dyp->Dp = Dp;
387 			do_symbol_iter(object_name, Dyp);
388 			Dyp->Dp = NULL;
389 		}
390 	}
391 
392 	Dp->built = TRUE;
393 	return (interrupt | sigusr1);
394 }
395 
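/*
 * Pobject_iter() callback used when a library has been deleted:
 * mark as present each dynlib whose base address is still mapped.
 */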
396 /* ARGSUSED */
397 int
398 object_present(void *cd, const prmap_t *pmp, const char *object_name)
399 {
400 	struct dynlib *Dp;
401 
402 	for (Dp = Dyn; Dp != NULL; Dp = Dp->next) {
403 		if (Dp->base == pmp->pr_vaddr)
404 			Dp->present = TRUE;
405 	}
406 
407 	return (0);
408 }
409 
410 /*
411  * Search for an existing breakpoint at the 'pc' location.
412  */
413 struct bkpt *
414 get_bkpt(uintptr_t pc)
415 {
416 	struct bkpt *Bp;
417 
418 	for (Bp = bpt_hashtable[bpt_hash(pc)]; Bp != NULL; Bp = Bp->next)
419 		if (pc == Bp->addr)
420 			break;
421 
422 	return (Bp);
423 }
424 
425 /*
426  * Create a breakpoint at 'pc', if one is not there already.
427  * 'ret' is true when creating a function return breakpoint, in which case
428  * fail and return NULL if the breakpoint would be created in writeable data.
429  * If 'set' is true, set the breakpoint in the process now.
430  */
431 struct bkpt *
432 create_bkpt(uintptr_t pc, int ret, int set)
433 {
434 	uint_t hix = bpt_hash(pc);
435 	struct bkpt *Bp;
436 	const prmap_t *pmp;
437 
438 	for (Bp = bpt_hashtable[hix]; Bp != NULL; Bp = Bp->next)
439 		if (pc == Bp->addr)
440 			return (Bp);
441 
442 	/*
443 	 * Don't set return breakpoints on writeable data
444 	 * or on any space other than executable text.
445 	 * Don't set breakpoints in the child of a vfork()
446 	 * because that would modify the parent's address space.
447 	 */
448 	if (is_vfork_child ||
449 	    (ret &&
450 	    ((pmp = Paddr_to_text_map(Proc, pc)) == NULL ||
451 	    !(pmp->pr_mflags & MA_EXEC) ||
452 	    (pmp->pr_mflags & MA_WRITE))))
453 		return (NULL);
454 
455 	/* create a new unnamed breakpoint */
456 	Bp = my_malloc(sizeof (struct bkpt), NULL);
457 	Bp->sym_name = NULL;
458 	Bp->dyn = NULL;
459 	Bp->addr = pc;
460 	Bp->instr = 0;
461 	Bp->flags = 0;
462 	if (set && Psetbkpt(Proc, Bp->addr, &Bp->instr) == 0)
463 		Bp->flags |= BPT_ACTIVE;
464 	Bp->next = bpt_hashtable[hix];
465 	bpt_hashtable[hix] = Bp;
466 
467 	return (Bp);
468 }
469 
470 /*
471  * Set all breakpoints that haven't been set yet.
472  * Deactivate all breakpoints from modules that are not present any more.
473  */
474 void
475 set_deferred_breakpoints(void)
476 {
477 	struct bkpt *Bp;
478 	int i;
479 
480 	if (is_vfork_child)
481 		return;
482 
483 	for (i = 0; i < HASHSZ; i++) {
484 		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
485 			if (!(Bp->flags & BPT_ACTIVE)) {
486 				if (!(Bp->flags & BPT_EXCLUDE) &&
487 				    Psetbkpt(Proc, Bp->addr, &Bp->instr) == 0)
488 					Bp->flags |= BPT_ACTIVE;
489 			} else if (Paddr_to_text_map(Proc, Bp->addr) == NULL) {
490 				Bp->flags &= ~BPT_ACTIVE;
491 			}
492 		}
493 	}
494 }
495 
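/*
 * Psymbol_iter() callback: create a deferred breakpoint for each
 * function symbol that matches one of the symbol name patterns.
 */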
496 int
497 symbol_iter(void *cd, const GElf_Sym *sym, const char *sym_name)
498 {
499 	struct dynpat *Dyp = cd;
500 	struct dynlib *Dp = Dyp->Dp;
501 	uintptr_t pc = sym->st_value;
502 	struct bkpt *Bp;
503 	int i;
504 
505 	/* ignore any undefined symbols */
506 	if (sym->st_shndx == SHN_UNDEF)
507 		return (0);
508 
509 	/*
510 	 * Arbitrarily omit "_start" from the executable.
511 	 * (Avoid indentation before main().)
512 	 */
513 	if (*Dp->prt_name == '\0' && strcmp(sym_name, "_start") == 0)
514 		return (0);
515 
516 	/*
517 	 * Arbitrarily omit "_rt_boot" from the dynamic linker.
518 	 * (Avoid indentation before main().)
519 	 */
520 	if (strcmp(Dp->match_name, "ld") == 0 &&
521 	    strcmp(sym_name, "_rt_boot") == 0)
522 		return (0);
523 
524 	/*
525 	 * Arbitrarily omit any symbols whose name starts with '.'.
526 	 * Apparently putting a breakpoint on .umul causes a
527 	 * fatal error in libthread (%y is not restored correctly
528 	 * when a single step is taken).  Looks like a /proc bug.
529 	 */
530 	if (*sym_name == '.')
531 		return (0);
532 
533 	/*
534 	 * For each pattern in the array of symbol patterns,
535 	 * if the pattern matches the symbol name, then
536 	 * create a breakpoint at the function in question.
537 	 */
538 	for (i = 0; i < Dyp->nsympat; i++) {
539 		if (interrupt|sigusr1)
540 			break;
541 		if (fnmatch(Dyp->sympat[i], sym_name, 0) != 0)
542 			continue;
543 
544 		if ((Bp = create_bkpt(pc, 0, 0)) == NULL)	/* can't fail */
545 			return (0);
546 
547 		/*
548 		 * New breakpoints receive a name now.
549 		 * For existing breakpoints, prefer the subset name if possible,
550 		 * else prefer the shorter name.
551 		 */
552 		if (Bp->sym_name == NULL) {
553 			Bp->sym_name = strdup(sym_name);
554 		} else if (strstr(Bp->sym_name, sym_name) != NULL ||
555 		    strlen(Bp->sym_name) > strlen(sym_name)) {
556 			free(Bp->sym_name);
557 			Bp->sym_name = strdup(sym_name);
558 		}
559 		Bp->dyn = Dp;
560 		Bp->flags |= Dyp->flag;
561 		if (Dyp->exclude)
562 			Bp->flags |= BPT_EXCLUDE;
563 		else if (Dyp->internal || *Dp->prt_name == '\0')
564 			Bp->flags |= BPT_INTERNAL;
565 		return (0);
566 	}
567 
568 	return (interrupt | sigusr1);
569 }
570 
571 /* For debugging only ---- */
572 void
573 report_htable_stats(void)
574 {
575 	const pstatus_t *Psp = Pstatus(Proc);
576 	struct callstack *Stk;
577 	struct bkpt *Bp;
578 	uint_t Min = 1000000;
579 	uint_t Max = 0;
580 	uint_t Avg = 0;
581 	uint_t Total = 0;
582 	uint_t i, j;
583 	uint_t bucket[HASHSZ];
584 
585 	if (Dynpat == NULL || !hflag)
586 		return;
587 
588 	hflag = FALSE;
589 	(void) memset(bucket, 0, sizeof (bucket));
590 
591 	for (i = 0; i < HASHSZ; i++) {
592 		j = 0;
593 		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next)
594 			j++;
595 		if (j < Min)
596 			Min = j;
597 		if (j > Max)
598 			Max = j;
599 		if (j < HASHSZ)
600 			bucket[j]++;
601 		Total += j;
602 	}
603 	Avg = (Total + HASHSZ / 2) / HASHSZ;
604 	(void) fprintf(stderr, "truss hash table statistics --------\n");
605 	(void) fprintf(stderr, "    Total = %u\n", Total);
606 	(void) fprintf(stderr, "      Min = %u\n", Min);
607 	(void) fprintf(stderr, "      Max = %u\n", Max);
608 	(void) fprintf(stderr, "      Avg = %u\n", Avg);
609 	for (i = 0; i < HASHSZ; i++)
610 		if (bucket[i])
611 			(void) fprintf(stderr, "    %3u buckets of size %d\n",
612 				bucket[i], i);
613 
614 	(void) fprintf(stderr, "truss-detected stacks --------\n");
615 	for (Stk = callstack; Stk != NULL; Stk = Stk->next) {
616 		(void) fprintf(stderr,
617 			"    base = 0x%.8lx  end = 0x%.8lx  size = %ld\n",
618 			(ulong_t)Stk->stkbase,
619 			(ulong_t)Stk->stkend,
620 			(ulong_t)(Stk->stkend - Stk->stkbase));
621 	}
622 	(void) fprintf(stderr, "primary unix stack --------\n");
623 	(void) fprintf(stderr,
624 		"    base = 0x%.8lx  end = 0x%.8lx  size = %ld\n",
625 		(ulong_t)Psp->pr_stkbase,
626 		(ulong_t)(Psp->pr_stkbase + Psp->pr_stksize),
627 		(ulong_t)Psp->pr_stksize);
628 	(void) fprintf(stderr, "nthr_create = %u\n", nthr_create);
629 }
630 
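/*
 * Establish the stack bounds for the given lwp, if not already known.
 * Checks the primary stack, the alternate signal stack, the thread
 * stacks known to libc_db, and finally the raw address space map.
 */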
631 void
632 make_lwp_stack(const lwpstatus_t *Lsp, prmap_t *Pmap, int nmap)
633 {
634 	const pstatus_t *Psp = Pstatus(Proc);
635 	uintptr_t sp = Lsp->pr_reg[R_SP];
636 	id_t lwpid = Lsp->pr_lwpid;
637 	struct callstack *Stk;
638 	td_thrhandle_t th;
639 	td_thrinfo_t thrinfo;
640 
641 	if (data_model != PR_MODEL_LP64)
642 		sp = (uint32_t)sp;
643 
644 	/* check to see if we already have this stack */
645 	if (sp == 0)
646 		return;
647 	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
648 		if (sp >= Stk->stkbase && sp < Stk->stkend)
649 			return;
650 
651 	Stk = my_malloc(sizeof (struct callstack), NULL);
652 	Stk->next = callstack;
653 	callstack = Stk;
654 	nstack++;
655 	Stk->tref = 0;
656 	Stk->tid = 0;
657 	Stk->nthr_create = 0;
658 	Stk->ncall = 0;
659 	Stk->maxcall = DEF_MAXCALL;
660 	Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack), NULL);
661 
662 	/* primary stack */
663 	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
664 		Stk->stkbase = Psp->pr_stkbase;
665 		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
666 		return;
667 	}
668 
669 	/* alternate stack */
670 	if ((Lsp->pr_altstack.ss_flags & SS_ONSTACK) &&
671 	    sp >= (uintptr_t)Lsp->pr_altstack.ss_sp &&
672 	    sp < (uintptr_t)Lsp->pr_altstack.ss_sp
673 	    + Lsp->pr_altstack.ss_size) {
674 		Stk->stkbase = (uintptr_t)Lsp->pr_altstack.ss_sp;
675 		Stk->stkend = Stk->stkbase + Lsp->pr_altstack.ss_size;
676 		return;
677 	}
678 
679 	/* thread stacks? */
680 	if (Thr_agent != NULL &&
681 	    td_ta_map_lwp2thr(Thr_agent, lwpid, &th) == TD_OK &&
682 	    td_thr_get_info(&th, &thrinfo) == TD_OK &&
683 	    sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
684 	    sp < (uintptr_t)thrinfo.ti_stkbase) {
685 		/* The bloody fools got this backwards! */
686 		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
687 		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
688 		return;
689 	}
690 
691 	/* last chance -- try the raw memory map */
692 	for (; nmap; nmap--, Pmap++) {
693 		if (sp >= Pmap->pr_vaddr &&
694 		    sp < Pmap->pr_vaddr + Pmap->pr_size) {
695 			Stk->stkbase = Pmap->pr_vaddr;
696 			Stk->stkend = Pmap->pr_vaddr + Pmap->pr_size;
697 			return;
698 		}
699 	}
700 
701 	callstack = Stk->next;
702 	nstack--;
703 	free(Stk->stack);
704 	free(Stk);
705 }
706 
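/*
 * Establish the stack bounds for an unbound thread, if not already
 * known, using the primary stack or the thread's own stack as
 * reported by libc_db.
 */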
707 void
708 make_thr_stack(const td_thrhandle_t *Thp, prgregset_t reg)
709 {
710 	const pstatus_t *Psp = Pstatus(Proc);
711 	td_thrinfo_t thrinfo;
712 	uintptr_t sp = reg[R_SP];
713 	struct callstack *Stk;
714 
715 	if (data_model != PR_MODEL_LP64)
716 		sp = (uint32_t)sp;
717 
718 	/* check to see if we already have this stack */
719 	if (sp == 0)
720 		return;
721 	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
722 		if (sp >= Stk->stkbase && sp < Stk->stkend)
723 			return;
724 
725 	Stk = my_malloc(sizeof (struct callstack), NULL);
726 	Stk->next = callstack;
727 	callstack = Stk;
728 	nstack++;
729 	Stk->tref = 0;
730 	Stk->tid = 0;
731 	Stk->nthr_create = 0;
732 	Stk->ncall = 0;
733 	Stk->maxcall = DEF_MAXCALL;
734 	Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack), NULL);
735 
736 	/* primary stack */
737 	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
738 		Stk->stkbase = Psp->pr_stkbase;
739 		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
740 		return;
741 	}
742 
743 	if (td_thr_get_info(Thp, &thrinfo) == TD_OK &&
744 	    sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
745 	    sp < (uintptr_t)thrinfo.ti_stkbase) {
746 		/* The bloody fools got this backwards! */
747 		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
748 		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
749 		return;
750 	}
751 
752 	callstack = Stk->next;
753 	nstack--;
754 	free(Stk->stack);
755 	free(Stk);
756 }
757 
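/*
 * Create a callstack entry for 'sp' by searching the reserved address
 * space map (/proc/<pid>/rmap).  Returns NULL if no mapping contains sp.
 */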
758 struct callstack *
759 find_lwp_stack(uintptr_t sp)
760 {
761 	const pstatus_t *Psp = Pstatus(Proc);
762 	char mapfile[64];
763 	int mapfd;
764 	struct stat statb;
765 	prmap_t *Pmap = NULL;
766 	prmap_t *pmap = NULL;
767 	int nmap = 0;
768 	struct callstack *Stk = NULL;
769 
770 	/*
771 	 * Get the address space map.
772 	 */
773 	(void) sprintf(mapfile, "/proc/%d/rmap", (int)Psp->pr_pid);
774 	if ((mapfd = open(mapfile, O_RDONLY)) < 0 ||
775 	    fstat(mapfd, &statb) != 0 ||
776 	    statb.st_size < sizeof (prmap_t) ||
777 	    (Pmap = my_malloc(statb.st_size, NULL)) == NULL ||
778 	    (nmap = pread(mapfd, Pmap, statb.st_size, 0L)) <= 0 ||
779 	    (nmap /= sizeof (prmap_t)) == 0) {
780 		if (Pmap != NULL)
781 			free(Pmap);
782 		if (mapfd >= 0)
783 			(void) close(mapfd);
784 		return (NULL);
785 	}
786 	(void) close(mapfd);
787 
788 	for (pmap = Pmap; nmap--; pmap++) {
789 		if (sp >= pmap->pr_vaddr &&
790 		    sp < pmap->pr_vaddr + pmap->pr_size) {
791 			Stk = my_malloc(sizeof (struct callstack), NULL);
792 			Stk->next = callstack;
793 			callstack = Stk;
794 			nstack++;
795 			Stk->stkbase = pmap->pr_vaddr;
796 			Stk->stkend = pmap->pr_vaddr + pmap->pr_size;
797 			Stk->tref = 0;
798 			Stk->tid = 0;
799 			Stk->nthr_create = 0;
800 			Stk->ncall = 0;
801 			Stk->maxcall = DEF_MAXCALL;
802 			Stk->stack = my_malloc(
803 				DEF_MAXCALL * sizeof (*Stk->stack), NULL);
804 			break;
805 		}
806 	}
807 
808 	free(Pmap);
809 	return (Stk);
810 }
811 
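/*
 * Create a callstack entry for 'sp', checking the primary stack, the
 * alternate signal stack, and the thread stacks known to libc_db.
 * Returns NULL if sp cannot be placed within any known stack.
 */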
812 struct callstack *
813 find_stack(uintptr_t sp)
814 {
815 	const pstatus_t *Psp = Pstatus(Proc);
816 	private_t *pri = get_private();
817 	const lwpstatus_t *Lsp = pri->lwpstat;
818 	id_t lwpid = Lsp->pr_lwpid;
819 #if defined(__sparc)
820 	prgreg_t tref = Lsp->pr_reg[R_G7];
821 #elif defined(__amd64)
822 	prgreg_t tref = Lsp->pr_reg[REG_FS];
823 #elif defined(__i386)
824 	prgreg_t tref = Lsp->pr_reg[GS];
825 #endif
826 	struct callstack *Stk = NULL;
827 	td_thrhandle_t th;
828 	td_thrinfo_t thrinfo;
829 	td_err_e error;
830 
831 	/* primary stack */
832 	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
833 		Stk = my_malloc(sizeof (struct callstack), NULL);
834 		Stk->next = callstack;
835 		callstack = Stk;
836 		nstack++;
837 		Stk->stkbase = Psp->pr_stkbase;
838 		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
839 		Stk->tref = 0;
840 		Stk->tid = 0;
841 		Stk->nthr_create = 0;
842 		Stk->ncall = 0;
843 		Stk->maxcall = DEF_MAXCALL;
844 		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
845 			NULL);
846 		return (Stk);
847 	}
848 
849 	/* alternate stack */
850 	if ((Lsp->pr_altstack.ss_flags & SS_ONSTACK) &&
851 	    sp >= (uintptr_t)Lsp->pr_altstack.ss_sp &&
852 	    sp < (uintptr_t)Lsp->pr_altstack.ss_sp
853 	    + Lsp->pr_altstack.ss_size) {
854 		Stk = my_malloc(sizeof (struct callstack), NULL);
855 		Stk->next = callstack;
856 		callstack = Stk;
857 		nstack++;
858 		Stk->stkbase = (uintptr_t)Lsp->pr_altstack.ss_sp;
859 		Stk->stkend = Stk->stkbase + Lsp->pr_altstack.ss_size;
860 		Stk->tref = 0;
861 		Stk->tid = 0;
862 		Stk->nthr_create = 0;
863 		Stk->ncall = 0;
864 		Stk->maxcall = DEF_MAXCALL;
865 		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
866 			NULL);
867 		return (Stk);
868 	}
869 
870 	if (Thr_agent == NULL)
871 		return (find_lwp_stack(sp));
872 
873 	/* thread stacks? */
874 	if ((error = td_ta_map_lwp2thr(Thr_agent, lwpid, &th)) != TD_OK) {
875 		if (hflag)
876 			(void) fprintf(stderr,
877 				"cannot get thread handle for "
878 				"lwp#%d, error=%d, tref=0x%.8lx\n",
879 				(int)lwpid, error, (long)tref);
880 		return (NULL);
881 	}
882 
883 	if ((error = td_thr_get_info(&th, &thrinfo)) != TD_OK) {
884 		if (hflag)
885 			(void) fprintf(stderr,
886 				"cannot get thread info for "
887 				"lwp#%d, error=%d, tref=0x%.8lx\n",
888 				(int)lwpid, error, (long)tref);
889 		return (NULL);
890 	}
891 
892 	if (sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
893 	    sp < (uintptr_t)thrinfo.ti_stkbase) {
894 		Stk = my_malloc(sizeof (struct callstack), NULL);
895 		Stk->next = callstack;
896 		callstack = Stk;
897 		nstack++;
898 		/* The bloody fools got this backwards! */
899 		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
900 		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
901 		Stk->tref = tref;
902 		Stk->tid = thrinfo.ti_tid;
903 		Stk->nthr_create = nthr_create;
904 		Stk->ncall = 0;
905 		Stk->maxcall = DEF_MAXCALL;
906 		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
907 			NULL);
908 		return (Stk);
909 	}
910 
911 	/* stack bounds failure -- complain bitterly */
912 	if (hflag) {
913 		(void) fprintf(stderr,
914 			"sp not within thread stack: "
915 			"sp=0x%.8lx stkbase=0x%.8lx stkend=0x%.8lx\n",
916 			(ulong_t)sp,
917 			/* The bloody fools got this backwards! */
918 			(ulong_t)thrinfo.ti_stkbase - thrinfo.ti_stksize,
919 			(ulong_t)thrinfo.ti_stkbase);
920 	}
921 
922 	return (NULL);
923 }
924 
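/*
 * Fill in the thread identification (tref, tid, nthr_create) for the
 * given stack, mapping the current lwp to its thread via libc_db.
 */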
925 void
926 get_tid(struct callstack *Stk)
927 {
928 	private_t *pri = get_private();
929 	const lwpstatus_t *Lsp = pri->lwpstat;
930 	id_t lwpid = Lsp->pr_lwpid;
931 #if defined(__sparc)
932 	prgreg_t tref = Lsp->pr_reg[R_G7];
933 #elif defined(__amd64)
934 	prgreg_t tref = (data_model == PR_MODEL_LP64) ?
935 	    Lsp->pr_reg[REG_FS] : Lsp->pr_reg[REG_GS];
936 #elif defined(__i386)
937 	prgreg_t tref = Lsp->pr_reg[GS];
938 #endif
939 	td_thrhandle_t th;
940 	td_thrinfo_t thrinfo;
941 	td_err_e error;
942 
943 	if (Thr_agent == NULL) {
944 		Stk->tref = 0;
945 		Stk->tid = 0;
946 		Stk->nthr_create = 0;
947 		return;
948 	}
949 
950 	/*
951 	 * Shortcut here --
952 	 * If we have a matching tref and no new threads have
953 	 * been created since the last time we encountered this
954 	 * stack, then we don't have to go through the overhead
955 	 * of calling td_ta_map_lwp2thr() to get the thread-id.
956 	 */
957 	if (tref == Stk->tref && Stk->nthr_create == nthr_create)
958 		return;
959 
960 	if ((error = td_ta_map_lwp2thr(Thr_agent, lwpid, &th)) != TD_OK) {
961 		if (hflag)
962 			(void) fprintf(stderr,
963 				"cannot get thread handle for "
964 				"lwp#%d, error=%d, tref=0x%.8lx\n",
965 				(int)lwpid, error, (long)tref);
966 		Stk->tref = 0;
967 		Stk->tid = 0;
968 		Stk->nthr_create = 0;
969 	} else if ((error = td_thr_get_info(&th, &thrinfo)) != TD_OK) {
970 		if (hflag)
971 			(void) fprintf(stderr,
972 				"cannot get thread info for "
973 				"lwp#%d, error=%d, tref=0x%.8lx\n",
974 				(int)lwpid, error, (long)tref);
975 		Stk->tref = 0;
976 		Stk->tid = 0;
977 		Stk->nthr_create = 0;
978 	} else {
979 		Stk->tref = tref;
980 		Stk->tid = thrinfo.ti_tid;
981 		Stk->nthr_create = nthr_create;
982 	}
983 }
984 
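/*
 * Return the callstack structure that contains 'sp', growing its bounds
 * or (if 'makeid' is set) creating a new entry as necessary, and ensure
 * there is room to record at least one more call.  Returns NULL if sp
 * is not a readable address.
 */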
985 struct callstack *
986 callstack_info(uintptr_t sp, uintptr_t fp, int makeid)
987 {
988 	struct callstack *Stk;
989 	uintptr_t trash;
990 
991 	if (sp == 0 ||
992 	    Pread(Proc, &trash, sizeof (trash), sp) != sizeof (trash))
993 		return (NULL);
994 
995 	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
996 		if (sp >= Stk->stkbase && sp < Stk->stkend)
997 			break;
998 
999 	/*
1000 	 * If we didn't find the stack, do it the hard way.
1001 	 */
1002 	if (Stk == NULL) {
1003 		uintptr_t stkbase = sp;
1004 		uintptr_t stkend;
1005 		uint_t minsize;
1006 
1007 #if defined(i386) || defined(__amd64)
1008 #ifdef _LP64
1009 		if (data_model == PR_MODEL_LP64)
1010 			minsize = 2 * sizeof (uintptr_t);	/* fp + pc */
1011 		else
1012 #endif
1013 			minsize = 2 * sizeof (uint32_t);
1014 #else
1015 #ifdef _LP64
1016 		if (data_model != PR_MODEL_LP64)
1017 			minsize = SA32(MINFRAME32);
1018 		else
1019 			minsize = SA64(MINFRAME64);
1020 #else
1021 		minsize = SA(MINFRAME);
1022 #endif
1023 #endif	/* i386 */
1024 		stkend = sp + minsize;
1025 
1026 		while (Stk == NULL && fp != 0 && fp >= sp) {
1027 			stkend = fp + minsize;
1028 			for (Stk = callstack; Stk != NULL; Stk = Stk->next)
1029 				if ((fp >= Stk->stkbase && fp < Stk->stkend) ||
1030 				    (stkend > Stk->stkbase &&
1031 				    stkend <= Stk->stkend))
1032 					break;
1033 			if (Stk == NULL)
1034 				fp = previous_fp(fp, NULL);
1035 		}
1036 
1037 		if (Stk != NULL)	/* the stack grew */
1038 			Stk->stkbase = stkbase;
1039 	}
1040 
1041 	if (Stk == NULL && makeid)	/* new stack */
1042 		Stk = find_stack(sp);
1043 
1044 	if (Stk == NULL)
1045 		return (NULL);
1046 
1047 	/*
1048 	 * Ensure that there is room for at least one more entry.
1049 	 */
1050 	if (Stk->ncall == Stk->maxcall) {
1051 		Stk->maxcall *= 2;
1052 		Stk->stack = my_realloc(Stk->stack,
1053 		    Stk->maxcall * sizeof (*Stk->stack), NULL);
1054 	}
1055 
1056 	if (makeid)
1057 		get_tid(Stk);
1058 
1059 	return (Stk);
1060 }
1061 
1062 /*
1063  * Reset the breakpoint information (called on successful exec()).
1064  */
1065 void
1066 reset_breakpoints(void)
1067 {
1068 	struct dynlib *Dp;
1069 	struct bkpt *Bp;
1070 	struct callstack *Stk;
1071 	int i;
1072 
1073 	if (Dynpat == NULL)
1074 		return;
1075 
1076 	/* destroy all previous dynamic library information */
1077 	while ((Dp = Dyn) != NULL) {
1078 		Dyn = Dp->next;
1079 		free(Dp->lib_name);
1080 		free(Dp->match_name);
1081 		free(Dp->prt_name);
1082 		free(Dp);
1083 	}
1084 
1085 	/* destroy all previous breakpoint trap information */
1086 	if (bpt_hashtable != NULL) {
1087 		for (i = 0; i < HASHSZ; i++) {
1088 			while ((Bp = bpt_hashtable[i]) != NULL) {
1089 				bpt_hashtable[i] = Bp->next;
1090 				if (Bp->sym_name)
1091 					free(Bp->sym_name);
1092 				free(Bp);
1093 			}
1094 		}
1095 	}
1096 
1097 	/* destroy all the callstack information */
1098 	while ((Stk = callstack) != NULL) {
1099 		callstack = Stk->next;
1100 		free(Stk->stack);
1101 		free(Stk);
1102 	}
1103 
1104 	/* we are not a multi-threaded process anymore */
1105 	if (Thr_agent != NULL)
1106 		(void) td_ta_delete(Thr_agent);
1107 	Thr_agent = NULL;
1108 
1109 	/* tell libproc to clear out its mapping information */
1110 	Preset_maps(Proc);
1111 	Rdb_agent = NULL;
1112 
1113 	/* Reestablish the symbols from the executable */
1114 	(void) establish_breakpoints();
1115 }
1116 
1117 /*
1118  * Clear breakpoints from the process (called before Prelease()).
1119  * Don't actually destroy the breakpoint table;
1120  * threads currently fielding breakpoints will need it.
1121  */
1122 void
1123 clear_breakpoints(void)
1124 {
1125 	struct bkpt *Bp;
1126 	int i;
1127 
1128 	if (Dynpat == NULL)
1129 		return;
1130 
1131 	/*
1132 	 * Change all breakpoint traps back to normal instructions.
1133 	 * We attempt to remove a breakpoint from every address which
1134 	 * may have ever contained a breakpoint to protect our victims.
1135 	 */
1136 	report_htable_stats();	/* report stats first */
1137 	for (i = 0; i < HASHSZ; i++) {
1138 		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
1139 			if (Bp->flags & BPT_ACTIVE)
1140 				(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
1141 			Bp->flags &= ~BPT_ACTIVE;
1142 		}
1143 	}
1144 
1145 	if (Thr_agent != NULL) {
1146 		td_thr_events_t events;
1147 
1148 		td_event_emptyset(&events);
1149 		(void) td_ta_set_event(Thr_agent, &events);
1150 		(void) td_ta_delete(Thr_agent);
1151 	}
1152 	Thr_agent = NULL;
1153 }
1154 
1155 /*
1156  * Reestablish the breakpoint traps in the process.
1157  * Called after resuming from a vfork() in the parent.
1158  */
1159 void
1160 reestablish_traps(void)
1161 {
1162 	struct bkpt *Bp;
1163 	ulong_t instr;
1164 	int i;
1165 
1166 	if (Dynpat == NULL || is_vfork_child)
1167 		return;
1168 
1169 	for (i = 0; i < HASHSZ; i++) {
1170 		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
1171 			if ((Bp->flags & BPT_ACTIVE) &&
1172 			    Psetbkpt(Proc, Bp->addr, &instr) != 0)
1173 				Bp->flags &= ~BPT_ACTIVE;
1174 		}
1175 	}
1176 }
1177 
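/*
 * Report a traced function call:  '-> lib:func(arg, ...)',
 * indented according to the current call depth.
 */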
1178 void
1179 show_function_call(private_t *pri,
1180 	struct callstack *Stk, struct dynlib *Dp, struct bkpt *Bp)
1181 {
1182 	long arg[8];
1183 	int narg;
1184 	int i;
1185 
1186 	narg = get_arguments(arg);
1187 	make_pname(pri, (Stk != NULL)? Stk->tid : 0);
1188 	putpname(pri);
1189 	timestamp(pri);
1190 	if (Stk != NULL) {
1191 		for (i = 1; i < Stk->ncall; i++) {
1192 			(void) fputc(' ', stdout);
1193 			(void) fputc(' ', stdout);
1194 		}
1195 	}
1196 	(void) printf("-> %s%s(", Dp->prt_name, Bp->sym_name);
1197 	for (i = 0; i < narg; i++) {
1198 		(void) printf("0x%lx", arg[i]);
1199 		if (i < narg-1) {
1200 			(void) fputc(',', stdout);
1201 			(void) fputc(' ', stdout);
1202 		}
1203 	}
1204 	(void) printf(")\n");
1205 	Flush();
1206 }
1207 
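/*
 * Report a traced function return:  '<- lib:func() = rval',
 * indented according to the current call depth.
 */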
1208 /* ARGSUSED */
1209 void
1210 show_function_return(private_t *pri, long rval, int stret,
1211 	struct callstack *Stk, struct dynlib *Dp, struct bkpt *Bp)
1212 {
1213 	int i;
1214 
1215 	make_pname(pri, Stk->tid);
1216 	putpname(pri);
1217 	timestamp(pri);
1218 	for (i = 0; i < Stk->ncall; i++) {
1219 		(void) fputc(' ', stdout);
1220 		(void) fputc(' ', stdout);
1221 	}
1222 	(void) printf("<- %s%s() = ", Dp->prt_name, Bp->sym_name);
1223 	if (stret) {
1224 		(void) printf("struct return\n");
1225 	} else if (data_model == PR_MODEL_LP64) {
1226 		if (rval >= (64 * 1024) || -rval >= (64 * 1024))
1227 			(void) printf("0x%lx\n", rval);
1228 		else
1229 			(void) printf("%ld\n", rval);
1230 	} else {
1231 		int rval32 = (int)rval;
1232 		if (rval32 >= (64 * 1024) || -rval32 >= (64 * 1024))
1233 			(void) printf("0x%x\n", rval32);
1234 		else
1235 			(void) printf("%d\n", rval32);
1236 	}
1237 	Flush();
1238 }
1239 
1240 /*
1241  * Called to deal with function-call tracing.
1242  * Return 0 on normal success, 1 to indicate a BPT_HANG success,
1243  * and -1 on failure (not tracing functions or unknown breakpoint).
1244  */
1245 int
1246 function_trace(private_t *pri, int first, int clear, int dotrace)
1247 {
1248 	struct ps_lwphandle *Lwp = pri->Lwp;
1249 	const lwpstatus_t *Lsp = pri->lwpstat;
1250 	uintptr_t pc = Lsp->pr_reg[R_PC];
1251 	uintptr_t sp = Lsp->pr_reg[R_SP];
1252 	uintptr_t fp = Lsp->pr_reg[R_FP];
1253 	struct bkpt *Bp;
1254 	struct dynlib *Dp;
1255 	struct callstack *Stk;
1256 	ulong_t instr;
1257 	int active;
1258 	int rval = 0;
1259 
1260 	if (Dynpat == NULL)
1261 		return (-1);
1262 
1263 	if (data_model != PR_MODEL_LP64) {
1264 		pc = (uint32_t)pc;
1265 		sp = (uint32_t)sp;
1266 		fp = (uint32_t)fp;
1267 	}
1268 
1269 	if ((Bp = get_bkpt(pc)) == NULL) {
1270 		if (hflag)
1271 			(void) fprintf(stderr,
1272 				"function_trace(): "
1273 				"cannot find breakpoint for pc: 0x%.8lx\n",
1274 				(ulong_t)pc);
1275 		return (-1);
1276 	}
1277 
1278 	if ((Bp->flags & (BPT_PREINIT|BPT_POSTINIT|BPT_DLACTIVITY)) && !clear) {
1279 		rd_event_msg_t event_msg;
1280 
1281 		if (hflag) {
1282 			if (Bp->flags & BPT_PREINIT)
1283 				(void) fprintf(stderr, "function_trace(): "
1284 					"RD_PREINIT breakpoint\n");
1285 			if (Bp->flags & BPT_POSTINIT)
1286 				(void) fprintf(stderr, "function_trace(): "
1287 					"RD_POSTINIT breakpoint\n");
1288 			if (Bp->flags & BPT_DLACTIVITY)
1289 				(void) fprintf(stderr, "function_trace(): "
1290 					"RD_DLACTIVITY breakpoint\n");
1291 		}
1292 		if (rd_event_getmsg(Rdb_agent, &event_msg) == RD_OK) {
1293 			if (event_msg.type == RD_DLACTIVITY) {
1294 				switch (event_msg.u.state) {
1295 				case RD_CONSISTENT:
1296 					establish_breakpoints();
1297 					break;
1298 				case RD_ADD:
1299 					not_consist = TRUE;	/* kludge */
1300 					establish_breakpoints();
1301 					not_consist = FALSE;
1302 					break;
1303 				case RD_DELETE:
1304 					delete_library = TRUE;
1305 					break;
1306 				default:
1307 					break;
1308 				}
1309 			}
1310 			if (hflag) {
1311 				const char *et;
1312 				char buf[32];
1313 
1314 				switch (event_msg.type) {
1315 				case RD_NONE:
1316 					et = "RD_NONE";
1317 					break;
1318 				case RD_PREINIT:
1319 					et = "RD_PREINIT";
1320 					break;
1321 				case RD_POSTINIT:
1322 					et = "RD_POSTINIT";
1323 					break;
1324 				case RD_DLACTIVITY:
1325 					et = "RD_DLACTIVITY";
1326 					break;
1327 				default:
1328 					(void) sprintf(buf, "0x%x",
1329 						event_msg.type);
1330 					et = buf;
1331 					break;
1332 				}
1333 				(void) fprintf(stderr,
1334 					"event_msg.type = %s ", et);
1335 				switch (event_msg.u.state) {
1336 				case RD_NOSTATE:
1337 					et = "RD_NOSTATE";
1338 					break;
1339 				case RD_CONSISTENT:
1340 					et = "RD_CONSISTENT";
1341 					break;
1342 				case RD_ADD:
1343 					et = "RD_ADD";
1344 					break;
1345 				case RD_DELETE:
1346 					et = "RD_DELETE";
1347 					break;
1348 				default:
1349 					(void) sprintf(buf, "0x%x",
1350 						event_msg.u.state);
1351 					et = buf;
1352 					break;
1353 				}
1354 				(void) fprintf(stderr,
1355 					"event_msg.u.state = %s\n", et);
1356 			}
1357 		}
1358 	}
1359 
1360 	if ((Bp->flags & BPT_TD_CREATE) && !clear) {
1361 		nthr_create++;
1362 		if (hflag)
1363 			(void) fprintf(stderr, "function_trace(): "
1364 				"BPT_TD_CREATE breakpoint\n");
1365 		/* we don't care about the event message */
1366 	}
1367 
1368 	Dp = Bp->dyn;
1369 
1370 	if (dotrace) {
1371 		if ((Stk = callstack_info(sp, fp, 1)) == NULL) {
1372 			if (Dp != NULL && !clear) {
1373 				if (cflag) {
1374 					add_fcall(fcall_tbl, Dp->prt_name,
1375 					    Bp->sym_name, (unsigned long)1);
1376 				} else {
1377 					show_function_call(pri, NULL, Dp, Bp);
1378 				}
1379 				if ((Bp->flags & BPT_HANG) && !first)
1380 					rval = 1;
1381 			}
1382 		} else if (!clear) {
1383 			if (Dp != NULL) {
1384 				function_entry(pri, Bp, Stk);
1385 				if ((Bp->flags & BPT_HANG) && !first)
1386 					rval = 1;
1387 			} else {
1388 				function_return(pri, Stk);
1389 			}
1390 		}
1391 	}
1392 
1393 	/*
1394 	 * Single-step the traced instruction. Since it's possible that
1395 	 * another thread has deactivated this breakpoint, we indicate
1396 	 * that we have reactivated it by virtue of executing it.
1397 	 *
1398 	 * To avoid a deadlock with some other thread in the process
1399 	 * performing a fork() or a thr_suspend() operation, we must
1400 	 * drop and later reacquire truss_lock.  Some fancy dancing here.
1401 	 */
1402 	active = (Bp->flags & BPT_ACTIVE);
1403 	Bp->flags |= BPT_ACTIVE;
1404 	instr = Bp->instr;
1405 	(void) mutex_unlock(&truss_lock);
1406 	(void) Lxecbkpt(Lwp, instr);
1407 	(void) mutex_lock(&truss_lock);
1408 
1409 	if (rval || clear) {	/* leave process stopped and abandoned */
1410 #if defined(__i386)
1411 		/*
1412 		 * Leave it stopped in a state from which a stack trace is reasonable.
1413 		 */
1414 		/* XX64 needs to be updated for amd64 & gcc */
1415 		if (rval && instr == 0x55) {	/* pushl %ebp */
1416 			/* step it over the movl %esp,%ebp */
1417 			(void) mutex_unlock(&truss_lock);
1418 			(void) Lsetrun(Lwp, 0, PRCFAULT|PRSTEP);
1419 			/* we're wrapping up; wait one second at most */
1420 			(void) Lwait(Lwp, MILLISEC);
1421 			(void) mutex_lock(&truss_lock);
1422 		}
1423 #endif
1424 		if (get_bkpt(pc) != Bp)
1425 			abend("function_trace: lost breakpoint", NULL);
1426 		(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
1427 		Bp->flags &= ~BPT_ACTIVE;
1428 		(void) mutex_unlock(&truss_lock);
1429 		(void) Lsetrun(Lwp, 0, PRCFAULT|PRSTOP);
1430 		/* we're wrapping up; wait one second at most */
1431 		(void) Lwait(Lwp, MILLISEC);
1432 		(void) mutex_lock(&truss_lock);
1433 	} else {
1434 		if (get_bkpt(pc) != Bp)
1435 			abend("function_trace: lost breakpoint", NULL);
1436 		if (!active || !(Bp->flags & BPT_ACTIVE)) {
1437 			(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
1438 			Bp->flags &= ~BPT_ACTIVE;
1439 		}
1440 	}
1441 	return (rval);
1442 }
1443 
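/*
 * Handle a breakpoint at the entry to a traced function:  record the
 * call on the callstack, set a breakpoint at the return address (unless
 * we are only counting calls), and report or count the call.
 */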
1444 void
1445 function_entry(private_t *pri, struct bkpt *Bp, struct callstack *Stk)
1446 {
1447 	const lwpstatus_t *Lsp = pri->lwpstat;
1448 	uintptr_t sp = Lsp->pr_reg[R_SP];
1449 	uintptr_t rpc = get_return_address(&sp);
1450 	struct dynlib *Dp = Bp->dyn;
1451 	int oldframe = FALSE;
1452 	int i;
1453 
1454 #ifdef _LP64
1455 	if (data_model != PR_MODEL_LP64) {
1456 		sp = (uint32_t)sp;
1457 		rpc = (uint32_t)rpc;
1458 	}
1459 #endif
1460 
1461 	/*
1462 	 * If the sp is not within the stack bounds, forget it.
1463 	 * If the symbol's 'internal' flag is false,
1464 	 * don't report internal calls within the library.
1465 	 */
1466 	if (!(sp >= Stk->stkbase && sp < Stk->stkend) ||
1467 	    (!(Bp->flags & BPT_INTERNAL) &&
1468 	    rpc >= Dp->base && rpc < Dp->base + Dp->size))
1469 		return;
1470 
1471 	for (i = 0; i < Stk->ncall; i++) {
1472 		if (sp >= Stk->stack[i].sp) {
1473 			Stk->ncall = i;
1474 			if (sp == Stk->stack[i].sp)
1475 				oldframe = TRUE;
1476 			break;
1477 		}
1478 	}
1479 
1480 	/*
1481 	 * Breakpoints for function returns are set here.
1482 	 * If we're counting function calls, there is no need to set
1483 	 * a breakpoint upon return.
1484 	 */
1485 
1486 	if (!oldframe && !cflag) {
1487 		(void) create_bkpt(rpc, 1, 1); /* may or may not be set */
1488 		Stk->stack[Stk->ncall].sp = sp;	/* record it anyway */
1489 		Stk->stack[Stk->ncall].pc = rpc;
1490 		Stk->stack[Stk->ncall].fcn = Bp;
1491 	}
1492 	Stk->ncall++;
1493 	if (cflag) {
1494 		add_fcall(fcall_tbl, Dp->prt_name, Bp->sym_name,
1495 		    (unsigned long)1);
1496 	} else {
1497 		show_function_call(pri, Stk, Dp, Bp);
1498 	}
1499 }
1500 
1501 /*
1502  * We are here because we hit an unnamed breakpoint.
1503  * Attempt to match this up with a return pc on the stack
1504  * and report the function return.
1505  */
1506 void
1507 function_return(private_t *pri, struct callstack *Stk)
1508 {
1509 	const lwpstatus_t *Lsp = pri->lwpstat;
1510 	uintptr_t sp = Lsp->pr_reg[R_SP];
1511 	uintptr_t fp = Lsp->pr_reg[R_FP];
1512 	int i;
1513 
1514 #ifdef _LP64
1515 	if (data_model != PR_MODEL_LP64) {
1516 		sp = (uint32_t)sp;
1517 		fp = (uint32_t)fp;
1518 	}
1519 #endif
1520 
1521 	if (fp < sp + 8)
1522 		fp = sp + 8;
1523 
1524 	for (i = Stk->ncall - 1; i >= 0; i--) {
1525 		if (sp <= Stk->stack[i].sp && fp > Stk->stack[i].sp) {
1526 			Stk->ncall = i;
1527 			break;
1528 		}
1529 	}
1530 
1531 #if defined(i386) || defined(__amd64)
1532 	if (i < 0) {
1533 		/* probably __mul64() or friends -- try harder */
1534 		int j;
1535 		for (j = 0; i < 0 && j < 8; j++) {	/* up to 8 args */
1536 			sp -= 4;
1537 			for (i = Stk->ncall - 1; i >= 0; i--) {
1538 				if (sp <= Stk->stack[i].sp &&
1539 				    fp > Stk->stack[i].sp) {
1540 					Stk->ncall = i;
1541 					break;
1542 				}
1543 			}
1544 		}
1545 	}
1546 #endif
1547 
1548 	if ((i >= 0) && (!cflag)) {
1549 		show_function_return(pri, Lsp->pr_reg[R_R0], 0,
1550 			Stk, Stk->stack[i].fcn->dyn, Stk->stack[i].fcn);
1551 	}
1552 }
1553 
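/*
 * FPADJUST is the architecture-dependent adjustment applied to a saved
 * frame pointer to approximate the stack pointer at the time of the
 * call (see trap_one_stack()).
 */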
1554 #if defined(__sparc)
1555 #define	FPADJUST	0
1556 #elif defined(__amd64)
1557 #define	FPADJUST	8
1558 #elif defined(__i386)
1559 #define	FPADJUST	4
1560 #endif
1561 
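/*
 * Walk one stack, frame by frame, and plant return breakpoints for
 * every traced function found active on it.  Used when first grabbing
 * a process that is already running.
 */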
1562 void
1563 trap_one_stack(prgregset_t reg)
1564 {
1565 	struct dynlib *Dp;
1566 	struct bkpt *Bp;
1567 	struct callstack *Stk;
1568 	GElf_Sym sym;
1569 	char sym_name[32];
1570 	uintptr_t sp = reg[R_SP];
1571 	uintptr_t pc = reg[R_PC];
1572 	uintptr_t fp;
1573 	uintptr_t rpc;
1574 	uint_t nframe = 0;
1575 	uint_t maxframe = 8;
1576 	struct {
1577 		uintptr_t sp;		/* %sp within called function */
1578 		uintptr_t pc;		/* %pc within called function */
1579 		uintptr_t rsp;		/* the return sp */
1580 		uintptr_t rpc;		/* the return pc */
1581 	} *frame = my_malloc(maxframe * sizeof (*frame), NULL);
1582 
1583 	/*
1584 	 * Gather stack frames bottom to top.
1585 	 */
1586 	while (sp != 0) {
1587 		fp = sp;	/* remember highest non-null sp */
1588 		frame[nframe].sp = sp;
1589 		frame[nframe].pc = pc;
1590 		sp = previous_fp(sp, &pc);
1591 		frame[nframe].rsp = sp;
1592 		frame[nframe].rpc = pc;
1593 		if (++nframe == maxframe) {
1594 			maxframe *= 2;
1595 			frame = my_realloc(frame, maxframe * sizeof (*frame),
1596 				NULL);
1597 		}
1598 	}
1599 
1600 	/*
1601 	 * Scan for function return breakpoints top to bottom.
1602 	 */
1603 	while (nframe--) {
1604 		/* lookup the called function in the symbol tables */
1605 		if (Plookup_by_addr(Proc, frame[nframe].pc, sym_name,
1606 		    sizeof (sym_name), &sym) != 0)
1607 			continue;
1608 
1609 		pc = sym.st_value;	/* entry point of the function */
1610 		rpc = frame[nframe].rpc;	/* caller's return pc */
1611 
1612 		/* lookup the function in the breakpoint table */
1613 		if ((Bp = get_bkpt(pc)) == NULL || (Dp = Bp->dyn) == NULL)
1614 			continue;
1615 
1616 		if (!(Bp->flags & BPT_INTERNAL) &&
1617 		    rpc >= Dp->base && rpc < Dp->base + Dp->size)
1618 			continue;
1619 
1620 		sp = frame[nframe].rsp + FPADJUST;  /* %sp at time of call */
1621 		if ((Stk = callstack_info(sp, fp, 0)) == NULL)
1622 			continue;	/* can't happen? */
1623 
1624 		if (create_bkpt(rpc, 1, 1) != NULL) {
1625 			Stk->stack[Stk->ncall].sp = sp;
1626 			Stk->stack[Stk->ncall].pc = rpc;
1627 			Stk->stack[Stk->ncall].fcn = Bp;
1628 			Stk->ncall++;
1629 		}
1630 	}
1631 
1632 	free(frame);
1633 }
1634 
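/*
 * Plwp_iter() callback:  record the lwp's stack and set return
 * breakpoints for the traced functions already active on it.
 */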
1635 int
1636 lwp_stack_traps(void *cd, const lwpstatus_t *Lsp)
1637 {
1638 	ph_map_t *ph_map = (ph_map_t *)cd;
1639 	prgregset_t reg;
1640 
1641 	(void) memcpy(reg, Lsp->pr_reg, sizeof (prgregset_t));
1642 	make_lwp_stack(Lsp, ph_map->pmap, ph_map->nmap);
1643 	trap_one_stack(reg);
1644 
1645 	return (interrupt | sigusr1);
1646 }
1647 
1648 /* ARGSUSED */
1649 int
1650 thr_stack_traps(const td_thrhandle_t *Thp, void *cd)
1651 {
1652 	prgregset_t reg;
1653 
1654 	/*
1655 	 * We have already dealt with all the lwps.
1656 	 * We only care about unbound threads here (TD_PARTIALREG).
1657 	 */
1658 	if (td_thr_getgregs(Thp, reg) != TD_PARTIALREG)
1659 		return (0);
1660 
1661 	make_thr_stack(Thp, reg);
1662 	trap_one_stack(reg);
1663 
1664 	return (interrupt | sigusr1);
1665 }
1666 
1667 #if defined(__sparc)
1668 
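/*
 * Return the previous frame pointer, and the return pc through *rpc,
 * from the register window saved at 'sp'.  Returns 0 if the window
 * or the previous frame cannot be read.
 */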
1669 uintptr_t
1670 previous_fp(uintptr_t sp, uintptr_t *rpc)
1671 {
1672 	uintptr_t fp = 0;
1673 	uintptr_t pc = 0;
1674 
1675 #ifdef _LP64
1676 	if (data_model == PR_MODEL_LP64) {
1677 		struct rwindow64 rwin;
1678 		if (Pread(Proc, &rwin, sizeof (rwin), sp + STACK_BIAS)
1679 		    == sizeof (rwin)) {
1680 			fp = (uintptr_t)rwin.rw_fp;
1681 			pc = (uintptr_t)rwin.rw_rtn;
1682 		}
1683 		if (fp != 0 &&
1684 		    Pread(Proc, &rwin, sizeof (rwin), fp + STACK_BIAS)
1685 		    != sizeof (rwin))
1686 			fp = pc = 0;
1687 	} else {
1688 		struct rwindow32 rwin;
1689 #else	/* _LP64 */
1690 		struct rwindow rwin;
1691 #endif	/* _LP64 */
1692 		if (Pread(Proc, &rwin, sizeof (rwin), sp) == sizeof (rwin)) {
1693 			fp = (uint32_t)rwin.rw_fp;
1694 			pc = (uint32_t)rwin.rw_rtn;
1695 		}
1696 		if (fp != 0 &&
1697 		    Pread(Proc, &rwin, sizeof (rwin), fp) != sizeof (rwin))
1698 			fp = pc = 0;
1699 #ifdef _LP64
1700 	}
1701 #endif
1702 	if (rpc)
1703 		*rpc = pc;
1704 	return (fp);
1705 }
1706 
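/*
 * Return the return address of the current call:  %o7 + 8, skipping
 * one more instruction if the call is followed by a struct-return
 * size word.
 */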
1707 /* ARGSUSED */
1708 uintptr_t
1709 get_return_address(uintptr_t *psp)
1710 {
1711 	instr_t inst;
1712 	private_t *pri = get_private();
1713 	const lwpstatus_t *Lsp = pri->lwpstat;
1714 	uintptr_t rpc;
1715 
1716 	rpc = (uintptr_t)Lsp->pr_reg[R_O7] + 8;
1717 	if (data_model != PR_MODEL_LP64)
1718 		rpc = (uint32_t)rpc;
1719 
1720 	/* check for structure return (bletch!) */
1721 	if (Pread(Proc, &inst, sizeof (inst), rpc) == sizeof (inst) &&
1722 	    inst < 0x1000)
1723 		rpc += sizeof (instr_t);
1724 
1725 	return (rpc);
1726 }
1727 
1728 int
1729 get_arguments(long *argp)
1730 {
1731 	private_t *pri = get_private();
1732 	const lwpstatus_t *Lsp = pri->lwpstat;
1733 	int i;
1734 
1735 	if (data_model != PR_MODEL_LP64)
1736 		for (i = 0; i < 4; i++)
1737 			argp[i] = (uint_t)Lsp->pr_reg[R_O0+i];
1738 	else
1739 		for (i = 0; i < 4; i++)
1740 			argp[i] = (long)Lsp->pr_reg[R_O0+i];
1741 	return (4);
1742 }
1743 
1744 #endif	/* __sparc */
1745 
1746 #if defined(__i386) || defined(__amd64)
1747 
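/*
 * Return the saved frame pointer, and the return pc through *rpc,
 * read from the frame at 'fp'.  Returns 0 if the frame or the
 * previous frame cannot be read.
 */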
1748 uintptr_t
1749 previous_fp(uintptr_t fp, uintptr_t *rpc)
1750 {
1751 	uintptr_t frame[2];
1752 	uintptr_t trash[2];
1753 
1754 	if (Pread(Proc, frame, sizeof (frame), fp) != sizeof (frame) ||
1755 	    (frame[0] != 0 &&
1756 	    Pread(Proc, trash, sizeof (trash), frame[0]) != sizeof (trash)))
1757 		frame[0] = frame[1] = 0;
1758 
1759 	if (rpc)
1760 		*rpc = frame[1];
1761 	return (frame[0]);
1762 }
1763 
1764 #endif
1765 
1766 #if defined(__amd64) || defined(__i386)
1767 
1768 /*
1769  * Examine the instruction at the return location of a function call
1770  * and return the byte count by which the stack is adjusted on return.
1771  * If the instruction at the return location is an addl, as expected,
1772  * then adjust the return pc by the size of that instruction so that
1773  * we will place the return breakpoint on the following instruction.
1774  * This allows programs that interrogate their own stacks and record
1775  * function calls and arguments to work correctly even while we interfere.
1776  * Return the count on success, -1 on failure.
1777  */
1778 int
1779 return_count32(uint32_t *ppc)
1780 {
1781 	uintptr_t pc = *ppc;
1782 	struct bkpt *Bp;
1783 	int count;
1784 	uchar_t instr[6];	/* instruction at pc */
1785 
1786 	if ((count = Pread(Proc, instr, sizeof (instr), pc)) < 0)
1787 		return (-1);
1788 
1789 	/* find the replaced instruction at pc (if any) */
1790 	if ((Bp = get_bkpt(pc)) != NULL && (Bp->flags & BPT_ACTIVE))
1791 		instr[0] = (uchar_t)Bp->instr;
1792 
1793 	if (count != sizeof (instr) &&
1794 	    (count < 3 || instr[0] != 0x83))
1795 		return (-1);
1796 
1797 	/*
1798 	 * A bit of disassembly of the instruction is required here.
1799 	 */
1800 	if (instr[1] != 0xc4) {	/* not an addl mumble,%esp instruction */
1801 		count = 0;
1802 	} else if (instr[0] == 0x81) {	/* count is a longword */
1803 		count = instr[2]+(instr[3]<<8)+(instr[4]<<16)+(instr[5]<<24);
1804 		*ppc += 6;
1805 	} else if (instr[0] == 0x83) {	/* count is a byte */
1806 		count = instr[2];
1807 		*ppc += 3;
1808 	} else {		/* not an addl instruction */
1809 		count = 0;
1810 	}
1811 
1812 	return (count);
1813 }
1814 
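/*
 * Read the 32-bit return address from the top of the stack and advance
 * *psp past the return address and the argument bytes that the caller
 * removes on return.
 */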
1815 uintptr_t
1816 get_return_address32(uintptr_t *psp)
1817 {
1818 	uint32_t sp = *psp;
1819 	uint32_t rpc;
1820 	int count;
1821 
1822 	*psp += 4;	/* account for popping the stack on return */
1823 	if (Pread(Proc, &rpc, sizeof (rpc), sp) != sizeof (rpc))
1824 		return (0);
1825 	if ((count = return_count32(&rpc)) < 0)
1826 		count = 0;
1827 	*psp += count;		/* expected sp on return */
1828 	return (rpc);
1829 }
1830 
1831 uintptr_t
1832 get_return_address(uintptr_t *psp)
1833 {
1834 #ifdef _LP64
1835 	uintptr_t rpc;
1836 	uintptr_t sp = *psp;
1837 
1838 	if (data_model == PR_MODEL_LP64) {
1839 		if (Pread(Proc, &rpc, sizeof (rpc), sp) != sizeof (rpc))
1840 			return (0);
1841 		/*
1842 		 * Ignore arguments pushed on the stack.  See comments in
1843 		 * get_arguments().
1844 		 */
1845 		return (rpc);
1846 	} else
1847 #endif
1848 		return (get_return_address32(psp));
1849 }
1850 
1851 
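/*
 * Fetch up to four 32-bit arguments from the stack.  The count reported
 * is limited by the stack adjustment the caller makes on return.
 */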
1852 int
1853 get_arguments32(long *argp)
1854 {
1855 	private_t *pri = get_private();
1856 	const lwpstatus_t *Lsp = pri->lwpstat;
1857 	uint32_t frame[5];	/* return pc + 4 args */
1858 	int narg;
1859 	int count;
1860 	int i;
1861 
1862 	narg = Pread(Proc, frame, sizeof (frame),
1863 		(uintptr_t)Lsp->pr_reg[R_SP]);
1864 	narg -= sizeof (greg32_t);
1865 	if (narg <= 0)
1866 		return (0);
1867 	narg /= sizeof (greg32_t); /* no more than 4 */
1868 
1869 	/*
1870 	 * Given the return PC, determine the number of arguments.
1871 	 */
1872 	if ((count = return_count32(&frame[0])) < 0)
1873 		narg = 0;
1874 	else {
1875 		count /= sizeof (greg32_t);
1876 		if (narg > count)
1877 			narg = count;
1878 	}
1879 
1880 	for (i = 0; i < narg; i++)
1881 		argp[i] = (long)frame[i+1];
1882 
1883 	return (narg);
1884 }
1885 
1886 int
1887 get_arguments(long *argp)
1888 {
1889 #ifdef _LP64
1890 	private_t *pri = get_private();
1891 	const lwpstatus_t *Lsp = pri->lwpstat;
1892 
1893 	if (data_model == PR_MODEL_LP64) {
1894 		/*
1895 		 * On amd64, we do not know how many arguments are passed to
1896 		 * each function.  While it may be possible to detect if we
1897 		 * have more than 6 arguments, it is of marginal value.
1898 		 * Instead, assume that we always have 6 arguments, which are
1899 		 * passed via registers.
1900 		 */
1901 		argp[0] = Lsp->pr_reg[REG_RDI];
1902 		argp[1] = Lsp->pr_reg[REG_RSI];
1903 		argp[2] = Lsp->pr_reg[REG_RDX];
1904 		argp[3] = Lsp->pr_reg[REG_RCX];
1905 		argp[4] = Lsp->pr_reg[REG_R8];
1906 		argp[5] = Lsp->pr_reg[REG_R9];
1907 		return (6);
1908 	} else
1909 #endif
1910 		return (get_arguments32(argp));
1911 }
1912 
1913 #endif	/* __amd64 || __i386 */
1914