xref: /titanic_52/usr/src/cmd/truss/fcall.c (revision 88447a05f537aabe9a1bc3d5313f22581ec992a7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #define	_SYSCALL32
28 
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <unistd.h>
32 #include <ctype.h>
33 #include <string.h>
34 #include <memory.h>
35 #include <errno.h>
36 #include <sys/types.h>
37 #include <sys/stack.h>
38 #include <signal.h>
39 #include <limits.h>
40 #include <sys/isa_defs.h>
41 #include <proc_service.h>
42 #include <dlfcn.h>
43 #include <fnmatch.h>
44 #include <libproc.h>
45 #include "ramdata.h"
46 #include "systable.h"
47 #include "print.h"
48 #include "proto.h"
49 #include "htbl.h"
50 
51 /*
52  * Functions supporting library function call tracing.
53  */
54 
/*
 * Carries the raw /proc/<pid>/rmap address-space map (and its entry
 * count) through Plwp_iter() to the per-lwp stack discovery code.
 */
typedef struct {
	prmap_t	*pmap;	/* array of mappings read from /proc/<pid>/rmap */
	int	nmap;	/* number of entries in pmap */
} ph_map_t;

/*
 * Functions local to this file (forward declarations).
 */
void function_entry(private_t *, struct bkpt *, struct callstack *);
void function_return(private_t *, struct callstack *);
int object_iter(void *, const prmap_t *, const char *);
int object_present(void *, const prmap_t *, const char *);
int symbol_iter(void *, const GElf_Sym *, const char *);
uintptr_t get_return_address(uintptr_t *);
int get_arguments(long *argp);
uintptr_t previous_fp(uintptr_t, uintptr_t *);
int lwp_stack_traps(void *cd, const lwpstatus_t *Lsp);
int thr_stack_traps(const td_thrhandle_t *Thp, void *cd);
struct bkpt *create_bkpt(uintptr_t, int, int);
void set_deferred_breakpoints(void);

#define	DEF_MAXCALL	16	/* initial value of Stk->maxcall */

/* sentinel used as a "return address" when the real one is unavailable */
#define	FAULT_ADDR	((uintptr_t)(0-8))

#define	HASHSZ	2048	/* number of breakpoint hash buckets */
/* map a text address to a bucket index; 0x7ff == HASHSZ - 1 */
#define	bpt_hash(addr)	((((addr) >> 13) ^ ((addr) >> 2)) & 0x7ff)
82 
83 static void
84 setup_thread_agent(void)
85 {
86 	struct bkpt *Bp;
87 	td_notify_t notify;
88 	td_thr_events_t events;
89 
90 	if (Thr_agent != NULL)	/* only once */
91 		return;
92 	if (td_init() != TD_OK || td_ta_new(Proc, &Thr_agent) != TD_OK)
93 		Thr_agent = NULL;
94 	else {
95 		td_event_emptyset(&events);
96 		td_event_addset(&events, TD_CREATE);
97 		if (td_ta_event_addr(Thr_agent, TD_CREATE, &notify) == TD_OK &&
98 		    notify.type == NOTIFY_BPT &&
99 		    td_ta_set_event(Thr_agent, &events) == TD_OK &&
100 		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
101 			Bp->flags |= BPT_TD_CREATE;
102 	}
103 }
104 
105 /*
106  * Delete all breakpoints in the range [base .. base+size)
107  * from the breakpoint hash table.
108  */
109 static void
110 delete_breakpoints(uintptr_t base, size_t size)
111 {
112 	struct bkpt **Bpp;
113 	struct bkpt *Bp;
114 	int i;
115 
116 	if (bpt_hashtable == NULL)
117 		return;
118 	for (i = 0; i < HASHSZ; i++) {
119 		Bpp = &bpt_hashtable[i];
120 		while ((Bp = *Bpp) != NULL) {
121 			if (Bp->addr < base || Bp->addr >= base + size) {
122 				Bpp = &Bp->next;
123 				continue;
124 			}
125 			*Bpp = Bp->next;
126 			if (Bp->sym_name)
127 				free(Bp->sym_name);
128 			free(Bp);
129 		}
130 	}
131 }
132 
/*
 * Establishment of breakpoints on traced library functions.
 * Safe to call repeatedly; one-time setup steps are guarded by
 * NULL checks on the corresponding agent pointers.
 */
void
establish_breakpoints(void)
{
	/* no function-call tracing patterns were given; nothing to do */
	if (Dynpat == NULL)
		return;

	/* allocate the breakpoint hash table */
	if (bpt_hashtable == NULL) {
		bpt_hashtable = my_malloc(HASHSZ * sizeof (struct bkpt *),
		    NULL);
		(void) memset(bpt_hashtable, 0,
		    HASHSZ * sizeof (struct bkpt *));
	}

	/*
	 * Set special rtld_db event breakpoints, first time only.
	 */
	if (Rdb_agent == NULL &&
	    (Rdb_agent = Prd_agent(Proc)) != NULL) {
		rd_notify_t notify;
		struct bkpt *Bp;

		(void) rd_event_enable(Rdb_agent, 1);
		/* tag each event breakpoint with the kind of event it is */
		if (rd_event_addr(Rdb_agent, RD_PREINIT, &notify) == RD_OK &&
		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
			Bp->flags |= BPT_PREINIT;
		if (rd_event_addr(Rdb_agent, RD_POSTINIT, &notify) == RD_OK &&
		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
			Bp->flags |= BPT_POSTINIT;
		if (rd_event_addr(Rdb_agent, RD_DLACTIVITY, &notify) == RD_OK &&
		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
			Bp->flags |= BPT_DLACTIVITY;
	}

	/*
	 * Set special thread event breakpoint, first time libc is seen.
	 */
	if (Thr_agent == NULL)
		setup_thread_agent();

	/*
	 * Tell libproc to update its mappings.
	 */
	Pupdate_maps(Proc);

	/*
	 * If rtld_db told us a library was being deleted,
	 * first mark all of the dynlibs as not present, then
	 * iterate over the shared objects, marking only those
	 * present that really are present, and finally delete
	 * all of the not-present dynlibs.
	 */
	if (delete_library) {
		struct dynlib **Dpp;
		struct dynlib *Dp;

		for (Dp = Dynlib; Dp != NULL; Dp = Dp->next)
			Dp->present = FALSE;
		(void) Pobject_iter(Proc, object_present, NULL);
		Dpp = &Dynlib;
		while ((Dp = *Dpp) != NULL) {
			if (Dp->present) {
				Dpp = &Dp->next;
				continue;
			}
			/* unmapped: remove its breakpoints and unlink it */
			delete_breakpoints(Dp->base, Dp->size);
			*Dpp = Dp->next;
			free(Dp->lib_name);
			free(Dp->match_name);
			free(Dp->prt_name);
			free(Dp);
		}
		delete_library = FALSE;
	}

	/*
	 * Iterate over the shared objects, creating breakpoints.
	 */
	(void) Pobject_iter(Proc, object_iter, NULL);

	/*
	 * Now actually set all the breakpoints we just created.
	 */
	set_deferred_breakpoints();
}
221 
222 /*
223  * Initial establishment of stacks in a newly-grabbed process.
224  * establish_breakpoints() has already been called.
225  */
226 void
227 establish_stacks(void)
228 {
229 	const pstatus_t *Psp = Pstatus(Proc);
230 	char mapfile[64];
231 	int mapfd;
232 	struct stat statb;
233 	prmap_t *Pmap = NULL;
234 	int nmap = 0;
235 	ph_map_t ph_map;
236 
237 	(void) sprintf(mapfile, "/proc/%d/rmap", (int)Psp->pr_pid);
238 	if ((mapfd = open(mapfile, O_RDONLY)) < 0 ||
239 	    fstat(mapfd, &statb) != 0 ||
240 	    statb.st_size < sizeof (prmap_t) ||
241 	    (Pmap = my_malloc(statb.st_size, NULL)) == NULL ||
242 	    (nmap = pread(mapfd, Pmap, statb.st_size, 0L)) <= 0 ||
243 	    (nmap /= sizeof (prmap_t)) == 0) {
244 		if (Pmap != NULL)
245 			free(Pmap);
246 		Pmap = NULL;
247 		nmap = 0;
248 	}
249 	if (mapfd >= 0)
250 		(void) close(mapfd);
251 
252 	/*
253 	 * Iterate over lwps, establishing stacks.
254 	 */
255 	ph_map.pmap = Pmap;
256 	ph_map.nmap = nmap;
257 	(void) Plwp_iter(Proc, lwp_stack_traps, &ph_map);
258 	if (Pmap != NULL)
259 		free(Pmap);
260 
261 	if (Thr_agent == NULL)
262 		return;
263 
264 	/*
265 	 * Iterate over unbound threads, establishing stacks.
266 	 */
267 	(void) td_ta_thr_iter(Thr_agent, thr_stack_traps, NULL,
268 	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
269 	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
270 }
271 
272 void
273 do_symbol_iter(const char *object_name, struct dynpat *Dyp)
274 {
275 	if (*Dyp->Dp->prt_name == '\0')
276 		object_name = PR_OBJ_EXEC;
277 
278 	/*
279 	 * Always search the dynamic symbol table.
280 	 */
281 	(void) Psymbol_iter(Proc, object_name,
282 	    PR_DYNSYM, BIND_WEAK|BIND_GLOBAL|TYPE_FUNC,
283 	    symbol_iter, Dyp);
284 
285 	/*
286 	 * Search the static symbol table if this is the
287 	 * executable file or if we are being asked to
288 	 * report internal calls within the library.
289 	 */
290 	if (object_name == PR_OBJ_EXEC || Dyp->internal)
291 		(void) Psymbol_iter(Proc, object_name,
292 		    PR_SYMTAB, BIND_ANY|TYPE_FUNC,
293 		    symbol_iter, Dyp);
294 }
295 
/* ARGSUSED */
/*
 * Pobject_iter() callback: record each executable text mapping as a
 * struct dynlib and set breakpoints on the symbols matching the
 * user's patterns (Dynpat).  Returns non-zero to stop the iteration
 * once an interrupt or SIGUSR1 has been noted.
 */
int
object_iter(void *cd, const prmap_t *pmp, const char *object_name)
{
	char name[100];
	struct dynpat *Dyp;
	struct dynlib *Dp;
	const char *str;
	char *s;
	int i;

	/* only non-writeable executable text is of interest */
	if ((pmp->pr_mflags & MA_WRITE) || !(pmp->pr_mflags & MA_EXEC))
		return (0);

	/*
	 * Set special thread event breakpoint, first time libc is seen.
	 */
	if (Thr_agent == NULL && strstr(object_name, "/libc.so.") != NULL)
		setup_thread_agent();

	/* look for an existing dynlib entry for this object */
	for (Dp = Dynlib; Dp != NULL; Dp = Dp->next)
		if (strcmp(object_name, Dp->lib_name) == 0 ||
		    (strcmp(Dp->lib_name, "a.out") == 0 &&
		    strcmp(pmp->pr_mapname, "a.out") == 0))
			break;

	if (Dp == NULL) {
		/* first sighting: create and link a new dynlib entry */
		Dp = my_malloc(sizeof (struct dynlib), NULL);
		(void) memset(Dp, 0, sizeof (struct dynlib));
		if (strcmp(pmp->pr_mapname, "a.out") == 0) {
			Dp->lib_name = strdup(pmp->pr_mapname);
			Dp->match_name = strdup(pmp->pr_mapname);
			Dp->prt_name = strdup("");
		} else {
			/*
			 * The match name is the basename with any ".so"
			 * suffix (and beyond) removed; the print name is
			 * the match name with ':' appended.  name[] keeps
			 * one spare byte for that ':'.
			 */
			Dp->lib_name = strdup(object_name);
			if ((str = strrchr(object_name, '/')) != NULL)
				str++;
			else
				str = object_name;
			(void) strncpy(name, str, sizeof (name) - 2);
			name[sizeof (name) - 2] = '\0';
			if ((s = strstr(name, ".so")) != NULL)
				*s = '\0';
			Dp->match_name = strdup(name);
			(void) strcat(name, ":");
			Dp->prt_name = strdup(name);
		}
		Dp->next = Dynlib;
		Dynlib = Dp;
	}

	/*
	 * Skip objects already processed; while the dynamic linker is
	 * in an inconsistent state, rebuild only ld.so itself.
	 */
	if (Dp->built ||
	    (not_consist && strcmp(Dp->prt_name, "ld:") != 0))	/* kludge */
		return (0);

	if (hflag && not_consist)
		(void) fprintf(stderr, "not_consist is TRUE, building %s\n",
		    Dp->lib_name);

	Dp->base = pmp->pr_vaddr;
	Dp->size = pmp->pr_size;

	/*
	 * For every dynlib pattern that matches this library's name,
	 * iterate through all of the library's symbols looking for
	 * matching symbol name patterns.
	 */
	for (Dyp = Dynpat; Dyp != NULL; Dyp = Dyp->next) {
		if (interrupt|sigusr1)
			break;
		for (i = 0; i < Dyp->nlibpat; i++) {
			if (interrupt|sigusr1)
				break;
			if (fnmatch(Dyp->libpat[i], Dp->match_name, 0) != 0)
				continue;	/* no match */

			/*
			 * Require an exact match for the executable (a.out)
			 * and for the dynamic linker (ld.so.1).
			 */
			if ((strcmp(Dp->match_name, "a.out") == 0 ||
			    strcmp(Dp->match_name, "ld") == 0) &&
			    strcmp(Dyp->libpat[i], Dp->match_name) != 0)
				continue;

			/*
			 * Set Dyp->Dp to Dp so symbol_iter() can use it.
			 */
			Dyp->Dp = Dp;
			do_symbol_iter(object_name, Dyp);
			Dyp->Dp = NULL;
		}
	}

	Dp->built = TRUE;
	return (interrupt | sigusr1);
}
393 
394 /* ARGSUSED */
395 int
396 object_present(void *cd, const prmap_t *pmp, const char *object_name)
397 {
398 	struct dynlib *Dp;
399 
400 	for (Dp = Dynlib; Dp != NULL; Dp = Dp->next) {
401 		if (Dp->base == pmp->pr_vaddr)
402 			Dp->present = TRUE;
403 	}
404 
405 	return (0);
406 }
407 
408 /*
409  * Search for an existing breakpoint at the 'pc' location.
410  */
411 struct bkpt *
412 get_bkpt(uintptr_t pc)
413 {
414 	struct bkpt *Bp;
415 
416 	for (Bp = bpt_hashtable[bpt_hash(pc)]; Bp != NULL; Bp = Bp->next)
417 		if (pc == Bp->addr)
418 			break;
419 
420 	return (Bp);
421 }
422 
423 /*
424  * Create a breakpoint at 'pc', if one is not there already.
425  * 'ret' is true when creating a function return breakpoint, in which case
426  * fail and return NULL if the breakpoint would be created in writeable data.
427  * If 'set' it true, set the breakpoint in the process now.
428  */
429 struct bkpt *
430 create_bkpt(uintptr_t pc, int ret, int set)
431 {
432 	uint_t hix = bpt_hash(pc);
433 	struct bkpt *Bp;
434 	const prmap_t *pmp;
435 
436 	for (Bp = bpt_hashtable[hix]; Bp != NULL; Bp = Bp->next)
437 		if (pc == Bp->addr)
438 			return (Bp);
439 
440 	/*
441 	 * Don't set return breakpoints on writeable data
442 	 * or on any space other than executable text.
443 	 * Don't set breakpoints in the child of a vfork()
444 	 * because that would modify the parent's address space.
445 	 */
446 	if (is_vfork_child ||
447 	    (ret &&
448 	    ((pmp = Paddr_to_text_map(Proc, pc)) == NULL ||
449 	    !(pmp->pr_mflags & MA_EXEC) ||
450 	    (pmp->pr_mflags & MA_WRITE))))
451 		return (NULL);
452 
453 	/* create a new unnamed breakpoint */
454 	Bp = my_malloc(sizeof (struct bkpt), NULL);
455 	Bp->sym_name = NULL;
456 	Bp->dyn = NULL;
457 	Bp->addr = pc;
458 	Bp->instr = 0;
459 	Bp->flags = 0;
460 	if (set && Psetbkpt(Proc, Bp->addr, &Bp->instr) == 0)
461 		Bp->flags |= BPT_ACTIVE;
462 	Bp->next = bpt_hashtable[hix];
463 	bpt_hashtable[hix] = Bp;
464 
465 	return (Bp);
466 }
467 
468 /*
469  * Set all breakpoints that haven't been set yet.
470  * Deactivate all breakpoints from modules that are not present any more.
471  */
472 void
473 set_deferred_breakpoints(void)
474 {
475 	struct bkpt *Bp;
476 	int i;
477 
478 	if (is_vfork_child)
479 		return;
480 
481 	for (i = 0; i < HASHSZ; i++) {
482 		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
483 			if (!(Bp->flags & BPT_ACTIVE)) {
484 				if (!(Bp->flags & BPT_EXCLUDE) &&
485 				    Psetbkpt(Proc, Bp->addr, &Bp->instr) == 0)
486 					Bp->flags |= BPT_ACTIVE;
487 			} else if (Paddr_to_text_map(Proc, Bp->addr) == NULL) {
488 				Bp->flags &= ~BPT_ACTIVE;
489 			}
490 		}
491 	}
492 }
493 
/*
 * Psymbol_iter() callback: create a breakpoint for each function
 * symbol whose name matches one of the symbol patterns in the
 * dynpat passed via 'cd' (Dyp->Dp identifies the owning library).
 * Returns non-zero to stop the iteration on interrupt/SIGUSR1.
 */
int
symbol_iter(void *cd, const GElf_Sym *sym, const char *sym_name)
{
	struct dynpat *Dyp = cd;
	struct dynlib *Dp = Dyp->Dp;
	uintptr_t pc = sym->st_value;
	struct bkpt *Bp;
	int i;

	/* ignore any undefined symbols */
	if (sym->st_shndx == SHN_UNDEF)
		return (0);

	/*
	 * Arbitrarily omit "_start" from the executable.
	 * (Avoid indentation before main().)
	 */
	if (*Dp->prt_name == '\0' && strcmp(sym_name, "_start") == 0)
		return (0);

	/*
	 * Arbitrarily omit "_rt_boot" from the dynamic linker.
	 * (Avoid indentation before main().)
	 */
	if (strcmp(Dp->match_name, "ld") == 0 &&
	    strcmp(sym_name, "_rt_boot") == 0)
		return (0);

	/*
	 * Arbitrarily omit any symbols whose name starts with '.'.
	 * Apparently putting a breakpoint on .umul causes a
	 * fatal error in libthread (%y is not restored correctly
	 * when a single step is taken).  Looks like a /proc bug.
	 */
	if (*sym_name == '.')
		return (0);

	/*
	 * For each pattern in the array of symbol patterns,
	 * if the pattern matches the symbol name, then
	 * create a breakpoint at the function in question.
	 */
	for (i = 0; i < Dyp->nsympat; i++) {
		if (interrupt|sigusr1)
			break;
		if (fnmatch(Dyp->sympat[i], sym_name, 0) != 0)
			continue;

		if ((Bp = create_bkpt(pc, 0, 0)) == NULL)	/* can't fail */
			return (0);

		/*
		 * New breakpoints receive a name now.
		 * For existing breakpoints, prefer the subset name if possible,
		 * else prefer the shorter name.
		 */
		if (Bp->sym_name == NULL) {
			Bp->sym_name = strdup(sym_name);
		} else if (strstr(Bp->sym_name, sym_name) != NULL ||
		    strlen(Bp->sym_name) > strlen(sym_name)) {
			free(Bp->sym_name);
			Bp->sym_name = strdup(sym_name);
		}
		Bp->dyn = Dp;
		Bp->flags |= Dyp->flag;
		if (Dyp->exclude)
			Bp->flags |= BPT_EXCLUDE;
		else if (Dyp->internal || *Dp->prt_name == '\0')
			Bp->flags |= BPT_INTERNAL;
		/* first matching pattern wins; stop searching */
		return (0);
	}

	return (interrupt | sigusr1);
}
568 
/*
 * For debugging only (-h flag) ----
 * Report breakpoint hash table occupancy statistics and the set of
 * stacks truss has discovered.  Runs at most once per invocation
 * (it clears hflag so a later call is a no-op).
 */
void
report_htable_stats(void)
{
	const pstatus_t *Psp = Pstatus(Proc);
	struct callstack *Stk;
	struct bkpt *Bp;
	uint_t Min = 1000000;
	uint_t Max = 0;
	uint_t Avg = 0;
	uint_t Total = 0;
	uint_t i, j;
	uint_t bucket[HASHSZ];

	if (Dynpat == NULL || !hflag)
		return;

	hflag = FALSE;
	(void) memset(bucket, 0, sizeof (bucket));

	/* count chain lengths; bucket[j] counts chains of length j */
	for (i = 0; i < HASHSZ; i++) {
		j = 0;
		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next)
			j++;
		if (j < Min)
			Min = j;
		if (j > Max)
			Max = j;
		if (j < HASHSZ)
			bucket[j]++;
		Total += j;
	}
	/* rounded average chain length */
	Avg = (Total + HASHSZ / 2) / HASHSZ;
	(void) fprintf(stderr, "truss hash table statistics --------\n");
	(void) fprintf(stderr, "    Total = %u\n", Total);
	(void) fprintf(stderr, "      Min = %u\n", Min);
	(void) fprintf(stderr, "      Max = %u\n", Max);
	(void) fprintf(stderr, "      Avg = %u\n", Avg);
	for (i = 0; i < HASHSZ; i++)
		if (bucket[i])
			(void) fprintf(stderr, "    %3u buckets of size %d\n",
			    bucket[i], i);

	(void) fprintf(stderr, "truss-detected stacks --------\n");
	for (Stk = callstack; Stk != NULL; Stk = Stk->next) {
		(void) fprintf(stderr,
		    "    base = 0x%.8lx  end = 0x%.8lx  size = %ld\n",
		    (ulong_t)Stk->stkbase,
		    (ulong_t)Stk->stkend,
		    (ulong_t)(Stk->stkend - Stk->stkbase));
	}
	(void) fprintf(stderr, "primary unix stack --------\n");
	(void) fprintf(stderr,
	    "    base = 0x%.8lx  end = 0x%.8lx  size = %ld\n",
	    (ulong_t)Psp->pr_stkbase,
	    (ulong_t)(Psp->pr_stkbase + Psp->pr_stksize),
	    (ulong_t)Psp->pr_stksize);
	(void) fprintf(stderr, "nthr_create = %u\n", nthr_create);
}
628 
/*
 * Record the stack containing this lwp's stack pointer, if it is not
 * already on the callstack list.  Candidate locations are tried in
 * order: the primary stack, the signal alternate stack, the thread
 * stack reported by libthread_db, and finally the raw address map
 * (Pmap/nmap) read from /proc/<pid>/rmap.  If none matches, the
 * provisionally-allocated callstack entry is discarded.
 */
void
make_lwp_stack(const lwpstatus_t *Lsp, prmap_t *Pmap, int nmap)
{
	const pstatus_t *Psp = Pstatus(Proc);
	uintptr_t sp = Lsp->pr_reg[R_SP];
	id_t lwpid = Lsp->pr_lwpid;
	struct callstack *Stk;
	td_thrhandle_t th;
	td_thrinfo_t thrinfo;

	/* a 32-bit victim's %sp must be truncated to 32 bits */
	if (data_model != PR_MODEL_LP64)
		sp = (uint32_t)sp;

	/* check to see if we already have this stack */
	if (sp == 0)
		return;
	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
		if (sp >= Stk->stkbase && sp < Stk->stkend)
			return;

	/* tentatively allocate and link a new callstack entry */
	Stk = my_malloc(sizeof (struct callstack), NULL);
	Stk->next = callstack;
	callstack = Stk;
	nstack++;
	Stk->tref = 0;
	Stk->tid = 0;
	Stk->nthr_create = 0;
	Stk->ncall = 0;
	Stk->maxcall = DEF_MAXCALL;
	Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack), NULL);

	/* primary stack */
	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
		Stk->stkbase = Psp->pr_stkbase;
		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
		return;
	}

	/* alternate stack */
	if ((Lsp->pr_altstack.ss_flags & SS_ONSTACK) &&
	    sp >= (uintptr_t)Lsp->pr_altstack.ss_sp &&
	    sp < (uintptr_t)Lsp->pr_altstack.ss_sp
	    + Lsp->pr_altstack.ss_size) {
		Stk->stkbase = (uintptr_t)Lsp->pr_altstack.ss_sp;
		Stk->stkend = Stk->stkbase + Lsp->pr_altstack.ss_size;
		return;
	}

	/* thread stacks? */
	if (Thr_agent != NULL &&
	    td_ta_map_lwp2thr(Thr_agent, lwpid, &th) == TD_OK &&
	    td_thr_get_info(&th, &thrinfo) == TD_OK &&
	    sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
	    sp < (uintptr_t)thrinfo.ti_stkbase) {
		/* The bloody fools got this backwards! */
		/* (ti_stkbase is the high end of the stack) */
		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
		return;
	}

	/* last chance -- try the raw memory map */
	for (; nmap; nmap--, Pmap++) {
		if (sp >= Pmap->pr_vaddr &&
		    sp < Pmap->pr_vaddr + Pmap->pr_size) {
			Stk->stkbase = Pmap->pr_vaddr;
			Stk->stkend = Pmap->pr_vaddr + Pmap->pr_size;
			return;
		}
	}

	/* nothing matched: unlink and free the tentative entry */
	callstack = Stk->next;
	nstack--;
	free(Stk->stack);
	free(Stk);
}
704 
/*
 * Record the stack containing this (possibly unbound) thread's stack
 * pointer, if it is not already on the callstack list.  Like
 * make_lwp_stack(), but driven by a thread handle and register set
 * rather than an lwpstatus, and without the alternate-stack and raw
 * address-map fallbacks.
 */
void
make_thr_stack(const td_thrhandle_t *Thp, prgregset_t reg)
{
	const pstatus_t *Psp = Pstatus(Proc);
	td_thrinfo_t thrinfo;
	uintptr_t sp = reg[R_SP];
	struct callstack *Stk;

	/* a 32-bit victim's %sp must be truncated to 32 bits */
	if (data_model != PR_MODEL_LP64)
		sp = (uint32_t)sp;

	/* check to see if we already have this stack */
	if (sp == 0)
		return;
	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
		if (sp >= Stk->stkbase && sp < Stk->stkend)
			return;

	/* tentatively allocate and link a new callstack entry */
	Stk = my_malloc(sizeof (struct callstack), NULL);
	Stk->next = callstack;
	callstack = Stk;
	nstack++;
	Stk->tref = 0;
	Stk->tid = 0;
	Stk->nthr_create = 0;
	Stk->ncall = 0;
	Stk->maxcall = DEF_MAXCALL;
	Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack), NULL);

	/* primary stack */
	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
		Stk->stkbase = Psp->pr_stkbase;
		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
		return;
	}

	if (td_thr_get_info(Thp, &thrinfo) == TD_OK &&
	    sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
	    sp < (uintptr_t)thrinfo.ti_stkbase) {
		/* The bloody fools got this backwards! */
		/* (ti_stkbase is the high end of the stack) */
		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
		return;
	}

	/* nothing matched: unlink and free the tentative entry */
	callstack = Stk->next;
	nstack--;
	free(Stk->stack);
	free(Stk);
}
755 
756 struct callstack *
757 find_lwp_stack(uintptr_t sp)
758 {
759 	const pstatus_t *Psp = Pstatus(Proc);
760 	char mapfile[64];
761 	int mapfd;
762 	struct stat statb;
763 	prmap_t *Pmap = NULL;
764 	prmap_t *pmap = NULL;
765 	int nmap = 0;
766 	struct callstack *Stk = NULL;
767 
768 	/*
769 	 * Get the address space map.
770 	 */
771 	(void) sprintf(mapfile, "/proc/%d/rmap", (int)Psp->pr_pid);
772 	if ((mapfd = open(mapfile, O_RDONLY)) < 0 ||
773 	    fstat(mapfd, &statb) != 0 ||
774 	    statb.st_size < sizeof (prmap_t) ||
775 	    (Pmap = my_malloc(statb.st_size, NULL)) == NULL ||
776 	    (nmap = pread(mapfd, Pmap, statb.st_size, 0L)) <= 0 ||
777 	    (nmap /= sizeof (prmap_t)) == 0) {
778 		if (Pmap != NULL)
779 			free(Pmap);
780 		if (mapfd >= 0)
781 			(void) close(mapfd);
782 		return (NULL);
783 	}
784 	(void) close(mapfd);
785 
786 	for (pmap = Pmap; nmap--; pmap++) {
787 		if (sp >= pmap->pr_vaddr &&
788 		    sp < pmap->pr_vaddr + pmap->pr_size) {
789 			Stk = my_malloc(sizeof (struct callstack), NULL);
790 			Stk->next = callstack;
791 			callstack = Stk;
792 			nstack++;
793 			Stk->stkbase = pmap->pr_vaddr;
794 			Stk->stkend = pmap->pr_vaddr + pmap->pr_size;
795 			Stk->tref = 0;
796 			Stk->tid = 0;
797 			Stk->nthr_create = 0;
798 			Stk->ncall = 0;
799 			Stk->maxcall = DEF_MAXCALL;
800 			Stk->stack = my_malloc(
801 			    DEF_MAXCALL * sizeof (*Stk->stack), NULL);
802 			break;
803 		}
804 	}
805 
806 	free(Pmap);
807 	return (Stk);
808 }
809 
/*
 * Identify and record the stack containing 'sp' for the current lwp.
 * Tries the primary stack, then the signal alternate stack, then
 * (when a thread agent exists) the libthread_db-reported thread
 * stack; with no thread agent it falls back to the raw address map
 * via find_lwp_stack().  Returns the new callstack entry, or NULL.
 */
struct callstack *
find_stack(uintptr_t sp)
{
	const pstatus_t *Psp = Pstatus(Proc);
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;
	id_t lwpid = Lsp->pr_lwpid;
	/* per-ISA thread-pointer register, used only for diagnostics */
#if defined(__sparc)
	prgreg_t tref = Lsp->pr_reg[R_G7];
#elif defined(__amd64)
	/*
	 * NOTE(review): unlike get_tid(), this does not select REG_GS
	 * for a 32-bit victim on amd64 -- confirm whether intentional.
	 */
	prgreg_t tref = Lsp->pr_reg[REG_FS];
#elif defined(__i386)
	prgreg_t tref = Lsp->pr_reg[GS];
#endif
	struct callstack *Stk = NULL;
	td_thrhandle_t th;
	td_thrinfo_t thrinfo;
	td_err_e error;

	/* primary stack */
	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
		Stk = my_malloc(sizeof (struct callstack), NULL);
		Stk->next = callstack;
		callstack = Stk;
		nstack++;
		Stk->stkbase = Psp->pr_stkbase;
		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
		Stk->ncall = 0;
		Stk->maxcall = DEF_MAXCALL;
		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
		    NULL);
		return (Stk);
	}

	/* alternate stack */
	if ((Lsp->pr_altstack.ss_flags & SS_ONSTACK) &&
	    sp >= (uintptr_t)Lsp->pr_altstack.ss_sp &&
	    sp < (uintptr_t)Lsp->pr_altstack.ss_sp
	    + Lsp->pr_altstack.ss_size) {
		Stk = my_malloc(sizeof (struct callstack), NULL);
		Stk->next = callstack;
		callstack = Stk;
		nstack++;
		Stk->stkbase = (uintptr_t)Lsp->pr_altstack.ss_sp;
		Stk->stkend = Stk->stkbase + Lsp->pr_altstack.ss_size;
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
		Stk->ncall = 0;
		Stk->maxcall = DEF_MAXCALL;
		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
		    NULL);
		return (Stk);
	}

	if (Thr_agent == NULL)
		return (find_lwp_stack(sp));

	/* thread stacks? */
	if ((error = td_ta_map_lwp2thr(Thr_agent, lwpid, &th)) != TD_OK) {
		if (hflag)
			(void) fprintf(stderr,
			    "cannot get thread handle for "
			    "lwp#%d, error=%d, tref=0x%.8lx\n",
			    (int)lwpid, error, (long)tref);
		return (NULL);
	}

	if ((error = td_thr_get_info(&th, &thrinfo)) != TD_OK) {
		if (hflag)
			(void) fprintf(stderr,
			    "cannot get thread info for "
			    "lwp#%d, error=%d, tref=0x%.8lx\n",
			    (int)lwpid, error, (long)tref);
		return (NULL);
	}

	if (sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
	    sp < (uintptr_t)thrinfo.ti_stkbase) {
		Stk = my_malloc(sizeof (struct callstack), NULL);
		Stk->next = callstack;
		callstack = Stk;
		nstack++;
		/* The bloody fools got this backwards! */
		/* (ti_stkbase is the high end of the stack) */
		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
		Stk->tref = tref;
		Stk->tid = thrinfo.ti_tid;
		Stk->nthr_create = nthr_create;
		Stk->ncall = 0;
		Stk->maxcall = DEF_MAXCALL;
		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
		    NULL);
		return (Stk);
	}

	/* stack bounds failure -- complain bitterly */
	if (hflag) {
		(void) fprintf(stderr,
		    "sp not within thread stack: "
		    "sp=0x%.8lx stkbase=0x%.8lx stkend=0x%.8lx\n",
		    (ulong_t)sp,
		    /* The bloody fools got this backwards! */
		    (ulong_t)thrinfo.ti_stkbase - thrinfo.ti_stksize,
		    (ulong_t)thrinfo.ti_stkbase);
	}

	return (NULL);
}
922 
/*
 * Fill in the thread identity fields (tref, tid, nthr_create) of the
 * given callstack from libthread_db.  On any failure the identity
 * fields are zeroed; with no thread agent they are simply cleared.
 */
void
get_tid(struct callstack *Stk)
{
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;
	id_t lwpid = Lsp->pr_lwpid;
	/* per-ISA thread-pointer register, cached as Stk->tref */
#if defined(__sparc)
	prgreg_t tref = Lsp->pr_reg[R_G7];
#elif defined(__amd64)
	prgreg_t tref = (data_model == PR_MODEL_LP64) ?
	    Lsp->pr_reg[REG_FS] : Lsp->pr_reg[REG_GS];
#elif defined(__i386)
	prgreg_t tref = Lsp->pr_reg[GS];
#endif
	td_thrhandle_t th;
	td_thrinfo_t thrinfo;
	td_err_e error;

	if (Thr_agent == NULL) {
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
		return;
	}

	/*
	 * Shortcut here --
	 * If we have a matching tref and no new threads have
	 * been created since the last time we encountered this
	 * stack, then we don't have to go through the overhead
	 * of calling td_ta_map_lwp2thr() to get the thread-id.
	 */
	if (tref == Stk->tref && Stk->nthr_create == nthr_create)
		return;

	if ((error = td_ta_map_lwp2thr(Thr_agent, lwpid, &th)) != TD_OK) {
		if (hflag)
			(void) fprintf(stderr,
			    "cannot get thread handle for "
			    "lwp#%d, error=%d, tref=0x%.8lx\n",
			    (int)lwpid, error, (long)tref);
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
	} else if ((error = td_thr_get_info(&th, &thrinfo)) != TD_OK) {
		if (hflag)
			(void) fprintf(stderr,
			    "cannot get thread info for "
			    "lwp#%d, error=%d, tref=0x%.8lx\n",
			    (int)lwpid, error, (long)tref);
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
	} else {
		Stk->tref = tref;
		Stk->tid = thrinfo.ti_tid;
		Stk->nthr_create = nthr_create;
	}
}
982 
/*
 * Return the callstack entry containing 'sp', creating one if needed
 * and 'makeid' is set.  'fp' is the frame pointer, used to walk back
 * through frames when 'sp' lies outside all known stacks (the stack
 * may simply have grown).  Returns NULL if 'sp' is unreadable or no
 * stack can be identified.  Guarantees room for at least one more
 * entry in Stk->stack on success.
 */
struct callstack *
callstack_info(uintptr_t sp, uintptr_t fp, int makeid)
{
	struct callstack *Stk;
	uintptr_t trash;

	/* reject a null or unreadable stack pointer */
	if (sp == 0 ||
	    Pread(Proc, &trash, sizeof (trash), sp) != sizeof (trash))
		return (NULL);

	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
		if (sp >= Stk->stkbase && sp < Stk->stkend)
			break;

	/*
	 * If we didn't find the stack, do it the hard way.
	 */
	if (Stk == NULL) {
		uintptr_t stkbase = sp;
		uintptr_t stkend;
		uint_t minsize;

		/* minimum size of one stack frame, per ISA/data model */
#if defined(i386) || defined(__amd64)
#ifdef _LP64
		if (data_model == PR_MODEL_LP64)
			minsize = 2 * sizeof (uintptr_t);	/* fp + pc */
		else
#endif
			minsize = 2 * sizeof (uint32_t);
#else
#ifdef _LP64
		if (data_model != PR_MODEL_LP64)
			minsize = SA32(MINFRAME32);
		else
			minsize = SA64(MINFRAME64);
#else
		minsize = SA(MINFRAME);
#endif
#endif	/* i386 */
		stkend = sp + minsize;

		/*
		 * Walk back through the frame pointers looking for a
		 * frame that lies within (or whose end overlaps) a
		 * known stack; if found, the stack has grown down to
		 * the current sp.
		 */
		while (Stk == NULL && fp != 0 && fp >= sp) {
			stkend = fp + minsize;
			for (Stk = callstack; Stk != NULL; Stk = Stk->next)
				if ((fp >= Stk->stkbase && fp < Stk->stkend) ||
				    (stkend > Stk->stkbase &&
				    stkend <= Stk->stkend))
					break;
			if (Stk == NULL)
				fp = previous_fp(fp, NULL);
		}

		if (Stk != NULL)	/* the stack grew */
			Stk->stkbase = stkbase;
	}

	if (Stk == NULL && makeid)	/* new stack */
		Stk = find_stack(sp);

	if (Stk == NULL)
		return (NULL);

	/*
	 * Ensure that there is room for at least one more entry.
	 */
	if (Stk->ncall == Stk->maxcall) {
		Stk->maxcall *= 2;
		Stk->stack = my_realloc(Stk->stack,
		    Stk->maxcall * sizeof (*Stk->stack), NULL);
	}

	if (makeid)
		get_tid(Stk);

	return (Stk);
}
1059 
1060 /*
1061  * Reset the breakpoint information (called on successful exec()).
1062  */
1063 void
1064 reset_breakpoints(void)
1065 {
1066 	struct dynlib *Dp;
1067 	struct bkpt *Bp;
1068 	struct callstack *Stk;
1069 	int i;
1070 
1071 	if (Dynpat == NULL)
1072 		return;
1073 
1074 	/* destroy all previous dynamic library information */
1075 	while ((Dp = Dynlib) != NULL) {
1076 		Dynlib = Dp->next;
1077 		free(Dp->lib_name);
1078 		free(Dp->match_name);
1079 		free(Dp->prt_name);
1080 		free(Dp);
1081 	}
1082 
1083 	/* destroy all previous breakpoint trap information */
1084 	if (bpt_hashtable != NULL) {
1085 		for (i = 0; i < HASHSZ; i++) {
1086 			while ((Bp = bpt_hashtable[i]) != NULL) {
1087 				bpt_hashtable[i] = Bp->next;
1088 				if (Bp->sym_name)
1089 					free(Bp->sym_name);
1090 				free(Bp);
1091 			}
1092 		}
1093 	}
1094 
1095 	/* destroy all the callstack information */
1096 	while ((Stk = callstack) != NULL) {
1097 		callstack = Stk->next;
1098 		free(Stk->stack);
1099 		free(Stk);
1100 	}
1101 
1102 	/* we are not a multi-threaded process anymore */
1103 	if (Thr_agent != NULL)
1104 		(void) td_ta_delete(Thr_agent);
1105 	Thr_agent = NULL;
1106 
1107 	/* tell libproc to clear out its mapping information */
1108 	Preset_maps(Proc);
1109 	Rdb_agent = NULL;
1110 
1111 	/* Reestablish the symbols from the executable */
1112 	(void) establish_breakpoints();
1113 }
1114 
1115 /*
1116  * Clear breakpoints from the process (called before Prelease()).
1117  * Don't actually destroy the breakpoint table;
1118  * threads currently fielding breakpoints will need it.
1119  */
1120 void
1121 clear_breakpoints(void)
1122 {
1123 	struct bkpt *Bp;
1124 	int i;
1125 
1126 	if (Dynpat == NULL)
1127 		return;
1128 
1129 	/*
1130 	 * Change all breakpoint traps back to normal instructions.
1131 	 * We attempt to remove a breakpoint from every address which
1132 	 * may have ever contained a breakpoint to protect our victims.
1133 	 */
1134 	report_htable_stats();	/* report stats first */
1135 	for (i = 0; i < HASHSZ; i++) {
1136 		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
1137 			if (Bp->flags & BPT_ACTIVE)
1138 				(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
1139 			Bp->flags &= ~BPT_ACTIVE;
1140 		}
1141 	}
1142 
1143 	if (Thr_agent != NULL) {
1144 		td_thr_events_t events;
1145 
1146 		td_event_fillset(&events);
1147 		(void) td_ta_clear_event(Thr_agent, &events);
1148 		(void) td_ta_delete(Thr_agent);
1149 	}
1150 	Thr_agent = NULL;
1151 }
1152 
1153 /*
1154  * Reestablish the breakpoint traps in the process.
1155  * Called after resuming from a vfork() in the parent.
1156  */
1157 void
1158 reestablish_traps(void)
1159 {
1160 	struct bkpt *Bp;
1161 	ulong_t instr;
1162 	int i;
1163 
1164 	if (Dynpat == NULL || is_vfork_child)
1165 		return;
1166 
1167 	for (i = 0; i < HASHSZ; i++) {
1168 		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
1169 			if ((Bp->flags & BPT_ACTIVE) &&
1170 			    Psetbkpt(Proc, Bp->addr, &instr) != 0)
1171 				Bp->flags &= ~BPT_ACTIVE;
1172 		}
1173 	}
1174 }
1175 
1176 void
1177 show_function_call(private_t *pri,
1178 	struct callstack *Stk, struct dynlib *Dp, struct bkpt *Bp)
1179 {
1180 	long arg[8];
1181 	int narg;
1182 	int i;
1183 
1184 	narg = get_arguments(arg);
1185 	make_pname(pri, (Stk != NULL)? Stk->tid : 0);
1186 	putpname(pri);
1187 	timestamp(pri);
1188 	if (Stk != NULL) {
1189 		for (i = 1; i < Stk->ncall; i++) {
1190 			(void) fputc(' ', stdout);
1191 			(void) fputc(' ', stdout);
1192 		}
1193 	}
1194 	(void) printf("-> %s%s(", Dp->prt_name, Bp->sym_name);
1195 	for (i = 0; i < narg; i++) {
1196 		(void) printf("0x%lx", arg[i]);
1197 		if (i < narg-1) {
1198 			(void) fputc(',', stdout);
1199 			(void) fputc(' ', stdout);
1200 		}
1201 	}
1202 	(void) printf(")\n");
1203 	Flush();
1204 }
1205 
1206 /* ARGSUSED */
1207 void
1208 show_function_return(private_t *pri, long rval, int stret,
1209 	struct callstack *Stk, struct dynlib *Dp, struct bkpt *Bp)
1210 {
1211 	int i;
1212 
1213 	make_pname(pri, Stk->tid);
1214 	putpname(pri);
1215 	timestamp(pri);
1216 	for (i = 0; i < Stk->ncall; i++) {
1217 		(void) fputc(' ', stdout);
1218 		(void) fputc(' ', stdout);
1219 	}
1220 	(void) printf("<- %s%s() = ", Dp->prt_name, Bp->sym_name);
1221 	if (stret) {
1222 		(void) printf("struct return\n");
1223 	} else if (data_model == PR_MODEL_LP64) {
1224 		if (rval >= (64 * 1024) || -rval >= (64 * 1024))
1225 			(void) printf("0x%lx\n", rval);
1226 		else
1227 			(void) printf("%ld\n", rval);
1228 	} else {
1229 		int rval32 = (int)rval;
1230 		if (rval32 >= (64 * 1024) || -rval32 >= (64 * 1024))
1231 			(void) printf("0x%x\n", rval32);
1232 		else
1233 			(void) printf("%d\n", rval32);
1234 	}
1235 	Flush();
1236 }
1237 
1238 /*
1239  * Called to deal with function-call tracing.
1240  * Return 0 on normal success, 1 to indicate a BPT_HANG success,
1241  * and -1 on failure (not tracing functions or unknown breakpoint).
1242  */
int
function_trace(private_t *pri, int first, int clear, int dotrace)
{
	struct ps_lwphandle *Lwp = pri->Lwp;
	const lwpstatus_t *Lsp = pri->lwpstat;
	uintptr_t pc = Lsp->pr_reg[R_PC];
	uintptr_t sp = Lsp->pr_reg[R_SP];
	uintptr_t fp = Lsp->pr_reg[R_FP];
	struct bkpt *Bp;
	struct dynlib *Dp;
	struct callstack *Stk;
	ulong_t instr;
	int active;
	int rval = 0;

	if (Dynpat == NULL)	/* function call tracing is not enabled */
		return (-1);

	/* for a 32-bit victim the registers are only 32 bits wide */
	if (data_model != PR_MODEL_LP64) {
		pc = (uint32_t)pc;
		sp = (uint32_t)sp;
		fp = (uint32_t)fp;
	}

	if ((Bp = get_bkpt(pc)) == NULL) {
		if (hflag)
			(void) fprintf(stderr,
			    "function_trace(): "
			    "cannot find breakpoint for pc: 0x%.8lx\n",
			    (ulong_t)pc);
		return (-1);
	}

	/*
	 * A breakpoint planted on one of the run-time linker's event
	 * addresses: consume the rtld event message and re-establish
	 * our breakpoints when the link maps have changed.
	 */
	if ((Bp->flags & (BPT_PREINIT|BPT_POSTINIT|BPT_DLACTIVITY)) && !clear) {
		rd_event_msg_t event_msg;

		if (hflag) {
			if (Bp->flags & BPT_PREINIT)
				(void) fprintf(stderr, "function_trace(): "
				    "RD_PREINIT breakpoint\n");
			if (Bp->flags & BPT_POSTINIT)
				(void) fprintf(stderr, "function_trace(): "
				    "RD_POSTINIT breakpoint\n");
			if (Bp->flags & BPT_DLACTIVITY)
				(void) fprintf(stderr, "function_trace(): "
				    "RD_DLACTIVITY breakpoint\n");
		}
		if (rd_event_getmsg(Rdb_agent, &event_msg) == RD_OK) {
			if (event_msg.type == RD_DLACTIVITY) {
				switch (event_msg.u.state) {
				case RD_CONSISTENT:
					establish_breakpoints();
					break;
				case RD_ADD:
					not_consist = TRUE;	/* kludge */
					establish_breakpoints();
					not_consist = FALSE;
					break;
				case RD_DELETE:
					delete_library = TRUE;
					break;
				default:
					break;
				}
			}
			/* verbose reporting of the event, for debugging */
			if (hflag) {
				const char *et;
				char buf[32];

				switch (event_msg.type) {
				case RD_NONE:
					et = "RD_NONE";
					break;
				case RD_PREINIT:
					et = "RD_PREINIT";
					break;
				case RD_POSTINIT:
					et = "RD_POSTINIT";
					break;
				case RD_DLACTIVITY:
					et = "RD_DLACTIVITY";
					break;
				default:
					(void) sprintf(buf, "0x%x",
					    event_msg.type);
					et = buf;
					break;
				}
				(void) fprintf(stderr,
				    "event_msg.type = %s ", et);
				switch (event_msg.u.state) {
				case RD_NOSTATE:
					et = "RD_NOSTATE";
					break;
				case RD_CONSISTENT:
					et = "RD_CONSISTENT";
					break;
				case RD_ADD:
					et = "RD_ADD";
					break;
				case RD_DELETE:
					et = "RD_DELETE";
					break;
				default:
					(void) sprintf(buf, "0x%x",
					    event_msg.u.state);
					et = buf;
					break;
				}
				(void) fprintf(stderr,
				    "event_msg.u.state = %s\n", et);
			}
		}
	}

	if ((Bp->flags & BPT_TD_CREATE) && !clear) {
		nthr_create++;
		if (hflag)
			(void) fprintf(stderr, "function_trace(): "
			    "BPT_TD_CREATE breakpoint\n");
		/* we don't care about the event message */
	}

	Dp = Bp->dyn;

	/* report the call or return unless we are just clearing traps */
	if (dotrace) {
		if ((Stk = callstack_info(sp, fp, 1)) == NULL) {
			/* no stack info; report the call without nesting */
			if (Dp != NULL && !clear) {
				if (cflag) {
					add_fcall(fcall_tbl, Dp->prt_name,
					    Bp->sym_name, (unsigned long)1);
				}
				else
					show_function_call(pri, NULL, Dp, Bp);
				if ((Bp->flags & BPT_HANG) && !first)
					rval = 1;
			}
		} else if (!clear) {
			if (Dp != NULL) {
				/* a named function was entered */
				function_entry(pri, Bp, Stk);
				if ((Bp->flags & BPT_HANG) && !first)
					rval = 1;
			} else {
				/* an unnamed (function return) breakpoint */
				function_return(pri, Stk);
			}
		}
	}

	/*
	 * Single-step the traced instruction. Since it's possible that
	 * another thread has deactivated this breakpoint, we indicate
	 * that we have reactivated it by virtue of executing it.
	 *
	 * To avoid a deadlock with some other thread in the process
	 * performing a fork() or a thr_suspend() operation, we must
	 * drop and later reacquire truss_lock.  Some fancy dancing here.
	 */
	active = (Bp->flags & BPT_ACTIVE);
	Bp->flags |= BPT_ACTIVE;
	instr = Bp->instr;
	(void) mutex_unlock(&truss_lock);
	(void) Lxecbkpt(Lwp, instr);
	(void) mutex_lock(&truss_lock);

	if (rval || clear) {	/* leave process stopped and abandoned */
#if defined(__i386)
		/*
		 * Leave it stopped in a state that a stack trace is reasonable.
		 */
		/* XX64 needs to be updated for amd64 & gcc */
		if (rval && instr == 0x55) {	/* pushl %ebp */
			/* step it over the movl %esp,%ebp */
			(void) mutex_unlock(&truss_lock);
			(void) Lsetrun(Lwp, 0, PRCFAULT|PRSTEP);
			/* we're wrapping up; wait one second at most */
			(void) Lwait(Lwp, MILLISEC);
			(void) mutex_lock(&truss_lock);
		}
#endif
		if (get_bkpt(pc) != Bp)
			abend("function_trace: lost breakpoint", NULL);
		(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
		Bp->flags &= ~BPT_ACTIVE;
		(void) mutex_unlock(&truss_lock);
		(void) Lsetrun(Lwp, 0, PRCFAULT|PRSTOP);
		/* we're wrapping up; wait one second at most */
		(void) Lwait(Lwp, MILLISEC);
		(void) mutex_lock(&truss_lock);
	} else {
		if (get_bkpt(pc) != Bp)
			abend("function_trace: lost breakpoint", NULL);
		/*
		 * If the breakpoint was not active to begin with, or was
		 * deactivated while we had truss_lock dropped, remove the
		 * trap from the process for real.
		 */
		if (!active || !(Bp->flags & BPT_ACTIVE)) {
			(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
			Bp->flags &= ~BPT_ACTIVE;
		}
	}
	return (rval);
}
1441 
/*
 * Handle entry to a traced function: prune dead frames from the call
 * stack, record the return point, plant a breakpoint there (unless we
 * are only counting calls), and report the call.
 */
void
function_entry(private_t *pri, struct bkpt *Bp, struct callstack *Stk)
{
	const lwpstatus_t *Lsp = pri->lwpstat;
	uintptr_t sp = Lsp->pr_reg[R_SP];
	uintptr_t rpc = get_return_address(&sp);	/* may also adjust sp */
	struct dynlib *Dp = Bp->dyn;
	int oldframe = FALSE;
	int i;

#ifdef _LP64
	if (data_model != PR_MODEL_LP64) {	/* 32-bit victim */
		sp = (uint32_t)sp;
		rpc = (uint32_t)rpc;
	}
#endif

	/*
	 * If the sp is not within the stack bounds, forget it.
	 * If the symbol's 'internal' flag is false,
	 * don't report internal calls within the library.
	 */
	if (!(sp >= Stk->stkbase && sp < Stk->stkend) ||
	    (!(Bp->flags & BPT_INTERNAL) &&
	    rpc >= Dp->base && rpc < Dp->base + Dp->size))
		return;

	/* pop recorded frames that lie below the new sp (returns we missed) */
	for (i = 0; i < Stk->ncall; i++) {
		if (sp >= Stk->stack[i].sp) {
			Stk->ncall = i;
			if (sp == Stk->stack[i].sp)
				oldframe = TRUE;
			break;
		}
	}

	/*
	 * Breakpoints for function returns are set here
	 * If we're counting function calls, there is no need to set
	 * a breakpoint upon return
	 */

	if (!oldframe && !cflag) {
		(void) create_bkpt(rpc, 1, 1); /* may or may not be set */
		Stk->stack[Stk->ncall].sp = sp;	/* record it anyway */
		Stk->stack[Stk->ncall].pc = rpc;
		Stk->stack[Stk->ncall].fcn = Bp;
	}
	Stk->ncall++;
	if (cflag) {
		/* counting mode: just bump the per-function tally */
		add_fcall(fcall_tbl, Dp->prt_name, Bp->sym_name,
		    (unsigned long)1);
	} else {
		show_function_call(pri, Stk, Dp, Bp);
	}
}
1498 
1499 /*
1500  * We are here because we hit an unnamed breakpoint.
1501  * Attempt to match this up with a return pc on the stack
1502  * and report the function return.
1503  */
void
function_return(private_t *pri, struct callstack *Stk)
{
	const lwpstatus_t *Lsp = pri->lwpstat;
	uintptr_t sp = Lsp->pr_reg[R_SP];
	uintptr_t fp = Lsp->pr_reg[R_FP];
	int i;

#ifdef _LP64
	if (data_model != PR_MODEL_LP64) {	/* 32-bit victim */
		sp = (uint32_t)sp;
		fp = (uint32_t)fp;
	}
#endif

	/* ensure a minimal [sp, fp) window in which to match a frame */
	if (fp < sp + 8)
		fp = sp + 8;

	/* find the most recent recorded frame that [sp, fp) covers */
	for (i = Stk->ncall - 1; i >= 0; i--) {
		if (sp <= Stk->stack[i].sp && fp > Stk->stack[i].sp) {
			Stk->ncall = i;
			break;
		}
	}

#if defined(i386) || defined(__amd64)
	if (i < 0) {
		/* probably __mul64() or friends -- try harder */
		int j;
		/* back sp down a longword at a time and retry the match */
		for (j = 0; i < 0 && j < 8; j++) {	/* up to 8 args */
			sp -= 4;
			for (i = Stk->ncall - 1; i >= 0; i--) {
				if (sp <= Stk->stack[i].sp &&
				    fp > Stk->stack[i].sp) {
					Stk->ncall = i;
					break;
				}
			}
		}
	}
#endif

	/* report the return value, unless we are only counting calls */
	if ((i >= 0) && (!cflag)) {
		show_function_return(pri, Lsp->pr_reg[R_R0], 0,
		    Stk, Stk->stack[i].fcn->dyn, Stk->stack[i].fcn);
	}
}
1551 
/*
 * FPADJUST is the size of the return address pushed on the stack by a
 * call instruction (none on sparc, where the return pc is kept in a
 * register).  It is used below to reconstruct the caller's %sp at the
 * time of the call from a frame's return sp.
 */
#if defined(__sparc)
#define	FPADJUST	0
#elif defined(__amd64)
#define	FPADJUST	8
#elif defined(__i386)
#define	FPADJUST	4
#endif
1559 
/*
 * Walk one stack (as described by the register set 'reg') and, for
 * every frame whose function carries a breakpoint, record the frame
 * on the callstack and plant a breakpoint at its return pc.
 */
void
trap_one_stack(prgregset_t reg)
{
	struct dynlib *Dp;
	struct bkpt *Bp;
	struct callstack *Stk;
	GElf_Sym sym;
	char sym_name[32];
	uintptr_t sp = reg[R_SP];
	uintptr_t pc = reg[R_PC];
	uintptr_t fp;
	uintptr_t rpc;
	uint_t nframe = 0;
	uint_t maxframe = 8;
	struct {
		uintptr_t sp;		/* %sp within called function */
		uintptr_t pc;		/* %pc within called function */
		uintptr_t rsp;		/* the return sp */
		uintptr_t rpc;		/* the return pc */
	} *frame = my_malloc(maxframe * sizeof (*frame), NULL);

	/*
	 * Gather stack frames bottom to top.
	 */
	while (sp != 0) {
		fp = sp;	/* remember highest non-null sp */
		frame[nframe].sp = sp;
		frame[nframe].pc = pc;
		sp = previous_fp(sp, &pc);	/* also yields the return pc */
		frame[nframe].rsp = sp;
		frame[nframe].rpc = pc;
		/* grow the frame array as needed */
		if (++nframe == maxframe) {
			maxframe *= 2;
			frame = my_realloc(frame, maxframe * sizeof (*frame),
			    NULL);
		}
	}

	/*
	 * Scan for function return breakpoints top to bottom.
	 */
	while (nframe--) {
		/* lookup the called function in the symbol tables */
		if (Plookup_by_addr(Proc, frame[nframe].pc, sym_name,
		    sizeof (sym_name), &sym) != 0)
			continue;

		pc = sym.st_value;	/* entry point of the function */
		rpc = frame[nframe].rpc;	/* caller's return pc */

		/* lookup the function in the breakpoint table */
		if ((Bp = get_bkpt(pc)) == NULL || (Dp = Bp->dyn) == NULL)
			continue;

		/* skip unreported internal calls within the library */
		if (!(Bp->flags & BPT_INTERNAL) &&
		    rpc >= Dp->base && rpc < Dp->base + Dp->size)
			continue;

		sp = frame[nframe].rsp + FPADJUST;  /* %sp at time of call */
		if ((Stk = callstack_info(sp, fp, 0)) == NULL)
			continue;	/* can't happen? */

		/* record the frame only if the breakpoint could be set */
		if (create_bkpt(rpc, 1, 1) != NULL) {
			Stk->stack[Stk->ncall].sp = sp;
			Stk->stack[Stk->ncall].pc = rpc;
			Stk->stack[Stk->ncall].fcn = Bp;
			Stk->ncall++;
		}
	}

	free(frame);
}
1632 
1633 int
1634 lwp_stack_traps(void *cd, const lwpstatus_t *Lsp)
1635 {
1636 	ph_map_t *ph_map = (ph_map_t *)cd;
1637 	prgregset_t reg;
1638 
1639 	(void) memcpy(reg, Lsp->pr_reg, sizeof (prgregset_t));
1640 	make_lwp_stack(Lsp, ph_map->pmap, ph_map->nmap);
1641 	trap_one_stack(reg);
1642 
1643 	return (interrupt | sigusr1);
1644 }
1645 
1646 /* ARGSUSED */
1647 int
1648 thr_stack_traps(const td_thrhandle_t *Thp, void *cd)
1649 {
1650 	prgregset_t reg;
1651 
1652 	/*
1653 	 * We have already dealt with all the lwps.
1654 	 * We only care about unbound threads here (TD_PARTIALREG).
1655 	 */
1656 	if (td_thr_getgregs(Thp, reg) != TD_PARTIALREG)
1657 		return (0);
1658 
1659 	make_thr_stack(Thp, reg);
1660 	trap_one_stack(reg);
1661 
1662 	return (interrupt | sigusr1);
1663 }
1664 
1665 #if defined(__sparc)
1666 
/*
 * Read the register window at sp from the victim and return the saved
 * frame pointer; the saved return pc is stored through *rpc.  Returns
 * 0 (and zeroes *rpc) when the frame cannot be read or the saved fp
 * itself points at unreadable memory.  Note the unusual #ifdef layout:
 * the 32-bit code path is shared between the LP64 'else' branch and
 * the non-LP64 build.
 */
uintptr_t
previous_fp(uintptr_t sp, uintptr_t *rpc)
{
	uintptr_t fp = 0;
	uintptr_t pc = 0;

#ifdef _LP64
	if (data_model == PR_MODEL_LP64) {
		struct rwindow64 rwin;
		if (Pread(Proc, &rwin, sizeof (rwin), sp + STACK_BIAS)
		    == sizeof (rwin)) {
			fp = (uintptr_t)rwin.rw_fp;
			pc = (uintptr_t)rwin.rw_rtn;
		}
		/* validate the saved fp by probing it */
		if (fp != 0 &&
		    Pread(Proc, &rwin, sizeof (rwin), fp + STACK_BIAS)
		    != sizeof (rwin))
			fp = pc = 0;
	} else {
		struct rwindow32 rwin;
#else	/* _LP64 */
		struct rwindow rwin;
#endif	/* _LP64 */
		if (Pread(Proc, &rwin, sizeof (rwin), sp) == sizeof (rwin)) {
			fp = (uint32_t)rwin.rw_fp;
			pc = (uint32_t)rwin.rw_rtn;
		}
		/* validate the saved fp by probing it */
		if (fp != 0 &&
		    Pread(Proc, &rwin, sizeof (rwin), fp) != sizeof (rwin))
			fp = pc = 0;
#ifdef _LP64
	}
#endif
	if (rpc)
		*rpc = pc;
	return (fp);
}
1704 
1705 /* ARGSUSED */
1706 uintptr_t
1707 get_return_address(uintptr_t *psp)
1708 {
1709 	instr_t inst;
1710 	private_t *pri = get_private();
1711 	const lwpstatus_t *Lsp = pri->lwpstat;
1712 	uintptr_t rpc;
1713 
1714 	rpc = (uintptr_t)Lsp->pr_reg[R_O7] + 8;
1715 	if (data_model != PR_MODEL_LP64)
1716 		rpc = (uint32_t)rpc;
1717 
1718 	/* check for structure return (bletch!) */
1719 	if (Pread(Proc, &inst, sizeof (inst), rpc) == sizeof (inst) &&
1720 	    inst < 0x1000)
1721 		rpc += sizeof (instr_t);
1722 
1723 	return (rpc);
1724 }
1725 
1726 int
1727 get_arguments(long *argp)
1728 {
1729 	private_t *pri = get_private();
1730 	const lwpstatus_t *Lsp = pri->lwpstat;
1731 	int i;
1732 
1733 	if (data_model != PR_MODEL_LP64)
1734 		for (i = 0; i < 4; i++)
1735 			argp[i] = (uint_t)Lsp->pr_reg[R_O0+i];
1736 	else
1737 		for (i = 0; i < 4; i++)
1738 			argp[i] = (long)Lsp->pr_reg[R_O0+i];
1739 	return (4);
1740 }
1741 
1742 #endif	/* __sparc */
1743 
1744 #if defined(__i386) || defined(__amd64)
1745 
1746 uintptr_t
1747 previous_fp(uintptr_t fp, uintptr_t *rpc)
1748 {
1749 	uintptr_t frame[2];
1750 	uintptr_t trash[2];
1751 
1752 	if (Pread(Proc, frame, sizeof (frame), fp) != sizeof (frame) ||
1753 	    (frame[0] != 0 &&
1754 	    Pread(Proc, trash, sizeof (trash), frame[0]) != sizeof (trash)))
1755 		frame[0] = frame[1] = 0;
1756 
1757 	if (rpc)
1758 		*rpc = frame[1];
1759 	return (frame[0]);
1760 }
1761 
1762 #endif
1763 
1764 #if defined(__amd64) || defined(__i386)
1765 
1766 /*
1767  * Examine the instruction at the return location of a function call
1768  * and return the byte count by which the stack is adjusted on return.
 * If the instruction at the return location is an addl, as expected,
1770  * then adjust the return pc by the size of that instruction so that
1771  * we will place the return breakpoint on the following instruction.
1772  * This allows programs that interrogate their own stacks and record
1773  * function calls and arguments to work correctly even while we interfere.
1774  * Return the count on success, -1 on failure.
1775  */
int
return_count32(uint32_t *ppc)
{
	uintptr_t pc = *ppc;
	struct bkpt *Bp;
	int count;
	uchar_t instr[6];	/* instruction at pc */

	if ((count = Pread(Proc, instr, sizeof (instr), pc)) < 0)
		return (-1);

	/* find the replaced instruction at pc (if any) */
	if ((Bp = get_bkpt(pc)) != NULL && (Bp->flags & BPT_ACTIVE))
		instr[0] = (uchar_t)Bp->instr;

	/* a short read is acceptable only for the 3-byte (0x83) addl form */
	if (count != sizeof (instr) &&
	    (count < 3 || instr[0] != 0x83))
		return (-1);

	/*
	 * A bit of disassembly of the instruction is required here.
	 */
	if (instr[1] != 0xc4) {	/* not an addl mumble,%esp instruction */
		count = 0;
	} else if (instr[0] == 0x81) {	/* count is a longword */
		count = instr[2]+(instr[3]<<8)+(instr[4]<<16)+(instr[5]<<24);
		*ppc += 6;
	} else if (instr[0] == 0x83) {	/* count is a byte */
		count = instr[2];
		*ppc += 3;
	} else {		/* not an addl instruction */
		count = 0;
	}

	return (count);
}
1812 
1813 uintptr_t
1814 get_return_address32(uintptr_t *psp)
1815 {
1816 	uint32_t sp = *psp;
1817 	uint32_t rpc;
1818 	int count;
1819 
1820 	*psp += 4;	/* account for popping the stack on return */
1821 	if (Pread(Proc, &rpc, sizeof (rpc), sp) != sizeof (rpc))
1822 		return (0);
1823 	if ((count = return_count32(&rpc)) < 0)
1824 		count = 0;
1825 	*psp += count;		/* expected sp on return */
1826 	return (rpc);
1827 }
1828 
/*
 * Return the pc at which the current function will return,
 * adjusting *psp to the expected %sp on return (32-bit only).
 */
uintptr_t
get_return_address(uintptr_t *psp)
{
#ifdef _LP64
	uintptr_t rpc;
	uintptr_t sp = *psp;

	if (data_model == PR_MODEL_LP64) {
		/* the return pc is at the top of the stack */
		if (Pread(Proc, &rpc, sizeof (rpc), sp) != sizeof (rpc))
			return (0);
		/*
		 * Ignore arguments pushed on the stack.  See comments in
		 * get_arguments().
		 */
		return (rpc);
	} else
#endif
		return (get_return_address32(psp));
}
1848 
1849 
/*
 * Fetch up to four 32-bit arguments from the victim's stack into
 * argp[], using the caller's stack-pop at the return point to infer
 * how many arguments were actually passed.  Returns the count.
 */
int
get_arguments32(long *argp)
{
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;
	uint32_t frame[5];	/* return pc + 4 args */
	int narg;
	int count;
	int i;

	/* read the return pc and up to four longwords off the stack */
	narg = Pread(Proc, frame, sizeof (frame),
	    (uintptr_t)Lsp->pr_reg[R_SP]);
	narg -= sizeof (greg32_t);	/* don't count the return pc */
	if (narg <= 0)
		return (0);
	narg /= sizeof (greg32_t); /* no more than 4 */

	/*
	 * Given the return PC, determine the number of arguments.
	 */
	if ((count = return_count32(&frame[0])) < 0)
		narg = 0;
	else {
		count /= sizeof (greg32_t);
		if (narg > count)
			narg = count;
	}

	for (i = 0; i < narg; i++)
		argp[i] = (long)frame[i+1];

	return (narg);
}
1883 
/*
 * Fetch the current function's arguments into argp[] and return the
 * argument count.
 */
int
get_arguments(long *argp)
{
#ifdef _LP64
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;

	if (data_model == PR_MODEL_LP64) {
		/*
		 * On amd64, we do not know how many arguments are passed to
		 * each function.  While it may be possible to detect if we
		 * have more than 6 arguments, it is of marginal value.
		 * Instead, assume that we always have 6 arguments, which are
		 * passed via registers.
		 */
		argp[0] = Lsp->pr_reg[REG_RDI];
		argp[1] = Lsp->pr_reg[REG_RSI];
		argp[2] = Lsp->pr_reg[REG_RDX];
		argp[3] = Lsp->pr_reg[REG_RCX];
		argp[4] = Lsp->pr_reg[REG_R8];
		argp[5] = Lsp->pr_reg[REG_R9];
		return (6);
	} else
#endif
		return (get_arguments32(argp));
}
1910 
1911 #endif	/* __amd64 || __i386 */
1912