1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #define _SYSCALL32
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <unistd.h>
32 #include <ctype.h>
33 #include <string.h>
34 #include <memory.h>
35 #include <errno.h>
36 #include <sys/types.h>
37 #include <sys/stack.h>
38 #include <signal.h>
39 #include <limits.h>
40 #include <sys/isa_defs.h>
41 #include <proc_service.h>
42 #include <dlfcn.h>
43 #include <fnmatch.h>
44 #include <libproc.h>
45 #include "ramdata.h"
46 #include "systable.h"
47 #include "print.h"
48 #include "proto.h"
49 #include "htbl.h"
50
51 /*
52 * Functions supporting library function call tracing.
53 */
54
/*
 * Bundle carrying the raw /proc/<pid>/rmap address-space map to
 * lwp_stack_traps() through the Plwp_iter() callback-data argument.
 */
typedef struct {
	prmap_t	*pmap;	/* array of reserved-address mappings */
	int	nmap;	/* number of entries in pmap */
} ph_map_t;

/*
 * Functions defined in this file.
 * NOTE(review): the historical comment called these "static functions",
 * but they are declared with external linkage here.
 */
void function_entry(private_t *, struct bkpt *, struct callstack *);
void function_return(private_t *, struct callstack *);
int object_iter(void *, const prmap_t *, const char *);
int object_present(void *, const prmap_t *, const char *);
int symbol_iter(void *, const GElf_Sym *, const char *);
uintptr_t get_return_address(uintptr_t *);
int get_arguments(long *argp);
uintptr_t previous_fp(uintptr_t, uintptr_t *);
int lwp_stack_traps(void *cd, const lwpstatus_t *Lsp);
int thr_stack_traps(const td_thrhandle_t *Thp, void *cd);
struct bkpt *create_bkpt(uintptr_t, int, int);
void set_deferred_breakpoints(void);

#define	DEF_MAXCALL	16	/* initial value of Stk->maxcall */

#define	FAULT_ADDR	((uintptr_t)(0-8))	/* sentinel "bad" address */

#define	HASHSZ	2048	/* breakpoint hash table size (power of two) */
/* hash an address into [0, HASHSZ); 0x7ff == HASHSZ - 1 */
#define	bpt_hash(addr)	((((addr) >> 13) ^ ((addr) >> 2)) & 0x7ff)
82
83 static void
setup_thread_agent(void)84 setup_thread_agent(void)
85 {
86 struct bkpt *Bp;
87 td_notify_t notify;
88 td_thr_events_t events;
89
90 if (Thr_agent != NULL) /* only once */
91 return;
92 if (td_init() != TD_OK || td_ta_new(Proc, &Thr_agent) != TD_OK)
93 Thr_agent = NULL;
94 else {
95 td_event_emptyset(&events);
96 td_event_addset(&events, TD_CREATE);
97 if (td_ta_event_addr(Thr_agent, TD_CREATE, ¬ify) == TD_OK &&
98 notify.type == NOTIFY_BPT &&
99 td_ta_set_event(Thr_agent, &events) == TD_OK &&
100 (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
101 Bp->flags |= BPT_TD_CREATE;
102 }
103 }
104
105 /*
106 * Delete all breakpoints in the range [base .. base+size)
107 * from the breakpoint hash table.
108 */
109 static void
delete_breakpoints(uintptr_t base,size_t size)110 delete_breakpoints(uintptr_t base, size_t size)
111 {
112 struct bkpt **Bpp;
113 struct bkpt *Bp;
114 int i;
115
116 if (bpt_hashtable == NULL)
117 return;
118 for (i = 0; i < HASHSZ; i++) {
119 Bpp = &bpt_hashtable[i];
120 while ((Bp = *Bpp) != NULL) {
121 if (Bp->addr < base || Bp->addr >= base + size) {
122 Bpp = &Bp->next;
123 continue;
124 }
125 *Bpp = Bp->next;
126 if (Bp->sym_name)
127 free(Bp->sym_name);
128 free(Bp);
129 }
130 }
131 }
132
133 /*
134 * Establishment of breakpoints on traced library functions.
135 */
136 void
establish_breakpoints(void)137 establish_breakpoints(void)
138 {
139 if (Dynpat == NULL)
140 return;
141
142 /* allocate the breakpoint hash table */
143 if (bpt_hashtable == NULL) {
144 bpt_hashtable = my_malloc(HASHSZ * sizeof (struct bkpt *),
145 NULL);
146 (void) memset(bpt_hashtable, 0,
147 HASHSZ * sizeof (struct bkpt *));
148 }
149
150 /*
151 * Set special rtld_db event breakpoints, first time only.
152 */
153 if (Rdb_agent == NULL &&
154 (Rdb_agent = Prd_agent(Proc)) != NULL) {
155 rd_notify_t notify;
156 struct bkpt *Bp;
157
158 (void) rd_event_enable(Rdb_agent, 1);
159 if (rd_event_addr(Rdb_agent, RD_PREINIT, ¬ify) == RD_OK &&
160 (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
161 Bp->flags |= BPT_PREINIT;
162 if (rd_event_addr(Rdb_agent, RD_POSTINIT, ¬ify) == RD_OK &&
163 (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
164 Bp->flags |= BPT_POSTINIT;
165 if (rd_event_addr(Rdb_agent, RD_DLACTIVITY, ¬ify) == RD_OK &&
166 (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
167 Bp->flags |= BPT_DLACTIVITY;
168 }
169
170 /*
171 * Set special thread event breakpoint, first time libc is seen.
172 */
173 if (Thr_agent == NULL)
174 setup_thread_agent();
175
176 /*
177 * Tell libproc to update its mappings.
178 */
179 Pupdate_maps(Proc);
180
181 /*
182 * If rtld_db told us a library was being deleted,
183 * first mark all of the dynlibs as not present, then
184 * iterate over the shared objects, marking only those
185 * present that really are present, and finally delete
186 * all of the not-present dynlibs.
187 */
188 if (delete_library) {
189 struct dynlib **Dpp;
190 struct dynlib *Dp;
191
192 for (Dp = Dynlib; Dp != NULL; Dp = Dp->next)
193 Dp->present = FALSE;
194 (void) Pobject_iter(Proc, object_present, NULL);
195 Dpp = &Dynlib;
196 while ((Dp = *Dpp) != NULL) {
197 if (Dp->present) {
198 Dpp = &Dp->next;
199 continue;
200 }
201 delete_breakpoints(Dp->base, Dp->size);
202 *Dpp = Dp->next;
203 free(Dp->lib_name);
204 free(Dp->match_name);
205 free(Dp->prt_name);
206 free(Dp);
207 }
208 delete_library = FALSE;
209 }
210
211 /*
212 * Iterate over the shared objects, creating breakpoints.
213 */
214 (void) Pobject_iter(Proc, object_iter, NULL);
215
216 /*
217 * Now actually set all the breakpoints we just created.
218 */
219 set_deferred_breakpoints();
220 }
221
222 /*
223 * Initial establishment of stacks in a newly-grabbed process.
224 * establish_breakpoints() has already been called.
225 */
226 void
establish_stacks(void)227 establish_stacks(void)
228 {
229 const pstatus_t *Psp = Pstatus(Proc);
230 char mapfile[64];
231 int mapfd;
232 struct stat statb;
233 prmap_t *Pmap = NULL;
234 int nmap = 0;
235 ph_map_t ph_map;
236
237 (void) sprintf(mapfile, "/proc/%d/rmap", (int)Psp->pr_pid);
238 if ((mapfd = open(mapfile, O_RDONLY)) < 0 ||
239 fstat(mapfd, &statb) != 0 ||
240 statb.st_size < sizeof (prmap_t) ||
241 (Pmap = my_malloc(statb.st_size, NULL)) == NULL ||
242 (nmap = pread(mapfd, Pmap, statb.st_size, 0L)) <= 0 ||
243 (nmap /= sizeof (prmap_t)) == 0) {
244 if (Pmap != NULL)
245 free(Pmap);
246 Pmap = NULL;
247 nmap = 0;
248 }
249 if (mapfd >= 0)
250 (void) close(mapfd);
251
252 /*
253 * Iterate over lwps, establishing stacks.
254 */
255 ph_map.pmap = Pmap;
256 ph_map.nmap = nmap;
257 (void) Plwp_iter(Proc, lwp_stack_traps, &ph_map);
258 if (Pmap != NULL)
259 free(Pmap);
260
261 if (Thr_agent == NULL)
262 return;
263
264 /*
265 * Iterate over unbound threads, establishing stacks.
266 */
267 (void) td_ta_thr_iter(Thr_agent, thr_stack_traps, NULL,
268 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
269 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
270 }
271
/*
 * Run symbol_iter() over the symbols of one object for one dynlib
 * pattern.  Always searches the dynamic symbol table; also searches
 * the static symbol table for the executable or when internal calls
 * within the library are being traced.
 */
void
do_symbol_iter(const char *object_name, struct dynpat *Dyp)
{
	/* an empty prt_name identifies the executable itself */
	if (*Dyp->Dp->prt_name == '\0')
		object_name = PR_OBJ_EXEC;

	/*
	 * Always search the dynamic symbol table.
	 */
	(void) Psymbol_iter(Proc, object_name,
	    PR_DYNSYM, BIND_WEAK|BIND_GLOBAL|TYPE_FUNC,
	    symbol_iter, Dyp);

	/*
	 * Search the static symbol table if this is the
	 * executable file or if we are being asked to
	 * report internal calls within the library.
	 */
	if (object_name == PR_OBJ_EXEC || Dyp->internal)
		(void) Psymbol_iter(Proc, object_name,
		    PR_SYMTAB, BIND_ANY|TYPE_FUNC,
		    symbol_iter, Dyp);
}
295
/*
 * Pobject_iter() callback: record one mapped object in the dynlib list
 * (creating the entry on first sight) and, for every dynlib pattern
 * that matches its name, set breakpoints on its matching symbols.
 * Returns non-zero only when interrupted, which stops the iteration.
 */
/* ARGSUSED */
int
object_iter(void *cd, const prmap_t *pmp, const char *object_name)
{
	char name[100];
	struct dynpat *Dyp;
	struct dynlib *Dp;
	const char *str;
	char *s;
	int i;

	/* only non-writable executable mappings (text segments) qualify */
	if ((pmp->pr_mflags & MA_WRITE) || !(pmp->pr_mflags & MA_EXEC))
		return (0);

	/*
	 * Set special thread event breakpoint, first time libc is seen.
	 */
	if (Thr_agent == NULL && strstr(object_name, "/libc.so.") != NULL)
		setup_thread_agent();

	/* do we already have a dynlib entry for this object? */
	for (Dp = Dynlib; Dp != NULL; Dp = Dp->next)
		if (strcmp(object_name, Dp->lib_name) == 0 ||
		    (strcmp(Dp->lib_name, "a.out") == 0 &&
		    strcmp(pmp->pr_mapname, "a.out") == 0))
			break;

	if (Dp == NULL) {
		/* no: build a new entry */
		Dp = my_malloc(sizeof (struct dynlib), NULL);
		(void) memset(Dp, 0, sizeof (struct dynlib));
		if (strcmp(pmp->pr_mapname, "a.out") == 0) {
			/* the executable prints with an empty prefix */
			Dp->lib_name = strdup(pmp->pr_mapname);
			Dp->match_name = strdup(pmp->pr_mapname);
			Dp->prt_name = strdup("");
		} else {
			Dp->lib_name = strdup(object_name);
			/* basename of the object's pathname */
			if ((str = strrchr(object_name, '/')) != NULL)
				str++;
			else
				str = object_name;
			/* - 2 leaves room for the ':' appended below */
			(void) strncpy(name, str, sizeof (name) - 2);
			name[sizeof (name) - 2] = '\0';
			/* strip ".so" and beyond to form the match name */
			if ((s = strstr(name, ".so")) != NULL)
				*s = '\0';
			Dp->match_name = strdup(name);
			(void) strcat(name, ":");
			Dp->prt_name = strdup(name);
		}
		Dp->next = Dynlib;
		Dynlib = Dp;
	}

	/* skip objects already processed (except "ld:" when inconsistent) */
	if (Dp->built ||
	    (not_consist && strcmp(Dp->prt_name, "ld:") != 0))	/* kludge */
		return (0);

	if (hflag && not_consist)
		(void) fprintf(stderr, "not_consist is TRUE, building %s\n",
		    Dp->lib_name);

	Dp->base = pmp->pr_vaddr;
	Dp->size = pmp->pr_size;

	/*
	 * For every dynlib pattern that matches this library's name,
	 * iterate through all of the library's symbols looking for
	 * matching symbol name patterns.
	 */
	for (Dyp = Dynpat; Dyp != NULL; Dyp = Dyp->next) {
		if (interrupt|sigusr1)
			break;
		for (i = 0; i < Dyp->nlibpat; i++) {
			if (interrupt|sigusr1)
				break;
			if (fnmatch(Dyp->libpat[i], Dp->match_name, 0) != 0)
				continue;	/* no match */

			/*
			 * Require an exact match for the executable (a.out)
			 * and for the dynamic linker (ld.so.1).
			 */
			if ((strcmp(Dp->match_name, "a.out") == 0 ||
			    strcmp(Dp->match_name, "ld") == 0) &&
			    strcmp(Dyp->libpat[i], Dp->match_name) != 0)
				continue;

			/*
			 * Set Dyp->Dp to Dp so symbol_iter() can use it.
			 */
			Dyp->Dp = Dp;
			do_symbol_iter(object_name, Dyp);
			Dyp->Dp = NULL;
		}
	}

	Dp->built = TRUE;
	return (interrupt | sigusr1);
}
393
394 /* ARGSUSED */
395 int
object_present(void * cd,const prmap_t * pmp,const char * object_name)396 object_present(void *cd, const prmap_t *pmp, const char *object_name)
397 {
398 struct dynlib *Dp;
399
400 for (Dp = Dynlib; Dp != NULL; Dp = Dp->next) {
401 if (Dp->base == pmp->pr_vaddr)
402 Dp->present = TRUE;
403 }
404
405 return (0);
406 }
407
/*
 * Search for an existing breakpoint at the 'pc' location.
 * Returns the matching bkpt entry, or NULL if none exists.
 * Assumes bpt_hashtable has been allocated (Dynpat != NULL path).
 */
struct bkpt *
get_bkpt(uintptr_t pc)
{
	struct bkpt *Bp;

	/* walk the hash chain for pc's bucket */
	for (Bp = bpt_hashtable[bpt_hash(pc)]; Bp != NULL; Bp = Bp->next)
		if (pc == Bp->addr)
			break;

	return (Bp);
}
422
/*
 * Create a breakpoint at 'pc', if one is not there already.
 * 'ret' is true when creating a function return breakpoint, in which case
 * fail and return NULL if the breakpoint would be created in writeable data.
 * If 'set' it true, set the breakpoint in the process now.
 */
struct bkpt *
create_bkpt(uintptr_t pc, int ret, int set)
{
	uint_t hix = bpt_hash(pc);
	struct bkpt *Bp;
	const prmap_t *pmp;

	/* return the existing breakpoint, if any */
	for (Bp = bpt_hashtable[hix]; Bp != NULL; Bp = Bp->next)
		if (pc == Bp->addr)
			return (Bp);

	/*
	 * Don't set return breakpoints on writeable data
	 * or on any space other than executable text.
	 * Don't set breakpoints in the child of a vfork()
	 * because that would modify the parent's address space.
	 */
	if (is_vfork_child ||
	    (ret &&
	    ((pmp = Paddr_to_text_map(Proc, pc)) == NULL ||
	    !(pmp->pr_mflags & MA_EXEC) ||
	    (pmp->pr_mflags & MA_WRITE))))
		return (NULL);

	/* create a new unnamed breakpoint */
	Bp = my_malloc(sizeof (struct bkpt), NULL);
	Bp->sym_name = NULL;
	Bp->dyn = NULL;
	Bp->addr = pc;
	Bp->instr = 0;	/* original instruction, saved by Psetbkpt() */
	Bp->flags = 0;
	/* BPT_ACTIVE only if the trap was actually planted now */
	if (set && Psetbkpt(Proc, Bp->addr, &Bp->instr) == 0)
		Bp->flags |= BPT_ACTIVE;
	/* link at the head of pc's hash chain */
	Bp->next = bpt_hashtable[hix];
	bpt_hashtable[hix] = Bp;

	return (Bp);
}
467
468 /*
469 * Set all breakpoints that haven't been set yet.
470 * Deactivate all breakpoints from modules that are not present any more.
471 */
472 void
set_deferred_breakpoints(void)473 set_deferred_breakpoints(void)
474 {
475 struct bkpt *Bp;
476 int i;
477
478 if (is_vfork_child)
479 return;
480
481 for (i = 0; i < HASHSZ; i++) {
482 for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
483 if (!(Bp->flags & BPT_ACTIVE)) {
484 if (!(Bp->flags & BPT_EXCLUDE) &&
485 Psetbkpt(Proc, Bp->addr, &Bp->instr) == 0)
486 Bp->flags |= BPT_ACTIVE;
487 } else if (Paddr_to_text_map(Proc, Bp->addr) == NULL) {
488 Bp->flags &= ~BPT_ACTIVE;
489 }
490 }
491 }
492 }
493
/*
 * Psymbol_iter() callback: if the symbol matches one of the current
 * dynlib pattern's symbol patterns, create (but do not yet set) a
 * breakpoint at the function's entry point and tag it with the
 * pattern's flags.  Returns non-zero only when interrupted.
 */
int
symbol_iter(void *cd, const GElf_Sym *sym, const char *sym_name)
{
	struct dynpat *Dyp = cd;
	struct dynlib *Dp = Dyp->Dp;	/* set by object_iter() */
	uintptr_t pc = sym->st_value;
	struct bkpt *Bp;
	int i;

	/* ignore any undefined symbols */
	if (sym->st_shndx == SHN_UNDEF)
		return (0);

	/*
	 * Arbitrarily omit "_start" from the executable.
	 * (Avoid indentation before main().)
	 */
	if (*Dp->prt_name == '\0' && strcmp(sym_name, "_start") == 0)
		return (0);

	/*
	 * Arbitrarily omit "_rt_boot" from the dynamic linker.
	 * (Avoid indentation before main().)
	 */
	if (strcmp(Dp->match_name, "ld") == 0 &&
	    strcmp(sym_name, "_rt_boot") == 0)
		return (0);

	/*
	 * Arbitrarily omit any symbols whose name starts with '.'.
	 * Apparantly putting a breakpoint on .umul causes a
	 * fatal error in libthread (%y is not restored correctly
	 * when a single step is taken). Looks like a /proc bug.
	 */
	if (*sym_name == '.')
		return (0);

	/*
	 * For each pattern in the array of symbol patterns,
	 * if the pattern matches the symbol name, then
	 * create a breakpoint at the function in question.
	 */
	for (i = 0; i < Dyp->nsympat; i++) {
		if (interrupt|sigusr1)
			break;
		if (fnmatch(Dyp->sympat[i], sym_name, 0) != 0)
			continue;

		/* NULL only in a vfork child; bail out quietly then */
		if ((Bp = create_bkpt(pc, 0, 0)) == NULL)	/* can't fail */
			return (0);

		/*
		 * New breakpoints receive a name now.
		 * For existing breakpoints, prefer the subset name if possible,
		 * else prefer the shorter name.
		 */
		if (Bp->sym_name == NULL) {
			Bp->sym_name = strdup(sym_name);
		} else if (strstr(Bp->sym_name, sym_name) != NULL ||
		    strlen(Bp->sym_name) > strlen(sym_name)) {
			free(Bp->sym_name);
			Bp->sym_name = strdup(sym_name);
		}
		Bp->dyn = Dp;
		Bp->flags |= Dyp->flag;
		if (Dyp->exclude)
			Bp->flags |= BPT_EXCLUDE;
		else if (Dyp->internal || *Dp->prt_name == '\0')
			Bp->flags |= BPT_INTERNAL;
		return (0);
	}

	return (interrupt | sigusr1);
}
568
569 /* For debugging only ---- */
570 void
report_htable_stats(void)571 report_htable_stats(void)
572 {
573 const pstatus_t *Psp = Pstatus(Proc);
574 struct callstack *Stk;
575 struct bkpt *Bp;
576 uint_t Min = 1000000;
577 uint_t Max = 0;
578 uint_t Avg = 0;
579 uint_t Total = 0;
580 uint_t i, j;
581 uint_t bucket[HASHSZ];
582
583 if (Dynpat == NULL || !hflag)
584 return;
585
586 hflag = FALSE;
587 (void) memset(bucket, 0, sizeof (bucket));
588
589 for (i = 0; i < HASHSZ; i++) {
590 j = 0;
591 for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next)
592 j++;
593 if (j < Min)
594 Min = j;
595 if (j > Max)
596 Max = j;
597 if (j < HASHSZ)
598 bucket[j]++;
599 Total += j;
600 }
601 Avg = (Total + HASHSZ / 2) / HASHSZ;
602 (void) fprintf(stderr, "truss hash table statistics --------\n");
603 (void) fprintf(stderr, " Total = %u\n", Total);
604 (void) fprintf(stderr, " Min = %u\n", Min);
605 (void) fprintf(stderr, " Max = %u\n", Max);
606 (void) fprintf(stderr, " Avg = %u\n", Avg);
607 for (i = 0; i < HASHSZ; i++)
608 if (bucket[i])
609 (void) fprintf(stderr, " %3u buckets of size %d\n",
610 bucket[i], i);
611
612 (void) fprintf(stderr, "truss-detected stacks --------\n");
613 for (Stk = callstack; Stk != NULL; Stk = Stk->next) {
614 (void) fprintf(stderr,
615 " base = 0x%.8lx end = 0x%.8lx size = %ld\n",
616 (ulong_t)Stk->stkbase,
617 (ulong_t)Stk->stkend,
618 (ulong_t)(Stk->stkend - Stk->stkbase));
619 }
620 (void) fprintf(stderr, "primary unix stack --------\n");
621 (void) fprintf(stderr,
622 " base = 0x%.8lx end = 0x%.8lx size = %ld\n",
623 (ulong_t)Psp->pr_stkbase,
624 (ulong_t)(Psp->pr_stkbase + Psp->pr_stksize),
625 (ulong_t)Psp->pr_stksize);
626 (void) fprintf(stderr, "nthr_create = %u\n", nthr_create);
627 }
628
/*
 * Record the stack an lwp is running on, if we don't know it already.
 * Tries, in order: the primary stack, the signal alternate stack, the
 * lwp's thread stack (via libthread_db), and finally the raw
 * /proc/<pid>/rmap entries supplied by the caller.  If none match,
 * the tentatively-created callstack entry is unlinked and discarded.
 */
void
make_lwp_stack(const lwpstatus_t *Lsp, prmap_t *Pmap, int nmap)
{
	const pstatus_t *Psp = Pstatus(Proc);
	uintptr_t sp = Lsp->pr_reg[R_SP];
	id_t lwpid = Lsp->pr_lwpid;
	struct callstack *Stk;
	td_thrhandle_t th;
	td_thrinfo_t thrinfo;

	/* a 32-bit process has 32-bit stack pointers */
	if (data_model != PR_MODEL_LP64)
		sp = (uint32_t)sp;

	/* check to see if we already have this stack */
	if (sp == 0)
		return;
	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
		if (sp >= Stk->stkbase && sp < Stk->stkend)
			return;

	/* tentatively link a new callstack entry at the list head */
	Stk = my_malloc(sizeof (struct callstack), NULL);
	Stk->next = callstack;
	callstack = Stk;
	nstack++;
	Stk->tref = 0;
	Stk->tid = 0;
	Stk->nthr_create = 0;
	Stk->ncall = 0;
	Stk->maxcall = DEF_MAXCALL;
	Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack), NULL);

	/* primary stack */
	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
		Stk->stkbase = Psp->pr_stkbase;
		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
		return;
	}

	/* alternate stack */
	if ((Lsp->pr_altstack.ss_flags & SS_ONSTACK) &&
	    sp >= (uintptr_t)Lsp->pr_altstack.ss_sp &&
	    sp < (uintptr_t)Lsp->pr_altstack.ss_sp
	    + Lsp->pr_altstack.ss_size) {
		Stk->stkbase = (uintptr_t)Lsp->pr_altstack.ss_sp;
		Stk->stkend = Stk->stkbase + Lsp->pr_altstack.ss_size;
		return;
	}

	/* thread stacks? */
	if (Thr_agent != NULL &&
	    td_ta_map_lwp2thr(Thr_agent, lwpid, &th) == TD_OK &&
	    td_thr_get_info(&th, &thrinfo) == TD_OK &&
	    sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
	    sp < (uintptr_t)thrinfo.ti_stkbase) {
		/* The bloody fools got this backwards! */
		/* (ti_stkbase is the HIGH end of the stack) */
		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
		return;
	}

	/* last chance -- try the raw memory map */
	for (; nmap; nmap--, Pmap++) {
		if (sp >= Pmap->pr_vaddr &&
		    sp < Pmap->pr_vaddr + Pmap->pr_size) {
			Stk->stkbase = Pmap->pr_vaddr;
			Stk->stkend = Pmap->pr_vaddr + Pmap->pr_size;
			return;
		}
	}

	/* nothing matched: undo the tentative insertion */
	callstack = Stk->next;
	nstack--;
	free(Stk->stack);
	free(Stk);
}
704
/*
 * Record the stack an unbound thread is running on, if we don't know
 * it already.  Like make_lwp_stack() but driven from the thread agent:
 * tries the primary stack, then the thread's own stack reported by
 * td_thr_get_info().  On no match the tentative entry is discarded.
 */
void
make_thr_stack(const td_thrhandle_t *Thp, prgregset_t reg)
{
	const pstatus_t *Psp = Pstatus(Proc);
	td_thrinfo_t thrinfo;
	uintptr_t sp = reg[R_SP];
	struct callstack *Stk;

	/* a 32-bit process has 32-bit stack pointers */
	if (data_model != PR_MODEL_LP64)
		sp = (uint32_t)sp;

	/* check to see if we already have this stack */
	if (sp == 0)
		return;
	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
		if (sp >= Stk->stkbase && sp < Stk->stkend)
			return;

	/* tentatively link a new callstack entry at the list head */
	Stk = my_malloc(sizeof (struct callstack), NULL);
	Stk->next = callstack;
	callstack = Stk;
	nstack++;
	Stk->tref = 0;
	Stk->tid = 0;
	Stk->nthr_create = 0;
	Stk->ncall = 0;
	Stk->maxcall = DEF_MAXCALL;
	Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack), NULL);

	/* primary stack */
	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
		Stk->stkbase = Psp->pr_stkbase;
		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
		return;
	}

	if (td_thr_get_info(Thp, &thrinfo) == TD_OK &&
	    sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
	    sp < (uintptr_t)thrinfo.ti_stkbase) {
		/* The bloody fools got this backwards! */
		/* (ti_stkbase is the HIGH end of the stack) */
		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
		return;
	}

	/* nothing matched: undo the tentative insertion */
	callstack = Stk->next;
	nstack--;
	free(Stk->stack);
	free(Stk);
}
755
756 struct callstack *
find_lwp_stack(uintptr_t sp)757 find_lwp_stack(uintptr_t sp)
758 {
759 const pstatus_t *Psp = Pstatus(Proc);
760 char mapfile[64];
761 int mapfd;
762 struct stat statb;
763 prmap_t *Pmap = NULL;
764 prmap_t *pmap = NULL;
765 int nmap = 0;
766 struct callstack *Stk = NULL;
767
768 /*
769 * Get the address space map.
770 */
771 (void) sprintf(mapfile, "/proc/%d/rmap", (int)Psp->pr_pid);
772 if ((mapfd = open(mapfile, O_RDONLY)) < 0 ||
773 fstat(mapfd, &statb) != 0 ||
774 statb.st_size < sizeof (prmap_t) ||
775 (Pmap = my_malloc(statb.st_size, NULL)) == NULL ||
776 (nmap = pread(mapfd, Pmap, statb.st_size, 0L)) <= 0 ||
777 (nmap /= sizeof (prmap_t)) == 0) {
778 if (Pmap != NULL)
779 free(Pmap);
780 if (mapfd >= 0)
781 (void) close(mapfd);
782 return (NULL);
783 }
784 (void) close(mapfd);
785
786 for (pmap = Pmap; nmap--; pmap++) {
787 if (sp >= pmap->pr_vaddr &&
788 sp < pmap->pr_vaddr + pmap->pr_size) {
789 Stk = my_malloc(sizeof (struct callstack), NULL);
790 Stk->next = callstack;
791 callstack = Stk;
792 nstack++;
793 Stk->stkbase = pmap->pr_vaddr;
794 Stk->stkend = pmap->pr_vaddr + pmap->pr_size;
795 Stk->tref = 0;
796 Stk->tid = 0;
797 Stk->nthr_create = 0;
798 Stk->ncall = 0;
799 Stk->maxcall = DEF_MAXCALL;
800 Stk->stack = my_malloc(
801 DEF_MAXCALL * sizeof (*Stk->stack), NULL);
802 break;
803 }
804 }
805
806 free(Pmap);
807 return (Stk);
808 }
809
/*
 * Create a callstack entry for the stack containing 'sp', checking the
 * primary stack, the alternate signal stack, and the current lwp's
 * thread stack (via libthread_db) in that order.  Falls back to the
 * raw memory map when no thread agent exists.  Returns NULL when the
 * stack cannot be identified.
 */
struct callstack *
find_stack(uintptr_t sp)
{
	const pstatus_t *Psp = Pstatus(Proc);
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;
	id_t lwpid = Lsp->pr_lwpid;
#if defined(__sparc)
	prgreg_t tref = Lsp->pr_reg[R_G7];	/* thread pointer register */
#elif defined(__amd64)
	prgreg_t tref = Lsp->pr_reg[REG_FS];
#elif defined(__i386)
	prgreg_t tref = Lsp->pr_reg[GS];
#endif
	struct callstack *Stk = NULL;
	td_thrhandle_t th;
	td_thrinfo_t thrinfo;
	td_err_e error;

	/* primary stack */
	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
		Stk = my_malloc(sizeof (struct callstack), NULL);
		Stk->next = callstack;
		callstack = Stk;
		nstack++;
		Stk->stkbase = Psp->pr_stkbase;
		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
		Stk->ncall = 0;
		Stk->maxcall = DEF_MAXCALL;
		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
		    NULL);
		return (Stk);
	}

	/* alternate stack */
	if ((Lsp->pr_altstack.ss_flags & SS_ONSTACK) &&
	    sp >= (uintptr_t)Lsp->pr_altstack.ss_sp &&
	    sp < (uintptr_t)Lsp->pr_altstack.ss_sp
	    + Lsp->pr_altstack.ss_size) {
		Stk = my_malloc(sizeof (struct callstack), NULL);
		Stk->next = callstack;
		callstack = Stk;
		nstack++;
		Stk->stkbase = (uintptr_t)Lsp->pr_altstack.ss_sp;
		Stk->stkend = Stk->stkbase + Lsp->pr_altstack.ss_size;
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
		Stk->ncall = 0;
		Stk->maxcall = DEF_MAXCALL;
		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
		    NULL);
		return (Stk);
	}

	/* no thread agent: fall back to the raw memory map */
	if (Thr_agent == NULL)
		return (find_lwp_stack(sp));

	/* thread stacks? */
	if ((error = td_ta_map_lwp2thr(Thr_agent, lwpid, &th)) != TD_OK) {
		if (hflag)
			(void) fprintf(stderr,
			    "cannot get thread handle for "
			    "lwp#%d, error=%d, tref=0x%.8lx\n",
			    (int)lwpid, error, (long)tref);
		return (NULL);
	}

	if ((error = td_thr_get_info(&th, &thrinfo)) != TD_OK) {
		if (hflag)
			(void) fprintf(stderr,
			    "cannot get thread info for "
			    "lwp#%d, error=%d, tref=0x%.8lx\n",
			    (int)lwpid, error, (long)tref);
		return (NULL);
	}

	if (sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
	    sp < (uintptr_t)thrinfo.ti_stkbase) {
		Stk = my_malloc(sizeof (struct callstack), NULL);
		Stk->next = callstack;
		callstack = Stk;
		nstack++;
		/* The bloody fools got this backwards! */
		/* (ti_stkbase is the HIGH end of the stack) */
		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
		Stk->tref = tref;
		Stk->tid = thrinfo.ti_tid;
		Stk->nthr_create = nthr_create;
		Stk->ncall = 0;
		Stk->maxcall = DEF_MAXCALL;
		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
		    NULL);
		return (Stk);
	}

	/* stack bounds failure -- complain bitterly */
	if (hflag) {
		(void) fprintf(stderr,
		    "sp not within thread stack: "
		    "sp=0x%.8lx stkbase=0x%.8lx stkend=0x%.8lx\n",
		    (ulong_t)sp,
		    /* The bloody fools got this backwards! */
		    (ulong_t)thrinfo.ti_stkbase - thrinfo.ti_stksize,
		    (ulong_t)thrinfo.ti_stkbase);
	}

	return (NULL);
}
922
/*
 * Fill in the thread-id fields (tref, tid, nthr_create) of a callstack
 * entry for the current lwp, consulting libthread_db only when the
 * cached values may be stale.  On any failure the fields are zeroed.
 */
void
get_tid(struct callstack *Stk)
{
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;
	id_t lwpid = Lsp->pr_lwpid;
#if defined(__sparc)
	prgreg_t tref = Lsp->pr_reg[R_G7];	/* thread pointer register */
#elif defined(__amd64)
	prgreg_t tref = (data_model == PR_MODEL_LP64) ?
	    Lsp->pr_reg[REG_FS] : Lsp->pr_reg[REG_GS];
#elif defined(__i386)
	prgreg_t tref = Lsp->pr_reg[GS];
#endif
	td_thrhandle_t th;
	td_thrinfo_t thrinfo;
	td_err_e error;

	/* not a threaded process: nothing to look up */
	if (Thr_agent == NULL) {
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
		return;
	}

	/*
	 * Shortcut here --
	 * If we have a matching tref and no new threads have
	 * been created since the last time we encountered this
	 * stack, then we don't have to go through the overhead
	 * of calling td_ta_map_lwp2thr() to get the thread-id.
	 */
	if (tref == Stk->tref && Stk->nthr_create == nthr_create)
		return;

	if ((error = td_ta_map_lwp2thr(Thr_agent, lwpid, &th)) != TD_OK) {
		if (hflag)
			(void) fprintf(stderr,
			    "cannot get thread handle for "
			    "lwp#%d, error=%d, tref=0x%.8lx\n",
			    (int)lwpid, error, (long)tref);
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
	} else if ((error = td_thr_get_info(&th, &thrinfo)) != TD_OK) {
		if (hflag)
			(void) fprintf(stderr,
			    "cannot get thread info for "
			    "lwp#%d, error=%d, tref=0x%.8lx\n",
			    (int)lwpid, error, (long)tref);
		Stk->tref = 0;
		Stk->tid = 0;
		Stk->nthr_create = 0;
	} else {
		/* cache the new values for the shortcut above */
		Stk->tref = tref;
		Stk->tid = thrinfo.ti_tid;
		Stk->nthr_create = nthr_create;
	}
}
982
/*
 * Return the callstack entry for the stack containing 'sp', growing an
 * existing entry or (if 'makeid') creating a new one as needed, and
 * guaranteeing room for at least one more call-frame slot.  Returns
 * NULL if sp is zero, unreadable, or its stack cannot be identified.
 */
struct callstack *
callstack_info(uintptr_t sp, uintptr_t fp, int makeid)
{
	struct callstack *Stk;
	uintptr_t trash;

	/* sp must at least be a readable address in the process */
	if (sp == 0 ||
	    Pread(Proc, &trash, sizeof (trash), sp) != sizeof (trash))
		return (NULL);

	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
		if (sp >= Stk->stkbase && sp < Stk->stkend)
			break;

	/*
	 * If we didn't find the stack, do it the hard way.
	 */
	if (Stk == NULL) {
		uintptr_t stkbase = sp;
		uintptr_t stkend;
		uint_t minsize;

#if defined(i386) || defined(__amd64)
		/* minimum frame: saved fp + return pc */
		if (data_model == PR_MODEL_LP64)
			minsize = 2 * sizeof (uintptr_t);	/* fp + pc */
		else
			minsize = 2 * sizeof (uint32_t);
#else
		/* sparc: minimum stack frame, stack-aligned */
		if (data_model != PR_MODEL_LP64)
			minsize = SA32(MINFRAME32);
		else
			minsize = SA64(MINFRAME64);
#endif	/* i386 */
		stkend = sp + minsize;

		/*
		 * Walk the frame-pointer chain upward looking for a
		 * frame that lands inside a known stack: if found,
		 * that stack has grown down to include sp.
		 */
		while (Stk == NULL && fp != 0 && fp >= sp) {
			stkend = fp + minsize;
			for (Stk = callstack; Stk != NULL; Stk = Stk->next)
				if ((fp >= Stk->stkbase && fp < Stk->stkend) ||
				    (stkend > Stk->stkbase &&
				    stkend <= Stk->stkend))
					break;
			if (Stk == NULL)
				fp = previous_fp(fp, NULL);
		}

		if (Stk != NULL)	/* the stack grew */
			Stk->stkbase = stkbase;
	}

	if (Stk == NULL && makeid)	/* new stack */
		Stk = find_stack(sp);

	if (Stk == NULL)
		return (NULL);

	/*
	 * Ensure that there is room for at least one more entry.
	 */
	if (Stk->ncall == Stk->maxcall) {
		Stk->maxcall *= 2;
		Stk->stack = my_realloc(Stk->stack,
		    Stk->maxcall * sizeof (*Stk->stack), NULL);
	}

	if (makeid)
		get_tid(Stk);

	return (Stk);
}
1053
1054 /*
1055 * Reset the breakpoint information (called on successful exec()).
1056 */
1057 void
reset_breakpoints(void)1058 reset_breakpoints(void)
1059 {
1060 struct dynlib *Dp;
1061 struct bkpt *Bp;
1062 struct callstack *Stk;
1063 int i;
1064
1065 if (Dynpat == NULL)
1066 return;
1067
1068 /* destroy all previous dynamic library information */
1069 while ((Dp = Dynlib) != NULL) {
1070 Dynlib = Dp->next;
1071 free(Dp->lib_name);
1072 free(Dp->match_name);
1073 free(Dp->prt_name);
1074 free(Dp);
1075 }
1076
1077 /* destroy all previous breakpoint trap information */
1078 if (bpt_hashtable != NULL) {
1079 for (i = 0; i < HASHSZ; i++) {
1080 while ((Bp = bpt_hashtable[i]) != NULL) {
1081 bpt_hashtable[i] = Bp->next;
1082 if (Bp->sym_name)
1083 free(Bp->sym_name);
1084 free(Bp);
1085 }
1086 }
1087 }
1088
1089 /* destroy all the callstack information */
1090 while ((Stk = callstack) != NULL) {
1091 callstack = Stk->next;
1092 free(Stk->stack);
1093 free(Stk);
1094 }
1095
1096 /* we are not a multi-threaded process anymore */
1097 if (Thr_agent != NULL)
1098 (void) td_ta_delete(Thr_agent);
1099 Thr_agent = NULL;
1100
1101 /* tell libproc to clear out its mapping information */
1102 Preset_maps(Proc);
1103 Rdb_agent = NULL;
1104
1105 /* Reestablish the symbols from the executable */
1106 (void) establish_breakpoints();
1107 }
1108
/*
 * Clear breakpoints from the process (called before Prelease()).
 * Don't actually destroy the breakpoint table;
 * threads currently fielding breakpoints will need it.
 */
void
clear_breakpoints(void)
{
	struct bkpt *Bp;
	int i;

	/* Dynpat != NULL implies bpt_hashtable was allocated */
	if (Dynpat == NULL)
		return;

	/*
	 * Change all breakpoint traps back to normal instructions.
	 * We attempt to remove a breakpoint from every address which
	 * may have ever contained a breakpoint to protect our victims.
	 */
	report_htable_stats();		/* report stats first */
	for (i = 0; i < HASHSZ; i++) {
		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
			if (Bp->flags & BPT_ACTIVE)
				/* restore the saved original instruction */
				(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
			Bp->flags &= ~BPT_ACTIVE;
		}
	}

	/* disable and discard the thread agent, if any */
	if (Thr_agent != NULL) {
		td_thr_events_t events;

		td_event_fillset(&events);
		(void) td_ta_clear_event(Thr_agent, &events);
		(void) td_ta_delete(Thr_agent);
	}
	Thr_agent = NULL;
}
1146
1147 /*
1148 * Reestablish the breakpoint traps in the process.
1149 * Called after resuming from a vfork() in the parent.
1150 */
1151 void
reestablish_traps(void)1152 reestablish_traps(void)
1153 {
1154 struct bkpt *Bp;
1155 ulong_t instr;
1156 int i;
1157
1158 if (Dynpat == NULL || is_vfork_child)
1159 return;
1160
1161 for (i = 0; i < HASHSZ; i++) {
1162 for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
1163 if ((Bp->flags & BPT_ACTIVE) &&
1164 Psetbkpt(Proc, Bp->addr, &instr) != 0)
1165 Bp->flags &= ~BPT_ACTIVE;
1166 }
1167 }
1168 }
1169
1170 void
show_function_call(private_t * pri,struct callstack * Stk,struct dynlib * Dp,struct bkpt * Bp)1171 show_function_call(private_t *pri,
1172 struct callstack *Stk, struct dynlib *Dp, struct bkpt *Bp)
1173 {
1174 long arg[8];
1175 int narg;
1176 int i;
1177
1178 narg = get_arguments(arg);
1179 make_pname(pri, (Stk != NULL)? Stk->tid : 0);
1180 putpname(pri);
1181 timestamp(pri);
1182 if (Stk != NULL) {
1183 for (i = 1; i < Stk->ncall; i++) {
1184 (void) fputc(' ', stdout);
1185 (void) fputc(' ', stdout);
1186 }
1187 }
1188 (void) printf("-> %s%s(", Dp->prt_name, Bp->sym_name);
1189 for (i = 0; i < narg; i++) {
1190 (void) printf("0x%lx", arg[i]);
1191 if (i < narg-1) {
1192 (void) fputc(',', stdout);
1193 (void) fputc(' ', stdout);
1194 }
1195 }
1196 (void) printf(")\n");
1197 Flush();
1198 }
1199
1200 /* ARGSUSED */
1201 void
show_function_return(private_t * pri,long rval,int stret,struct callstack * Stk,struct dynlib * Dp,struct bkpt * Bp)1202 show_function_return(private_t *pri, long rval, int stret,
1203 struct callstack *Stk, struct dynlib *Dp, struct bkpt *Bp)
1204 {
1205 int i;
1206
1207 make_pname(pri, Stk->tid);
1208 putpname(pri);
1209 timestamp(pri);
1210 for (i = 0; i < Stk->ncall; i++) {
1211 (void) fputc(' ', stdout);
1212 (void) fputc(' ', stdout);
1213 }
1214 (void) printf("<- %s%s() = ", Dp->prt_name, Bp->sym_name);
1215 if (stret) {
1216 (void) printf("struct return\n");
1217 } else if (data_model == PR_MODEL_LP64) {
1218 if (rval >= (64 * 1024) || -rval >= (64 * 1024))
1219 (void) printf("0x%lx\n", rval);
1220 else
1221 (void) printf("%ld\n", rval);
1222 } else {
1223 int rval32 = (int)rval;
1224 if (rval32 >= (64 * 1024) || -rval32 >= (64 * 1024))
1225 (void) printf("0x%x\n", rval32);
1226 else
1227 (void) printf("%d\n", rval32);
1228 }
1229 Flush();
1230 }
1231
1232 /*
1233 * Called to deal with function-call tracing.
1234 * Return 0 on normal success, 1 to indicate a BPT_HANG success,
1235 * and -1 on failure (not tracing functions or unknown breakpoint).
1236 */
int
function_trace(private_t *pri, int first, int clear, int dotrace)
{
	struct ps_lwphandle *Lwp = pri->Lwp;
	const lwpstatus_t *Lsp = pri->lwpstat;
	uintptr_t pc = Lsp->pr_reg[R_PC];
	uintptr_t sp = Lsp->pr_reg[R_SP];
	uintptr_t fp = Lsp->pr_reg[R_FP];
	struct bkpt *Bp;
	struct dynlib *Dp;
	struct callstack *Stk;
	ulong_t instr;
	int active;
	int rval = 0;

	if (Dynpat == NULL)
		return (-1);	/* not tracing functions */

	if (data_model != PR_MODEL_LP64) {
		pc = (uint32_t)pc;
		sp = (uint32_t)sp;
		fp = (uint32_t)fp;
	}

	if ((Bp = get_bkpt(pc)) == NULL) {
		if (hflag)
			(void) fprintf(stderr,
			    "function_trace(): "
			    "cannot find breakpoint for pc: 0x%.8lx\n",
			    (ulong_t)pc);
		return (-1);
	}

	/*
	 * Run-time linker event breakpoint: fetch the event message
	 * and re-establish breakpoints as the link maps change.
	 */
	if ((Bp->flags & (BPT_PREINIT|BPT_POSTINIT|BPT_DLACTIVITY)) && !clear) {
		rd_event_msg_t event_msg;

		if (hflag) {
			if (Bp->flags & BPT_PREINIT)
				(void) fprintf(stderr, "function_trace(): "
				    "RD_PREINIT breakpoint\n");
			if (Bp->flags & BPT_POSTINIT)
				(void) fprintf(stderr, "function_trace(): "
				    "RD_POSTINIT breakpoint\n");
			if (Bp->flags & BPT_DLACTIVITY)
				(void) fprintf(stderr, "function_trace(): "
				    "RD_DLACTIVITY breakpoint\n");
		}
		if (rd_event_getmsg(Rdb_agent, &event_msg) == RD_OK) {
			if (event_msg.type == RD_DLACTIVITY) {
				switch (event_msg.u.state) {
				case RD_CONSISTENT:
					/* link maps are stable again */
					establish_breakpoints();
					break;
				case RD_ADD:
					not_consist = TRUE;	/* kludge */
					establish_breakpoints();
					not_consist = FALSE;
					break;
				case RD_DELETE:
					delete_library = TRUE;
					break;
				default:
					break;
				}
			}
			if (hflag) {
				/* verbose: decode the event for stderr */
				const char *et;
				char buf[32];

				switch (event_msg.type) {
				case RD_NONE:
					et = "RD_NONE";
					break;
				case RD_PREINIT:
					et = "RD_PREINIT";
					break;
				case RD_POSTINIT:
					et = "RD_POSTINIT";
					break;
				case RD_DLACTIVITY:
					et = "RD_DLACTIVITY";
					break;
				default:
					(void) sprintf(buf, "0x%x",
					    event_msg.type);
					et = buf;
					break;
				}
				(void) fprintf(stderr,
				    "event_msg.type = %s ", et);
				switch (event_msg.u.state) {
				case RD_NOSTATE:
					et = "RD_NOSTATE";
					break;
				case RD_CONSISTENT:
					et = "RD_CONSISTENT";
					break;
				case RD_ADD:
					et = "RD_ADD";
					break;
				case RD_DELETE:
					et = "RD_DELETE";
					break;
				default:
					(void) sprintf(buf, "0x%x",
					    event_msg.u.state);
					et = buf;
					break;
				}
				(void) fprintf(stderr,
				    "event_msg.u.state = %s\n", et);
			}
		}
	}

	if ((Bp->flags & BPT_TD_CREATE) && !clear) {
		nthr_create++;
		if (hflag)
			(void) fprintf(stderr, "function_trace(): "
			    "BPT_TD_CREATE breakpoint\n");
		/* we don't care about the event message */
	}

	Dp = Bp->dyn;

	/* report or count the call/return, maintaining the call stack */
	if (dotrace) {
		if ((Stk = callstack_info(sp, fp, 1)) == NULL) {
			/* no known stack; report without nesting info */
			if (Dp != NULL && !clear) {
				if (cflag) {
					add_fcall(fcall_tbl, Dp->prt_name,
					    Bp->sym_name, (unsigned long)1);
				}
				else
					show_function_call(pri, NULL, Dp, Bp);
				if ((Bp->flags & BPT_HANG) && !first)
					rval = 1;
			}
		} else if (!clear) {
			if (Dp != NULL) {
				/* named breakpoint: a function entry */
				function_entry(pri, Bp, Stk);
				if ((Bp->flags & BPT_HANG) && !first)
					rval = 1;
			} else {
				/* unnamed breakpoint: a function return */
				function_return(pri, Stk);
			}
		}
	}

	/*
	 * Single-step the traced instruction. Since it's possible that
	 * another thread has deactivated this breakpoint, we indicate
	 * that we have reactivated it by virtue of executing it.
	 *
	 * To avoid a deadlock with some other thread in the process
	 * performing a fork() or a thr_suspend() operation, we must
	 * drop and later reacquire truss_lock. Some fancy dancing here.
	 */
	active = (Bp->flags & BPT_ACTIVE);
	Bp->flags |= BPT_ACTIVE;
	instr = Bp->instr;
	(void) mutex_unlock(&truss_lock);
	(void) Lxecbkpt(Lwp, instr);
	(void) mutex_lock(&truss_lock);

	if (rval || clear) {	/* leave process stopped and abandoned */
#if defined(__i386)
		/*
		 * Leave it stopped in a state that a stack trace is reasonable.
		 */
		/* XX64 needs to be updated for amd64 & gcc */
		if (rval && instr == 0x55) {	/* pushl %ebp */
			/* step it over the movl %esp,%ebp */
			(void) mutex_unlock(&truss_lock);
			(void) Lsetrun(Lwp, 0, PRCFAULT|PRSTEP);
			/* we're wrapping up; wait one second at most */
			(void) Lwait(Lwp, MILLISEC);
			(void) mutex_lock(&truss_lock);
		}
#endif
		if (get_bkpt(pc) != Bp)
			abend("function_trace: lost breakpoint", NULL);
		(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
		Bp->flags &= ~BPT_ACTIVE;
		(void) mutex_unlock(&truss_lock);
		(void) Lsetrun(Lwp, 0, PRCFAULT|PRSTOP);
		/* we're wrapping up; wait one second at most */
		(void) Lwait(Lwp, MILLISEC);
		(void) mutex_lock(&truss_lock);
	} else {
		if (get_bkpt(pc) != Bp)
			abend("function_trace: lost breakpoint", NULL);
		/* honor a deactivation done by another thread meanwhile */
		if (!active || !(Bp->flags & BPT_ACTIVE)) {
			(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
			Bp->flags &= ~BPT_ACTIVE;
		}
	}
	return (rval);
}
1435
/*
 * Process a function-entry breakpoint: validate the frame, trim the
 * recorded call stack, set a breakpoint at the return address, and
 * report (or count, under -c) the call.
 */
void
function_entry(private_t *pri, struct bkpt *Bp, struct callstack *Stk)
{
	const lwpstatus_t *Lsp = pri->lwpstat;
	uintptr_t sp = Lsp->pr_reg[R_SP];
	uintptr_t rpc = get_return_address(&sp);
	struct dynlib *Dp = Bp->dyn;
	int oldframe = FALSE;
	int i;

	if (data_model != PR_MODEL_LP64) {
		sp = (uint32_t)sp;
		rpc = (uint32_t)rpc;
	}

	/*
	 * If the sp is not within the stack bounds, forget it.
	 * If the symbol's 'internal' flag is false,
	 * don't report internal calls within the library.
	 */
	if (!(sp >= Stk->stkbase && sp < Stk->stkend) ||
	    (!(Bp->flags & BPT_INTERNAL) &&
	    rpc >= Dp->base && rpc < Dp->base + Dp->size))
		return;

	/* pop frames whose sp shows they have already returned */
	for (i = 0; i < Stk->ncall; i++) {
		if (sp >= Stk->stack[i].sp) {
			Stk->ncall = i;
			if (sp == Stk->stack[i].sp)
				oldframe = TRUE;
			break;
		}
	}

	/*
	 * Breakpoints for function returns are set here
	 * If we're counting function calls, there is no need to set
	 * a breakpoint upon return
	 */

	if (!oldframe && !cflag) {
		(void) create_bkpt(rpc, 1, 1); /* may or may not be set */
		Stk->stack[Stk->ncall].sp = sp; /* record it anyway */
		Stk->stack[Stk->ncall].pc = rpc;
		Stk->stack[Stk->ncall].fcn = Bp;
	}
	Stk->ncall++;
	if (cflag) {
		add_fcall(fcall_tbl, Dp->prt_name, Bp->sym_name,
		    (unsigned long)1);
	} else {
		show_function_call(pri, Stk, Dp, Bp);
	}
}
1490
1491 /*
1492 * We are here because we hit an unnamed breakpoint.
1493 * Attempt to match this up with a return pc on the stack
1494 * and report the function return.
1495 */
1496 void
function_return(private_t * pri,struct callstack * Stk)1497 function_return(private_t *pri, struct callstack *Stk)
1498 {
1499 const lwpstatus_t *Lsp = pri->lwpstat;
1500 uintptr_t sp = Lsp->pr_reg[R_SP];
1501 uintptr_t fp = Lsp->pr_reg[R_FP];
1502 int i;
1503
1504 if (data_model != PR_MODEL_LP64) {
1505 sp = (uint32_t)sp;
1506 fp = (uint32_t)fp;
1507 }
1508
1509 if (fp < sp + 8)
1510 fp = sp + 8;
1511
1512 for (i = Stk->ncall - 1; i >= 0; i--) {
1513 if (sp <= Stk->stack[i].sp && fp > Stk->stack[i].sp) {
1514 Stk->ncall = i;
1515 break;
1516 }
1517 }
1518
1519 #if defined(i386) || defined(__amd64)
1520 if (i < 0) {
1521 /* probably __mul64() or friends -- try harder */
1522 int j;
1523 for (j = 0; i < 0 && j < 8; j++) { /* up to 8 args */
1524 sp -= 4;
1525 for (i = Stk->ncall - 1; i >= 0; i--) {
1526 if (sp <= Stk->stack[i].sp &&
1527 fp > Stk->stack[i].sp) {
1528 Stk->ncall = i;
1529 break;
1530 }
1531 }
1532 }
1533 }
1534 #endif
1535
1536 if ((i >= 0) && (!cflag)) {
1537 show_function_return(pri, Lsp->pr_reg[R_R0], 0,
1538 Stk, Stk->stack[i].fcn->dyn, Stk->stack[i].fcn);
1539 }
1540 }
1541
/*
 * FPADJUST is added to a frame's return-%sp to recover the %sp in
 * effect within the called function (see trap_one_stack()).  On x86
 * this appears to account for the return address pushed by the call
 * (8 bytes on amd64, 4 on i386); sparc needs no adjustment.
 * NOTE(review): rationale inferred from the sole use site -- confirm.
 */
#if defined(__sparc)
#define FPADJUST 0
#elif defined(__amd64)
#define FPADJUST 8
#elif defined(__i386)
#define FPADJUST 4
#endif
1549
/*
 * Walk one lwp/thread stack and plant function-return breakpoints for
 * every in-progress call to a traced function, so that returns from
 * calls already pending at attach time are reported.
 */
void
trap_one_stack(prgregset_t reg)
{
	struct dynlib *Dp;
	struct bkpt *Bp;
	struct callstack *Stk;
	GElf_Sym sym;
	char sym_name[32];
	uintptr_t sp = reg[R_SP];
	uintptr_t pc = reg[R_PC];
	uintptr_t fp;
	uintptr_t rpc;
	uint_t nframe = 0;
	uint_t maxframe = 8;
	struct {
		uintptr_t sp;		/* %sp within called function */
		uintptr_t pc;		/* %pc within called function */
		uintptr_t rsp;		/* the return sp */
		uintptr_t rpc;		/* the return pc */
	} *frame = my_malloc(maxframe * sizeof (*frame), NULL);

	/*
	 * Gather stack frames bottom to top.
	 */
	while (sp != 0) {
		fp = sp;	/* remember highest non-null sp */
		frame[nframe].sp = sp;
		frame[nframe].pc = pc;
		sp = previous_fp(sp, &pc);
		frame[nframe].rsp = sp;
		frame[nframe].rpc = pc;
		if (++nframe == maxframe) {
			/* grow the frame array as needed */
			maxframe *= 2;
			frame = my_realloc(frame, maxframe * sizeof (*frame),
			    NULL);
		}
	}

	/*
	 * Scan for function return breakpoints top to bottom.
	 */
	while (nframe--) {
		/* lookup the called function in the symbol tables */
		if (Plookup_by_addr(Proc, frame[nframe].pc, sym_name,
		    sizeof (sym_name), &sym) != 0)
			continue;

		pc = sym.st_value;		/* entry point of the function */
		rpc = frame[nframe].rpc;	/* caller's return pc */

		/* lookup the function in the breakpoint table */
		if ((Bp = get_bkpt(pc)) == NULL || (Dp = Bp->dyn) == NULL)
			continue;

		/* skip untraced internal calls within the library */
		if (!(Bp->flags & BPT_INTERNAL) &&
		    rpc >= Dp->base && rpc < Dp->base + Dp->size)
			continue;

		sp = frame[nframe].rsp + FPADJUST; /* %sp at time of call */
		if ((Stk = callstack_info(sp, fp, 0)) == NULL)
			continue;	/* can't happen? */

		/* record the in-progress call and trap its return */
		if (create_bkpt(rpc, 1, 1) != NULL) {
			Stk->stack[Stk->ncall].sp = sp;
			Stk->stack[Stk->ncall].pc = rpc;
			Stk->stack[Stk->ncall].fcn = Bp;
			Stk->ncall++;
		}
	}

	free(frame);
}
1622
1623 int
lwp_stack_traps(void * cd,const lwpstatus_t * Lsp)1624 lwp_stack_traps(void *cd, const lwpstatus_t *Lsp)
1625 {
1626 ph_map_t *ph_map = (ph_map_t *)cd;
1627 prgregset_t reg;
1628
1629 (void) memcpy(reg, Lsp->pr_reg, sizeof (prgregset_t));
1630 make_lwp_stack(Lsp, ph_map->pmap, ph_map->nmap);
1631 trap_one_stack(reg);
1632
1633 return (interrupt | sigusr1);
1634 }
1635
1636 /* ARGSUSED */
1637 int
thr_stack_traps(const td_thrhandle_t * Thp,void * cd)1638 thr_stack_traps(const td_thrhandle_t *Thp, void *cd)
1639 {
1640 prgregset_t reg;
1641
1642 /*
1643 * We have already dealt with all the lwps.
1644 * We only care about unbound threads here (TD_PARTIALREG).
1645 */
1646 if (td_thr_getgregs(Thp, reg) != TD_PARTIALREG)
1647 return (0);
1648
1649 make_thr_stack(Thp, reg);
1650 trap_one_stack(reg);
1651
1652 return (interrupt | sigusr1);
1653 }
1654
1655 #if defined(__sparc)
1656
/*
 * Walk one frame up a sparc stack: read the register window spilled
 * at sp and return the saved frame pointer, with the saved return pc
 * through *rpc.  Returns 0 (and pc 0) if the window cannot be read or
 * the recovered fp does not itself point at a readable window.
 */
uintptr_t
previous_fp(uintptr_t sp, uintptr_t *rpc)
{
	uintptr_t fp = 0;
	uintptr_t pc = 0;

	if (data_model == PR_MODEL_LP64) {
		struct rwindow64 rwin;
		/* 64-bit sparc stack pointers are offset by STACK_BIAS */
		if (Pread(Proc, &rwin, sizeof (rwin), sp + STACK_BIAS)
		    == sizeof (rwin)) {
			fp = (uintptr_t)rwin.rw_fp;
			pc = (uintptr_t)rwin.rw_rtn;
		}
		/* validate fp by probing the window it points at */
		if (fp != 0 &&
		    Pread(Proc, &rwin, sizeof (rwin), fp + STACK_BIAS)
		    != sizeof (rwin))
			fp = pc = 0;
	} else {
		struct rwindow32 rwin;
		if (Pread(Proc, &rwin, sizeof (rwin), sp) == sizeof (rwin)) {
			fp = (uint32_t)rwin.rw_fp;
			pc = (uint32_t)rwin.rw_rtn;
		}
		if (fp != 0 &&
		    Pread(Proc, &rwin, sizeof (rwin), fp) != sizeof (rwin))
			fp = pc = 0;
	}
	if (rpc)
		*rpc = pc;
	return (fp);
}
1688
/*
 * Return the address the current function will return to:
 * %o7 + 8, i.e. past the call instruction and its delay slot.
 * The psp argument is unused on sparc (no stack adjustment).
 */
/* ARGSUSED */
uintptr_t
get_return_address(uintptr_t *psp)
{
	instr_t inst;
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;
	uintptr_t rpc;

	rpc = (uintptr_t)Lsp->pr_reg[R_O7] + 8;
	if (data_model != PR_MODEL_LP64)
		rpc = (uint32_t)rpc;

	/*
	 * check for structure return (bletch!): a word < 0x1000 at the
	 * return site marks a call to a struct-returning function, and
	 * the callee actually returns to the following instruction.
	 */
	if (Pread(Proc, &inst, sizeof (inst), rpc) == sizeof (inst) &&
	    inst < 0x1000)
		rpc += sizeof (instr_t);

	return (rpc);
}
1709
1710 int
get_arguments(long * argp)1711 get_arguments(long *argp)
1712 {
1713 private_t *pri = get_private();
1714 const lwpstatus_t *Lsp = pri->lwpstat;
1715 int i;
1716
1717 if (data_model != PR_MODEL_LP64)
1718 for (i = 0; i < 4; i++)
1719 argp[i] = (uint_t)Lsp->pr_reg[R_O0+i];
1720 else
1721 for (i = 0; i < 4; i++)
1722 argp[i] = (long)Lsp->pr_reg[R_O0+i];
1723 return (4);
1724 }
1725
1726 #endif /* __sparc */
1727
1728 #if defined(__i386) || defined(__amd64)
1729
1730 uintptr_t
previous_fp(uintptr_t fp,uintptr_t * rpc)1731 previous_fp(uintptr_t fp, uintptr_t *rpc)
1732 {
1733 uintptr_t frame[2];
1734 uintptr_t trash[2];
1735
1736 if (Pread(Proc, frame, sizeof (frame), fp) != sizeof (frame) ||
1737 (frame[0] != 0 &&
1738 Pread(Proc, trash, sizeof (trash), frame[0]) != sizeof (trash)))
1739 frame[0] = frame[1] = 0;
1740
1741 if (rpc)
1742 *rpc = frame[1];
1743 return (frame[0]);
1744 }
1745
1746 #endif
1747
1748 #if defined(__amd64) || defined(__i386)
1749
1750 /*
1751 * Examine the instruction at the return location of a function call
1752 * and return the byte count by which the stack is adjusted on return.
 * If the instruction at the return location is an addl, as expected,
1754 * then adjust the return pc by the size of that instruction so that
1755 * we will place the return breakpoint on the following instruction.
1756 * This allows programs that interrogate their own stacks and record
1757 * function calls and arguments to work correctly even while we interfere.
1758 * Return the count on success, -1 on failure.
1759 */
int
return_count32(uint32_t *ppc)
{
	uintptr_t pc = *ppc;
	struct bkpt *Bp;
	int count;
	uchar_t instr[6];	/* instruction at pc */

	if ((count = Pread(Proc, instr, sizeof (instr), pc)) < 0)
		return (-1);

	/* find the replaced instruction at pc (if any) */
	if ((Bp = get_bkpt(pc)) != NULL && (Bp->flags & BPT_ACTIVE))
		instr[0] = (uchar_t)Bp->instr;

	/* a short read is tolerable only for the 3-byte 0x83 form */
	if (count != sizeof (instr) &&
	    (count < 3 || instr[0] != 0x83))
		return (-1);

	/*
	 * A bit of disassembly of the instruction is required here.
	 */
	if (instr[1] != 0xc4) {	/* not an addl mumble,%esp instruction */
		count = 0;
	} else if (instr[0] == 0x81) {	/* count is a longword */
		count = instr[2]+(instr[3]<<8)+(instr[4]<<16)+(instr[5]<<24);
		*ppc += 6;	/* skip the 6-byte addl */
	} else if (instr[0] == 0x83) {	/* count is a byte */
		count = instr[2];
		*ppc += 3;	/* skip the 3-byte addl */
	} else {	/* not an addl instruction */
		count = 0;
	}

	return (count);
}
1796
1797 uintptr_t
get_return_address32(uintptr_t * psp)1798 get_return_address32(uintptr_t *psp)
1799 {
1800 uint32_t sp = *psp;
1801 uint32_t rpc;
1802 int count;
1803
1804 *psp += 4; /* account for popping the stack on return */
1805 if (Pread(Proc, &rpc, sizeof (rpc), sp) != sizeof (rpc))
1806 return (0);
1807 if ((count = return_count32(&rpc)) < 0)
1808 count = 0;
1809 *psp += count; /* expected sp on return */
1810 return (rpc);
1811 }
1812
1813 uintptr_t
get_return_address(uintptr_t * psp)1814 get_return_address(uintptr_t *psp)
1815 {
1816 uintptr_t rpc;
1817 uintptr_t sp = *psp;
1818
1819 if (data_model == PR_MODEL_LP64) {
1820 if (Pread(Proc, &rpc, sizeof (rpc), sp) != sizeof (rpc))
1821 return (0);
1822 /*
1823 * Ignore arguments pushed on the stack. See comments in
1824 * get_arguments().
1825 */
1826 return (rpc);
1827 } else
1828 return (get_return_address32(psp));
1829 }
1830
1831
/*
 * Fetch up to four 32-bit stack arguments for the called function.
 * Returns the number of arguments retrieved (possibly 0).
 */
int
get_arguments32(long *argp)
{
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;
	uint32_t frame[5];	/* return pc + 4 args */
	int narg;
	int count;
	int i;

	/* read the return pc and the words above it from the stack */
	narg = Pread(Proc, frame, sizeof (frame),
	    (uintptr_t)Lsp->pr_reg[R_SP]);
	narg -= sizeof (greg32_t);	/* exclude the return pc itself */
	if (narg <= 0)
		return (0);
	narg /= sizeof (greg32_t);	/* no more than 4 */

	/*
	 * Given the return PC, determine the number of arguments from
	 * the caller's stack adjustment at the return site.
	 */
	if ((count = return_count32(&frame[0])) < 0)
		narg = 0;
	else {
		count /= sizeof (greg32_t);
		if (narg > count)
			narg = count;
	}

	for (i = 0; i < narg; i++)
		argp[i] = (long)frame[i+1];

	return (narg);
}
1865
1866 int
get_arguments(long * argp)1867 get_arguments(long *argp)
1868 {
1869 private_t *pri = get_private();
1870 const lwpstatus_t *Lsp = pri->lwpstat;
1871
1872 if (data_model == PR_MODEL_LP64) {
1873 /*
1874 * On amd64, we do not know how many arguments are passed to
1875 * each function. While it may be possible to detect if we
1876 * have more than 6 arguments, it is of marginal value.
1877 * Instead, assume that we always have 6 arguments, which are
1878 * passed via registers.
1879 */
1880 argp[0] = Lsp->pr_reg[REG_RDI];
1881 argp[1] = Lsp->pr_reg[REG_RSI];
1882 argp[2] = Lsp->pr_reg[REG_RDX];
1883 argp[3] = Lsp->pr_reg[REG_RCX];
1884 argp[4] = Lsp->pr_reg[REG_R8];
1885 argp[5] = Lsp->pr_reg[REG_R9];
1886 return (6);
1887 } else
1888 return (get_arguments32(argp));
1889 }
1890
1891 #endif /* __amd64 || __i386 */
1892