/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2011 Joyent, Inc.  All rights reserved.
 */

#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/model.h>
#include <sys/frame.h>
#include <sys/stack.h>
#include <sys/machpcb.h>
#include <sys/procfs_isa.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>

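/*
 * The constants below are for minimal decoding of SPARC instructions: masks
 * and values that identify format-3 instructions, jmpl, ret/retl, save,
 * restore, call and ba opcodes, plus the register numbering used to extract
 * the rd and rs1 fields.  They support the leaf-function heuristic in
 * dtrace_getpcstack() and the register fishing done by dtrace_getarg();
 * this is not a general-purpose disassembler.
 */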
#define	DTRACE_FMT3OP3_MASK	0x81000000
#define	DTRACE_FMT3OP3		0x80000000
#define	DTRACE_FMT3RS1_SHIFT	14
#define	DTRACE_FMT3RD_SHIFT	25
#define	DTRACE_DISP22_SHIFT	10
#define	DTRACE_RMASK		0x1f
#define	DTRACE_REG_L0		16
#define	DTRACE_REG_O7		15
#define	DTRACE_REG_I0		24
#define	DTRACE_REG_I6		30
#define	DTRACE_RET		0x81c7e008
#define	DTRACE_RETL		0x81c3e008
#define	DTRACE_SAVE_MASK	0xc1f80000
#define	DTRACE_SAVE		0x81e00000
#define	DTRACE_RESTORE		0x81e80000
#define	DTRACE_CALL_MASK	0xc0000000
#define	DTRACE_CALL		0x40000000
#define	DTRACE_JMPL_MASK	0x81f80000
#define	DTRACE_JMPL		0x81c00000
#define	DTRACE_BA_MASK		0xdfc00000
#define	DTRACE_BA		0x10800000
#define	DTRACE_BA_MAX		10

extern int dtrace_getupcstack_top(uint64_t *, int, uintptr_t *);
extern int dtrace_getustackdepth_top(uintptr_t *);
extern ulong_t dtrace_getreg_win(uint_t, uint_t);
extern void dtrace_putreg_win(uint_t, ulong_t);
extern int dtrace_fish(int, int, uintptr_t *);

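/*
 * Tunable upper bound on the number of frames walked when constructing a
 * user-level stack trace; it guards against spinning forever on a circular
 * or corrupt stack (see the CPU_DTRACE_BADSTACK handling in
 * dtrace_getustack_common()).
 */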
int dtrace_ustackdepth_max = 2048;

/*
 * This is similar in principle to getpcstack(), but there are several marked
 * differences in implementation:
 *
 * (a)	dtrace_getpcstack() is called from probe context.  Thus, the call
 *	to flush_windows() from getpcstack() is a call to the probe-safe
 *	equivalent here.
 *
 * (b)	dtrace_getpcstack() is willing to sacrifice some performance to get
 *	a correct stack.  While consumers of getpcstack() are largely
 *	subsystem-specific in-kernel debugging facilities, DTrace consumers
 *	are arbitrary user-level analysis tools; dtrace_getpcstack() must
 *	deliver as correct a stack as possible.  Details on the issues
 *	surrounding stack correctness are found below.
 *
 * (c)	dtrace_getpcstack() _always_ fills in pcstack_limit pc_t's -- filling
 *	in the difference between the stack depth and pcstack_limit with
 *	NULLs.  Because of this behavior dtrace_getpcstack() returns void.
 *
 * (d)	dtrace_getpcstack() takes a third parameter, aframes, that
 *	denotes the number of _artificial frames_ on the bottom of the
 *	stack.  An artificial frame is one induced by the provider; all
 *	artificial frames are stripped off before frames are stored to
 *	pcstack.
 *
 * (e)	dtrace_getpcstack() takes a fourth parameter, pc, that indicates
 *	an interrupted program counter (if any).  This should be a non-NULL
 *	value if and only if the hit probe is unanchored.  (Anchored probes
 *	don't fire through an interrupt source.)  This parameter is used to
 *	assure (b), above.
 */
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes, uint32_t *pc)
{
	struct frame *fp, *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr, j = 0;
	uint32_t i, r;

	fp = (struct frame *)((caddr_t)dtrace_getfp() + STACK_BIAS);
	dtrace_flush_windows();

	if (pc != NULL) {
		/*
		 * If we've been passed a non-NULL pc, we need to determine
		 * whether or not the specified program counter falls in a leaf
		 * function.  If it falls within a leaf function, we know that
		 * %o7 is valid in its frame (and we can just drive on).  If
		 * it's a non-leaf, however, we know that %o7 is garbage in the
		 * bottom frame.  To trim this frame, we simply increment
		 * aframes and drop into the stack-walking loop.
		 *
		 * To quickly determine if the specified program counter is in
		 * a leaf function, we exploit the fact that leaf functions
		 * tend to be short and non-leaf functions tend to frequently
		 * perform operations that are only permitted in a non-leaf
		 * function (e.g., using the %i's or %l's; calling a function;
		 * performing a restore).  We exploit these tendencies by
		 * simply scanning forward from the specified %pc -- if we see
		 * an operation only permitted in a non-leaf, we know we're in
		 * a non-leaf; if we see a retl, we know we're in a leaf.
		 * Fortunately, one need not perform anywhere near full
		 * disassembly to effectively determine the former: determining
		 * that an instruction is a format-3 instruction and decoding
		 * its rd and rs1 fields, for example, requires very little
		 * manipulation.  Overall, this method of leaf determination
		 * performs quite well:  on average, we only examine between
		 * 1.5 and 2.5 instructions before making the determination.
		 * (Outliers do exist, however; of note is the non-leaf
		 * function ip_sioctl_not_ours() which -- as of this writing --
		 * has a whopping 455 straight instructions that manipulate
		 * only %g's and %o's.)
		 */
		int delay = 0, branches = 0, taken = 0;

		if (depth < pcstack_limit)
			pcstack[depth++] = (pc_t)(uintptr_t)pc;

		/*
		 * Our heuristic is exactly that -- a heuristic -- and there
		 * exists a possibility that we could either be vectored off
		 * into the weeds (by following a bogus branch) or wander off
		 * the end of the function and off the end of a text mapping
		 * (by not following a conditional branch at the end of the
		 * function that is effectively always taken).  So as a
		 * precautionary measure, we set the NOFAULT flag.
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);

		for (;;) {
			i = pc[j++];

			if ((i & DTRACE_FMT3OP3_MASK) == DTRACE_FMT3OP3) {
				/*
				 * This is a format-3 instruction.  We can
				 * look at rd and rs1.
				 */
				r = (i >> DTRACE_FMT3RS1_SHIFT) & DTRACE_RMASK;

				if (r >= DTRACE_REG_L0)
					goto nonleaf;

				r = (i >> DTRACE_FMT3RD_SHIFT) & DTRACE_RMASK;

				if (r >= DTRACE_REG_L0)
					goto nonleaf;

				if ((i & DTRACE_JMPL_MASK) == DTRACE_JMPL) {
					delay = 1;
					continue;
				}

				/*
				 * If we see explicit manipulation with %o7
				 * as a destination register, we know that
				 * %o7 is likely bogus -- and we treat this
				 * function as a non-leaf.
				 */
				if (r == DTRACE_REG_O7) {
					if (delay)
						goto leaf;

					i &= DTRACE_JMPL_MASK;

					if (i == DTRACE_JMPL) {
						delay = 1;
						continue;
					}

					goto nonleaf;
				}
			} else {
				/*
				 * If this is a call, it may or may not be
				 * a leaf; we need to check the delay slot.
				 */
				if ((i & DTRACE_CALL_MASK) == DTRACE_CALL) {
					delay = 1;
					continue;
				}

				/*
				 * If we see a ret it's not a leaf; if we
				 * see a retl, it is a leaf.
				 */
				if (i == DTRACE_RET)
					goto nonleaf;

				if (i == DTRACE_RETL)
					goto leaf;

				/*
				 * If this is a ba (annulled or not), then we
				 * need to actually follow the branch.  No, we
				 * don't look at the delay slot -- hopefully
				 * anything that can be gleaned from the delay
				 * slot can also be gleaned from the branch
				 * target.  To prevent ourselves from iterating
				 * infinitely, we clamp the number of branches
				 * that we'll follow, and we refuse to follow
				 * the same branch twice consecutively.  In
				 * both cases, we abort by deciding that we're
				 * looking at a leaf.  While in theory this
				 * could be wrong (we could be in the middle of
				 * a loop in a non-leaf that ends with a ba and
				 * only manipulates outputs and globals in the
				 * body of the loop -- therefore leading us to
				 * the wrong conclusion), this doesn't seem to
				 * crop up in practice.  (Or rather, this
				 * condition could not be deliberately induced,
				 * despite concerted effort.)
				 */
				if ((i & DTRACE_BA_MASK) == DTRACE_BA) {
					if (++branches == DTRACE_BA_MAX ||
					    taken == j)
						goto nonleaf;

					taken = j;
					j += ((int)(i << DTRACE_DISP22_SHIFT) >>
					    DTRACE_DISP22_SHIFT) - 1;
					continue;
				}

				/*
				 * Finally, if it's a save, it should be
				 * treated as a leaf; if it's a restore it
				 * should not be treated as a leaf.
				 */
				if ((i & DTRACE_SAVE_MASK) == DTRACE_SAVE)
					goto leaf;

				if ((i & DTRACE_SAVE_MASK) == DTRACE_RESTORE)
					goto nonleaf;
			}

			if (delay) {
				/*
				 * If this was a delay slot instruction and
				 * we didn't pick it up elsewhere, this is a
				 * non-leaf.
				 */
				goto nonleaf;
			}
		}
nonleaf:
		aframes++;
leaf:
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
	}

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	while (depth < pcstack_limit) {
		nextfp = (struct frame *)((caddr_t)fp->fr_savfp + STACK_BIAS);
		if (nextfp <= minfp || nextfp >= stacktop) {
			if (!on_intr && nextfp == stacktop && aframes != 0) {
				/*
				 * If we are exactly at the top of the stack
				 * with a non-zero number of artificial frames,
				 * it must be that the stack is filled with
				 * nothing _but_ artificial frames.  In this
				 * case, we assert that this is so, zero
				 * pcstack, and return.
				 */
				ASSERT(aframes == 1);
				ASSERT(depth == 0);

				while (depth < pcstack_limit)
					pcstack[depth++] = NULL;
				return;
			}

			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;

				on_intr = 0;

				if (nextfp > minfp && nextfp < stacktop)
					continue;
			} else {
				/*
				 * High-level interrupts may occur when %sp is
				 * not necessarily contained in the stack
				 * bounds implied by %g7 -- interrupt thread
				 * management runs with %pil at DISP_LEVEL,
				 * and high-level interrupts may thus occur
				 * in windows when %sp and %g7 are not self-
				 * consistent.  If we call dtrace_getpcstack()
				 * from a high-level interrupt that has occurred
				 * in such a window, we will fail the above test
				 * of nextfp against minfp/stacktop.  If the
				 * high-level interrupt has in turn interrupted
				 * a non-passivated interrupt thread, we
				 * will execute the below code with non-zero
				 * aframes.  We therefore want to assert that
				 * aframes is zero _or_ we are in a high-level
				 * interrupt -- but because cpu_intr_actv is
				 * updated with high-level interrupts enabled,
				 * we must reduce this to only asserting that
				 * %pil is greater than DISP_LEVEL.
				 */
				ASSERT(aframes == 0 ||
				    dtrace_getipl() > DISP_LEVEL);
				pcstack[depth++] = (pc_t)fp->fr_savpc;
			}

			while (depth < pcstack_limit)
				pcstack[depth++] = NULL;
			return;
		}

		if (aframes > 0) {
			aframes--;
		} else {
			pcstack[depth++] = (pc_t)fp->fr_savpc;
		}

		fp = nextfp;
		minfp = fp;
	}
}

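/*
 * Common user-level stack walker shared by dtrace_getupcstack() and
 * dtrace_getustackdepth().  Starting from the given stack pointer, it
 * follows the chain of saved frame pointers using the fault-tolerant
 * dtrace_fulword()/dtrace_fuword32() loads, honoring the data model of the
 * current process.  If pcstack is non-NULL the saved PCs are recorded (up to
 * pcstack_limit); either way the number of frames walked is returned.  A
 * stack pointer that fails to advance, or a walk deeper than
 * dtrace_ustackdepth_max, sets CPU_DTRACE_BADSTACK and terminates the walk.
 */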
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t sp)
{
	proc_t *p = curproc;
	int ret = 0;
	uintptr_t oldsp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	ASSERT(dtrace_ustackdepth_max > 0);

	if (p->p_model == DATAMODEL_NATIVE) {
		for (;;) {
			struct frame *fr = (struct frame *)(sp + STACK_BIAS);
			uintptr_t pc;

			if (sp == 0 || fr == NULL ||
			    !IS_P2ALIGNED((uintptr_t)fr, STACK_ALIGN))
				break;

			oldsp = sp;

			pc = dtrace_fulword(&fr->fr_savpc);
			sp = dtrace_fulword(&fr->fr_savfp);

			if (pc == 0)
				break;

			/*
			 * We limit the number of times we can go around this
			 * loop to account for a circular stack.
			 */
			if (sp == oldsp || ret++ >= dtrace_ustackdepth_max) {
				*flags |= CPU_DTRACE_BADSTACK;
				cpu_core[CPU->cpu_id].cpuc_dtrace_illval = sp;
				break;
			}

			if (pcstack != NULL) {
				*pcstack++ = pc;
				pcstack_limit--;
				if (pcstack_limit == 0)
					break;
			}
		}
	} else {
		/*
		 * Truncate the stack pointer to 32-bits as there may be
		 * garbage in the upper bits which would normally be ignored
		 * by the processor in 32-bit mode.
		 */
		sp = (uint32_t)sp;

		for (;;) {
			struct frame32 *fr = (struct frame32 *)sp;
			uint32_t pc;

			if (sp == 0 ||
			    !IS_P2ALIGNED((uintptr_t)fr, STACK_ALIGN32))
				break;

			oldsp = sp;

			pc = dtrace_fuword32(&fr->fr_savpc);
			sp = dtrace_fuword32(&fr->fr_savfp);

			if (pc == 0)
				break;

			if (sp == oldsp || ret++ >= dtrace_ustackdepth_max) {
				*flags |= CPU_DTRACE_BADSTACK;
				cpu_core[CPU->cpu_id].cpuc_dtrace_illval = sp;
				break;
			}

			if (pcstack != NULL) {
				*pcstack++ = pc;
				pcstack_limit--;
				if (pcstack_limit == 0)
					break;
			}
		}
	}

	return (ret);
}

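/*
 * Construct the ustack() for the current thread: the pid, the user-level PC
 * at the time of the probe (and the saved %o7 if we're in an entry probe),
 * any frames still held in register windows that have not yet been flushed
 * to the user stack (via dtrace_getupcstack_top()), and then whatever can be
 * recovered from the in-memory frames via dtrace_getustack_common().  Any
 * unused entries in pcstack are zeroed.
 */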
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t sp;
	int n;

	ASSERT(DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT));

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	*pcstack++ = (uint64_t)rp->r_pc;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)rp->r_o7;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;
	}

	sp = rp->r_sp;

	n = dtrace_getupcstack_top(pcstack, pcstack_limit, &sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;
	if (pcstack_limit <= 0)
		return;

	n = dtrace_getustack_common(pcstack, pcstack_limit, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

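/*
 * Compute the depth that dtrace_getupcstack() would record without actually
 * storing any program counters: one entry for the user PC at the time of the
 * probe, plus the windowed and in-memory frames, plus one more for %o7 when
 * we're in an entry probe with a meaningful return address.
 */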
int
dtrace_getustackdepth(void)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t sp;
	int n = 1;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	sp = rp->r_sp;

	n += dtrace_getustackdepth_top(&sp);
	n += dtrace_getustack_common(NULL, 0, sp);

	/*
	 * Add one more to the stack depth if we're in an entry probe as long
	 * as the return address is non-NULL or there are additional frames
	 * beyond that NULL return address.
	 */
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY) &&
	    (rp->r_o7 != NULL || n != 1))
		n++;

	return (n);
}

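/*
 * Like dtrace_getupcstack(), but records a frame pointer alongside each
 * program counter so that consumers that post-process the stack (e.g.,
 * ustack helpers used by jstack()) can locate individual frames.  This
 * variant flushes the user register windows up front and then walks only
 * the in-memory frames.
 */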
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = ttoproc(curthread);
	struct regs *rp;
	uintptr_t sp;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*fpstack++ = 0;
		*pcstack++ = (uint64_t)rp->r_pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		*fpstack++ = (uint64_t)rp->r_sp;
		*pcstack++ = (uint64_t)rp->r_o7;
		pcstack_limit--;
	} else {
		*fpstack++ = (uint64_t)rp->r_sp;
		*pcstack++ = (uint64_t)rp->r_pc;
		pcstack_limit--;
	}

	if (pcstack_limit <= 0)
		return;

	sp = rp->r_sp;

	dtrace_flush_user_windows();

	if (p->p_model == DATAMODEL_NATIVE) {
		while (pcstack_limit > 0) {
			struct frame *fr = (struct frame *)(sp + STACK_BIAS);
			uintptr_t pc;

			if (sp == 0 || fr == NULL ||
			    ((uintptr_t)&fr->fr_savpc & 3) != 0 ||
			    ((uintptr_t)&fr->fr_savfp & 3) != 0)
				break;

			pc = dtrace_fulword(&fr->fr_savpc);
			sp = dtrace_fulword(&fr->fr_savfp);

			if (pc == 0)
				break;

			*fpstack++ = sp;
			*pcstack++ = pc;
			pcstack_limit--;
		}
	} else {
		/*
		 * Truncate the stack pointer to 32-bits as there may be
		 * garbage in the upper bits which would normally be ignored
		 * by the processor in 32-bit mode.
		 */
		sp = (uint32_t)sp;

		while (pcstack_limit > 0) {
			struct frame32 *fr = (struct frame32 *)sp;
			uint32_t pc;

			if (sp == 0 ||
			    ((uintptr_t)&fr->fr_savpc & 3) != 0 ||
			    ((uintptr_t)&fr->fr_savfp & 3) != 0)
				break;

			pc = dtrace_fuword32(&fr->fr_savpc);
			sp = dtrace_fuword32(&fr->fr_savfp);

			if (pc == 0)
				break;

			*fpstack++ = sp;
			*pcstack++ = pc;
			pcstack_limit--;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

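/*
 * Fetch the arg'th argument to the function aframes frames up the stack.
 * For the first six arguments we try to fish the value directly out of the
 * appropriate register window (via dtrace_fish()); for later arguments, or
 * when the relevant window has already been flushed to the stack, we fall
 * back to reading the argument save area of the caller's frame under
 * CPU_DTRACE_NOFAULT.
 */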
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct frame *fp;
	uint64_t rval;

	/*
	 * Account for the fact that dtrace_getarg() consumes an additional
	 * stack frame.
	 */
	aframes++;

	if (arg < 6) {
		if (dtrace_fish(aframes, DTRACE_REG_I0 + arg, &val) == 0)
			return (val);
	} else {
		if (dtrace_fish(aframes, DTRACE_REG_I6, &val) == 0) {
			/*
			 * We have a stack pointer; grab the argument.
			 */
			fp = (struct frame *)(val + STACK_BIAS);

			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
			rval = fp->fr_argx[arg - 6];
			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

			return (rval);
		}
	}

	/*
	 * There are other ways to do this.  But the slow, painful way works
	 * just fine.  Because this requires some loads, we need to set
	 * CPU_DTRACE_NOFAULT to protect against looking for an argument that
	 * isn't there.
	 */
	fp = (struct frame *)((caddr_t)dtrace_getfp() + STACK_BIAS);
	dtrace_flush_windows();

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);

	for (aframes -= 1; aframes; aframes--)
		fp = (struct frame *)((caddr_t)fp->fr_savfp + STACK_BIAS);

	if (arg < 6) {
		rval = fp->fr_arg[arg];
	} else {
		fp = (struct frame *)((caddr_t)fp->fr_savfp + STACK_BIAS);
		rval = fp->fr_argx[arg - 6];
	}

	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (rval);
}

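/*
 * Return the depth of the current kernel stack, ignoring the top aframes
 * artificial frames.  This mirrors the frame walk in dtrace_getpcstack()
 * (including the hop from the interrupt stack to the thread stack) but only
 * counts frames rather than recording them.
 */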
int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp, *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	fp = (struct frame *)((caddr_t)dtrace_getfp() + STACK_BIAS);
	dtrace_flush_windows();

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)CPU->cpu_intr_stack + SA(MINFRAME);
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	for (;;) {
		nextfp = (struct frame *)((caddr_t)fp->fr_savfp + STACK_BIAS);
		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}

			return (++depth);
		}

		if (aframes > 0) {
			aframes--;
		} else {
			depth++;
		}

		fp = nextfp;
		minfp = fp;
	}
}

/*
 * This uses the same register numbering scheme as in sys/procfs_isa.h.
 */
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
	ulong_t value;
	uintptr_t fp;
	struct machpcb *mpcb;

	if (reg == R_G0)
		return (0);

	if (reg <= R_G7)
		return ((&rp->r_g1)[reg - 1]);

	if (reg > R_I7) {
		switch (reg) {
		case R_CCR:
			return ((rp->r_tstate >> TSTATE_CCR_SHIFT) &
			    TSTATE_CCR_MASK);
		case R_PC:
			return (rp->r_pc);
		case R_nPC:
			return (rp->r_npc);
		case R_Y:
			return (rp->r_y);
		case R_ASI:
			return ((rp->r_tstate >> TSTATE_ASI_SHIFT) &
			    TSTATE_ASI_MASK);
		case R_FPRS:
			return (dtrace_getfprs());
		default:
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}
	}

	/*
	 * We go to the fake restore case if the probe we hit was a pid
	 * return probe on a restore instruction.  We partially emulate the
	 * restore in the kernel and then execute a simple restore
	 * instruction that we've secreted away to do the actual register
	 * window manipulation.  We need to go one register window further
	 * down to get at the %l's and %i's, and we need to treat the %o's
	 * like %i's to pull them out of the topmost user frame.
	 */
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAKERESTORE)) {
		if (reg > R_O7)
			goto fake_restore;
		else
			reg += R_I0 - R_O0;

	} else if (reg <= R_O7) {
		return ((&rp->r_g1)[reg - 1]);
	}

	if (dtrace_getotherwin() > 0)
		return (dtrace_getreg_win(reg, 1));

	mpcb = (struct machpcb *)((caddr_t)rp - REGOFF);

	if (curproc->p_model == DATAMODEL_NATIVE) {
		struct frame *fr = (void *)(rp->r_sp + STACK_BIAS);

		if (mpcb->mpcb_wbcnt > 0) {
			struct rwindow *rwin = (void *)mpcb->mpcb_wbuf;
			int i = mpcb->mpcb_wbcnt;
			do {
				i--;
				if ((long)mpcb->mpcb_spbuf[i] == rp->r_sp)
					return (rwin[i].rw_local[reg - 16]);
			} while (i > 0);
		}

		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		value = dtrace_fulword(&fr->fr_local[reg - 16]);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
	} else {
		struct frame32 *fr = (void *)(uintptr_t)(caddr32_t)rp->r_sp;

		if (mpcb->mpcb_wbcnt > 0) {
			struct rwindow32 *rwin = (void *)mpcb->mpcb_wbuf;
			int i = mpcb->mpcb_wbcnt;
			do {
				i--;
				if ((long)mpcb->mpcb_spbuf[i] == rp->r_sp)
					return (rwin[i].rw_local[reg - 16]);
			} while (i > 0);
		}

		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		value = dtrace_fuword32(&fr->fr_local[reg - 16]);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
	}

	return (value);

fake_restore:
	ASSERT(R_L0 <= reg && reg <= R_I7);

	/*
	 * We first look two user windows down to see if we can dig out
	 * the register we're looking for.
	 */
	if (dtrace_getotherwin() > 1)
		return (dtrace_getreg_win(reg, 2));

	/*
	 * First we need to get the frame pointer and then we perform
	 * the same computation as in the non-fake-o-restore case.
	 */

	mpcb = (struct machpcb *)((caddr_t)rp - REGOFF);

	if (dtrace_getotherwin() > 0) {
		fp = dtrace_getreg_win(R_FP, 1);
		goto got_fp;
	}

	if (curproc->p_model == DATAMODEL_NATIVE) {
		struct frame *fr = (void *)(rp->r_sp + STACK_BIAS);

		if (mpcb->mpcb_wbcnt > 0) {
			struct rwindow *rwin = (void *)mpcb->mpcb_wbuf;
			int i = mpcb->mpcb_wbcnt;
			do {
				i--;
				if ((long)mpcb->mpcb_spbuf[i] == rp->r_sp) {
					fp = rwin[i].rw_fp;
					goto got_fp;
				}
			} while (i > 0);
		}

		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		fp = dtrace_fulword(&fr->fr_savfp);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
		if (cpu_core[CPU->cpu_id].cpuc_dtrace_flags & CPU_DTRACE_FAULT)
			return (0);
	} else {
		struct frame32 *fr = (void *)(uintptr_t)(caddr32_t)rp->r_sp;

		if (mpcb->mpcb_wbcnt > 0) {
			struct rwindow32 *rwin = (void *)mpcb->mpcb_wbuf;
			int i = mpcb->mpcb_wbcnt;
			do {
				i--;
				if ((long)mpcb->mpcb_spbuf[i] == rp->r_sp) {
					fp = rwin[i].rw_fp;
					goto got_fp;
				}
			} while (i > 0);
		}

		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		fp = dtrace_fuword32(&fr->fr_savfp);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
		if (cpu_core[CPU->cpu_id].cpuc_dtrace_flags & CPU_DTRACE_FAULT)
			return (0);
	}
got_fp:

	if (curproc->p_model == DATAMODEL_NATIVE) {
		struct frame *fr = (void *)(fp + STACK_BIAS);

		if (mpcb->mpcb_wbcnt > 0) {
			struct rwindow *rwin = (void *)mpcb->mpcb_wbuf;
			int i = mpcb->mpcb_wbcnt;
			do {
				i--;
				if ((long)mpcb->mpcb_spbuf[i] == fp)
					return (rwin[i].rw_local[reg - 16]);
			} while (i > 0);
		}

		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		value = dtrace_fulword(&fr->fr_local[reg - 16]);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
	} else {
		struct frame32 *fr = (void *)(uintptr_t)(caddr32_t)fp;

		if (mpcb->mpcb_wbcnt > 0) {
			struct rwindow32 *rwin = (void *)mpcb->mpcb_wbuf;
			int i = mpcb->mpcb_wbcnt;
			do {
				i--;
				if ((long)mpcb->mpcb_spbuf[i] == fp)
					return (rwin[i].rw_local[reg - 16]);
			} while (i > 0);
		}

		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		value = dtrace_fuword32(&fr->fr_local[reg - 16]);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
	}

	return (value);
}

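/*
 * The vmregs[] variable is not supported on SPARC; any attempt to read a
 * virtual-machine register is flagged as an illegal operation.
 */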
/*ARGSUSED*/
uint64_t
dtrace_getvmreg(uint_t ndx, volatile uint16_t *flags)
{
	*flags |= CPU_DTRACE_ILLOP;

	return (0);
}