xref: /freebsd/sys/cddl/dev/dtrace/aarch64/dtrace_isa.c (revision 5f4c09dd85bff675e0ca63c55ea3c517e0fddfcc)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/stack.h>
#include <sys/pcpu.h>

#include <machine/frame.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/atomic.h>
#include <machine/db_machdep.h>
#include <machine/stack.h>
#include <ddb/db_sym.h>
#include <ddb/ddb.h>
#include <sys/kdb.h>

#include "regset.h"

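/*
 * Upper bound on the number of frames walked when unwinding a user stack,
 * protecting against loops in the frame-pointer chain.
 */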
#define	MAX_USTACK_DEPTH  2048

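/*
 * Unchecked user-memory fetch primitives provided by the machine-dependent
 * DTrace support code; the dtrace_fuwordN() wrappers below validate the
 * address before calling them.
 */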
uint8_t dtrace_fuword8_nocheck(void *);
uint16_t dtrace_fuword16_nocheck(void *);
uint32_t dtrace_fuword32_nocheck(void *);
uint64_t dtrace_fuword64_nocheck(void *);

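/*
 * Capture a kernel stack trace: store up to pcstack_limit return addresses
 * in pcstack[], skipping "aframes" artificial DTrace frames.  If the probe
 * fired in interrupt context, intrpc supplies the interrupted pc and is
 * recorded first.  Unused slots are zeroed.
 */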
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct unwind_state state;
	int depth;

	depth = 0;

	if (intrpc != 0) {
		pcstack[depth++] = (pc_t) intrpc;
	}

	aframes++;

	state.fp = (uintptr_t)__builtin_frame_address(0);
	state.pc = (uintptr_t)dtrace_getpcstack;

	while (depth < pcstack_limit) {
		if (!unwind_frame(curthread, &state))
			break;
		if (!INKERNEL(state.pc))
			break;

		/*
		 * NB: Unlike some other architectures, we don't need to
		 * explicitly insert cpu_dtrace_caller as it appears in the
		 * normal kernel stack trace rather than a special trap frame.
		 */
		if (aframes > 0) {
			aframes--;
		} else {
			pcstack[depth++] = state.pc;
		}

	}

	for (; depth < pcstack_limit; depth++) {
		pcstack[depth] = 0;
	}
}

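/*
 * Walk a user frame-pointer chain starting from (pc, fp).  Each frame record
 * consists of the previous frame pointer followed by the saved return
 * address.  When pcstack is non-NULL, up to pcstack_limit entries are
 * stored; the number of frames visited is returned.
 */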
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t fp)
{
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int ret = 0;
	uintptr_t oldfp = fp;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= MAX_USTACK_DEPTH) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = fp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (fp == 0)
			break;

		pc = dtrace_fuword64((void *)(fp +
		    offsetof(struct unwind_state, pc)));
		fp = dtrace_fuword64((void *)fp);

		if (fp == oldfp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = fp;
			break;
		}

		/*
		 * ARM64TODO:
		 *     This workaround might not be necessary. It needs to be
		 *     revised and removed from all architectures if found
		 *     unwanted. Leaving the original x86 comment for reference.
		 *
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}

		oldfp = fp;
	}

	return (ret);
}

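/*
 * Capture the calling process's user stack in pcstack[].  The first entry is
 * the pid; the remaining entries are user return addresses, and any unused
 * slots are zeroed.
 */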
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_elr;
	fp = tf->tf_x[29];

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which on arm64
		 * is still held in the link register: the bl/blr instruction
		 * places the return address there rather than on the stack.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = tf->tf_lr;
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

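/*
 * dtrace_getustackdepth(), dtrace_getufpstack() and dtrace_getarg() have not
 * been implemented on arm64 yet; for now they only print an "IMPLEMENT ME"
 * diagnostic.
 */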
int
dtrace_getustackdepth(void)
{

	printf("IMPLEMENT ME: %s\n", __func__);

	return (0);
}

void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{

	printf("IMPLEMENT ME: %s\n", __func__);
}

/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{

	printf("IMPLEMENT ME: %s\n", __func__);

	return (0);
}

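/*
 * Return the depth of the current kernel stack, excluding "aframes"
 * artificial DTrace frames.
 */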
int
dtrace_getstackdepth(int aframes)
{
	struct unwind_state state;
	int depth;
	bool done;

	depth = 1;
	done = false;

	state.fp = (uintptr_t)__builtin_frame_address(0);
	state.pc = (uintptr_t)dtrace_getstackdepth;

	do {
		done = !unwind_frame(curthread, &state);
		if (!INKERNEL(state.pc) || !INKERNEL(state.fp))
			break;
		depth++;
	} while (!done);

	if (depth < aframes)
		return (0);
	else
		return (depth - aframes);
}

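/*
 * Return the value of the register identified by "reg" (a REG_* index from
 * regset.h) in the given trap frame.
 */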
ulong_t
dtrace_getreg(struct trapframe *frame, uint_t reg)
{
	switch (reg) {
	case REG_X0 ... REG_X29:
		return (frame->tf_x[reg]);
	case REG_LR:
		return (frame->tf_lr);
	case REG_SP:
		return (frame->tf_sp);
	case REG_PC:
		return (frame->tf_elr);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}
	/* NOTREACHED */
}

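/*
 * Check that [uaddr, uaddr + size) lies within the user address space; on
 * failure record a bad-address fault and return 0.
 */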
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{

	if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

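/*
 * Bounds-checked copies between user and kernel memory backing the DTrace
 * copyin()/copyout()/copyinstr()/copyoutstr() subroutines.
 */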
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size, flags);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size, flags);
}

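/*
 * Bounds-checked user word fetches: each wrapper validates the address
 * against VM_MAXUSER_ADDRESS before calling the unchecked primitive.
 */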
uint8_t
dtrace_fuword8(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword64_nocheck(uaddr));
}
397