xref: /freebsd/sys/cddl/dev/dtrace/i386/dtrace_isa.c (revision edf8578117e8844e02c0121147f45e4609b30680)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 #include <sys/cdefs.h>
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/stack.h>
32 #include <sys/pcpu.h>
33 
34 #include <machine/frame.h>
35 #include <machine/md_var.h>
36 #include <machine/pcb.h>
37 #include <machine/stack.h>
38 
39 #include <vm/vm.h>
40 #include <vm/vm_param.h>
41 #include <vm/pmap.h>
42 
43 #include "regset.h"
44 
45 extern uintptr_t kernbase;
46 uintptr_t kernelbase = (uintptr_t) &kernbase;
47 
48 uint8_t dtrace_fuword8_nocheck(void *);
49 uint16_t dtrace_fuword16_nocheck(void *);
50 uint32_t dtrace_fuword32_nocheck(void *);
51 uint64_t dtrace_fuword64_nocheck(void *);
52 
53 int	dtrace_ustackdepth_max = 2048;
54 
/*
 * Record up to pcstack_limit kernel return addresses for the current
 * thread into pcstack, skipping `aframes` artificial (DTrace-internal)
 * frames.  When the last artificial frame is consumed and a DTrace
 * caller has been recorded for this CPU, that caller is reported in its
 * place.  Remaining slots are zero-filled.
 */
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	int depth = 0;
	register_t ebp;
	struct i386_frame *frame;
	vm_offset_t callpc;
	pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;

	/* If an interrupted pc was supplied, it is the top of stack. */
	if (intrpc != 0)
		pcstack[depth++] = (pc_t) intrpc;

	/* Account for this function's own frame. */
	aframes++;

	/* Begin the walk at our own frame pointer. */
	__asm __volatile("movl %%ebp,%0" : "=r" (ebp));

	frame = (struct i386_frame *)ebp;
	while (depth < pcstack_limit) {
		/* Stop once the frame leaves this thread's kernel stack. */
		if (!kstack_contains(curthread, (vm_offset_t)frame,
		    sizeof(*frame)))
			break;

		callpc = frame->f_retaddr;

		/* Stop at the first non-kernel return address. */
		if (!INKERNEL(callpc))
			break;

		if (aframes > 0) {
			/* Still skipping artificial frames. */
			aframes--;
			if ((aframes == 0) && (caller != 0)) {
				pcstack[depth++] = caller;
			}
		}
		else {
			pcstack[depth++] = callpc;
		}

		/*
		 * Saved frame pointers must strictly increase as we walk
		 * toward the stack base; a non-increasing link indicates a
		 * cycle or corruption, so stop.
		 */
		if (frame->f_frame <= frame)
			break;
		frame = frame->f_frame;
	}

	/* Zero-fill any unused slots. */
	for (; depth < pcstack_limit; depth++) {
		pcstack[depth] = 0;
	}
}
102 
/*
 * Common user-mode stack walker.  Starting from program counter `pc`
 * and the saved frame-pointer chain rooted at `sp` (despite the name,
 * `sp` is dereferenced as a struct i386_frame, i.e. it acts as a frame
 * pointer), record program counters into pcstack (when non-NULL, up to
 * pcstack_limit entries) and return the number of frames visited.
 * Corrupt or circular chains set CPU_DTRACE_BADSTACK and terminate the
 * walk.
 */
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
#ifdef notyet
	proc_t *p = curproc;
	uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack. */
	size_t s1, s2;
#endif
	uintptr_t oldsp;
	/* Per-CPU DTrace fault flags; used to report bad user stacks. */
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	ASSERT(dtrace_ustackdepth_max > 0);

#ifdef notyet /* XXX signal stack. */
	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= dtrace_ustackdepth_max) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = sp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		/* End of the frame chain. */
		if (sp == 0)
			break;

		/* Remember the frame we are leaving to detect a stuck walk. */
		oldsp = sp;

#ifdef notyet /* XXX signal stack. */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}
#else
		/*
		 * Safely fetch the return address and the saved %ebp of the
		 * next frame from user memory.
		 */
		pc = dtrace_fuword32((void *)(sp +
			offsetof(struct i386_frame, f_retaddr)));
		sp = dtrace_fuword32((void *)sp);
#endif /* ! notyet */

		/* A frame link that does not advance means a corrupt chain. */
		if (sp == oldsp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = sp;
			break;
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}
210 
/*
 * Fill pcstack with the current process's pid followed by up to
 * pcstack_limit - 1 user-mode return addresses; any unused slots are
 * zero-filled.  Bails out early (without zero-filling) once the pid or
 * entry-probe pc exhausts the limit.
 */
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int n;

	/* Don't walk user memory if a fault is already pending. */
	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	/* The first slot carries the pid, not a pc. */
	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_eip;
	fp = tf->tf_ebp;
	sp = tf->tf_esp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call
		 * instruction puts it there right before the branch.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword32((void *) sp);
	}

	/*
	 * NOTE(review): the walker is seeded with sp here, while
	 * dtrace_getustackdepth() seeds it with fp — confirm which is
	 * intended; fp is otherwise unused in this function.
	 */
	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	/* Zero-fill whatever the walk did not consume. */
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
272 
/*
 * Return the depth of the current thread's user-mode stack, counting an
 * extra frame when stopped in an entry probe (before the prologue has
 * pushed the frame pointer).  Returns 0 with no user context and -1 if
 * a DTrace fault is pending.
 */
int
dtrace_getustackdepth(void)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, fp, sp;
	int n = 0;

	if (p == NULL || (tf = curthread->td_frame) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = tf->tf_eip;
	fp = tf->tf_ebp;
	sp = tf->tf_esp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call
		 * instruction puts it there right before the branch.
		 */

		pc = dtrace_fuword32((void *) sp);
		n++;
	}

	/*
	 * NOTE(review): seeded with fp here, whereas dtrace_getupcstack()
	 * seeds the same walker with sp — confirm the discrepancy is
	 * intentional.
	 */
	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}
309 
/*
 * Like dtrace_getupcstack(), but records a frame pointer alongside each
 * program counter: pcstack gets the pid followed by user pcs, and
 * fpstack gets the corresponding frame-pointer values (0 for the
 * synthetic entry-probe frame).  Unused pcstack slots are zero-filled.
 */
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#ifdef notyet /* XXX signal stack */
	uintptr_t oldcontext;
	size_t s1, s2;
#endif

	/* Don't walk user memory if a fault is already pending. */
	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	/* The first pcstack slot carries the pid, not a pc. */
	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_eip;
	fp = tf->tf_ebp;
	sp = tf->tf_esp;

#ifdef notyet /* XXX signal stack */
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * Entry probe: the prologue hasn't pushed %ebp yet, so
		 * report the current pc with no frame and back the pc up to
		 * the caller's return address at *sp.
		 */
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword32((void *)sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = fp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		/* End of the frame chain. */
		if (fp == 0)
			break;

#ifdef notyet /* XXX signal stack */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else
#endif /* XXX */
		{
			/*
			 * Safely fetch the return address and saved %ebp of
			 * the next frame from user memory.
			 */
			pc = dtrace_fuword32((void *)(fp +
				offsetof(struct i386_frame, f_retaddr)));
			fp = dtrace_fuword32((void *)fp);
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	/* Zero-fill any remaining pcstack slots. */
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
419 
/*
 * Return the value of argument `arg` to the probed function, with
 * `aframes` DTrace-internal frames between us and the probe site.  If
 * the probe fired via the invalid-opcode handler (FBT), the arguments
 * are read from the trap frame the handler pushed; otherwise they are
 * read directly above the caller's frame, skipping the probe-ID
 * argument that dtrace_probe() itself received.
 */
uint64_t
dtrace_getarg(int arg, int aframes)
{
	struct trapframe *frame;
	struct i386_frame *fp = (struct i386_frame *)dtrace_getfp();
	uintptr_t *stack, val;
	int i;

	for (i = 1; i <= aframes; i++) {
		fp = fp->f_frame;

		if (P2ROUNDUP(fp->f_retaddr, 4) ==
		    (long)dtrace_invop_callsite) {
			/*
			 * If we pass through the invalid op handler, we will
			 * use the trap frame pointer that it pushed on the
			 * stack as the second argument to dtrace_invop() as
			 * the pointer to the stack.  When using this stack, we
			 * must skip the third argument to dtrace_invop(),
			 * which is included in the i386_frame.
			 */
			frame = (struct trapframe *)(((uintptr_t **)&fp[1])[0]);
			/*
			 * Skip the three hardware-saved registers and the
			 * return address.
			 */
			stack = (uintptr_t *)frame->tf_isp + 4;
			goto load;
		}

	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

	/* Skip the saved %ebp and the return address. */
	stack = (uintptr_t *)fp + 2;

load:
	/* Tolerate a fault while dereferencing the computed slot. */
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}
471 
472 int
473 dtrace_getstackdepth(int aframes)
474 {
475 	int depth = 0;
476 	struct i386_frame *frame;
477 	vm_offset_t ebp;
478 
479 	aframes++;
480 	ebp = dtrace_getfp();
481 	frame = (struct i386_frame *)ebp;
482 	depth++;
483 	for(;;) {
484 		if (!kstack_contains(curthread, (vm_offset_t)frame,
485 		    sizeof(*frame)))
486 			break;
487 		depth++;
488 		if (frame->f_frame <= frame)
489 			break;
490 		frame = frame->f_frame;
491 	}
492 	if (depth < aframes)
493 		return 0;
494 	else
495 		return depth - aframes;
496 }
497 
498 ulong_t
499 dtrace_getreg(struct trapframe *frame, uint_t reg)
500 {
501 	struct pcb *pcb;
502 	int regmap[] = {  /* Order is dependent on reg.d */
503 		REG_GS,		/* 0  GS */
504 		REG_FS,		/* 1  FS */
505 		REG_ES,		/* 2  ES */
506 		REG_DS,		/* 3  DS */
507 		REG_RDI,	/* 4  EDI */
508 		REG_RSI,	/* 5  ESI */
509 		REG_RBP,	/* 6  EBP, REG_FP */
510 		REG_RSP,	/* 7  ESP */
511 		REG_RBX,	/* 8  EBX */
512 		REG_RDX,	/* 9  EDX, REG_R1 */
513 		REG_RCX,	/* 10 ECX */
514 		REG_RAX,	/* 11 EAX, REG_R0 */
515 		REG_TRAPNO,	/* 12 TRAPNO */
516 		REG_ERR,	/* 13 ERR */
517 		REG_RIP,	/* 14 EIP, REG_PC */
518 		REG_CS,		/* 15 CS */
519 		REG_RFL,	/* 16 EFL, REG_PS */
520 		REG_RSP,	/* 17 UESP, REG_SP */
521 		REG_SS		/* 18 SS */
522 	};
523 
524 	if (reg > SS) {
525 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
526 		return (0);
527 	}
528 
529 	if (reg >= sizeof (regmap) / sizeof (int)) {
530 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
531 		return (0);
532 	}
533 
534 	reg = regmap[reg];
535 
536 	switch(reg) {
537 	case REG_GS:
538 		if ((pcb = curthread->td_pcb) == NULL) {
539 			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
540 			return (0);
541 		}
542 		return (pcb->pcb_gs);
543 	case REG_FS:
544 		return (frame->tf_fs);
545 	case REG_ES:
546 		return (frame->tf_es);
547 	case REG_DS:
548 		return (frame->tf_ds);
549 	case REG_RDI:
550 		return (frame->tf_edi);
551 	case REG_RSI:
552 		return (frame->tf_esi);
553 	case REG_RBP:
554 		return (frame->tf_ebp);
555 	case REG_RSP:
556 		return (frame->tf_isp);
557 	case REG_RBX:
558 		return (frame->tf_ebx);
559 	case REG_RCX:
560 		return (frame->tf_ecx);
561 	case REG_RAX:
562 		return (frame->tf_eax);
563 	case REG_TRAPNO:
564 		return (frame->tf_trapno);
565 	case REG_ERR:
566 		return (frame->tf_err);
567 	case REG_RIP:
568 		return (frame->tf_eip);
569 	case REG_CS:
570 		return (frame->tf_cs);
571 	case REG_RFL:
572 		return (frame->tf_eflags);
573 #if 0
574 	case REG_RSP:
575 		return (frame->tf_esp);
576 #endif
577 	case REG_SS:
578 		return (frame->tf_ss);
579 	default:
580 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
581 		return (0);
582 	}
583 }
584 
585 static int
586 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
587 {
588 	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);
589 
590 	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
591 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
592 		cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
593 		return (0);
594 	}
595 
596 	return (1);
597 }
598 
599 void
600 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
601     volatile uint16_t *flags)
602 {
603 	if (dtrace_copycheck(uaddr, kaddr, size))
604 		dtrace_copy(uaddr, kaddr, size);
605 }
606 
607 void
608 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
609     volatile uint16_t *flags)
610 {
611 	if (dtrace_copycheck(uaddr, kaddr, size))
612 		dtrace_copy(kaddr, uaddr, size);
613 }
614 
615 void
616 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
617     volatile uint16_t *flags)
618 {
619 	if (dtrace_copycheck(uaddr, kaddr, size))
620 		dtrace_copystr(uaddr, kaddr, size, flags);
621 }
622 
623 void
624 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
625     volatile uint16_t *flags)
626 {
627 	if (dtrace_copycheck(uaddr, kaddr, size))
628 		dtrace_copystr(kaddr, uaddr, size, flags);
629 }
630 
631 uint8_t
632 dtrace_fuword8(void *uaddr)
633 {
634 	if ((uintptr_t)uaddr >= kernelbase) {
635 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
636 		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
637 		return (0);
638 	}
639 	return (dtrace_fuword8_nocheck(uaddr));
640 }
641 
642 uint16_t
643 dtrace_fuword16(void *uaddr)
644 {
645 	if ((uintptr_t)uaddr >= kernelbase) {
646 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
647 		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
648 		return (0);
649 	}
650 	return (dtrace_fuword16_nocheck(uaddr));
651 }
652 
653 uint32_t
654 dtrace_fuword32(void *uaddr)
655 {
656 	if ((uintptr_t)uaddr >= kernelbase) {
657 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
658 		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
659 		return (0);
660 	}
661 	return (dtrace_fuword32_nocheck(uaddr));
662 }
663 
664 uint64_t
665 dtrace_fuword64(void *uaddr)
666 {
667 	if ((uintptr_t)uaddr >= kernelbase) {
668 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
669 		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
670 		return (0);
671 	}
672 	return (dtrace_fuword64_nocheck(uaddr));
673 }
674