/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dtrace_impl.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>

/*
 * This is gross knowledge to have to encode here...
 */
extern void _interrupt();
extern void _cmntrap();
extern void _allsyscalls();

extern size_t _interrupt_size;
extern size_t _cmntrap_size;
extern size_t _allsyscalls_size;

extern uintptr_t kernelbase;

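/*
 * Gather a kernel-level stack trace by walking frame pointers from the
 * current frame, recording each saved PC into pcstack[].  If intrpc is
 * non-NULL it becomes the topmost entry.  The first 'aframes' frames are
 * DTrace's own (artificial) frames and are skipped; when they run out and
 * a cached caller is available, that caller is reported in their place.
 * If the probe fired on the interrupt stack, the walk hops back to the
 * interrupted thread's stack.  Remaining slots are zeroed once the top of
 * the stack is reached.
 */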
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr, last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = (struct frame *)fp->fr_savfp;
		pc = fp->fr_savpc;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}

			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != NULL) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = NULL;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = NULL;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}

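/*
 * Gather a user-level stack trace for the current thread.  The first
 * entry is the pid; subsequent entries are PCs obtained by walking the
 * user frame pointers with the fault-tolerant dtrace_fulword()/
 * dtrace_fuword32() loads.  Both the native and ILP32 data models are
 * handled, and frames that appear to belong to a signal handler are
 * unwound through the saved ucontext.  Unused slots are zero-filled.
 */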
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = ttoproc(curthread);
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint8_t *flags =
	    (volatile uint8_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

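	/*
	 * s1 and s2 are the two candidate distances above a frame pointer at
	 * which a signal-delivery ucontext may sit (presumably with and
	 * without an accompanying siginfo); if oldcontext matches one of
	 * them in the loop below, the frame is treated as a signal handler
	 * frame and unwound through the saved context rather than through
	 * the ordinary frame linkage.
	 */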
	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0 && sp != 0) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

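/*
 * Like dtrace_getupcstack(), but the frame pointer for each entry is also
 * recorded into fpstack[] (0 for the PC recorded when CPU_DTRACE_ENTRY is
 * set).
 */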
/*ARGSUSED*/
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = ttoproc(curthread);
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint8_t *flags =
	    (volatile uint8_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0 && sp != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

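/*
 * Fetch probe argument number 'arg'.  The walk skips up to 'aframes'
 * DTrace-internal frames; if one of them is the invalid-opcode handler's
 * call site (dtrace_invop_callsite), the argument is pulled from the
 * register/stack state saved by that trap.  Otherwise the argument is
 * read from the stack of dtrace_probe()'s direct caller, skipping the
 * probe ID.  On amd64 the first six arguments live in registers.  The
 * final load is done with CPU_DTRACE_NOFAULT set.
 */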
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;
#if defined(__amd64)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;
#endif

	for (i = 1; i <= aframes; i++) {
		fp = (struct frame *)(fp->fr_savfp);

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed to the stack as the
			 * second argument to dtrace_invop() as the pointer to
			 * the stack.  When using this stack, we must step
			 * beyond the EIP/RIP that was pushed when the trap was
			 * taken -- hence the "+ 1" below.
			 */
			stack = ((uintptr_t **)&fp[1])[1] + 1;
#else
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure, and then again beyond
			 * the calling RIP stored in dtrace_invop().  If the
			 * argument that we're seeking is passed on the stack,
			 * we'll pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t));

			if (arg <= inreg) {
				stack = (uintptr_t *)&rp->r_rdi;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				arg -= inreg;
			}
#endif
			goto load;
		}

	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

#if defined(__amd64)
	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

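/*
 * Return the depth of the current kernel stack, excluding 'aframes'
 * DTrace-internal frames; the walk mirrors dtrace_getpcstack(), including
 * the hop from the interrupt stack back to the thread stack.
 */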
/*ARGSUSED*/
int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		nextfp = (struct frame *)fp->fr_savfp;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break;
		}

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes)
		return (0);

	return (depth - aframes);
}

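/*
 * Return the value of register 'reg' from the saved register set 'rp'.
 * On amd64, indices up to SS are interpreted as i386-style register
 * numbers and translated through regmap[] so that 32-bit register
 * references keep working; larger indices name amd64 registers directly,
 * biased by SS + 1.
 */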
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
#if defined(__amd64)
	int regmap[] = {
		REG_GS,		/* GS */
		REG_FS,		/* FS */
		REG_ES,		/* ES */
		REG_DS,		/* DS */
		REG_RDI,	/* EDI */
		REG_RSI,	/* ESI */
		REG_RBP,	/* EBP */
		REG_RSP,	/* ESP */
		REG_RBX,	/* EBX */
		REG_RDX,	/* EDX */
		REG_RCX,	/* ECX */
		REG_RAX,	/* EAX */
		REG_TRAPNO,	/* TRAPNO */
		REG_ERR,	/* ERR */
		REG_RIP,	/* EIP */
		REG_CS,		/* CS */
		REG_RFL,	/* EFL */
		REG_RSP,	/* UESP */
		REG_SS		/* SS */
	};

	if (reg <= SS) {
		if (reg >= sizeof (regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->r_rdi);
	case REG_RSI:
		return (rp->r_rsi);
	case REG_RDX:
		return (rp->r_rdx);
	case REG_RCX:
		return (rp->r_rcx);
	case REG_R8:
		return (rp->r_r8);
	case REG_R9:
		return (rp->r_r9);
	case REG_RAX:
		return (rp->r_rax);
	case REG_RBX:
		return (rp->r_rbx);
	case REG_RBP:
		return (rp->r_rbp);
	case REG_R10:
		return (rp->r_r10);
	case REG_R11:
		return (rp->r_r11);
	case REG_R12:
		return (rp->r_r12);
	case REG_R13:
		return (rp->r_r13);
	case REG_R14:
		return (rp->r_r14);
	case REG_R15:
		return (rp->r_r15);
	case REG_DS:
		return (rp->r_ds);
	case REG_ES:
		return (rp->r_es);
	case REG_FS:
		return (rp->r_fs);
	case REG_GS:
		return (rp->r_gs);
	case REG_TRAPNO:
		return (rp->r_trapno);
	case REG_ERR:
		return (rp->r_err);
	case REG_RIP:
		return (rp->r_rip);
	case REG_CS:
		return (rp->r_cs);
	case REG_SS:
		return (rp->r_ss);
	case REG_RFL:
		return (rp->r_rfl);
	case REG_RSP:
		return (rp->r_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

#else
	if (reg > SS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	return ((&rp->r_gs)[reg]);
#endif
}

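/*
 * Verify that [uaddr, uaddr + size) is a plausible user address range:
 * it must lie entirely below kernelbase and must not wrap.  On failure,
 * CPU_DTRACE_BADADDR is set, the offending address is recorded, and the
 * caller skips the copy.
 */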
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size);
}

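/*
 * Fetch an 8-, 16-, 32- or 64-bit value from a user address.  Each
 * wrapper rejects addresses at or above _userlimit (flagging
 * CPU_DTRACE_BADADDR and recording the address) before handing off to
 * the corresponding unchecked assembly routine.
 */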
uint8_t
dtrace_fuword8(void *uaddr)
{
	extern uint8_t dtrace_fuword8_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	extern uint16_t dtrace_fuword16_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	extern uint32_t dtrace_fuword32_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	extern uint64_t dtrace_fuword64_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}