/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2017 Joyent, Inc.
 */

#include <sys/dtrace_impl.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>

extern uintptr_t kernelbase;

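/*
 * Maximum number of frames dtrace_getustack_common() will walk before it
 * declares the user stack bad; this bounds the walk of a circular stack.
 */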
int	dtrace_ustackdepth_max = 2048;

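/*
 * Walk the kernel stack starting at the current frame pointer, storing up
 * to pcstack_limit program counters into pcstack.  If intrpc is non-NULL it
 * is recorded as the topmost entry.  Artificial frames are skipped (with the
 * saved dtrace caller substituted once they are exhausted), an interrupt
 * stack is followed back onto the interrupted thread's stack, and any
 * remaining slots are zeroed.
 */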
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr, last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = (struct frame *)fp->fr_savfp;
		pc = fp->fr_savpc;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}

			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != 0) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = 0;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = 0;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}

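/*
 * Common user stack walker: starting from the given pc and frame pointer,
 * follow the chain of saved frame pointers (switching to the saved ucontext
 * when a signal frame is recognized) and, if pcstack is non-NULL, record
 * each pc.  The walk is capped at dtrace_ustackdepth_max frames; a circular
 * stack sets CPU_DTRACE_BADSTACK.  Returns the number of frames visited.
 */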
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	uintptr_t oldcontext = lwp->lwp_oldcontext;
	uintptr_t oldsp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	ASSERT(dtrace_ustackdepth_max > 0);

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= dtrace_ustackdepth_max) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = sp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		oldsp = sp;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		if (sp == oldsp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = sp;
			break;
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}

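/*
 * Record the current user-level stack into pcstack, prefixed by the current
 * pid; any slots left over are zeroed.
 */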
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n;

	ASSERT(DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT));

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

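/*
 * Return the depth of the current user-level stack, or -1 if a fault has
 * already been flagged on this CPU.
 */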
int
dtrace_getustackdepth(void)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n = 0;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}

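/*
 * As dtrace_getupcstack(), but additionally record the frame pointer for
 * each frame into fpstack.
 */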
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

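/*
 * Fetch probe argument 'arg' for a probe with 'aframes' artificial frames:
 * either from the register/stack state saved when the invalid-opcode trap
 * was taken, or directly from the stack frame of dtrace_probe()'s caller.
 */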
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;
#if defined(__amd64)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;
#endif

	for (i = 1; i <= aframes; i++) {
		fp = (struct frame *)(fp->fr_savfp);

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer to the stack that it passed as the
			 * second argument to dtrace_invop() as our stack
			 * pointer.  When using this stack, we must step
			 * beyond the EIP that was pushed when the trap was
			 * taken -- hence the "+ 1" below.
			 */
			stack = ((uintptr_t **)&fp[1])[1] + 1;
#else
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure, the calling RIP, and
			 * padding stored in dtrace_invop().  If the argument
			 * that we're seeking is passed on the stack, we'll
			 * pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t) * 2);

			if (arg <= inreg) {
				stack = (uintptr_t *)&rp->r_rdi;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				arg -= inreg;
			}
#endif
			goto load;
		}

	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

#if defined(__amd64)
	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

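/*
 * Return the depth of the kernel stack, not counting artificial frames; the
 * walk hops from the interrupt stack back onto the thread stack if
 * necessary.
 */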
/*ARGSUSED*/
int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		nextfp = (struct frame *)fp->fr_savfp;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break;
		}

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes)
		return (0);

	return (depth - aframes);
}

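/*
 * Map the i386 register indices (GS through SS) onto the corresponding
 * amd64 REG_* indices so that i386-style register requests work on amd64.
 */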
#if defined(__amd64)
static const int dtrace_regmap[] = {
	REG_GS,		/* GS */
	REG_FS,		/* FS */
	REG_ES,		/* ES */
	REG_DS,		/* DS */
	REG_RDI,	/* EDI */
	REG_RSI,	/* ESI */
	REG_RBP,	/* EBP */
	REG_RSP,	/* ESP */
	REG_RBX,	/* EBX */
	REG_RDX,	/* EDX */
	REG_RCX,	/* ECX */
	REG_RAX,	/* EAX */
	REG_TRAPNO,	/* TRAPNO */
	REG_ERR,	/* ERR */
	REG_RIP,	/* EIP */
	REG_CS,		/* CS */
	REG_RFL,	/* EFL */
	REG_RSP,	/* UESP */
	REG_SS		/* SS */
};
#endif

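/*
 * Return the value of the requested register from the saved register state;
 * on amd64, i386-style indices (<= SS) are first translated through
 * dtrace_regmap.
 */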
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
#if defined(__amd64)
	if (reg <= SS) {
		if (reg >= sizeof (dtrace_regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = dtrace_regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->r_rdi);
	case REG_RSI:
		return (rp->r_rsi);
	case REG_RDX:
		return (rp->r_rdx);
	case REG_RCX:
		return (rp->r_rcx);
	case REG_R8:
		return (rp->r_r8);
	case REG_R9:
		return (rp->r_r9);
	case REG_RAX:
		return (rp->r_rax);
	case REG_RBX:
		return (rp->r_rbx);
	case REG_RBP:
		return (rp->r_rbp);
	case REG_R10:
		return (rp->r_r10);
	case REG_R11:
		return (rp->r_r11);
	case REG_R12:
		return (rp->r_r12);
	case REG_R13:
		return (rp->r_r13);
	case REG_R14:
		return (rp->r_r14);
	case REG_R15:
		return (rp->r_r15);
	case REG_DS:
		return (rp->r_ds);
	case REG_ES:
		return (rp->r_es);
	case REG_FS:
		return (rp->r_fs);
	case REG_GS:
		return (rp->r_gs);
	case REG_TRAPNO:
		return (rp->r_trapno);
	case REG_ERR:
		return (rp->r_err);
	case REG_RIP:
		return (rp->r_rip);
	case REG_CS:
		return (rp->r_cs);
	case REG_SS:
		return (rp->r_ss);
	case REG_RFL:
		return (rp->r_rfl);
	case REG_RSP:
		return (rp->r_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

#else
	if (reg > SS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	return ((&rp->r_gs)[reg]);
#endif
}

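/*
 * Store val into the requested register in the saved register state; only
 * the general-purpose registers may be written, and anything else sets
 * CPU_DTRACE_ILLOP.
 */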
void
dtrace_setreg(struct regs *rp, uint_t reg, ulong_t val)
{
#if defined(__amd64)
	if (reg <= SS) {
		ASSERT(reg < (sizeof (dtrace_regmap) / sizeof (int)));

		reg = dtrace_regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		rp->r_rdi = val;
		break;
	case REG_RSI:
		rp->r_rsi = val;
		break;
	case REG_RDX:
		rp->r_rdx = val;
		break;
	case REG_RCX:
		rp->r_rcx = val;
		break;
	case REG_R8:
		rp->r_r8 = val;
		break;
	case REG_R9:
		rp->r_r9 = val;
		break;
	case REG_RAX:
		rp->r_rax = val;
		break;
	case REG_RBX:
		rp->r_rbx = val;
		break;
	case REG_RBP:
		rp->r_rbp = val;
		break;
	case REG_R10:
		rp->r_r10 = val;
		break;
	case REG_R11:
		rp->r_r11 = val;
		break;
	case REG_R12:
		rp->r_r12 = val;
		break;
	case REG_R13:
		rp->r_r13 = val;
		break;
	case REG_R14:
		rp->r_r14 = val;
		break;
	case REG_R15:
		rp->r_r15 = val;
		break;
	case REG_RSP:
		rp->r_rsp = val;
		break;
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return;
	}

#else /* defined(__amd64) */
	switch (reg) {
	case EAX:
		rp->r_eax = val;
		break;
	case ECX:
		rp->r_ecx = val;
		break;
	case EDX:
		rp->r_edx = val;
		break;
	case EBX:
		rp->r_ebx = val;
		break;
	case ESP:
		rp->r_esp = val;
		break;
	case EBP:
		rp->r_ebp = val;
		break;
	case ESI:
		rp->r_esi = val;
		break;
	case EDI:
		rp->r_edi = val;
		break;
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return;
	}
#endif /* defined(__amd64) */
}

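/*
 * Verify that the user range [uaddr, uaddr + size) lies entirely below
 * kernelbase and does not wrap; on failure, set CPU_DTRACE_BADADDR and
 * record the offending address.
 */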
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

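/*
 * Checked copies between user and kernel space (dtrace_copyin/dtrace_copyout)
 * and of NUL-terminated strings (dtrace_copyinstr/dtrace_copyoutstr); each
 * validates the user range with dtrace_copycheck() before copying.
 */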
/*ARGSUSED*/
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

/*ARGSUSED*/
void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size, flags);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size, flags);
}

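/*
 * The dtrace_fuword*() routines fetch a value of the given width from user
 * space, setting CPU_DTRACE_BADADDR and returning 0 if the address is at or
 * above _userlimit.
 */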
uint8_t
dtrace_fuword8(void *uaddr)
{
	extern uint8_t dtrace_fuword8_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	extern uint16_t dtrace_fuword16_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	extern uint32_t dtrace_fuword32_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	extern uint64_t dtrace_fuword64_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}