/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dtrace_impl.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>

extern uintptr_t kernelbase;

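/*
 * Upper bound on the number of frames that dtrace_getustack_common() will
 * walk before declaring the user stack bad (e.g. circular).
 */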
int	dtrace_ustackdepth_max = 2048;

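/*
 * Gather a kernel stack trace:  walk the saved frame pointers from the
 * current frame, hopping from the interrupt stack to the thread stack if
 * necessary, skipping 'aframes' artificial (DTrace-internal) frames and
 * substituting cpu_dtrace_caller where appropriate.  Any unused pcstack
 * slots are zeroed.
 */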
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr, last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = (struct frame *)fp->fr_savfp;
		pc = fp->fr_savpc;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}

			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != NULL) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = NULL;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = NULL;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}

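/*
 * Common user stack walker for dtrace_getupcstack() and
 * dtrace_getustackdepth():  follow saved frame pointers for either data
 * model, stepping through any signal contexts recorded in lwp_oldcontext,
 * and record each PC if pcstack is non-NULL.  The walk is bounded by
 * dtrace_ustackdepth_max to guard against circular stacks; the return
 * value is the number of frames visited.
 */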
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	uintptr_t oldcontext = lwp->lwp_oldcontext;
	uintptr_t oldsp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	ASSERT(dtrace_ustackdepth_max > 0);

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= dtrace_ustackdepth_max) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = sp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		oldsp = sp;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		if (sp == oldsp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = sp;
			break;
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}

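/*
 * Gather a user stack trace for the current thread:  the first entry is
 * the pid, followed by the user frames found via dtrace_getustack_common().
 * Any remaining pcstack slots are zeroed.
 */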
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n;

	ASSERT(DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT));

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

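/*
 * Return the depth of the current thread's user stack, or -1 if a fault
 * has already been induced.  Uses dtrace_getustack_common() with a NULL
 * pcstack to count frames without recording them.
 */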
int
dtrace_getustackdepth(void)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n = 0;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}

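/*
 * Like dtrace_getupcstack(), but also record the frame pointer alongside
 * each PC in fpstack; the walk is open-coded here rather than shared with
 * dtrace_getustack_common() so that both values can be emitted per frame.
 */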
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

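/*
 * Fetch argument 'arg' of the probe site 'aframes' frames up the stack.
 * If the probe fired via the invalid-opcode trap (e.g. fbt), the argument
 * is taken from the saved register state or trap stack; otherwise it is
 * read directly from the caller's frame, adjusted for the probe ID that
 * dtrace_probe() takes as its first argument.
 */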
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;
#if defined(__amd64)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;
#endif

	for (i = 1; i <= aframes; i++) {
		fp = (struct frame *)(fp->fr_savfp);

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed to the stack as the
			 * second argument to dtrace_invop() as the pointer to
			 * the stack.  When using this stack, we must step
			 * beyond the EIP/RIP that was pushed when the trap was
			 * taken -- hence the "+ 1" below.
			 */
			stack = ((uintptr_t **)&fp[1])[1] + 1;
#else
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure, and then again beyond
			 * the calling RIP stored in dtrace_invop().  If the
			 * argument that we're seeking is passed on the stack,
			 * we'll pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t));

			if (arg <= inreg) {
				stack = (uintptr_t *)&rp->r_rdi;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				arg -= inreg;
			}
#endif
			goto load;
		}

	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

#if defined(__amd64)
	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

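/*
 * Return the depth of the current kernel stack, excluding 'aframes'
 * artificial frames; the walk mirrors dtrace_getpcstack().
 */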
/*ARGSUSED*/
int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		nextfp = (struct frame *)fp->fr_savfp;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break;
		}

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes)
		return (0);

	return (depth - aframes);
}

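/*
 * Return the value of the register identified by 'reg' from the saved
 * register state 'rp'.  On amd64, i386-style register indices (up through
 * SS) are first mapped to their 64-bit equivalents via regmap; larger
 * indices are taken to be native amd64 register numbers.
 */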
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
#if defined(__amd64)
	int regmap[] = {
		REG_GS,		/* GS */
		REG_FS,		/* FS */
		REG_ES,		/* ES */
		REG_DS,		/* DS */
		REG_RDI,	/* EDI */
		REG_RSI,	/* ESI */
		REG_RBP,	/* EBP */
		REG_RSP,	/* ESP */
		REG_RBX,	/* EBX */
		REG_RDX,	/* EDX */
		REG_RCX,	/* ECX */
		REG_RAX,	/* EAX */
		REG_TRAPNO,	/* TRAPNO */
		REG_ERR,	/* ERR */
		REG_RIP,	/* EIP */
		REG_CS,		/* CS */
		REG_RFL,	/* EFL */
		REG_RSP,	/* UESP */
		REG_SS		/* SS */
	};

	if (reg <= SS) {
		if (reg >= sizeof (regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->r_rdi);
	case REG_RSI:
		return (rp->r_rsi);
	case REG_RDX:
		return (rp->r_rdx);
	case REG_RCX:
		return (rp->r_rcx);
	case REG_R8:
		return (rp->r_r8);
	case REG_R9:
		return (rp->r_r9);
	case REG_RAX:
		return (rp->r_rax);
	case REG_RBX:
		return (rp->r_rbx);
	case REG_RBP:
		return (rp->r_rbp);
	case REG_R10:
		return (rp->r_r10);
	case REG_R11:
		return (rp->r_r11);
	case REG_R12:
		return (rp->r_r12);
	case REG_R13:
		return (rp->r_r13);
	case REG_R14:
		return (rp->r_r14);
	case REG_R15:
		return (rp->r_r15);
	case REG_DS:
		return (rp->r_ds);
	case REG_ES:
		return (rp->r_es);
	case REG_FS:
		return (rp->r_fs);
	case REG_GS:
		return (rp->r_gs);
	case REG_TRAPNO:
		return (rp->r_trapno);
	case REG_ERR:
		return (rp->r_err);
	case REG_RIP:
		return (rp->r_rip);
	case REG_CS:
		return (rp->r_cs);
	case REG_SS:
		return (rp->r_ss);
	case REG_RFL:
		return (rp->r_rfl);
	case REG_RSP:
		return (rp->r_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

#else
	if (reg > SS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	return ((&rp->r_gs)[reg]);
#endif
}

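/*
 * Verify that a user address range lies entirely below kernelbase (and does
 * not wrap) before it is copied to or from; on failure, set
 * CPU_DTRACE_BADADDR and record the offending address.
 */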
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

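/*
 * Checked copies between user and kernel addresses:  each routine validates
 * the user range with dtrace_copycheck() before handing off to the
 * unchecked dtrace_copy()/dtrace_copystr() primitives.
 */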
/*ARGSUSED*/
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

/*ARGSUSED*/
void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size, flags);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size, flags);
}

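/*
 * Checked user-word fetches:  each routine refuses addresses at or above
 * _userlimit (setting CPU_DTRACE_BADADDR and recording the address) before
 * falling through to the unchecked *_nocheck() primitives.
 */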
uint8_t
dtrace_fuword8(void *uaddr)
{
	extern uint8_t dtrace_fuword8_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	extern uint16_t dtrace_fuword16_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	extern uint32_t dtrace_fuword32_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	extern uint64_t dtrace_fuword64_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}
747