xref: /titanic_50/usr/src/uts/intel/dtrace/dtrace_isa.c (revision 91d7f85e02991954d1e1bd44673df567ad8dcc87)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
 */

#include <sys/dtrace_impl.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>

extern uintptr_t kernelbase;

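/*
 * Tunable upper bound on the number of frames a single user stack walk may
 * traverse; it keeps dtrace_getustack_common() from looping forever on a
 * circular or otherwise corrupt stack.
 */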
int	dtrace_ustackdepth_max = 2048;

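/*
 * Gather a kernel stack trace (used for the stack() action).  Starting from
 * the current frame pointer, walk the chain of saved frame pointers toward
 * the top of the stack, recording each saved PC in pcstack[]; if the probe
 * fired on the interrupt stack, hop back onto the interrupted thread's stack
 * and keep walking.  The first 'aframes' frames are DTrace's own and are
 * skipped; when CPU->cpu_dtrace_caller is set, it is reported in place of
 * the last of them.  Any unused slots are zeroed before returning.
 */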
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr, last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = (struct frame *)fp->fr_savfp;
		pc = fp->fr_savpc;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}

			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != NULL) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = NULL;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = NULL;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}

static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	uintptr_t oldcontext = lwp->lwp_oldcontext;
	uintptr_t oldsp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	ASSERT(dtrace_ustackdepth_max > 0);

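	/*
	 * s1 and s2 are the expected distances from a frame's stack pointer
	 * to a signal handler's saved ucontext (s2 additionally allows for a
	 * siginfo).  When lwp_oldcontext matches sp + s1 or sp + s2 below,
	 * the walk steps through the signal frame by way of the saved
	 * context rather than the frame-pointer chain.
	 */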
	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= dtrace_ustackdepth_max) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = sp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		oldsp = sp;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

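		/*
		 * If the frame pointer did not advance, the stack is looping
		 * back on itself; flag it as bad and stop the walk.
		 */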
		if (sp == oldsp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = sp;
			break;
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}

void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n;

	ASSERT(DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT));

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

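	/*
	 * The first entry is the pid of the traced process, which allows the
	 * user addresses that follow to be resolved against the correct
	 * address space.
	 */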
	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;

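	/*
	 * In an entry probe the callee has not yet set up its frame, so
	 * record the probe PC itself and then fetch the caller's return
	 * address directly from the top of the user stack.
	 */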
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

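/*
 * Return the depth of the current user stack.  This mirrors
 * dtrace_getupcstack(), but only counts frames rather than recording them.
 */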
int
dtrace_getustackdepth(void)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n = 0;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}

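/*
 * Like dtrace_getupcstack(), but also records the frame pointer paired with
 * each PC; this variant is used when per-frame context is needed (e.g. by a
 * ustack()/jstack() helper).
 */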
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

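/*
 * Fetch probe argument 'arg', skipping 'aframes' artificial frames.  If the
 * probe was reached through the invalid-opcode trap (as with fbt), the
 * argument is recovered from the register state or stack saved at the trap;
 * otherwise the provider called dtrace_probe() directly, and the argument is
 * taken from dtrace_probe()'s own stack frame.
 */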
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;
#if defined(__amd64)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;
#endif

	for (i = 1; i <= aframes; i++) {
		fp = (struct frame *)(fp->fr_savfp);

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
			/*
			 * If we pass through the invalid op handler, we use
			 * the stack pointer that the handler passed to
			 * dtrace_invop() as its second argument.  When using
			 * this stack, we must step beyond the EIP that was
			 * pushed when the trap was taken -- hence the "+ 1"
			 * below.
			 */
			stack = ((uintptr_t **)&fp[1])[1] + 1;
#else
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure, the calling RIP, and
			 * padding stored in dtrace_invop().  If the argument
			 * that we're seeking is passed on the stack, we'll
			 * pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t) * 2);

			if (arg <= inreg) {
				stack = (uintptr_t *)&rp->r_rdi;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				arg -= inreg;
			}
#endif
			goto load;
		}

	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

#if defined(__amd64)
	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

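/*
 * Return the depth of the kernel stack at the time the probe fired,
 * excluding the 'aframes' artificial frames contributed by DTrace itself.
 */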
/*ARGSUSED*/
int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		nextfp = (struct frame *)fp->fr_savfp;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break;
		}

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes)
		return (0);

	return (depth - aframes);
}

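/*
 * Return the value of the register identified by 'reg' from the saved
 * register set.  On amd64, indices up to SS follow the historical i386
 * layout (e.g. as exposed through the uregs[] array) and are translated via
 * regmap[]; larger indices name amd64 registers directly, offset by SS + 1.
 */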
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
#if defined(__amd64)
	int regmap[] = {
		REG_GS,		/* GS */
		REG_FS,		/* FS */
		REG_ES,		/* ES */
		REG_DS,		/* DS */
		REG_RDI,	/* EDI */
		REG_RSI,	/* ESI */
		REG_RBP,	/* EBP */
		REG_RSP,	/* ESP */
		REG_RBX,	/* EBX */
		REG_RDX,	/* EDX */
		REG_RCX,	/* ECX */
		REG_RAX,	/* EAX */
		REG_TRAPNO,	/* TRAPNO */
		REG_ERR,	/* ERR */
		REG_RIP,	/* EIP */
		REG_CS,		/* CS */
		REG_RFL,	/* EFL */
		REG_RSP,	/* UESP */
		REG_SS		/* SS */
	};

	if (reg <= SS) {
		if (reg >= sizeof (regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->r_rdi);
	case REG_RSI:
		return (rp->r_rsi);
	case REG_RDX:
		return (rp->r_rdx);
	case REG_RCX:
		return (rp->r_rcx);
	case REG_R8:
		return (rp->r_r8);
	case REG_R9:
		return (rp->r_r9);
	case REG_RAX:
		return (rp->r_rax);
	case REG_RBX:
		return (rp->r_rbx);
	case REG_RBP:
		return (rp->r_rbp);
	case REG_R10:
		return (rp->r_r10);
	case REG_R11:
		return (rp->r_r11);
	case REG_R12:
		return (rp->r_r12);
	case REG_R13:
		return (rp->r_r13);
	case REG_R14:
		return (rp->r_r14);
	case REG_R15:
		return (rp->r_r15);
	case REG_DS:
		return (rp->r_ds);
	case REG_ES:
		return (rp->r_es);
	case REG_FS:
		return (rp->r_fs);
	case REG_GS:
		return (rp->r_gs);
	case REG_TRAPNO:
		return (rp->r_trapno);
	case REG_ERR:
		return (rp->r_err);
	case REG_RIP:
		return (rp->r_rip);
	case REG_CS:
		return (rp->r_cs);
	case REG_SS:
		return (rp->r_ss);
	case REG_RFL:
		return (rp->r_rfl);
	case REG_RSP:
		return (rp->r_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

#else
	if (reg > SS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	return ((&rp->r_gs)[reg]);
#endif
}

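/*
 * Verify that the user range [uaddr, uaddr + size) lies entirely below the
 * kernel and does not wrap; on failure, set CPU_DTRACE_BADADDR, record the
 * offending address, and return 0 so the caller skips the copy.
 */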
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

/*ARGSUSED*/
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

/*ARGSUSED*/
void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size, flags);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size, flags);
}

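/*
 * Safe user-word fetches: each routine checks that the address lies below
 * _userlimit before loading it through the corresponding
 * dtrace_fuwordN_nocheck() routine; a kernel address sets CPU_DTRACE_BADADDR
 * and yields 0.
 */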
uint8_t
dtrace_fuword8(void *uaddr)
{
	extern uint8_t dtrace_fuword8_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	extern uint16_t dtrace_fuword16_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	extern uint32_t dtrace_fuword32_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	extern uint64_t dtrace_fuword64_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}