xref: /titanic_52/usr/src/uts/intel/dtrace/dtrace_isa.c (revision f498645a3eecf2ddd304b4ea9c7f1b4c155ff79e)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dtrace_impl.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>

/*
 * This is gross knowledge to have to encode here...
 */
extern void _interrupt();
extern void _cmntrap();
extern void _allsyscalls();

extern size_t _interrupt_size;
extern size_t _cmntrap_size;
extern size_t _allsyscalls_size;

extern uintptr_t kernelbase;

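/*
 * Capture the kernel stack at probe firing:  walk the chain of saved frame
 * pointers starting at the current frame, recording each return PC into
 * pcstack[] (up to pcstack_limit entries).  The first 'aframes' frames are
 * DTrace's own artificial frames and are skipped; when the last artificial
 * frame is consumed, the saved cpu_dtrace_caller (if any) is recorded in
 * its place.  If we are on an interrupt stack we hop over to the
 * interrupted thread's stack, and any unused entries are zero-filled on
 * the way out.
 */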
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr, last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = (struct frame *)fp->fr_savfp;
		pc = fp->fr_savpc;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}

			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != NULL) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = NULL;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = NULL;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}

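/*
 * Common user-level stack walker:  starting from the supplied pc/sp, follow
 * the frame-pointer chain, storing each PC into pcstack[] (when pcstack is
 * non-NULL) and returning the number of frames seen.  A frame whose stack
 * pointer matches lwp_oldcontext at one of the expected offsets (s1/s2
 * below, where a ucontext sits during signal delivery) is crossed by
 * reading the saved FP/PC back out of that ucontext.  Both the native and
 * 32-bit data models are handled; a fault during the walk is cleared and
 * simply ends the walk.
 */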
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	uintptr_t oldcontext = lwp->lwp_oldcontext;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}

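/*
 * Record the current user-level stack into pcstack[]:  the first entry is
 * the pid, the next is the interrupted user PC, and the remainder come
 * from dtrace_getustack_common().  If the thread is at a system call entry
 * point (CPU_DTRACE_ENTRY), the interrupted PC is recorded and the walk
 * then continues from the return address found at the top of the user
 * stack.  Unused entries are zeroed.
 */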
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

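/*
 * Return the depth (in frames) of the current user-level stack, using the
 * same walk as dtrace_getupcstack() but without recording any PCs.
 * Returns 0 if there is no user context and -1 if a fault is already
 * pending.
 */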
int
dtrace_getustackdepth(void)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n = 0;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}

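/*
 * Like dtrace_getupcstack(), but also records the frame pointer for each
 * entry (into fpstack[]), which a user-level stack helper can use to
 * translate frames.  The walk itself mirrors dtrace_getustack_common(),
 * including the signal-handler ucontext handling and the clear-the-fault
 * workaround.
 */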
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

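/*
 * Fetch probe argument 'arg'.  If the probe was entered through the
 * invalid-opcode trap (as fbt probes are on x86), the arguments are
 * recovered from the state that the trap handler saved:  on amd64 the
 * first six integer arguments live in the saved registers (beginning at
 * %rdi) and the rest are on the interrupted stack; on i386 all arguments
 * are on the stack that was handed to dtrace_invop().  If dtrace_probe()
 * was instead called directly by the provider, the arguments follow the
 * probe ID on the caller's stack, hence the arg++ below.
 */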
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;
#if defined(__amd64)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;
#endif

	for (i = 1; i <= aframes; i++) {
		fp = (struct frame *)(fp->fr_savfp);

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed as the second
			 * argument to dtrace_invop() as the pointer to the
			 * stack.  When using this stack, we must step beyond
			 * the EIP/RIP that was pushed when the trap was
			 * taken -- hence the "+ 1" below.
			 */
			stack = ((uintptr_t **)&fp[1])[1] + 1;
#else
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure, and then again beyond
			 * the calling RIP stored in dtrace_invop().  If the
			 * argument that we're seeking is passed on the stack,
			 * we'll pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t));

			if (arg <= inreg) {
				stack = (uintptr_t *)&rp->r_rdi;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				arg -= inreg;
			}
#endif
			goto load;
		}

	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

#if defined(__amd64)
	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

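/*
 * Return the depth of the kernel stack at probe firing, excluding the
 * 'aframes' artificial frames.  The frame walk mirrors dtrace_getpcstack(),
 * including the hop from the interrupt stack back to the thread stack.
 */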
/*ARGSUSED*/
int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		nextfp = (struct frame *)fp->fr_savfp;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break;
		}

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes)
		return (0);

	return (depth - aframes);
}

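/*
 * Return the value of the register identified by 'reg' from the saved
 * register set 'rp'.  On amd64, indices up to SS are the i386 register
 * names (kept for 32-bit compatibility) and are first translated through
 * regmap[]; indices above SS are taken to be amd64 REG_* values offset by
 * SS + 1.  On i386 the index selects a member of the regs structure
 * directly, starting at r_gs.
 */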
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
#if defined(__amd64)
	int regmap[] = {
		REG_GS,		/* GS */
		REG_FS,		/* FS */
		REG_ES,		/* ES */
		REG_DS,		/* DS */
		REG_RDI,	/* EDI */
		REG_RSI,	/* ESI */
		REG_RBP,	/* EBP */
		REG_RSP,	/* ESP */
		REG_RBX,	/* EBX */
		REG_RDX,	/* EDX */
		REG_RCX,	/* ECX */
		REG_RAX,	/* EAX */
		REG_TRAPNO,	/* TRAPNO */
		REG_ERR,	/* ERR */
		REG_RIP,	/* EIP */
		REG_CS,		/* CS */
		REG_RFL,	/* EFL */
		REG_RSP,	/* UESP */
		REG_SS		/* SS */
	};

	if (reg <= SS) {
		if (reg >= sizeof (regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->r_rdi);
	case REG_RSI:
		return (rp->r_rsi);
	case REG_RDX:
		return (rp->r_rdx);
	case REG_RCX:
		return (rp->r_rcx);
	case REG_R8:
		return (rp->r_r8);
	case REG_R9:
		return (rp->r_r9);
	case REG_RAX:
		return (rp->r_rax);
	case REG_RBX:
		return (rp->r_rbx);
	case REG_RBP:
		return (rp->r_rbp);
	case REG_R10:
		return (rp->r_r10);
	case REG_R11:
		return (rp->r_r11);
	case REG_R12:
		return (rp->r_r12);
	case REG_R13:
		return (rp->r_r13);
	case REG_R14:
		return (rp->r_r14);
	case REG_R15:
		return (rp->r_r15);
	case REG_DS:
		return (rp->r_ds);
	case REG_ES:
		return (rp->r_es);
	case REG_FS:
		return (rp->r_fs);
	case REG_GS:
		return (rp->r_gs);
	case REG_TRAPNO:
		return (rp->r_trapno);
	case REG_ERR:
		return (rp->r_err);
	case REG_RIP:
		return (rp->r_rip);
	case REG_CS:
		return (rp->r_cs);
	case REG_SS:
		return (rp->r_ss);
	case REG_RFL:
		return (rp->r_rfl);
	case REG_RSP:
		return (rp->r_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

#else
	if (reg > SS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	return ((&rp->r_gs)[reg]);
#endif
}

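/*
 * Validate a user address range before a copy:  the kernel address must
 * lie in the kernel and not wrap (asserted), and the user range must lie
 * entirely below kernelbase and must not wrap.  On a bad user address,
 * CPU_DTRACE_BADADDR is set, the offending address is recorded in
 * cpuc_dtrace_illval, and 0 is returned.
 */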
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

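/*
 * The copyin()/copyout() analogues used from probe context:  each wrapper
 * validates the user range with dtrace_copycheck() and then performs the
 * copy with dtrace_copy() or dtrace_copystr().
 */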
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size);
}

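/*
 * Sized user-word fetches for probe context.  Each checks that the address
 * lies below _userlimit; a kernel address sets CPU_DTRACE_BADADDR, records
 * the address in cpuc_dtrace_illval, and yields 0.  The actual load is
 * done by the corresponding dtrace_fuwordN_nocheck() routine.
 */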
uint8_t
dtrace_fuword8(void *uaddr)
{
	extern uint8_t dtrace_fuword8_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	extern uint16_t dtrace_fuword16_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	extern uint32_t dtrace_fuword32_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	extern uint64_t dtrace_fuword64_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}