/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dtrace_impl.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>

/*
 * This is gross knowledge to have to encode here...
 */
extern void _interrupt();
extern void _cmntrap();
extern void _allsyscalls();

extern size_t _interrupt_size;
extern size_t _cmntrap_size;
extern size_t _allsyscalls_size;

extern uintptr_t kernelbase;

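/*
 * Record up to pcstack_limit kernel program counters, starting from the
 * current frame pointer.  The first 'aframes' (artificial, DTrace-induced)
 * frames are skipped, and if the probe fired on the interrupt stack we hop
 * back onto the interrupted thread's stack to continue the walk.
 */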
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr, last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = (struct frame *)fp->fr_savfp;
		pc = fp->fr_savpc;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}

			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != NULL) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = NULL;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = NULL;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}

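/*
 * Common user stack walker:  starting from the given pc and frame pointer,
 * follow the chain of saved frame pointers (hopping through any signal
 * handler ucontexts), storing up to pcstack_limit program counters.  If
 * pcstack is NULL, only the number of frames is returned.
 */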
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	uintptr_t oldcontext = lwp->lwp_oldcontext;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	while (pc != 0 && sp != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}

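/*
 * Record the current user-level stack:  the pid is stored first, followed
 * by the interrupted pc and the frames found by dtrace_getustack_common().
 * Any remaining entries are zeroed.
 */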
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

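/*
 * Return the depth of the current user-level stack without recording any
 * program counters.
 */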
int
dtrace_getustackdepth(void)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n = 0;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}

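/*
 * Like dtrace_getupcstack(), but also records in fpstack the frame pointer
 * associated with each program counter.
 */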
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0 && sp != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

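/*
 * Fetch probe argument 'arg', skipping 'aframes' artificial frames.  If the
 * probe was induced through the invalid-opcode handler, the argument is
 * taken from the register or stack state saved at the trap; otherwise it is
 * read from the stack of dtrace_probe()'s caller, skipping over the probe
 * ID argument.
 */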
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;
#if defined(__amd64)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;
#endif

	for (i = 1; i <= aframes; i++) {
		fp = (struct frame *)(fp->fr_savfp);

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed to the stack as the
			 * second argument to dtrace_invop() as the pointer to
			 * the stack.  When using this stack, we must step
			 * beyond the EIP/RIP that was pushed when the trap was
			 * taken -- hence the "+ 1" below.
			 */
			stack = ((uintptr_t **)&fp[1])[1] + 1;
#else
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure, and then again beyond
			 * the calling RIP stored in dtrace_invop().  If the
			 * argument that we're seeking is passed on the stack,
			 * we'll pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t));

			if (arg <= inreg) {
				stack = (uintptr_t *)&rp->r_rdi;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				arg -= inreg;
			}
#endif
			goto load;
		}

	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

#if defined(__amd64)
	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

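/*
 * Return the kernel stack depth at the time the probe fired, less the
 * specified number of artificial frames.
 */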
/*ARGSUSED*/
int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		nextfp = (struct frame *)fp->fr_savfp;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break;
		}

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes)
		return (0);

	return (depth - aframes);
}

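/*
 * Return the value of the register identified by 'reg' from the given
 * register set.  On amd64, indices up to SS name 32-bit registers and are
 * remapped onto their 64-bit equivalents; larger indices are interpreted
 * as native 64-bit register numbers (offset by SS + 1).
 */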
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
#if defined(__amd64)
	int regmap[] = {
		REG_GS,		/* GS */
		REG_FS,		/* FS */
		REG_ES,		/* ES */
		REG_DS,		/* DS */
		REG_RDI,	/* EDI */
		REG_RSI,	/* ESI */
		REG_RBP,	/* EBP */
		REG_RSP,	/* ESP */
		REG_RBX,	/* EBX */
		REG_RDX,	/* EDX */
		REG_RCX,	/* ECX */
		REG_RAX,	/* EAX */
		REG_TRAPNO,	/* TRAPNO */
		REG_ERR,	/* ERR */
		REG_RIP,	/* EIP */
		REG_CS,		/* CS */
		REG_RFL,	/* EFL */
		REG_RSP,	/* UESP */
		REG_SS		/* SS */
	};

	if (reg <= SS) {
		if (reg >= sizeof (regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->r_rdi);
	case REG_RSI:
		return (rp->r_rsi);
	case REG_RDX:
		return (rp->r_rdx);
	case REG_RCX:
		return (rp->r_rcx);
	case REG_R8:
		return (rp->r_r8);
	case REG_R9:
		return (rp->r_r9);
	case REG_RAX:
		return (rp->r_rax);
	case REG_RBX:
		return (rp->r_rbx);
	case REG_RBP:
		return (rp->r_rbp);
	case REG_R10:
		return (rp->r_r10);
	case REG_R11:
		return (rp->r_r11);
	case REG_R12:
		return (rp->r_r12);
	case REG_R13:
		return (rp->r_r13);
	case REG_R14:
		return (rp->r_r14);
	case REG_R15:
		return (rp->r_r15);
	case REG_DS:
		return (rp->r_ds);
	case REG_ES:
		return (rp->r_es);
	case REG_FS:
		return (rp->r_fs);
	case REG_GS:
		return (rp->r_gs);
	case REG_TRAPNO:
		return (rp->r_trapno);
	case REG_ERR:
		return (rp->r_err);
	case REG_RIP:
		return (rp->r_rip);
	case REG_CS:
		return (rp->r_cs);
	case REG_SS:
		return (rp->r_ss);
	case REG_RFL:
		return (rp->r_rfl);
	case REG_RSP:
		return (rp->r_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

#else
	if (reg > SS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	return ((&rp->r_gs)[reg]);
#endif
}

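/*
 * Verify that the user address range [uaddr, uaddr + size) lies entirely
 * below kernelbase and does not wrap; on failure, set CPU_DTRACE_BADADDR
 * and record the offending address.
 */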
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

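/*
 * Probe-context copy routines between user and kernel memory; each
 * validates the user address range with dtrace_copycheck() before copying.
 */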
649 
650 void
651 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size)
652 {
653 	if (dtrace_copycheck(uaddr, kaddr, size))
654 		dtrace_copy(uaddr, kaddr, size);
655 }
656 
657 void
658 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size)
659 {
660 	if (dtrace_copycheck(uaddr, kaddr, size))
661 		dtrace_copy(kaddr, uaddr, size);
662 }
663 
664 void
665 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size)
666 {
667 	if (dtrace_copycheck(uaddr, kaddr, size))
668 		dtrace_copystr(uaddr, kaddr, size);
669 }
670 
671 void
672 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size)
673 {
674 	if (dtrace_copycheck(uaddr, kaddr, size))
675 		dtrace_copystr(kaddr, uaddr, size);
676 }
677 
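/*
 * Probe-context fetches of 8-, 16-, 32- and 64-bit user values.  Each
 * routine verifies that the address lies below _userlimit before calling
 * the corresponding unchecked assembly routine.
 */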
uint8_t
dtrace_fuword8(void *uaddr)
{
	extern uint8_t dtrace_fuword8_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	extern uint16_t dtrace_fuword16_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	extern uint32_t dtrace_fuword32_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	extern uint64_t dtrace_fuword64_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}