/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dtrace_impl.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>

extern uintptr_t kernelbase;

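/*
 * Record up to pcstack_limit kernel return addresses into pcstack[] by
 * walking saved frame pointers from the current frame.  'aframes' artificial
 * (DTrace-internal) frames are skipped, with cpu_dtrace_caller substituted
 * when one is pending; if an interrupt pc is supplied it becomes the topmost
 * entry, and any remaining slots are zeroed.
 */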
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr, last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = (struct frame *)fp->fr_savfp;
		pc = fp->fr_savpc;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}

			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != NULL) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = NULL;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = NULL;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}

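/*
 * Common user stack walker: starting from the given pc and frame pointer,
 * follow the chain of saved frame pointers in the traced process, storing
 * each pc into pcstack[] (when it is non-NULL) and returning the number of
 * frames visited.  s1 and s2 are apparently the two offsets from a signal
 * handler's frame pointer at which its ucontext may sit (without and with a
 * siginfo); when lwp_oldcontext matches one of them, the walk resumes from
 * the interrupted context rather than from the signal frame.
 */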
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	uintptr_t oldcontext = lwp->lwp_oldcontext;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}

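/*
 * Record the current process's user stack into pcstack[]: the pid goes in
 * the first slot, followed by the frames walked from the saved user
 * registers.  If CPU_DTRACE_ENTRY is set (presumably an entry probe, before
 * the function prologue has run), the current pc is recorded and the
 * caller's return address is fetched from the top of the user stack.
 * Unused slots are zeroed.
 */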
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

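/*
 * Return the depth of the current user stack without recording it, using
 * the same walk as dtrace_getupcstack(); 0 if there is no user context,
 * -1 if a fault has already been flagged.
 */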
int
dtrace_getustackdepth(void)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n = 0;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}

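/*
 * Like dtrace_getupcstack(), but additionally records each frame's frame
 * pointer into fpstack[].  The signal-context handling mirrors
 * dtrace_getustack_common().
 */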
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

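/*
 * Return probe argument 'arg' for a probe sitting 'aframes' frames below us.
 * If the walk up the kernel stack passes through dtrace_invop_callsite, the
 * probe fired via the invalid-opcode trap and the argument is recovered from
 * the state saved at the trap; otherwise dtrace_probe() was called directly
 * and the argument is read from its caller's frame.
 */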
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;
#if defined(__amd64)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;
#endif

	for (i = 1; i <= aframes; i++) {
		fp = (struct frame *)(fp->fr_savfp);

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed to the stack as the
			 * second argument to dtrace_invop() as the pointer to
			 * the stack.  When using this stack, we must step
			 * beyond the EIP/RIP that was pushed when the trap was
			 * taken -- hence the "+ 1" below.
			 */
			stack = ((uintptr_t **)&fp[1])[1] + 1;
#else
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure, and then again beyond
			 * the calling RIP stored in dtrace_invop().  If the
			 * argument that we're seeking is passed on the stack,
			 * we'll pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t));

			if (arg <= inreg) {
				stack = (uintptr_t *)&rp->r_rdi;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				arg -= inreg;
			}
#endif
			goto load;
		}

	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

#if defined(__amd64)
	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

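/*
 * Return the depth of the current kernel stack, not counting 'aframes'
 * artificial frames, using the same frame-pointer walk as
 * dtrace_getpcstack().
 */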
/*ARGSUSED*/
int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		nextfp = (struct frame *)fp->fr_savfp;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break;
		}

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes)
		return (0);

	return (depth - aframes);
}

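/*
 * Return the value of the register identified by 'reg' from the saved
 * register state 'rp'; this backs the uregs[] DIF variable.
 */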
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
#if defined(__amd64)
	int regmap[] = {
		REG_GS,		/* GS */
		REG_FS,		/* FS */
		REG_ES,		/* ES */
		REG_DS,		/* DS */
		REG_RDI,	/* EDI */
		REG_RSI,	/* ESI */
		REG_RBP,	/* EBP */
		REG_RSP,	/* ESP */
		REG_RBX,	/* EBX */
		REG_RDX,	/* EDX */
		REG_RCX,	/* ECX */
		REG_RAX,	/* EAX */
		REG_TRAPNO,	/* TRAPNO */
		REG_ERR,	/* ERR */
		REG_RIP,	/* EIP */
		REG_CS,		/* CS */
		REG_RFL,	/* EFL */
		REG_RSP,	/* UESP */
		REG_SS		/* SS */
	};

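	/*
	 * Indices up to SS use the i386 register numbering and are remapped
	 * onto the amd64 regset via regmap above; larger indices are taken
	 * to name amd64 registers directly, offset by SS + 1.
	 */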
	if (reg <= SS) {
		if (reg >= sizeof (regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->r_rdi);
	case REG_RSI:
		return (rp->r_rsi);
	case REG_RDX:
		return (rp->r_rdx);
	case REG_RCX:
		return (rp->r_rcx);
	case REG_R8:
		return (rp->r_r8);
	case REG_R9:
		return (rp->r_r9);
	case REG_RAX:
		return (rp->r_rax);
	case REG_RBX:
		return (rp->r_rbx);
	case REG_RBP:
		return (rp->r_rbp);
	case REG_R10:
		return (rp->r_r10);
	case REG_R11:
		return (rp->r_r11);
	case REG_R12:
		return (rp->r_r12);
	case REG_R13:
		return (rp->r_r13);
	case REG_R14:
		return (rp->r_r14);
	case REG_R15:
		return (rp->r_r15);
	case REG_DS:
		return (rp->r_ds);
	case REG_ES:
		return (rp->r_es);
	case REG_FS:
		return (rp->r_fs);
	case REG_GS:
		return (rp->r_gs);
	case REG_TRAPNO:
		return (rp->r_trapno);
	case REG_ERR:
		return (rp->r_err);
	case REG_RIP:
		return (rp->r_rip);
	case REG_CS:
		return (rp->r_cs);
	case REG_SS:
		return (rp->r_ss);
	case REG_RFL:
		return (rp->r_rfl);
	case REG_RSP:
		return (rp->r_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

#else
	if (reg > SS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	return ((&rp->r_gs)[reg]);
#endif
}

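/*
 * Verify that the user range starting at 'uaddr' lies entirely below
 * kernelbase and does not wrap; otherwise flag a bad-address fault and
 * return 0.  The kernel buffer is asserted to be a valid kernel range.
 */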
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

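/*
 * Checked copy routines: each validates the user address range with
 * dtrace_copycheck() before copying between user and kernel buffers.
 */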
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size);
}

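/*
 * Fetch an 8-, 16-, 32- or 64-bit value from the given user address.  An
 * address at or above _userlimit flags a bad-address fault and yields 0.
 */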
uint8_t
dtrace_fuword8(void *uaddr)
{
	extern uint8_t dtrace_fuword8_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	extern uint16_t dtrace_fuword16_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	extern uint32_t dtrace_fuword32_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	extern uint64_t dtrace_fuword64_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}