xref: /freebsd/sys/cddl/dev/dtrace/amd64/dtrace_isa.c (revision 5c1d97100348ef19878fa14671a9b70f3d963ed4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  *
22  * $FreeBSD$
23  */
24 /*
25  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 #include <sys/cdefs.h>
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/stack.h>
34 #include <sys/pcpu.h>
35 
36 #include <machine/frame.h>
37 #include <machine/md_var.h>
38 #include <machine/reg.h>
39 #include <machine/stack.h>
40 #include <x86/ifunc.h>
41 
42 #include <vm/vm.h>
43 #include <vm/vm_param.h>
44 #include <vm/pmap.h>
45 
46 #include "regset.h"
47 
48 uint8_t dtrace_fuword8_nocheck(void *);
49 uint16_t dtrace_fuword16_nocheck(void *);
50 uint32_t dtrace_fuword32_nocheck(void *);
51 uint64_t dtrace_fuword64_nocheck(void *);
52 
53 int	dtrace_ustackdepth_max = 2048;
54 
/*
 * Capture the current kernel call stack into pcstack (at most
 * pcstack_limit entries), skipping aframes artificial (DTrace-internal)
 * frames.  If intrpc is non-NULL it is recorded as the topmost entry
 * (the interrupted PC).  Unused slots are zeroed.
 */
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	int depth = 0;
	register_t rbp;
	struct amd64_frame *frame;
	vm_offset_t callpc;
	pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;

	/* An interrupted PC, when supplied, is the top of the stack. */
	if (intrpc != 0)
		pcstack[depth++] = (pc_t) intrpc;

	/* Account for this function's own frame. */
	aframes++;

	/* Start the walk from our current frame pointer. */
	__asm __volatile("movq %%rbp,%0" : "=r" (rbp));

	frame = (struct amd64_frame *)rbp;
	while (depth < pcstack_limit) {
		if (!INKERNEL((long) frame))
			break;

		callpc = frame->f_retaddr;

		if (!INKERNEL(callpc))
			break;

		/*
		 * Consume artificial frames; when the last one is skipped,
		 * substitute the recorded probe caller (if any) in its place.
		 */
		if (aframes > 0) {
			aframes--;
			if ((aframes == 0) && (caller != 0)) {
				pcstack[depth++] = caller;
			}
		}
		else {
			pcstack[depth++] = callpc;
		}

		/*
		 * Stop when the next frame does not move strictly up the
		 * stack or would leave this thread's kernel stack bounds.
		 */
		if (frame->f_frame <= frame ||
		    (vm_offset_t)frame->f_frame >= curthread->td_kstack +
		    curthread->td_kstack_pages * PAGE_SIZE)
			break;
		frame = frame->f_frame;
	}

	/* Zero the remainder of the caller's buffer. */
	for (; depth < pcstack_limit; depth++) {
		pcstack[depth] = 0;
	}
}
103 
/*
 * Common user-stack walker shared by dtrace_getupcstack() and
 * dtrace_getustackdepth().  Starting at program counter pc with frame
 * pointer sp, follow the user frame-pointer chain; each PC visited is
 * stored into pcstack when pcstack is non-NULL (bounded by
 * pcstack_limit).  Returns the number of frames visited.  The walk
 * terminates on a zero pc or sp, a full pcstack, a non-advancing
 * (circular) frame chain, exceeding dtrace_ustackdepth_max iterations,
 * or a user-memory access fault.
 */
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	uintptr_t oldsp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	ASSERT(dtrace_ustackdepth_max > 0);

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= dtrace_ustackdepth_max) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = sp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		oldsp = sp;

		/* Fetch the return address and the next frame pointer. */
		pc = dtrace_fuword64((void *)(sp +
			offsetof(struct amd64_frame, f_retaddr)));
		sp = dtrace_fuword64((void *)sp);

		/* A frame pointing at itself indicates a corrupt stack. */
		if (sp == oldsp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = sp;
			break;
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}
162 
/*
 * Capture the current user-level call stack into pcstack (at most
 * pcstack_limit entries).  The first entry is the current pid; the
 * remainder are user return addresses.  Unused slots are zeroed.
 */
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int n;

	/* Don't touch user memory if a fault is already pending. */
	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_rip;
	fp = tf->tf_rbp;
	sp = tf->tf_rsp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call
		 * instruction puts it there right before the branch.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword64((void *) sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
224 
/*
 * Return the depth of the current user-level call stack, or 0 when
 * there is no user context, or -1 if a user-access fault is already
 * pending.
 */
int
dtrace_getustackdepth(void)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, fp, sp;
	int n = 0;

	if (p == NULL || (tf = curthread->td_frame) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = tf->tf_rip;
	fp = tf->tf_rbp;
	sp = tf->tf_rsp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call
		 * instruction puts it there right before the branch.
		 */

		pc = dtrace_fuword64((void *) sp);
		n++;
	}

	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}
261 
/*
 * Capture the current user-level call stack with frame pointers: for
 * each frame, the PC goes into pcstack and the frame pointer into
 * fpstack.  The first pcstack entry is the pid.  Unused pcstack slots
 * are zeroed.  The signal-stack (ucontext) handling is not yet ported
 * from Solaris (see the "notyet" sections).
 */
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#ifdef notyet	/* XXX signal stack */
	uintptr_t oldcontext;
	size_t s1, s2;
#endif

	/* Don't touch user memory if a fault is already pending. */
	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_rip;
	sp = tf->tf_rsp;
	fp = tf->tf_rbp;

#ifdef notyet /* XXX signal stack */
	oldcontext = lwp->lwp_oldcontext;
	s1 = sizeof (struct xframe) + 2 * sizeof (long);
	s2 = s1 + sizeof (siginfo_t);
#endif

	/*
	 * Entry probe: the frame pointer is not pushed yet, so record the
	 * current PC with no frame pointer and back the PC up to the
	 * caller's return address at the stack pointer.
	 */
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword64((void *)sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = fp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (fp == 0)
			break;

#ifdef notyet /* XXX signal stack */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			ucontext_t *ucp = (ucontext_t *)oldcontext;
			greg_t *gregs = ucp->uc_mcontext.gregs;

			sp = dtrace_fulword(&gregs[REG_FP]);
			pc = dtrace_fulword(&gregs[REG_PC]);

			oldcontext = dtrace_fulword(&ucp->uc_link);
		} else
#endif /* XXX */
		{
			/* Ordinary frame: fetch return address and next fp. */
			pc = dtrace_fuword64((void *)(fp +
				offsetof(struct amd64_frame, f_retaddr)));
			fp = dtrace_fuword64((void *)fp);
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
355 
/*ARGSUSED*/
/*
 * Return probe argument number arg at the probe site, skipping aframes
 * artificial (DTrace-internal) frames.  If the probe fired via a trap
 * through dtrace_invop() (detected by matching the return address
 * against dtrace_invop_callsite), the argument comes from the saved
 * trapframe: registers for args 0-5, the interrupted stack otherwise.
 * If the provider called dtrace_probe() directly, the argument is read
 * from dtrace_probe()'s caller's stack frame.
 */
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct amd64_frame *fp = (struct amd64_frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;

	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;

	for (i = 1; i <= aframes; i++) {
		fp = fp->f_frame;

		if (P2ROUNDUP(fp->f_retaddr, 16) ==
		    (long)dtrace_invop_callsite) {
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure, and then again beyond
			 * the calling RIP stored in dtrace_invop().  If the
			 * argument that we're seeking is passed on the stack,
			 * we'll pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct trapframe *tf = (struct trapframe *)&fp[1];

			if (arg <= inreg) {
				/* SysV AMD64 ABI integer argument registers. */
				switch (arg) {
				case 0:
					stack = (uintptr_t *)&tf->tf_rdi;
					break;
				case 1:
					stack = (uintptr_t *)&tf->tf_rsi;
					break;
				case 2:
					stack = (uintptr_t *)&tf->tf_rdx;
					break;
				case 3:
					stack = (uintptr_t *)&tf->tf_rcx;
					break;
				case 4:
					stack = (uintptr_t *)&tf->tf_r8;
					break;
				case 5:
					stack = (uintptr_t *)&tf->tf_r9;
					break;
				}
				arg = 0;
			} else {
				stack = (uintptr_t *)(tf->tf_rsp);
				arg -= inreg;
			}
			goto load;
		}

	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
	stack = (uintptr_t *)&fp[1];

load:
	/* Read with faults suppressed; a bad address yields garbage, not a panic. */
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}
452 
/*
 * Return the depth of the current kernel call stack, excluding aframes
 * artificial frames (this function's own frame is counted as one of
 * them).  Returns 0 if the stack is shallower than aframes.
 */
int
dtrace_getstackdepth(int aframes)
{
	int depth = 0;
	struct amd64_frame *frame;
	vm_offset_t rbp;

	/* Account for this function's own frame. */
	aframes++;
	rbp = dtrace_getfp();
	frame = (struct amd64_frame *)rbp;
	depth++;
	for(;;) {
		if (!INKERNEL((long) frame))
			break;
		if (!INKERNEL((long) frame->f_frame))
			break;
		depth++;
		/*
		 * Stop when the next frame does not advance up the stack
		 * or leaves this thread's kernel stack bounds.
		 */
		if (frame->f_frame <= frame ||
		    (vm_offset_t)frame->f_frame >= curthread->td_kstack +
		    curthread->td_kstack_pages * PAGE_SIZE)
			break;
		frame = frame->f_frame;
	}
	if (depth < aframes)
		return 0;
	else
		return depth - aframes;
}
481 
482 ulong_t
483 dtrace_getreg(struct trapframe *rp, uint_t reg)
484 {
485 	/* This table is dependent on reg.d. */
486 	int regmap[] = {
487 		REG_GS,		/* 0  GS */
488 		REG_FS,		/* 1  FS */
489 		REG_ES,		/* 2  ES */
490 		REG_DS,		/* 3  DS */
491 		REG_RDI,	/* 4  EDI */
492 		REG_RSI,	/* 5  ESI */
493 		REG_RBP,	/* 6  EBP, REG_FP */
494 		REG_RSP,	/* 7  ESP */
495 		REG_RBX,	/* 8  EBX, REG_R1 */
496 		REG_RDX,	/* 9  EDX */
497 		REG_RCX,	/* 10 ECX */
498 		REG_RAX,	/* 11 EAX, REG_R0 */
499 		REG_TRAPNO,	/* 12 TRAPNO */
500 		REG_ERR,	/* 13 ERR */
501 		REG_RIP,	/* 14 EIP, REG_PC */
502 		REG_CS,		/* 15 CS */
503 		REG_RFL,	/* 16 EFL, REG_PS */
504 		REG_RSP,	/* 17 UESP, REG_SP */
505 		REG_SS		/* 18 SS */
506 	};
507 
508 	if (reg <= SS) {
509 		if (reg >= sizeof (regmap) / sizeof (int)) {
510 			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
511 			return (0);
512 		}
513 
514 		reg = regmap[reg];
515 	} else {
516 		/* This is dependent on reg.d. */
517 #ifdef illumos
518 		reg -= SS + 1;
519 #else	/* !illumos */
520 		reg -= GS + 1;
521 #endif
522 	}
523 
524 	switch (reg) {
525 	case REG_RDI:
526 		return (rp->tf_rdi);
527 	case REG_RSI:
528 		return (rp->tf_rsi);
529 	case REG_RDX:
530 		return (rp->tf_rdx);
531 	case REG_RCX:
532 		return (rp->tf_rcx);
533 	case REG_R8:
534 		return (rp->tf_r8);
535 	case REG_R9:
536 		return (rp->tf_r9);
537 	case REG_RAX:
538 		return (rp->tf_rax);
539 	case REG_RBX:
540 		return (rp->tf_rbx);
541 	case REG_RBP:
542 		return (rp->tf_rbp);
543 	case REG_R10:
544 		return (rp->tf_r10);
545 	case REG_R11:
546 		return (rp->tf_r11);
547 	case REG_R12:
548 		return (rp->tf_r12);
549 	case REG_R13:
550 		return (rp->tf_r13);
551 	case REG_R14:
552 		return (rp->tf_r14);
553 	case REG_R15:
554 		return (rp->tf_r15);
555 	case REG_DS:
556 		return (rp->tf_ds);
557 	case REG_ES:
558 		return (rp->tf_es);
559 	case REG_FS:
560 		return (rp->tf_fs);
561 	case REG_GS:
562 		return (rp->tf_gs);
563 	case REG_TRAPNO:
564 		return (rp->tf_trapno);
565 	case REG_ERR:
566 		return (rp->tf_err);
567 	case REG_RIP:
568 		return (rp->tf_rip);
569 	case REG_CS:
570 		return (rp->tf_cs);
571 	case REG_SS:
572 		return (rp->tf_ss);
573 	case REG_RFL:
574 		return (rp->tf_rflags);
575 	case REG_RSP:
576 		return (rp->tf_rsp);
577 	default:
578 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
579 		return (0);
580 	}
581 }
582 
583 static int
584 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
585 {
586 	ASSERT(INKERNEL(kaddr) && kaddr + size >= kaddr);
587 
588 	if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
589 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
590 		cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
591 		return (0);
592 	}
593 
594 	return (1);
595 }
596 
/*
 * Copy size bytes from user address uaddr into kernel address kaddr,
 * provided the range passes dtrace_copycheck().
 */
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (!dtrace_copycheck(uaddr, kaddr, size))
		return;
	dtrace_copy(uaddr, kaddr, size);
}
604 
/*
 * Copy size bytes from kernel address kaddr out to user address uaddr,
 * provided the range passes dtrace_copycheck().
 */
void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (!dtrace_copycheck(uaddr, kaddr, size))
		return;
	dtrace_copy(kaddr, uaddr, size);
}
612 
/*
 * Copy a NUL-terminated string (up to size bytes) from user address
 * uaddr into kernel address kaddr, provided the range passes
 * dtrace_copycheck().  Faults are reported through flags.
 */
void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (!dtrace_copycheck(uaddr, kaddr, size))
		return;
	dtrace_copystr(uaddr, kaddr, size, flags);
}
620 
/*
 * Copy a NUL-terminated string (up to size bytes) from kernel address
 * kaddr out to user address uaddr, provided the range passes
 * dtrace_copycheck().  Faults are reported through flags.
 */
void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (!dtrace_copycheck(uaddr, kaddr, size))
		return;
	dtrace_copystr(kaddr, uaddr, size, flags);
}
628 
629 uint8_t
630 dtrace_fuword8(void *uaddr)
631 {
632 	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
633 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
634 		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
635 		return (0);
636 	}
637 	return (dtrace_fuword8_nocheck(uaddr));
638 }
639 
640 uint16_t
641 dtrace_fuword16(void *uaddr)
642 {
643 	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
644 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
645 		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
646 		return (0);
647 	}
648 	return (dtrace_fuword16_nocheck(uaddr));
649 }
650 
651 uint32_t
652 dtrace_fuword32(void *uaddr)
653 {
654 	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
655 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
656 		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
657 		return (0);
658 	}
659 	return (dtrace_fuword32_nocheck(uaddr));
660 }
661 
662 uint64_t
663 dtrace_fuword64(void *uaddr)
664 {
665 	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
666 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
667 		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
668 		return (0);
669 	}
670 	return (dtrace_fuword64_nocheck(uaddr));
671 }
672 
673 /*
674  * ifunc resolvers for SMAP support
675  */
676 void dtrace_copy_nosmap(uintptr_t, uintptr_t, size_t);
677 void dtrace_copy_smap(uintptr_t, uintptr_t, size_t);
678 DEFINE_IFUNC(, void, dtrace_copy, (uintptr_t, uintptr_t, size_t))
679 {
680 
681 	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
682 	    dtrace_copy_smap : dtrace_copy_nosmap);
683 }
684 
685 void dtrace_copystr_nosmap(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
686 void dtrace_copystr_smap(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
687 DEFINE_IFUNC(, void, dtrace_copystr, (uintptr_t, uintptr_t, size_t,
688     volatile uint16_t *))
689 {
690 
691 	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
692 	    dtrace_copystr_smap : dtrace_copystr_nosmap);
693 }
694 
695 uintptr_t dtrace_fulword_nosmap(void *);
696 uintptr_t dtrace_fulword_smap(void *);
697 DEFINE_IFUNC(, uintptr_t, dtrace_fulword, (void *))
698 {
699 
700 	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
701 	    dtrace_fulword_smap : dtrace_fulword_nosmap);
702 }
703 
704 uint8_t dtrace_fuword8_nocheck_nosmap(void *);
705 uint8_t dtrace_fuword8_nocheck_smap(void *);
706 DEFINE_IFUNC(, uint8_t, dtrace_fuword8_nocheck, (void *))
707 {
708 
709 	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
710 	    dtrace_fuword8_nocheck_smap : dtrace_fuword8_nocheck_nosmap);
711 }
712 
713 uint16_t dtrace_fuword16_nocheck_nosmap(void *);
714 uint16_t dtrace_fuword16_nocheck_smap(void *);
715 DEFINE_IFUNC(, uint16_t, dtrace_fuword16_nocheck, (void *))
716 {
717 
718 	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
719 	    dtrace_fuword16_nocheck_smap : dtrace_fuword16_nocheck_nosmap);
720 }
721 
722 uint32_t dtrace_fuword32_nocheck_nosmap(void *);
723 uint32_t dtrace_fuword32_nocheck_smap(void *);
724 DEFINE_IFUNC(, uint32_t, dtrace_fuword32_nocheck, (void *))
725 {
726 
727 	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
728 	    dtrace_fuword32_nocheck_smap : dtrace_fuword32_nocheck_nosmap);
729 }
730 
731 uint64_t dtrace_fuword64_nocheck_nosmap(void *);
732 uint64_t dtrace_fuword64_nocheck_smap(void *);
733 DEFINE_IFUNC(, uint64_t, dtrace_fuword64_nocheck, (void *))
734 {
735 
736 	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
737 	    dtrace_fuword64_nocheck_smap : dtrace_fuword64_nocheck_nosmap);
738 }
739