/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dtrace_impl.h>
#include <sys/kernel.h>
#include <sys/msan.h>
#include <sys/stack.h>
#include <sys/pcpu.h>

#include <cddl/dev/dtrace/dtrace_cddl.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/stack.h>
#include <x86/ifunc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "regset.h"

uint8_t dtrace_fuword8_nocheck(void *);
uint16_t dtrace_fuword16_nocheck(void *);
uint32_t dtrace_fuword32_nocheck(void *);
uint64_t dtrace_fuword64_nocheck(void *);

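/*
 * Cap the number of frames dtrace_getustack_common() will visit, so a
 * corrupted or circular user frame-pointer chain cannot wedge a probe.
 */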
int dtrace_ustackdepth_max = 2048;

void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct thread *td;
	int depth = 0;
	register_t rbp;
	struct amd64_frame *frame;
	vm_offset_t callpc;
	pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;

	if (intrpc != 0)
		pcstack[depth++] = (pc_t) intrpc;

	aframes++;

	__asm __volatile("movq %%rbp,%0" : "=r" (rbp));

	frame = (struct amd64_frame *)rbp;
	td = curthread;
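	/*
	 * Walk the frame-pointer chain: each amd64_frame holds the
	 * saved %rbp (f_frame) and the return address (f_retaddr).
	 * kmsan_mark() declares the frame memory initialized so the
	 * unwind itself does not trip KMSAN.
	 */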
	while (depth < pcstack_limit) {
		kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);

		if (!kstack_contains(curthread, (vm_offset_t)frame,
		    sizeof(*frame)))
			break;

		callpc = frame->f_retaddr;

		if (!INKERNEL(callpc))
			break;

		if (aframes > 0) {
			aframes--;
			if ((aframes == 0) && (caller != 0)) {
				pcstack[depth++] = caller;
			}
		} else {
			pcstack[depth++] = callpc;
		}

		if ((vm_offset_t)frame->f_frame <= (vm_offset_t)frame)
			break;
		frame = frame->f_frame;
	}

	for (; depth < pcstack_limit; depth++) {
		pcstack[depth] = 0;
	}
	kmsan_check(pcstack, pcstack_limit * sizeof(*pcstack), "dtrace");
}

static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	uintptr_t oldsp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	ASSERT(dtrace_ustackdepth_max > 0);

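	/*
	 * Walk the user frame-pointer chain.  Frame members are fetched
	 * with dtrace_fuword64(), which performs a fault-safe user read
	 * and flags bad addresses instead of faulting fatally in probe
	 * context.
	 */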
	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= dtrace_ustackdepth_max) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = sp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		oldsp = sp;

		pc = dtrace_fuword64((void *)(sp +
		    offsetof(struct amd64_frame, f_retaddr)));
		sp = dtrace_fuword64((void *)sp);

		if (sp == oldsp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = sp;
			break;
		}

		/*
		 * This is totally bogus: if we faulted, we're going to clear
		 * the fault and break. This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}

void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

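	/*
	 * Seed the walk with the user %rip, %rbp and %rsp captured in
	 * the trapframe when this thread last entered the kernel.
	 */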
	pc = tf->tf_rip;
	fp = tf->tf_rbp;
	sp = tf->tf_rsp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe. The frame pointer has not yet been
		 * pushed (that happens in the function prologue). The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call
		 * instruction puts it there right before the branch.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword64((void *) sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

int
dtrace_getustackdepth(void)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, fp, sp;
	int n = 0;

	if (p == NULL || (tf = curthread->td_frame) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = tf->tf_rip;
	fp = tf->tf_rbp;
	sp = tf->tf_rsp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe. The frame pointer has not yet been
		 * pushed (that happens in the function prologue). The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call
		 * instruction puts it there right before the branch.
		 */

		pc = dtrace_fuword64((void *) sp);
		n++;
	}

	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}

void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#ifdef notyet	/* XXX signal stack */
	uintptr_t oldcontext;
	size_t s1, s2;
#endif

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_rip;
	sp = tf->tf_rsp;
	fp = tf->tf_rbp;

#ifdef notyet	/* XXX signal stack */
	oldcontext = lwp->lwp_oldcontext;
	s1 = sizeof (struct xframe) + 2 * sizeof (long);
	s2 = s1 + sizeof (siginfo_t);
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword64((void *)sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = fp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (fp == 0)
			break;

#ifdef notyet	/* XXX signal stack */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			ucontext_t *ucp = (ucontext_t *)oldcontext;
			greg_t *gregs = ucp->uc_mcontext.gregs;

			sp = dtrace_fulword(&gregs[REG_FP]);
			pc = dtrace_fulword(&gregs[REG_PC]);

			oldcontext = dtrace_fulword(&ucp->uc_link);
		} else
#endif /* XXX */
		{
			pc = dtrace_fuword64((void *)(fp +
			    offsetof(struct amd64_frame, f_retaddr)));
			fp = dtrace_fuword64((void *)fp);
		}

		/*
		 * This is totally bogus: if we faulted, we're going to clear
		 * the fault and break. This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	struct thread *td;
	uintptr_t val;
	struct amd64_frame *fp = (struct amd64_frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;

	/*
	 * A total of 6 arguments are passed via registers (%rdi, %rsi,
	 * %rdx, %rcx, %r8 and %r9 in the SysV amd64 ABI); any argument
	 * with an index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;

	/*
	 * Did we arrive here via dtrace_invop()? We can simply fetch arguments
	 * from the trap frame if so.
	 */
	td = curthread;
	if (td->t_dtrace_trapframe != NULL) {
		struct trapframe *tf = td->t_dtrace_trapframe;

		if (arg <= inreg) {
			switch (arg) {
			case 0:
				return (tf->tf_rdi);
			case 1:
				return (tf->tf_rsi);
			case 2:
				return (tf->tf_rdx);
			case 3:
				return (tf->tf_rcx);
			case 4:
				return (tf->tf_r8);
			case 5:
				return (tf->tf_r9);
			}
		}

		arg -= inreg;
		stack = (uintptr_t *)tf->tf_rsp;
		goto load;
	}

	for (i = 1; i <= aframes; i++) {
		kmsan_mark(fp, sizeof(*fp), KMSAN_STATE_INITED);
		fp = fp->f_frame;
	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly. As this is the case, we need to shift the argument
	 * that we're looking for: the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

	if (arg <= inreg) {
		/*
		 * This shouldn't happen. If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

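	/*
	 * Skip the six register arguments; &fp[1] points just past the
	 * saved frame pointer and return address, i.e. at the first
	 * argument that the caller passed on the stack.
	 */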
	arg -= (inreg + 1);
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	kmsan_mark(&val, sizeof(val), KMSAN_STATE_INITED);

	return (val);
}

int
dtrace_getstackdepth(int aframes)
{
	int depth = 0;
	struct amd64_frame *frame;
	vm_offset_t rbp;

	aframes++;
	rbp = dtrace_getfp();
	frame = (struct amd64_frame *)rbp;
	depth++;
	for (;;) {
		kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);

		if (!kstack_contains(curthread, (vm_offset_t)frame,
		    sizeof(*frame)))
			break;

		depth++;
		if (frame->f_frame <= frame)
			break;
		frame = frame->f_frame;
	}
	if (depth < aframes)
		return (0);
	else
		return (depth - aframes);
}

ulong_t
dtrace_getreg(struct trapframe *frame, uint_t reg)
{
	/* This table is dependent on reg.d. */
	int regmap[] = {
		REG_GS,		/* 0 GS */
		REG_FS,		/* 1 FS */
		REG_ES,		/* 2 ES */
		REG_DS,		/* 3 DS */
		REG_RDI,	/* 4 EDI */
		REG_RSI,	/* 5 ESI */
		REG_RBP,	/* 6 EBP, REG_FP */
		REG_RSP,	/* 7 ESP */
		REG_RBX,	/* 8 EBX, REG_R1 */
		REG_RDX,	/* 9 EDX */
		REG_RCX,	/* 10 ECX */
		REG_RAX,	/* 11 EAX, REG_R0 */
		REG_TRAPNO,	/* 12 TRAPNO */
		REG_ERR,	/* 13 ERR */
		REG_RIP,	/* 14 EIP, REG_PC */
		REG_CS,		/* 15 CS */
		REG_RFL,	/* 16 EFL, REG_PS */
		REG_RSP,	/* 17 UESP, REG_SP */
		REG_SS		/* 18 SS */
	};

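	/*
	 * Indices 0 through GS are the historical i386 register numbers
	 * that reg.d exposes to D scripts; regmap[] translates those
	 * onto the amd64 trapframe.  Registers that exist only on amd64
	 * are numbered after GS in reg.d, hence "reg -= GS + 1" below.
	 */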
	if (reg <= GS) {
		if (reg >= sizeof (regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = regmap[reg];
	} else {
		/* This is dependent on reg.d. */
		reg -= GS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (frame->tf_rdi);
	case REG_RSI:
		return (frame->tf_rsi);
	case REG_RDX:
		return (frame->tf_rdx);
	case REG_RCX:
		return (frame->tf_rcx);
	case REG_R8:
		return (frame->tf_r8);
	case REG_R9:
		return (frame->tf_r9);
	case REG_RAX:
		return (frame->tf_rax);
	case REG_RBX:
		return (frame->tf_rbx);
	case REG_RBP:
		return (frame->tf_rbp);
	case REG_R10:
		return (frame->tf_r10);
	case REG_R11:
		return (frame->tf_r11);
	case REG_R12:
		return (frame->tf_r12);
	case REG_R13:
		return (frame->tf_r13);
	case REG_R14:
		return (frame->tf_r14);
	case REG_R15:
		return (frame->tf_r15);
	case REG_DS:
		return (frame->tf_ds);
	case REG_ES:
		return (frame->tf_es);
	case REG_FS:
		return (frame->tf_fs);
	case REG_GS:
		return (frame->tf_gs);
	case REG_TRAPNO:
		return (frame->tf_trapno);
	case REG_ERR:
		return (frame->tf_err);
	case REG_RIP:
		return (frame->tf_rip);
	case REG_CS:
		return (frame->tf_cs);
	case REG_SS:
		return (frame->tf_ss);
	case REG_RFL:
		return (frame->tf_rflags);
	case REG_RSP:
		return (frame->tf_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}
}

static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(INKERNEL(kaddr) && kaddr + size >= kaddr);

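	/*
	 * Reject ranges that extend past the top of the user address
	 * space, and ranges where uaddr + size wraps around.
	 */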
	if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size)) {
		dtrace_copy(uaddr, kaddr, size);
		kmsan_mark((void *)kaddr, size, KMSAN_STATE_INITED);
	}
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size)) {
		kmsan_check((void *)kaddr, size, "dtrace_copyout");
		dtrace_copy(kaddr, uaddr, size);
	}
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size)) {
		dtrace_copystr(uaddr, kaddr, size, flags);
		kmsan_mark((void *)kaddr, size, KMSAN_STATE_INITED);
	}
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size)) {
		kmsan_check((void *)kaddr, size, "dtrace_copyoutstr");
		dtrace_copystr(kaddr, uaddr, size, flags);
	}
}

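/*
 * Fault-safe user word fetches: each wrapper bounds the address against
 * VM_MAXUSER_ADDRESS, recording failures in cpuc_dtrace_illval, before
 * handing the access to the corresponding *_nocheck() primitive
 * (selected below via ifuncs).
 */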
uint8_t
dtrace_fuword8(void *uaddr)
{
	uint8_t val;

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	val = dtrace_fuword8_nocheck(uaddr);
	kmsan_mark(&val, sizeof(val), KMSAN_STATE_INITED);
	return (val);
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	uint16_t val;

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	val = dtrace_fuword16_nocheck(uaddr);
	kmsan_mark(&val, sizeof(val), KMSAN_STATE_INITED);
	return (val);
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	uint32_t val;

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	val = dtrace_fuword32_nocheck(uaddr);
	kmsan_mark(&val, sizeof(val), KMSAN_STATE_INITED);
	return (val);
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	uint64_t val;

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	val = dtrace_fuword64_nocheck(uaddr);
	kmsan_mark(&val, sizeof(val), KMSAN_STATE_INITED);
	return (val);
}

/*
 * ifunc resolvers for SMAP support
 */
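/*
 * Each resolver runs once, when the kernel linker resolves the symbol,
 * and selects the SMAP variant (which brackets its user accesses with
 * stac/clac) if the CPU advertises SMAP in its extended CPUID features.
 */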
void dtrace_copy_nosmap(uintptr_t, uintptr_t, size_t);
void dtrace_copy_smap(uintptr_t, uintptr_t, size_t);
DEFINE_IFUNC(, void, dtrace_copy, (uintptr_t, uintptr_t, size_t))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_copy_smap : dtrace_copy_nosmap);
}

void dtrace_copystr_nosmap(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
void dtrace_copystr_smap(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
DEFINE_IFUNC(, void, dtrace_copystr, (uintptr_t, uintptr_t, size_t,
    volatile uint16_t *))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_copystr_smap : dtrace_copystr_nosmap);
}

uintptr_t dtrace_fulword_nosmap(void *);
uintptr_t dtrace_fulword_smap(void *);
DEFINE_IFUNC(, uintptr_t, dtrace_fulword, (void *))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_fulword_smap : dtrace_fulword_nosmap);
}

uint8_t dtrace_fuword8_nocheck_nosmap(void *);
uint8_t dtrace_fuword8_nocheck_smap(void *);
DEFINE_IFUNC(, uint8_t, dtrace_fuword8_nocheck, (void *))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_fuword8_nocheck_smap : dtrace_fuword8_nocheck_nosmap);
}

uint16_t dtrace_fuword16_nocheck_nosmap(void *);
uint16_t dtrace_fuword16_nocheck_smap(void *);
DEFINE_IFUNC(, uint16_t, dtrace_fuword16_nocheck, (void *))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_fuword16_nocheck_smap : dtrace_fuword16_nocheck_nosmap);
}

uint32_t dtrace_fuword32_nocheck_nosmap(void *);
uint32_t dtrace_fuword32_nocheck_smap(void *);
DEFINE_IFUNC(, uint32_t, dtrace_fuword32_nocheck, (void *))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_fuword32_nocheck_smap : dtrace_fuword32_nocheck_nosmap);
}

uint64_t dtrace_fuword64_nocheck_nosmap(void *);
uint64_t dtrace_fuword64_nocheck_smap(void *);
DEFINE_IFUNC(, uint64_t, dtrace_fuword64_nocheck, (void *))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_fuword64_nocheck_smap : dtrace_fuword64_nocheck_nosmap);
}