/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/stack.h>
#include <sys/pcpu.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/stack.h>
#include <x86/ifunc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "regset.h"

uint8_t dtrace_fuword8_nocheck(void *);
uint16_t dtrace_fuword16_nocheck(void *);
uint32_t dtrace_fuword32_nocheck(void *);
uint64_t dtrace_fuword64_nocheck(void *);

int	dtrace_ustackdepth_max = 2048;

void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct thread *td;
	int depth = 0;
	register_t rbp;
	struct amd64_frame *frame;
	vm_offset_t callpc;
	pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;

	if (intrpc != 0)
		pcstack[depth++] = (pc_t) intrpc;

	aframes++;

	__asm __volatile("movq %%rbp,%0" : "=r" (rbp));

	frame = (struct amd64_frame *)rbp;
	td = curthread;
	while (depth < pcstack_limit) {
		if (!kstack_contains(td, (vm_offset_t)frame,
		    sizeof(*frame)))
			break;

		callpc = frame->f_retaddr;

		if (!INKERNEL(callpc))
			break;

		if (aframes > 0) {
			aframes--;
			if ((aframes == 0) && (caller != 0)) {
				pcstack[depth++] = caller;
			}
		} else {
			pcstack[depth++] = callpc;
		}

		if ((vm_offset_t)frame->f_frame <= (vm_offset_t)frame)
			break;
		frame = frame->f_frame;
	}

	for (; depth < pcstack_limit; depth++) {
		pcstack[depth] = 0;
	}
}
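
/*
 * A sketch of the user stack frame layout assumed by the walkers below
 * (frame-pointer-based code, mirroring struct amd64_frame):
 *
 *	sp + 0:	f_frame   -- saved %rbp, i.e. the caller's frame pointer
 *	sp + 8:	f_retaddr -- return address pushed by the call instruction
 *
 * Each step fetches both words with dtrace_fuword64() and follows
 * f_frame until the chain terminates, repeats itself, exceeds
 * dtrace_ustackdepth_max, or faults.
 */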
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	uintptr_t oldsp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	ASSERT(dtrace_ustackdepth_max > 0);

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= dtrace_ustackdepth_max) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = sp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		oldsp = sp;

		pc = dtrace_fuword64((void *)(sp +
		    offsetof(struct amd64_frame, f_retaddr)));
		sp = dtrace_fuword64((void *)sp);

		if (sp == oldsp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = sp;
			break;
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}

void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_rip;
	fp = tf->tf_rbp;
	sp = tf->tf_rsp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call
		 * instruction puts it there right before the branch.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword64((void *) sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

int
dtrace_getustackdepth(void)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, fp, sp;
	int n = 0;

	if (p == NULL || (tf = curthread->td_frame) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = tf->tf_rip;
	fp = tf->tf_rbp;
	sp = tf->tf_rsp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call
		 * instruction puts it there right before the branch.
		 */

		pc = dtrace_fuword64((void *) sp);
		n++;
	}

	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}
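
/*
 * For illustration, the CPU_DTRACE_ENTRY case handled above and in
 * dtrace_getufpstack() below: at a function entry probe the prologue
 * (push %rbp; mov %rsp,%rbp) has not run yet, so %rbp still points at
 * the caller's frame and the return address sits at *%rsp, where the
 * call instruction pushed it.  The walkers therefore record the current
 * pc by hand and reload pc from *%rsp before entering the common
 * frame-chasing loop.
 */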
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#ifdef notyet	/* XXX signal stack */
	uintptr_t oldcontext;
	size_t s1, s2;
#endif

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_rip;
	sp = tf->tf_rsp;
	fp = tf->tf_rbp;

#ifdef notyet /* XXX signal stack */
	oldcontext = lwp->lwp_oldcontext;
	s1 = sizeof (struct xframe) + 2 * sizeof (long);
	s2 = s1 + sizeof (siginfo_t);
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword64((void *)sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = fp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (fp == 0)
			break;

#ifdef notyet /* XXX signal stack */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			ucontext_t *ucp = (ucontext_t *)oldcontext;
			greg_t *gregs = ucp->uc_mcontext.gregs;

			sp = dtrace_fulword(&gregs[REG_FP]);
			pc = dtrace_fulword(&gregs[REG_PC]);

			oldcontext = dtrace_fulword(&ucp->uc_link);
		} else
#endif /* XXX */
		{
			pc = dtrace_fuword64((void *)(fp +
			    offsetof(struct amd64_frame, f_retaddr)));
			fp = dtrace_fuword64((void *)fp);
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
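
/*
 * A note on the two paths into dtrace_probe() that dtrace_getarg()
 * below must handle.  When an FBT probe fires, control arrives through
 * a trap into dtrace_invop(), so a struct trapframe holding the probed
 * function's register state sits just above dtrace_invop()'s frame:
 * arguments 0 through 5 are then read from the saved SysV amd64
 * argument registers (%rdi, %rsi, %rdx, %rcx, %r8, %r9) and the rest
 * relative to the saved %rsp.  When a provider calls dtrace_probe()
 * directly, only stack-passed arguments are reachable, and the index
 * shifts by one to step over the probe ID argument.
 */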
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct amd64_frame *fp = (struct amd64_frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;

	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;

	for (i = 1; i <= aframes; i++) {
		fp = fp->f_frame;

		if (P2ROUNDUP(fp->f_retaddr, 16) ==
		    (long)dtrace_invop_callsite) {
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure, and then again beyond
			 * the calling RIP stored in dtrace_invop().  If the
			 * argument that we're seeking is passed on the stack,
			 * we'll pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct trapframe *tf = (struct trapframe *)&fp[1];

			if (arg <= inreg) {
				switch (arg) {
				case 0:
					stack = (uintptr_t *)&tf->tf_rdi;
					break;
				case 1:
					stack = (uintptr_t *)&tf->tf_rsi;
					break;
				case 2:
					stack = (uintptr_t *)&tf->tf_rdx;
					break;
				case 3:
					stack = (uintptr_t *)&tf->tf_rcx;
					break;
				case 4:
					stack = (uintptr_t *)&tf->tf_r8;
					break;
				case 5:
					stack = (uintptr_t *)&tf->tf_r9;
					break;
				}
				arg = 0;
			} else {
				stack = (uintptr_t *)(tf->tf_rsp);
				arg -= inreg;
			}
			goto load;
		}

	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

int
dtrace_getstackdepth(int aframes)
{
	int depth = 0;
	struct amd64_frame *frame;
	vm_offset_t rbp;

	aframes++;
	rbp = dtrace_getfp();
	frame = (struct amd64_frame *)rbp;
	depth++;
	for (;;) {
		if (!kstack_contains(curthread, (vm_offset_t)frame,
		    sizeof(*frame)))
			break;
		depth++;
		if (frame->f_frame <= frame)
			break;
		frame = frame->f_frame;
	}
	if (depth < aframes)
		return (0);
	else
		return (depth - aframes);
}
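
/*
 * The regmap table below tracks the D translator file reg.d: D scripts
 * index uregs[] with the legacy i386-style register numbers noted in
 * the table's comments (GS through SS), so those low indices are
 * remapped onto the corresponding amd64 registers, while any index
 * beyond GS is taken to name an amd64-only register directly and is
 * rebased by GS + 1.
 */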
ulong_t
dtrace_getreg(struct trapframe *rp, uint_t reg)
{
	/* This table is dependent on reg.d. */
	int regmap[] = {
		REG_GS,		/* 0  GS */
		REG_FS,		/* 1  FS */
		REG_ES,		/* 2  ES */
		REG_DS,		/* 3  DS */
		REG_RDI,	/* 4  EDI */
		REG_RSI,	/* 5  ESI */
		REG_RBP,	/* 6  EBP, REG_FP */
		REG_RSP,	/* 7  ESP */
		REG_RBX,	/* 8  EBX, REG_R1 */
		REG_RDX,	/* 9  EDX */
		REG_RCX,	/* 10 ECX */
		REG_RAX,	/* 11 EAX, REG_R0 */
		REG_TRAPNO,	/* 12 TRAPNO */
		REG_ERR,	/* 13 ERR */
		REG_RIP,	/* 14 EIP, REG_PC */
		REG_CS,		/* 15 CS */
		REG_RFL,	/* 16 EFL, REG_PS */
		REG_RSP,	/* 17 UESP, REG_SP */
		REG_SS		/* 18 SS */
	};

	if (reg <= GS) {
		if (reg >= sizeof (regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = regmap[reg];
	} else {
		/* This is dependent on reg.d. */
		reg -= GS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->tf_rdi);
	case REG_RSI:
		return (rp->tf_rsi);
	case REG_RDX:
		return (rp->tf_rdx);
	case REG_RCX:
		return (rp->tf_rcx);
	case REG_R8:
		return (rp->tf_r8);
	case REG_R9:
		return (rp->tf_r9);
	case REG_RAX:
		return (rp->tf_rax);
	case REG_RBX:
		return (rp->tf_rbx);
	case REG_RBP:
		return (rp->tf_rbp);
	case REG_R10:
		return (rp->tf_r10);
	case REG_R11:
		return (rp->tf_r11);
	case REG_R12:
		return (rp->tf_r12);
	case REG_R13:
		return (rp->tf_r13);
	case REG_R14:
		return (rp->tf_r14);
	case REG_R15:
		return (rp->tf_r15);
	case REG_DS:
		return (rp->tf_ds);
	case REG_ES:
		return (rp->tf_es);
	case REG_FS:
		return (rp->tf_fs);
	case REG_GS:
		return (rp->tf_gs);
	case REG_TRAPNO:
		return (rp->tf_trapno);
	case REG_ERR:
		return (rp->tf_err);
	case REG_RIP:
		return (rp->tf_rip);
	case REG_CS:
		return (rp->tf_cs);
	case REG_SS:
		return (rp->tf_ss);
	case REG_RFL:
		return (rp->tf_rflags);
	case REG_RSP:
		return (rp->tf_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}
}

static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(INKERNEL(kaddr) && kaddr + size >= kaddr);

	if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size, flags);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size, flags);
}

uint8_t
dtrace_fuword8(void *uaddr)
{
	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}
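
/*
 * On CPUs with Supervisor Mode Access Prevention, deliberate kernel
 * accesses to user memory must be bracketed with the stac/clac
 * instructions, so each user-access primitive above exists in a SMAP
 * and a non-SMAP variant.  The ifunc resolvers below run once, when
 * the kernel linker processes relocations at boot, and bind each
 * symbol to the SMAP-aware variant only when the CPU advertises the
 * feature via CPUID_STDEXT_SMAP.
 */
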
/*
 * ifunc resolvers for SMAP support
 */
void dtrace_copy_nosmap(uintptr_t, uintptr_t, size_t);
void dtrace_copy_smap(uintptr_t, uintptr_t, size_t);
DEFINE_IFUNC(, void, dtrace_copy, (uintptr_t, uintptr_t, size_t))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_copy_smap : dtrace_copy_nosmap);
}

void dtrace_copystr_nosmap(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
void dtrace_copystr_smap(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
DEFINE_IFUNC(, void, dtrace_copystr, (uintptr_t, uintptr_t, size_t,
    volatile uint16_t *))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_copystr_smap : dtrace_copystr_nosmap);
}

uintptr_t dtrace_fulword_nosmap(void *);
uintptr_t dtrace_fulword_smap(void *);
DEFINE_IFUNC(, uintptr_t, dtrace_fulword, (void *))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_fulword_smap : dtrace_fulword_nosmap);
}

uint8_t dtrace_fuword8_nocheck_nosmap(void *);
uint8_t dtrace_fuword8_nocheck_smap(void *);
DEFINE_IFUNC(, uint8_t, dtrace_fuword8_nocheck, (void *))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_fuword8_nocheck_smap : dtrace_fuword8_nocheck_nosmap);
}

uint16_t dtrace_fuword16_nocheck_nosmap(void *);
uint16_t dtrace_fuword16_nocheck_smap(void *);
DEFINE_IFUNC(, uint16_t, dtrace_fuword16_nocheck, (void *))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_fuword16_nocheck_smap : dtrace_fuword16_nocheck_nosmap);
}

uint32_t dtrace_fuword32_nocheck_nosmap(void *);
uint32_t dtrace_fuword32_nocheck_smap(void *);
DEFINE_IFUNC(, uint32_t, dtrace_fuword32_nocheck, (void *))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_fuword32_nocheck_smap : dtrace_fuword32_nocheck_nosmap);
}

uint64_t dtrace_fuword64_nocheck_nosmap(void *);
uint64_t dtrace_fuword64_nocheck_smap(void *);
DEFINE_IFUNC(, uint64_t, dtrace_fuword64_nocheck, (void *))
{

	return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 ?
	    dtrace_fuword64_nocheck_smap : dtrace_fuword64_nocheck_nosmap);
}