/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2017 Joyent, Inc.
 */

#include <sys/dtrace_impl.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>

extern uintptr_t kernelbase;

int dtrace_ustackdepth_max = 2048;

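/*
 * Walk the kernel stack of the current thread and record up to
 * pcstack_limit program counters into pcstack.  The first `aframes'
 * frames are DTrace's own ("artificial") frames and are not recorded;
 * an interrupt pc supplied via intrpc is recorded first.  If the walk
 * begins on an interrupt stack, we hop back to the interrupted
 * thread's stack once the interrupt frames are exhausted.  Unused
 * slots in pcstack are zeroed.
 */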
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr, last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = (struct frame *)fp->fr_savfp;
		pc = fp->fr_savpc;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}

			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != 0) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = 0;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = 0;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}

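/*
 * Common user-level stack walker.  Starting from the given pc and
 * stack pointer, follow the chain of saved frame pointers, storing up
 * to pcstack_limit program counters into pcstack (or merely counting
 * frames when pcstack is NULL).  If a signal-handler context is
 * detected -- by comparing lwp_oldcontext against the stack pointer
 * plus the expected handler frame sizes -- the walk resumes from the
 * frame pointer and pc saved in that ucontext.  The walk is bounded by
 * dtrace_ustackdepth_max so that a circular stack cannot wedge us, and
 * any fault while reading user memory terminates it.  Returns the
 * number of frames visited.
 */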
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	uintptr_t oldcontext = lwp->lwp_oldcontext;
	uintptr_t oldsp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	ASSERT(dtrace_ustackdepth_max > 0);

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= dtrace_ustackdepth_max) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = sp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		oldsp = sp;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		if (sp == oldsp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = sp;
			break;
		}

		/*
		 * This is totally bogus: if we faulted, we're going to clear
		 * the fault and break. This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}

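/*
 * Record the user-level stack of the current thread into pcstack.  The
 * first entry is the pid; subsequent entries are program counters
 * gathered by dtrace_getustack_common().  If CPU_DTRACE_ENTRY is set,
 * the user pc is recorded and the return address at the top of the
 * user stack becomes the starting pc for the walk.  Any remaining
 * slots are zeroed.
 */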
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n;

	ASSERT(DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT));

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

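/*
 * Return the depth of the current thread's user-level stack, or -1 if
 * a fault has already been observed.  This mirrors dtrace_getupcstack()
 * but only counts frames (pcstack is passed as NULL).
 */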
int
dtrace_getustackdepth(void)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n = 0;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}

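/*
 * Like dtrace_getupcstack(), but records a frame pointer alongside each
 * program counter: pcstack receives the pid followed by pcs, and
 * fpstack receives the corresponding stack pointers.  The walk itself
 * duplicates the logic of dtrace_getustack_common(), including the
 * handling of signal-handler contexts.
 */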
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus: if we faulted, we're going to clear
		 * the fault and break. This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

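/*
 * Fetch the value of argument `arg' for the probe that is `aframes'
 * frames below us.  When the probe fired via the invop trap (detected
 * by finding dtrace_invop_callsite as a saved pc), the struct regs
 * pushed at trap time supplies the register-passed arguments and the
 * trapped stack pointer; otherwise the provider called dtrace_probe()
 * directly and the argument is found on the caller's stack, adjusted
 * for the probe ID that dtrace_probe() takes as its first argument.
 */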
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;

	for (i = 1; i <= aframes; i++) {
		fp = (struct frame *)(fp->fr_savfp);

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap. To get this structure, we must increment
			 * beyond the frame structure, the calling RIP, and
			 * padding stored in dtrace_invop(). If the argument
			 * that we're seeking is passed on the stack, we'll
			 * pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t) * 2);

			if (arg <= inreg) {
				stack = (uintptr_t *)&rp->r_rdi;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				arg -= inreg;
			}
			goto load;
		}

	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly. As this is the case, we need to shift the argument
	 * that we're looking for: the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

	if (arg <= inreg) {
		/*
		 * This shouldn't happen. If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

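/*
 * Return the depth of the current kernel stack, excluding the
 * `aframes' artificial frames introduced by DTrace itself.  As in
 * dtrace_getpcstack(), a walk that begins on the interrupt stack hops
 * back to the interrupted thread's stack.
 */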
/*ARGSUSED*/
int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		nextfp = (struct frame *)fp->fr_savfp;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break;
		}

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes)
		return (0);

	return (depth - aframes);
}

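/*
 * Map from the 32-bit x86 register indices (GS through SS) to the
 * native amd64 REG_* indices.  dtrace_getreg() and dtrace_setreg()
 * below translate requests for the low-numbered 32-bit registers
 * through this table; indices above SS are treated as native amd64
 * register numbers (biased by SS + 1).
 */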
static const int dtrace_regmap[] = {
	REG_GS,		/* GS */
	REG_FS,		/* FS */
	REG_ES,		/* ES */
	REG_DS,		/* DS */
	REG_RDI,	/* EDI */
	REG_RSI,	/* ESI */
	REG_RBP,	/* EBP */
	REG_RSP,	/* ESP */
	REG_RBX,	/* EBX */
	REG_RDX,	/* EDX */
	REG_RCX,	/* ECX */
	REG_RAX,	/* EAX */
	REG_TRAPNO,	/* TRAPNO */
	REG_ERR,	/* ERR */
	REG_RIP,	/* EIP */
	REG_CS,		/* CS */
	REG_RFL,	/* EFL */
	REG_RSP,	/* UESP */
	REG_SS		/* SS */
};

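/*
 * Return the value of the register identified by `reg' from the saved
 * register set `rp'.  Unknown registers set CPU_DTRACE_ILLOP and
 * return 0.
 */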
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
	if (reg <= SS) {
		if (reg >= sizeof (dtrace_regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = dtrace_regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->r_rdi);
	case REG_RSI:
		return (rp->r_rsi);
	case REG_RDX:
		return (rp->r_rdx);
	case REG_RCX:
		return (rp->r_rcx);
	case REG_R8:
		return (rp->r_r8);
	case REG_R9:
		return (rp->r_r9);
	case REG_RAX:
		return (rp->r_rax);
	case REG_RBX:
		return (rp->r_rbx);
	case REG_RBP:
		return (rp->r_rbp);
	case REG_R10:
		return (rp->r_r10);
	case REG_R11:
		return (rp->r_r11);
	case REG_R12:
		return (rp->r_r12);
	case REG_R13:
		return (rp->r_r13);
	case REG_R14:
		return (rp->r_r14);
	case REG_R15:
		return (rp->r_r15);
	case REG_DS:
		return (rp->r_ds);
	case REG_ES:
		return (rp->r_es);
	case REG_FS:
		return (rp->r_fs);
	case REG_GS:
		return (rp->r_gs);
	case REG_TRAPNO:
		return (rp->r_trapno);
	case REG_ERR:
		return (rp->r_err);
	case REG_RIP:
		return (rp->r_rip);
	case REG_CS:
		return (rp->r_cs);
	case REG_SS:
		return (rp->r_ss);
	case REG_RFL:
		return (rp->r_rfl);
	case REG_RSP:
		return (rp->r_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}
}

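/*
 * Store `val' into the register identified by `reg' in the saved
 * register set `rp'.  Only the general-purpose registers may be
 * written; anything else sets CPU_DTRACE_ILLOP.
 */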
void
dtrace_setreg(struct regs *rp, uint_t reg, ulong_t val)
{
	if (reg <= SS) {
		ASSERT(reg < (sizeof (dtrace_regmap) / sizeof (int)));

		reg = dtrace_regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		rp->r_rdi = val;
		break;
	case REG_RSI:
		rp->r_rsi = val;
		break;
	case REG_RDX:
		rp->r_rdx = val;
		break;
	case REG_RCX:
		rp->r_rcx = val;
		break;
	case REG_R8:
		rp->r_r8 = val;
		break;
	case REG_R9:
		rp->r_r9 = val;
		break;
	case REG_RAX:
		rp->r_rax = val;
		break;
	case REG_RBX:
		rp->r_rbx = val;
		break;
	case REG_RBP:
		rp->r_rbp = val;
		break;
	case REG_R10:
		rp->r_r10 = val;
		break;
	case REG_R11:
		rp->r_r11 = val;
		break;
	case REG_R12:
		rp->r_r12 = val;
		break;
	case REG_R13:
		rp->r_r13 = val;
		break;
	case REG_R14:
		rp->r_r14 = val;
		break;
	case REG_R15:
		rp->r_r15 = val;
		break;
	case REG_RSP:
		rp->r_rsp = val;
		break;
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return;
	}
}

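/*
 * Validate a user/kernel address pair before a copy: the kernel range
 * must lie in kernel space, and the user range must lie entirely below
 * kernelbase without wrapping.  On failure we flag CPU_DTRACE_BADADDR,
 * record the offending user address, and return 0 so that the
 * dtrace_copy*() wrappers below skip the copy.
 */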
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

/*ARGSUSED*/
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

/*ARGSUSED*/
void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size, flags);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size, flags);
}

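/*
 * Safe user-memory fetches of 8, 16, 32 and 64 bits.  Each wrapper
 * rejects addresses at or above _userlimit -- flagging
 * CPU_DTRACE_BADADDR and recording the offending address -- before
 * handing off to the corresponding dtrace_fuwordN_nocheck() routine.
 */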
uint8_t
dtrace_fuword8(void *uaddr)
{
	extern uint8_t dtrace_fuword8_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	extern uint16_t dtrace_fuword16_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	extern uint32_t dtrace_fuword32_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	extern uint64_t dtrace_fuword64_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}