/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/stack.h>
#include <sys/pcpu.h>

#include <machine/frame.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/atomic.h>
#include <machine/db_machdep.h>
#include <machine/md_var.h>
#include <machine/stack.h>
#include <ddb/db_sym.h>
#include <ddb/ddb.h>
#include <sys/kdb.h>

#include "regset.h"

#define	MAX_USTACK_DEPTH	2048

uint8_t dtrace_fuword8_nocheck(void *);
uint16_t dtrace_fuword16_nocheck(void *);
uint32_t dtrace_fuword32_nocheck(void *);
uint64_t dtrace_fuword64_nocheck(void *);

void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct unwind_state state;
	int scp_offset;
	int depth;

	depth = 0;

	if (intrpc != 0) {
		pcstack[depth++] = (pc_t) intrpc;
	}

	aframes++;

	state.fp = (uintptr_t)__builtin_frame_address(0);
	state.pc = (uintptr_t)dtrace_getpcstack;

	while (depth < pcstack_limit) {
		if (!unwind_frame(curthread, &state))
			break;
		if (!INKERNEL(state.pc))
			break;

		/*
		 * NB: Unlike some other architectures, we don't need to
		 * explicitly insert cpu_dtrace_caller as it appears in the
		 * normal kernel stack trace rather than a special trap frame.
		 */
		if (aframes > 0) {
			aframes--;
		} else {
			pcstack[depth++] = state.pc;
		}

	}

	for (; depth < pcstack_limit; depth++) {
		pcstack[depth] = 0;
	}
}

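/*
 * Walk a user stack by following the chain of saved frame pointers.  Each
 * iteration reads the caller's pc and the previous frame pointer from the
 * current frame record; MAX_USTACK_DEPTH and the fp == oldfp check guard
 * against circular or otherwise corrupt stacks.
 */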
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t fp)
{
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int ret = 0;
	uintptr_t oldfp = fp;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= MAX_USTACK_DEPTH) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = fp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (fp == 0)
			break;

		pc = dtrace_fuword64((void *)(fp +
		    offsetof(struct unwind_state, pc)));
		fp = dtrace_fuword64((void *)fp);

		if (fp == oldfp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = fp;
			break;
		}

		/*
		 * ARM64TODO:
		 * This workaround might not be necessary. It needs to be
		 * revised and removed from all architectures if found
		 * unwanted. Leaving the original x86 comment for reference.
		 *
		 * This is totally bogus: if we faulted, we're going to clear
		 * the fault and break. This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}

		oldfp = fp;
	}

	return (ret);
}

void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_elr;
	fp = tf->tf_x[29];

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe. The frame pointer has not yet been
		 * pushed (that happens in the function prologue). The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, whose address
		 * is still in the link register since the prologue has not
		 * yet saved it to the stack.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = tf->tf_lr;
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

int
dtrace_getustackdepth(void)
{

	printf("IMPLEMENT ME: %s\n", __func__);

	return (0);
}

void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{

	printf("IMPLEMENT ME: %s\n", __func__);
}

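/*
 * Fetch probe argument 'arg' from the trap frame captured for an invop
 * probe.  Per the AArch64 procedure call standard the first eight integer
 * arguments live in x0-x7; later arguments are read from the interrupted
 * stack after checking alignment and kernel stack bounds.
 */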
uint64_t
dtrace_getarg(int arg, int aframes __unused)
{
	struct trapframe *tf;

	/*
	 * We only handle invop providers here.
	 */
	if ((tf = curthread->t_dtrace_trapframe) == NULL) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	} else if (arg < 8) {
		return (tf->tf_x[arg]);
	} else {
		uintptr_t p;
		uint64_t val;

		p = (tf->tf_sp + (arg - 8) * sizeof(uint64_t));
		if ((p & 7) != 0) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADALIGN);
			cpu_core[curcpu].cpuc_dtrace_illval = p;
			return (0);
		}
		if (!kstack_contains(curthread, p, sizeof(uint64_t))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = p;
			return (0);
		}
		memcpy(&val, (void *)p, sizeof(uint64_t));
		return (val);
	}
}

int
dtrace_getstackdepth(int aframes)
{
	struct unwind_state state;
	int scp_offset;
	int depth;
	bool done;

	depth = 1;
	done = false;

	state.fp = (uintptr_t)__builtin_frame_address(0);
	state.pc = (uintptr_t)dtrace_getstackdepth;

	do {
		done = !unwind_frame(curthread, &state);
		if (!INKERNEL(state.pc) || !INKERNEL(state.fp))
			break;
		depth++;
	} while (!done);

	if (depth < aframes)
		return (0);
	else
		return (depth - aframes);
}

ulong_t
dtrace_getreg(struct trapframe *frame, uint_t reg)
{
	switch (reg) {
	case REG_X0 ... REG_X29:
		return (frame->tf_x[reg]);
	case REG_LR:
		return (frame->tf_lr);
	case REG_SP:
		return (frame->tf_sp);
	case REG_PC:
		return (frame->tf_elr);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}
	/* NOTREACHED */
}

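/*
 * Verify that a user address range lies entirely below VM_MAXUSER_ADDRESS
 * and does not wrap; the copy and string-copy routines below rely on this
 * check before touching user memory.
 */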
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{

	if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size, flags);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size, flags);
}

uint8_t
dtrace_fuword8(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword64_nocheck(uaddr));
}