/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/stack.h>
#include <sys/pcpu.h>

#include <machine/frame.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/atomic.h>
#include <machine/db_machdep.h>
#include <machine/stack.h>
#include <ddb/db_sym.h>
#include <ddb/ddb.h>
#include <sys/kdb.h>

#include "regset.h"

#define	MAX_USTACK_DEPTH	2048

uint8_t dtrace_fuword8_nocheck(void *);
uint16_t dtrace_fuword16_nocheck(void *);
uint32_t dtrace_fuword32_nocheck(void *);
uint64_t dtrace_fuword64_nocheck(void *);

void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct unwind_state state;
	int scp_offset;
	int depth;

	depth = 0;

	if (intrpc != 0) {
		pcstack[depth++] = (pc_t)intrpc;
	}

	aframes++;

	state.fp = (uintptr_t)__builtin_frame_address(0);
	state.pc = (uintptr_t)dtrace_getpcstack;

	while (depth < pcstack_limit) {
		if (!unwind_frame(curthread, &state))
			break;
		if (!INKERNEL(state.pc))
			break;

		/*
		 * NB: Unlike some other architectures, we don't need to
		 * explicitly insert cpu_dtrace_caller as it appears in the
		 * normal kernel stack trace rather than a special trap frame.
		 */
		if (aframes > 0) {
			aframes--;
		} else {
			pcstack[depth++] = state.pc;
		}
	}

	for (; depth < pcstack_limit; depth++) {
		pcstack[depth] = 0;
	}
}
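/*
 * Walk a user-mode stack by following the chain of frame records.  An
 * AArch64 frame record is a pair of 64-bit words: the caller's frame
 * pointer followed by the saved return address, which is what the
 * offsetof(struct unwind_state, pc) read below is relying on.  The walk
 * terminates when the return address is zero, when the frame pointer is
 * zero or fails to advance, when the depth limit is hit, or when a fault
 * is taken while reading user memory.
 */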
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t fp)
{
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int ret = 0;
	uintptr_t oldfp = fp;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= MAX_USTACK_DEPTH) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = fp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (fp == 0)
			break;

		pc = dtrace_fuword64((void *)(fp +
		    offsetof(struct unwind_state, pc)));
		fp = dtrace_fuword64((void *)fp);

		if (fp == oldfp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = fp;
			break;
		}

		/*
		 * ARM64TODO:
		 * This workaround might not be necessary.  It needs to be
		 * revised and removed from all architectures if found
		 * unwanted.  Leaving the original x86 comment for reference.
		 *
		 * This is totally bogus: if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}

		oldfp = fp;
	}

	return (ret);
}

void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_elr;
	fp = tf->tf_x[29];

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which on
		 * arm64 is still held in the link register because the
		 * prologue has not yet saved it in the frame record.
		 */
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = tf->tf_lr;
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
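/*
 * The following functions have not been implemented for arm64 yet; each
 * one simply prints a reminder and returns without collecting any data.
 */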
int
dtrace_getustackdepth(void)
{

	printf("IMPLEMENT ME: %s\n", __func__);

	return (0);
}

void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{

	printf("IMPLEMENT ME: %s\n", __func__);
}

/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{

	printf("IMPLEMENT ME: %s\n", __func__);

	return (0);
}

int
dtrace_getstackdepth(int aframes)
{
	struct unwind_state state;
	int scp_offset;
	int depth;
	bool done;

	depth = 1;
	done = false;

	state.fp = (uintptr_t)__builtin_frame_address(0);
	state.pc = (uintptr_t)dtrace_getstackdepth;

	do {
		done = !unwind_frame(curthread, &state);
		if (!INKERNEL(state.pc) || !INKERNEL(state.fp))
			break;
		depth++;
	} while (!done);

	if (depth < aframes)
		return (0);
	else
		return (depth - aframes);
}

ulong_t
dtrace_getreg(struct trapframe *frame, uint_t reg)
{
	switch (reg) {
	case REG_X0 ... REG_X29:
		return (frame->tf_x[reg]);
	case REG_LR:
		return (frame->tf_lr);
	case REG_SP:
		return (frame->tf_sp);
	case REG_PC:
		return (frame->tf_elr);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}
	/* NOTREACHED */
}
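/*
 * Validate a user address range before one of the copy routines below
 * touches it: the range must lie entirely below VM_MAXUSER_ADDRESS and
 * must not wrap around the address space.  On failure the offending
 * address is recorded, CPU_DTRACE_BADADDR is raised, and the copy is
 * skipped.
 */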
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{

	if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size, flags);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size, flags);
}

uint8_t
dtrace_fuword8(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword64_nocheck(uaddr));
}