/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Portions Copyright 2016 Ruslan Bukin <br@bsdpad.com>
 *
 * $FreeBSD$
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/stack.h>
#include <sys/pcpu.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/encoding.h>
#include <machine/riscvreg.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/atomic.h>
#include <machine/db_machdep.h>
#include <machine/stack.h>
#include <ddb/db_sym.h>
#include <ddb/ddb.h>
#include <sys/kdb.h>

#include "regset.h"

#define	MAX_USTACK_DEPTH	2048

uint8_t dtrace_fuword8_nocheck(void *);
uint16_t dtrace_fuword16_nocheck(void *);
uint32_t dtrace_fuword32_nocheck(void *);
uint64_t dtrace_fuword64_nocheck(void *);

int dtrace_match_opcode(uint32_t, int, int);
int dtrace_instr_sdsp(uint32_t **);
int dtrace_instr_ret(uint32_t **);
int dtrace_instr_c_sdsp(uint32_t **);
int dtrace_instr_c_ret(uint32_t **);

void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct unwind_state state;
	uintptr_t caller;
	register_t sp;
	int scp_offset;
	int depth;

	depth = 0;
	caller = solaris_cpu[curcpu].cpu_dtrace_caller;

	if (intrpc != 0) {
		pcstack[depth++] = (pc_t)intrpc;
	}

	/*
	 * Construct the unwind state, starting from this function. This frame,
	 * and 'aframes' others will be skipped.
	 */
	__asm __volatile("mv %0, sp" : "=&r" (sp));

	state.fp = (uintptr_t)__builtin_frame_address(0);
	state.sp = (uintptr_t)sp;
	state.pc = (uintptr_t)dtrace_getpcstack;

	while (depth < pcstack_limit) {
		if (!unwind_frame(curthread, &state))
			break;

		if (!INKERNEL(state.pc) || !kstack_contains(curthread,
		    (vm_offset_t)state.fp, sizeof(uintptr_t)))
			break;

		if (aframes > 0) {
			aframes--;

			/*
			 * fbt_invop() records the return address at the time
			 * the FBT probe fires.  We need to insert this into
			 * the backtrace manually, since the stack frame state
			 * at the time of the probe does not capture it.
			 */
			if (aframes == 0 && caller != 0)
				pcstack[depth++] = caller;
		} else {
			pcstack[depth++] = state.pc;
		}
	}

	for (; depth < pcstack_limit; depth++) {
		pcstack[depth] = 0;
	}
}
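
/*
 * Walk a user-space frame pointer chain.  With the standard RISC-V frame
 * layout the function prologue saves the return address at fp - 8 and the
 * caller's frame pointer at fp - 16, so each iteration reads that pair with
 * dtrace_fuword64(), which flags (rather than takes) any fault.  The walk
 * is capped at MAX_USTACK_DEPTH and aborts if the chain loops back on
 * itself.
 */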
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t fp)
{
	volatile uint16_t *flags;
	uintptr_t oldfp;
	int ret;

	oldfp = fp;
	ret = 0;
	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= MAX_USTACK_DEPTH) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = fp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (fp == 0)
			break;

		pc = dtrace_fuword64((void *)(fp - 1 * sizeof(uint64_t)));
		fp = dtrace_fuword64((void *)(fp - 2 * sizeof(uint64_t)));

		if (fp == oldfp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = fp;
			break;
		}
		oldfp = fp;
	}

	return (ret);
}

void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	volatile uint16_t *flags;
	struct trapframe *tf;
	uintptr_t pc, fp;
	proc_t *p;
	int n;

	p = curproc;
	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_sepc;
	fp = tf->tf_s[0];

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The best
		 * approach is to add the current pc as a missing top of
		 * stack and back the pc up to the caller, whose address is
		 * still in the ra register: the jal/jalr that made the call
		 * placed it there and the prologue has not yet saved it to
		 * the stack.
		 */
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = tf->tf_ra;
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

int
dtrace_getustackdepth(void)
{
	struct trapframe *tf;
	uintptr_t pc, fp;
	int n = 0;

	if (curproc == NULL || (tf = curthread->td_frame) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = tf->tf_sepc;
	fp = tf->tf_s[0];

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The best
		 * approach is to count the current pc as a missing top of
		 * stack and back the pc up to the caller, whose address is
		 * still in the ra register.
		 */
		pc = tf->tf_ra;
		n++;
	}

	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}

void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{

	printf("IMPLEMENT ME: %s\n", __func__);
}

/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{

	printf("IMPLEMENT ME: %s\n", __func__);

	return (0);
}

int
dtrace_getstackdepth(int aframes)
{
	struct unwind_state state;
	int scp_offset;
	register_t sp;
	int depth;
	bool done;

	depth = 1;
	done = false;

	__asm __volatile("mv %0, sp" : "=&r" (sp));

	state.fp = (uintptr_t)__builtin_frame_address(0);
	state.sp = sp;
	state.pc = (uintptr_t)dtrace_getstackdepth;

	do {
		done = !unwind_frame(curthread, &state);
		if (!INKERNEL(state.pc) || !INKERNEL(state.fp))
			break;
		depth++;
	} while (!done);

	if (depth < aframes)
		return (0);
	else
		return (depth - aframes);
}
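
/*
 * Map a regset.h register index onto the corresponding struct trapframe
 * field.  tf_s[] holds s0-s11 contiguously, so s0/s1 map to indices 0/1
 * while s2-s11 continue at index 2; likewise tf_t[] holds t0-t6, with
 * t3-t6 continuing at index 3.
 */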
ulong_t
dtrace_getreg(struct trapframe *frame, uint_t reg)
{
	switch (reg) {
	case REG_ZERO:
		return (0);
	case REG_RA:
		return (frame->tf_ra);
	case REG_SP:
		return (frame->tf_sp);
	case REG_GP:
		return (frame->tf_gp);
	case REG_TP:
		return (frame->tf_tp);
	case REG_T0 ... REG_T2:
		return (frame->tf_t[reg - REG_T0]);
	case REG_S0 ... REG_S1:
		return (frame->tf_s[reg - REG_S0]);
	case REG_A0 ... REG_A7:
		return (frame->tf_a[reg - REG_A0]);
	case REG_S2 ... REG_S11:
		return (frame->tf_s[reg - REG_S2 + 2]);
	case REG_T3 ... REG_T6:
		return (frame->tf_t[reg - REG_T3 + 3]);
	case REG_PC:
		return (frame->tf_sepc);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}
	/* NOTREACHED */
}

static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{

	if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size, flags);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size, flags);
}

uint8_t
dtrace_fuword8(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword64_nocheck(uaddr));
}
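
/*
 * Instruction-recognition helpers, exported for the FBT provider's
 * prologue/epilogue scan.  dtrace_instr_sdsp()/dtrace_instr_c_sdsp() match a
 * store of ra to the stack (sd ra, off(sp) or c.sdsp ra); dtrace_instr_ret()
 * and dtrace_instr_c_ret() match a return (jalr x0, 0(ra) or c.jr ra).  The
 * compressed-form helpers check both 16-bit halves of the 32-bit word and
 * advance *instr to the matching halfword.
 */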
int
dtrace_match_opcode(uint32_t insn, int match, int mask)
{
	if (((insn ^ match) & mask) == 0)
		return (1);

	return (0);
}

int
dtrace_instr_sdsp(uint32_t **instr)
{
	if (dtrace_match_opcode(**instr, (MATCH_SD | RS2_RA | RS1_SP),
	    (MASK_SD | RS2_MASK | RS1_MASK)))
		return (1);

	return (0);
}

int
dtrace_instr_c_sdsp(uint32_t **instr)
{
	uint16_t *instr1;
	int i;

	for (i = 0; i < 2; i++) {
		instr1 = (uint16_t *)(*instr) + i;
		if (dtrace_match_opcode(*instr1, (MATCH_C_SDSP | RS2_C_RA),
		    (MASK_C_SDSP | RS2_C_MASK))) {
			*instr = (uint32_t *)instr1;
			return (1);
		}
	}

	return (0);
}

int
dtrace_instr_ret(uint32_t **instr)
{
	if (dtrace_match_opcode(**instr, (MATCH_JALR | (X_RA << RS1_SHIFT)),
	    (MASK_JALR | RD_MASK | RS1_MASK | IMM_MASK)))
		return (1);

	return (0);
}

int
dtrace_instr_c_ret(uint32_t **instr)
{
	uint16_t *instr1;
	int i;

	for (i = 0; i < 2; i++) {
		instr1 = (uint16_t *)(*instr) + i;
		if (dtrace_match_opcode(*instr1,
		    (MATCH_C_JR | (X_RA << RD_SHIFT)), (MASK_C_JR | RD_MASK))) {
			*instr = (uint32_t *)instr1;
			return (1);
		}
	}

	return (0);
}