/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Portions Copyright 2012,2013 Justin Hibbits <jhibbits@freebsd.org>
 *
 * $FreeBSD$
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
29 */ 30 #include <sys/cdefs.h> 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/kernel.h> 35 #include <sys/stack.h> 36 #include <sys/sysent.h> 37 #include <sys/pcpu.h> 38 39 #include <machine/frame.h> 40 #include <machine/md_var.h> 41 #include <machine/reg.h> 42 #include <machine/stack.h> 43 44 #include <vm/vm.h> 45 #include <vm/vm_param.h> 46 #include <vm/pmap.h> 47 48 #include "regset.h" 49 50 /* Offset to the LR Save word (ppc32) */ 51 #define RETURN_OFFSET 4 52 #define RETURN_OFFSET64 8 53 54 #define INKERNEL(x) ((x) <= VM_MAX_KERNEL_ADDRESS && \ 55 (x) >= VM_MIN_KERNEL_ADDRESS) 56 57 greg_t 58 dtrace_getfp(void) 59 { 60 return (greg_t)__builtin_frame_address(0); 61 } 62 63 void 64 dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes, 65 uint32_t *intrpc) 66 { 67 int depth = 0; 68 register_t sp; 69 vm_offset_t callpc; 70 pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller; 71 72 if (intrpc != 0) 73 pcstack[depth++] = (pc_t) intrpc; 74 75 aframes++; 76 77 sp = dtrace_getfp(); 78 79 while (depth < pcstack_limit) { 80 if (!INKERNEL((long) sp)) 81 break; 82 83 callpc = *(uintptr_t *)(sp + RETURN_OFFSET); 84 85 if (!INKERNEL(callpc)) 86 break; 87 88 if (aframes > 0) { 89 aframes--; 90 if ((aframes == 0) && (caller != 0)) { 91 pcstack[depth++] = caller; 92 } 93 } 94 else { 95 pcstack[depth++] = callpc; 96 } 97 98 sp = *(uintptr_t*)sp; 99 } 100 101 for (; depth < pcstack_limit; depth++) { 102 pcstack[depth] = 0; 103 } 104 } 105 106 static int 107 dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc, 108 uintptr_t sp) 109 { 110 proc_t *p = curproc; 111 int ret = 0; 112 113 ASSERT(pcstack == NULL || pcstack_limit > 0); 114 115 while (pc != 0) { 116 ret++; 117 if (pcstack != NULL) { 118 *pcstack++ = (uint64_t)pc; 119 pcstack_limit--; 120 if (pcstack_limit <= 0) 121 break; 122 } 123 124 if (sp == 0) 125 break; 126 127 if (SV_PROC_FLAG(p, SV_ILP32)) { 128 pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET)); 129 
sp = dtrace_fuword32((void *)sp); 130 } 131 else { 132 pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64)); 133 sp = dtrace_fuword64((void *)sp); 134 } 135 } 136 137 return (ret); 138 } 139 140 void 141 dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit) 142 { 143 proc_t *p = curproc; 144 struct trapframe *tf; 145 uintptr_t pc, sp; 146 volatile uint16_t *flags = 147 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 148 int n; 149 150 if (*flags & CPU_DTRACE_FAULT) 151 return; 152 153 if (pcstack_limit <= 0) 154 return; 155 156 /* 157 * If there's no user context we still need to zero the stack. 158 */ 159 if (p == NULL || (tf = curthread->td_frame) == NULL) 160 goto zero; 161 162 *pcstack++ = (uint64_t)p->p_pid; 163 pcstack_limit--; 164 165 if (pcstack_limit <= 0) 166 return; 167 168 pc = tf->srr0; 169 sp = tf->fixreg[1]; 170 171 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) { 172 /* 173 * In an entry probe. The frame pointer has not yet been 174 * pushed (that happens in the function prologue). The 175 * best approach is to add the current pc as a missing top 176 * of stack and back the pc up to the caller, which is stored 177 * at the current stack pointer address since the call 178 * instruction puts it there right before the branch. 
179 */ 180 181 *pcstack++ = (uint64_t)pc; 182 pcstack_limit--; 183 if (pcstack_limit <= 0) 184 return; 185 186 pc = tf->lr; 187 } 188 189 n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp); 190 ASSERT(n >= 0); 191 ASSERT(n <= pcstack_limit); 192 193 pcstack += n; 194 pcstack_limit -= n; 195 196 zero: 197 while (pcstack_limit-- > 0) 198 *pcstack++ = 0; 199 } 200 201 int 202 dtrace_getustackdepth(void) 203 { 204 proc_t *p = curproc; 205 struct trapframe *tf; 206 uintptr_t pc, sp; 207 int n = 0; 208 209 if (p == NULL || (tf = curthread->td_frame) == NULL) 210 return (0); 211 212 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 213 return (-1); 214 215 pc = tf->srr0; 216 sp = tf->fixreg[1]; 217 218 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) { 219 /* 220 * In an entry probe. The frame pointer has not yet been 221 * pushed (that happens in the function prologue). The 222 * best approach is to add the current pc as a missing top 223 * of stack and back the pc up to the caller, which is stored 224 * at the current stack pointer address since the call 225 * instruction puts it there right before the branch. 226 */ 227 228 if (SV_PROC_FLAG(p, SV_ILP32)) { 229 pc = dtrace_fuword32((void *) sp); 230 } 231 else 232 pc = dtrace_fuword64((void *) sp); 233 n++; 234 } 235 236 n += dtrace_getustack_common(NULL, 0, pc, sp); 237 238 return (n); 239 } 240 241 void 242 dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit) 243 { 244 proc_t *p = curproc; 245 struct trapframe *tf; 246 uintptr_t pc, sp; 247 volatile uint16_t *flags = 248 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 249 #ifdef notyet /* XXX signal stack */ 250 uintptr_t oldcontext; 251 size_t s1, s2; 252 #endif 253 254 if (*flags & CPU_DTRACE_FAULT) 255 return; 256 257 if (pcstack_limit <= 0) 258 return; 259 260 /* 261 * If there's no user context we still need to zero the stack. 
262 */ 263 if (p == NULL || (tf = curthread->td_frame) == NULL) 264 goto zero; 265 266 *pcstack++ = (uint64_t)p->p_pid; 267 pcstack_limit--; 268 269 if (pcstack_limit <= 0) 270 return; 271 272 pc = tf->srr0; 273 sp = tf->fixreg[1]; 274 275 #ifdef notyet /* XXX signal stack */ 276 oldcontext = lwp->lwp_oldcontext; 277 s1 = sizeof (struct xframe) + 2 * sizeof (long); 278 s2 = s1 + sizeof (siginfo_t); 279 #endif 280 281 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) { 282 *pcstack++ = (uint64_t)pc; 283 *fpstack++ = 0; 284 pcstack_limit--; 285 if (pcstack_limit <= 0) 286 return; 287 288 if (SV_PROC_FLAG(p, SV_ILP32)) { 289 pc = dtrace_fuword32((void *)sp); 290 } 291 else { 292 pc = dtrace_fuword64((void *)sp); 293 } 294 } 295 296 while (pc != 0) { 297 *pcstack++ = (uint64_t)pc; 298 *fpstack++ = sp; 299 pcstack_limit--; 300 if (pcstack_limit <= 0) 301 break; 302 303 if (sp == 0) 304 break; 305 306 #ifdef notyet /* XXX signal stack */ 307 if (oldcontext == sp + s1 || oldcontext == sp + s2) { 308 ucontext_t *ucp = (ucontext_t *)oldcontext; 309 greg_t *gregs = ucp->uc_mcontext.gregs; 310 311 sp = dtrace_fulword(&gregs[REG_FP]); 312 pc = dtrace_fulword(&gregs[REG_PC]); 313 314 oldcontext = dtrace_fulword(&ucp->uc_link); 315 } else 316 #endif /* XXX */ 317 { 318 if (SV_PROC_FLAG(p, SV_ILP32)) { 319 pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET)); 320 sp = dtrace_fuword32((void *)sp); 321 } 322 else { 323 pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64)); 324 sp = dtrace_fuword64((void *)sp); 325 } 326 } 327 328 /* 329 * This is totally bogus: if we faulted, we're going to clear 330 * the fault and break. This is to deal with the apparently 331 * broken Java stacks on x86. 
332 */ 333 if (*flags & CPU_DTRACE_FAULT) { 334 *flags &= ~CPU_DTRACE_FAULT; 335 break; 336 } 337 } 338 339 zero: 340 while (pcstack_limit-- > 0) 341 *pcstack++ = 0; 342 } 343 344 /*ARGSUSED*/ 345 uint64_t 346 dtrace_getarg(int arg, int aframes) 347 { 348 return (0); 349 } 350 351 #ifdef notyet 352 { 353 int depth = 0; 354 register_t sp; 355 vm_offset_t callpc; 356 pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller; 357 358 if (intrpc != 0) 359 pcstack[depth++] = (pc_t) intrpc; 360 361 aframes++; 362 363 sp = dtrace_getfp(); 364 365 while (depth < pcstack_limit) { 366 if (!INKERNEL((long) frame)) 367 break; 368 369 callpc = *(void **)(sp + RETURN_OFFSET); 370 371 if (!INKERNEL(callpc)) 372 break; 373 374 if (aframes > 0) { 375 aframes--; 376 if ((aframes == 0) && (caller != 0)) { 377 pcstack[depth++] = caller; 378 } 379 } 380 else { 381 pcstack[depth++] = callpc; 382 } 383 384 sp = *(void **)sp; 385 } 386 387 for (; depth < pcstack_limit; depth++) { 388 pcstack[depth] = 0; 389 } 390 } 391 #endif 392 393 int 394 dtrace_getstackdepth(int aframes) 395 { 396 int depth = 0; 397 register_t sp; 398 399 aframes++; 400 sp = dtrace_getfp(); 401 depth++; 402 for(;;) { 403 if (!INKERNEL((long) sp)) 404 break; 405 if (!INKERNEL((long) *(void **)sp)) 406 break; 407 depth++; 408 sp = *(uintptr_t *)sp; 409 } 410 if (depth < aframes) 411 return 0; 412 else 413 return depth - aframes; 414 } 415 416 ulong_t 417 dtrace_getreg(struct trapframe *rp, uint_t reg) 418 { 419 if (reg < 32) 420 return (rp->fixreg[reg]); 421 422 switch (reg) { 423 case 33: 424 return (rp->lr); 425 case 34: 426 return (rp->cr); 427 case 35: 428 return (rp->xer); 429 case 36: 430 return (rp->ctr); 431 case 37: 432 return (rp->srr0); 433 case 38: 434 return (rp->srr1); 435 case 39: 436 return (rp->exc); 437 default: 438 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 439 return (0); 440 } 441 } 442 443 static int 444 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size) 445 { 446 ASSERT(INKERNEL(kaddr) && 
kaddr + size >= kaddr); 447 448 if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) { 449 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 450 cpu_core[curcpu].cpuc_dtrace_illval = uaddr; 451 return (0); 452 } 453 454 return (1); 455 } 456 457 void 458 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size, 459 volatile uint16_t *flags) 460 { 461 if (dtrace_copycheck(uaddr, kaddr, size)) 462 if (copyin((const void *)uaddr, (void *)kaddr, size)) { 463 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 464 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr; 465 } 466 } 467 468 void 469 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size, 470 volatile uint16_t *flags) 471 { 472 if (dtrace_copycheck(uaddr, kaddr, size)) { 473 if (copyout((const void *)kaddr, (void *)uaddr, size)) { 474 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 475 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr; 476 } 477 } 478 } 479 480 void 481 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size, 482 volatile uint16_t *flags) 483 { 484 size_t actual; 485 int error; 486 487 if (dtrace_copycheck(uaddr, kaddr, size)) { 488 error = copyinstr((const void *)uaddr, (void *)kaddr, 489 size, &actual); 490 491 /* ENAMETOOLONG is not a fault condition. */ 492 if (error && error != ENAMETOOLONG) { 493 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 494 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr; 495 } 496 } 497 } 498 499 /* 500 * The bulk of this function could be replaced to match dtrace_copyinstr() 501 * if we ever implement a copyoutstr(). 
502 */ 503 void 504 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size, 505 volatile uint16_t *flags) 506 { 507 size_t len; 508 509 if (dtrace_copycheck(uaddr, kaddr, size)) { 510 len = strlen((const char *)kaddr); 511 if (len > size) 512 len = size; 513 514 if (copyout((const void *)kaddr, (void *)uaddr, len)) { 515 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 516 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr; 517 } 518 } 519 } 520 521 uint8_t 522 dtrace_fuword8(void *uaddr) 523 { 524 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) { 525 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 526 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr; 527 return (0); 528 } 529 return (fubyte(uaddr)); 530 } 531 532 uint16_t 533 dtrace_fuword16(void *uaddr) 534 { 535 uint16_t ret = 0; 536 537 if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) { 538 if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) { 539 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 540 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr; 541 } 542 } 543 return ret; 544 } 545 546 uint32_t 547 dtrace_fuword32(void *uaddr) 548 { 549 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) { 550 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 551 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr; 552 return (0); 553 } 554 return (fuword32(uaddr)); 555 } 556 557 uint64_t 558 dtrace_fuword64(void *uaddr) 559 { 560 uint64_t ret = 0; 561 562 if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) { 563 if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) { 564 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 565 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr; 566 } 567 } 568 return ret; 569 } 570 571 uintptr_t 572 dtrace_fulword(void *uaddr) 573 { 574 uintptr_t ret = 0; 575 576 if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) { 577 if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) { 578 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 579 
cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr; 580 } 581 } 582 return ret; 583 } 584