1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 26 /* All Rights Reserved */ 27 /* 28 * Copyright (c) 2012, Joyent, Inc. All rights reserved. 29 */ 30 31 #include <sys/param.h> 32 #include <sys/types.h> 33 #include <sys/vmparam.h> 34 #include <sys/systm.h> 35 #include <sys/signal.h> 36 #include <sys/stack.h> 37 #include <sys/regset.h> 38 #include <sys/privregs.h> 39 #include <sys/frame.h> 40 #include <sys/proc.h> 41 #include <sys/psw.h> 42 #include <sys/siginfo.h> 43 #include <sys/cpuvar.h> 44 #include <sys/asm_linkage.h> 45 #include <sys/kmem.h> 46 #include <sys/errno.h> 47 #include <sys/bootconf.h> 48 #include <sys/archsystm.h> 49 #include <sys/debug.h> 50 #include <sys/elf.h> 51 #include <sys/spl.h> 52 #include <sys/time.h> 53 #include <sys/atomic.h> 54 #include <sys/sysmacros.h> 55 #include <sys/cmn_err.h> 56 #include <sys/modctl.h> 57 #include <sys/kobj.h> 58 #include <sys/panic.h> 59 #include <sys/reboot.h> 60 #include <sys/time.h> 61 #include <sys/fp.h> 62 #include <sys/x86_archext.h> 63 #include <sys/auxv.h> 64 #include <sys/auxv_386.h> 65 #include <sys/dtrace.h> 66 #include <sys/brand.h> 67 #include <sys/machbrand.h> 68 #include <sys/cmn_err.h> 69 70 extern const struct fnsave_state x87_initial; 71 extern const struct fxsave_state sse_initial; 72 73 /* 74 * Map an fnsave-formatted save area into an fxsave-formatted save area. 75 * 76 * Most fields are the same width, content and semantics. However 77 * the tag word is compressed. 78 */ 79 static void 80 fnsave_to_fxsave(const struct fnsave_state *fn, struct fxsave_state *fx) 81 { 82 uint_t i, tagbits; 83 84 fx->fx_fcw = fn->f_fcw; 85 fx->fx_fsw = fn->f_fsw; 86 87 /* 88 * copy element by element (because of holes) 89 */ 90 for (i = 0; i < 8; i++) 91 bcopy(&fn->f_st[i].fpr_16[0], &fx->fx_st[i].fpr_16[0], 92 sizeof (fn->f_st[0].fpr_16)); /* 80-bit x87-style floats */ 93 94 /* 95 * synthesize compressed tag bits 96 */ 97 fx->fx_fctw = 0; 98 for (tagbits = fn->f_ftw, i = 0; i < 8; i++, tagbits >>= 2) 99 if ((tagbits & 3) != 3) 100 fx->fx_fctw |= (1 << i); 101 102 fx->fx_fop = fn->f_fop; 103 104 #if defined(__amd64) 105 fx->fx_rip = (uint64_t)fn->f_eip; 106 fx->fx_rdp = (uint64_t)fn->f_dp; 107 #else 108 fx->fx_eip = fn->f_eip; 109 fx->fx_cs = fn->f_cs; 110 fx->__fx_ign0 = 0; 111 fx->fx_dp = fn->f_dp; 112 fx->fx_ds = fn->f_ds; 113 fx->__fx_ign1 = 0; 114 #endif 115 } 116 117 /* 118 * Map from an fxsave-format save area to an fnsave-format save area. 119 */ 120 static void 121 fxsave_to_fnsave(const struct fxsave_state *fx, struct fnsave_state *fn) 122 { 123 uint_t i, top, tagbits; 124 125 fn->f_fcw = fx->fx_fcw; 126 fn->__f_ign0 = 0; 127 fn->f_fsw = fx->fx_fsw; 128 fn->__f_ign1 = 0; 129 130 top = (fx->fx_fsw & FPS_TOP) >> 11; 131 132 /* 133 * copy element by element (because of holes) 134 */ 135 for (i = 0; i < 8; i++) 136 bcopy(&fx->fx_st[i].fpr_16[0], &fn->f_st[i].fpr_16[0], 137 sizeof (fn->f_st[0].fpr_16)); /* 80-bit x87-style floats */ 138 139 /* 140 * synthesize uncompressed tag bits 141 */ 142 fn->f_ftw = 0; 143 for (tagbits = fx->fx_fctw, i = 0; i < 8; i++, tagbits >>= 1) { 144 uint_t ibit, expo; 145 const uint16_t *fpp; 146 static const uint16_t zero[5] = { 0, 0, 0, 0, 0 }; 147 148 if ((tagbits & 1) == 0) { 149 fn->f_ftw |= 3 << (i << 1); /* empty */ 150 continue; 151 } 152 153 /* 154 * (tags refer to *physical* registers) 155 */ 156 fpp = &fx->fx_st[(i - top + 8) & 7].fpr_16[0]; 157 ibit = fpp[3] >> 15; 158 expo = fpp[4] & 0x7fff; 159 160 if (ibit && expo != 0 && expo != 0x7fff) 161 continue; /* valid fp number */ 162 163 if (bcmp(fpp, &zero, sizeof (zero))) 164 fn->f_ftw |= 2 << (i << 1); /* NaN */ 165 else 166 fn->f_ftw |= 1 << (i << 1); /* fp zero */ 167 } 168 169 fn->f_fop = fx->fx_fop; 170 171 fn->__f_ign2 = 0; 172 #if defined(__amd64) 173 fn->f_eip = (uint32_t)fx->fx_rip; 174 fn->f_cs = U32CS_SEL; 175 fn->f_dp = (uint32_t)fx->fx_rdp; 176 fn->f_ds = UDS_SEL; 177 #else 178 fn->f_eip = fx->fx_eip; 179 fn->f_cs = fx->fx_cs; 180 fn->f_dp = fx->fx_dp; 181 fn->f_ds = fx->fx_ds; 182 #endif 183 fn->__f_ign3 = 0; 184 } 185 186 /* 187 * Map from an fpregset_t into an fxsave-format save area 188 */ 189 static void 190 fpregset_to_fxsave(const fpregset_t *fp, struct fxsave_state *fx) 191 { 192 #if defined(__amd64) 193 bcopy(fp, fx, sizeof (*fx)); 194 #else 195 const struct fpchip_state *fc = &fp->fp_reg_set.fpchip_state; 196 197 fnsave_to_fxsave((const struct fnsave_state *)fc, fx); 198 fx->fx_mxcsr = fc->mxcsr; 199 bcopy(&fc->xmm[0], &fx->fx_xmm[0], sizeof (fc->xmm)); 200 #endif 201 /* 202 * avoid useless #gp exceptions - mask reserved bits 203 */ 204 fx->fx_mxcsr &= sse_mxcsr_mask; 205 } 206 207 /* 208 * Map from an fxsave-format save area into a fpregset_t 209 */ 210 static void 211 fxsave_to_fpregset(const struct fxsave_state *fx, fpregset_t *fp) 212 { 213 #if defined(__amd64) 214 bcopy(fx, fp, sizeof (*fx)); 215 #else 216 struct fpchip_state *fc = &fp->fp_reg_set.fpchip_state; 217 218 fxsave_to_fnsave(fx, (struct fnsave_state *)fc); 219 fc->mxcsr = fx->fx_mxcsr; 220 bcopy(&fx->fx_xmm[0], &fc->xmm[0], sizeof (fc->xmm)); 221 #endif 222 } 223 224 #if defined(_SYSCALL32_IMPL) 225 static void 226 fpregset32_to_fxsave(const fpregset32_t *fp, struct fxsave_state *fx) 227 { 228 const struct fpchip32_state *fc = &fp->fp_reg_set.fpchip_state; 229 230 fnsave_to_fxsave((const struct fnsave_state *)fc, fx); 231 /* 232 * avoid useless #gp exceptions - mask reserved bits 233 */ 234 fx->fx_mxcsr = sse_mxcsr_mask & fc->mxcsr; 235 bcopy(&fc->xmm[0], &fx->fx_xmm[0], sizeof (fc->xmm)); 236 } 237 238 static void 239 fxsave_to_fpregset32(const struct fxsave_state *fx, fpregset32_t *fp) 240 { 241 struct fpchip32_state *fc = &fp->fp_reg_set.fpchip_state; 242 243 fxsave_to_fnsave(fx, (struct fnsave_state *)fc); 244 fc->mxcsr = fx->fx_mxcsr; 245 bcopy(&fx->fx_xmm[0], &fc->xmm[0], sizeof (fc->xmm)); 246 } 247 248 static void 249 fpregset_nto32(const fpregset_t *src, fpregset32_t *dst) 250 { 251 fxsave_to_fpregset32((struct fxsave_state *)src, dst); 252 dst->fp_reg_set.fpchip_state.status = 253 src->fp_reg_set.fpchip_state.status; 254 dst->fp_reg_set.fpchip_state.xstatus = 255 src->fp_reg_set.fpchip_state.xstatus; 256 } 257 258 static void 259 fpregset_32ton(const fpregset32_t *src, fpregset_t *dst) 260 { 261 fpregset32_to_fxsave(src, (struct fxsave_state *)dst); 262 dst->fp_reg_set.fpchip_state.status = 263 src->fp_reg_set.fpchip_state.status; 264 dst->fp_reg_set.fpchip_state.xstatus = 265 src->fp_reg_set.fpchip_state.xstatus; 266 } 267 #endif 268 269 /* 270 * Set floating-point registers from a native fpregset_t. 271 */ 272 void 273 setfpregs(klwp_t *lwp, fpregset_t *fp) 274 { 275 struct fpu_ctx *fpu = &lwp->lwp_pcb.pcb_fpu; 276 277 if (fpu->fpu_flags & FPU_EN) { 278 if (!(fpu->fpu_flags & FPU_VALID)) { 279 /* 280 * FPU context is still active, release the 281 * ownership. 282 */ 283 fp_free(fpu, 0); 284 } 285 } 286 /* 287 * Else: if we are trying to change the FPU state of a thread which 288 * hasn't yet initialized floating point, store the state in 289 * the pcb and indicate that the state is valid. When the 290 * thread enables floating point, it will use this state instead 291 * of the default state. 292 */ 293 294 switch (fp_save_mech) { 295 #if defined(__i386) 296 case FP_FNSAVE: 297 bcopy(fp, &fpu->fpu_regs.kfpu_u.kfpu_fn, 298 sizeof (fpu->fpu_regs.kfpu_u.kfpu_fn)); 299 break; 300 #endif 301 case FP_FXSAVE: 302 fpregset_to_fxsave(fp, &fpu->fpu_regs.kfpu_u.kfpu_fx); 303 fpu->fpu_regs.kfpu_xstatus = 304 fp->fp_reg_set.fpchip_state.xstatus; 305 break; 306 307 case FP_XSAVE: 308 fpregset_to_fxsave(fp, 309 &fpu->fpu_regs.kfpu_u.kfpu_xs.xs_fxsave); 310 fpu->fpu_regs.kfpu_xstatus = 311 fp->fp_reg_set.fpchip_state.xstatus; 312 fpu->fpu_regs.kfpu_u.kfpu_xs.xs_xstate_bv |= 313 (XFEATURE_LEGACY_FP | XFEATURE_SSE); 314 break; 315 default: 316 panic("Invalid fp_save_mech"); 317 /*NOTREACHED*/ 318 } 319 320 fpu->fpu_regs.kfpu_status = fp->fp_reg_set.fpchip_state.status; 321 fpu->fpu_flags |= FPU_VALID; 322 } 323 324 /* 325 * Get floating-point registers into a native fpregset_t. 326 */ 327 void 328 getfpregs(klwp_t *lwp, fpregset_t *fp) 329 { 330 struct fpu_ctx *fpu = &lwp->lwp_pcb.pcb_fpu; 331 332 kpreempt_disable(); 333 if (fpu->fpu_flags & FPU_EN) { 334 /* 335 * If we have FPU hw and the thread's pcb doesn't have 336 * a valid FPU state then get the state from the hw. 337 */ 338 if (fpu_exists && ttolwp(curthread) == lwp && 339 !(fpu->fpu_flags & FPU_VALID)) 340 fp_save(fpu); /* get the current FPU state */ 341 } 342 343 /* 344 * There are 3 possible cases we have to be aware of here: 345 * 346 * 1. FPU is enabled. FPU state is stored in the current LWP. 347 * 348 * 2. FPU is not enabled, and there have been no intervening /proc 349 * modifications. Return initial FPU state. 350 * 351 * 3. FPU is not enabled, but a /proc consumer has modified FPU state. 352 * FPU state is stored in the current LWP. 353 */ 354 if ((fpu->fpu_flags & FPU_EN) || (fpu->fpu_flags & FPU_VALID)) { 355 /* 356 * Cases 1 and 3. 357 */ 358 switch (fp_save_mech) { 359 #if defined(__i386) 360 case FP_FNSAVE: 361 bcopy(&fpu->fpu_regs.kfpu_u.kfpu_fn, fp, 362 sizeof (fpu->fpu_regs.kfpu_u.kfpu_fn)); 363 break; 364 #endif 365 case FP_FXSAVE: 366 fxsave_to_fpregset(&fpu->fpu_regs.kfpu_u.kfpu_fx, fp); 367 fp->fp_reg_set.fpchip_state.xstatus = 368 fpu->fpu_regs.kfpu_xstatus; 369 break; 370 case FP_XSAVE: 371 fxsave_to_fpregset( 372 &fpu->fpu_regs.kfpu_u.kfpu_xs.xs_fxsave, fp); 373 fp->fp_reg_set.fpchip_state.xstatus = 374 fpu->fpu_regs.kfpu_xstatus; 375 break; 376 default: 377 panic("Invalid fp_save_mech"); 378 /*NOTREACHED*/ 379 } 380 fp->fp_reg_set.fpchip_state.status = fpu->fpu_regs.kfpu_status; 381 } else { 382 /* 383 * Case 2. 384 */ 385 switch (fp_save_mech) { 386 #if defined(__i386) 387 case FP_FNSAVE: 388 bcopy(&x87_initial, fp, sizeof (x87_initial)); 389 break; 390 #endif 391 case FP_FXSAVE: 392 case FP_XSAVE: 393 /* 394 * For now, we don't have any AVX specific field in ABI. 395 * If we add any in the future, we need to initial them 396 * as well. 397 */ 398 fxsave_to_fpregset(&sse_initial, fp); 399 fp->fp_reg_set.fpchip_state.xstatus = 400 fpu->fpu_regs.kfpu_xstatus; 401 break; 402 default: 403 panic("Invalid fp_save_mech"); 404 /*NOTREACHED*/ 405 } 406 fp->fp_reg_set.fpchip_state.status = fpu->fpu_regs.kfpu_status; 407 } 408 kpreempt_enable(); 409 } 410 411 #if defined(_SYSCALL32_IMPL) 412 413 /* 414 * Set floating-point registers from an fpregset32_t. 415 */ 416 void 417 setfpregs32(klwp_t *lwp, fpregset32_t *fp) 418 { 419 fpregset_t fpregs; 420 421 fpregset_32ton(fp, &fpregs); 422 setfpregs(lwp, &fpregs); 423 } 424 425 /* 426 * Get floating-point registers into an fpregset32_t. 427 */ 428 void 429 getfpregs32(klwp_t *lwp, fpregset32_t *fp) 430 { 431 fpregset_t fpregs; 432 433 getfpregs(lwp, &fpregs); 434 fpregset_nto32(&fpregs, fp); 435 } 436 437 #endif /* _SYSCALL32_IMPL */ 438 439 /* 440 * Return the general registers 441 */ 442 void 443 getgregs(klwp_t *lwp, gregset_t grp) 444 { 445 struct regs *rp = lwptoregs(lwp); 446 #if defined(__amd64) 447 struct pcb *pcb = &lwp->lwp_pcb; 448 int thisthread = lwptot(lwp) == curthread; 449 450 grp[REG_RDI] = rp->r_rdi; 451 grp[REG_RSI] = rp->r_rsi; 452 grp[REG_RDX] = rp->r_rdx; 453 grp[REG_RCX] = rp->r_rcx; 454 grp[REG_R8] = rp->r_r8; 455 grp[REG_R9] = rp->r_r9; 456 grp[REG_RAX] = rp->r_rax; 457 grp[REG_RBX] = rp->r_rbx; 458 grp[REG_RBP] = rp->r_rbp; 459 grp[REG_R10] = rp->r_r10; 460 grp[REG_R11] = rp->r_r11; 461 grp[REG_R12] = rp->r_r12; 462 grp[REG_R13] = rp->r_r13; 463 grp[REG_R14] = rp->r_r14; 464 grp[REG_R15] = rp->r_r15; 465 grp[REG_FSBASE] = pcb->pcb_fsbase; 466 grp[REG_GSBASE] = pcb->pcb_gsbase; 467 if (thisthread) 468 kpreempt_disable(); 469 if (pcb->pcb_rupdate == 1) { 470 grp[REG_DS] = pcb->pcb_ds; 471 grp[REG_ES] = pcb->pcb_es; 472 grp[REG_FS] = pcb->pcb_fs; 473 grp[REG_GS] = pcb->pcb_gs; 474 } else { 475 grp[REG_DS] = rp->r_ds; 476 grp[REG_ES] = rp->r_es; 477 grp[REG_FS] = rp->r_fs; 478 grp[REG_GS] = rp->r_gs; 479 } 480 if (thisthread) 481 kpreempt_enable(); 482 grp[REG_TRAPNO] = rp->r_trapno; 483 grp[REG_ERR] = rp->r_err; 484 grp[REG_RIP] = rp->r_rip; 485 grp[REG_CS] = rp->r_cs; 486 grp[REG_SS] = rp->r_ss; 487 grp[REG_RFL] = rp->r_rfl; 488 grp[REG_RSP] = rp->r_rsp; 489 #else 490 bcopy(&rp->r_gs, grp, sizeof (gregset_t)); 491 #endif 492 } 493 494 #if defined(_SYSCALL32_IMPL) 495 496 void 497 getgregs32(klwp_t *lwp, gregset32_t grp) 498 { 499 struct regs *rp = lwptoregs(lwp); 500 struct pcb *pcb = &lwp->lwp_pcb; 501 int thisthread = lwptot(lwp) == curthread; 502 503 if (thisthread) 504 kpreempt_disable(); 505 if (pcb->pcb_rupdate == 1) { 506 grp[GS] = (uint16_t)pcb->pcb_gs; 507 grp[FS] = (uint16_t)pcb->pcb_fs; 508 grp[DS] = (uint16_t)pcb->pcb_ds; 509 grp[ES] = (uint16_t)pcb->pcb_es; 510 } else { 511 grp[GS] = (uint16_t)rp->r_gs; 512 grp[FS] = (uint16_t)rp->r_fs; 513 grp[DS] = (uint16_t)rp->r_ds; 514 grp[ES] = (uint16_t)rp->r_es; 515 } 516 if (thisthread) 517 kpreempt_enable(); 518 grp[EDI] = (greg32_t)rp->r_rdi; 519 grp[ESI] = (greg32_t)rp->r_rsi; 520 grp[EBP] = (greg32_t)rp->r_rbp; 521 grp[ESP] = 0; 522 grp[EBX] = (greg32_t)rp->r_rbx; 523 grp[EDX] = (greg32_t)rp->r_rdx; 524 grp[ECX] = (greg32_t)rp->r_rcx; 525 grp[EAX] = (greg32_t)rp->r_rax; 526 grp[TRAPNO] = (greg32_t)rp->r_trapno; 527 grp[ERR] = (greg32_t)rp->r_err; 528 grp[EIP] = (greg32_t)rp->r_rip; 529 grp[CS] = (uint16_t)rp->r_cs; 530 grp[EFL] = (greg32_t)rp->r_rfl; 531 grp[UESP] = (greg32_t)rp->r_rsp; 532 grp[SS] = (uint16_t)rp->r_ss; 533 } 534 535 void 536 ucontext_32ton(const ucontext32_t *src, ucontext_t *dst) 537 { 538 mcontext_t *dmc = &dst->uc_mcontext; 539 const mcontext32_t *smc = &src->uc_mcontext; 540 541 bzero(dst, sizeof (*dst)); 542 dst->uc_flags = src->uc_flags; 543 dst->uc_link = (ucontext_t *)(uintptr_t)src->uc_link; 544 545 bcopy(&src->uc_sigmask, &dst->uc_sigmask, sizeof (dst->uc_sigmask)); 546 547 dst->uc_stack.ss_sp = (void *)(uintptr_t)src->uc_stack.ss_sp; 548 dst->uc_stack.ss_size = (size_t)src->uc_stack.ss_size; 549 dst->uc_stack.ss_flags = src->uc_stack.ss_flags; 550 551 dmc->gregs[REG_GS] = (greg_t)(uint32_t)smc->gregs[GS]; 552 dmc->gregs[REG_FS] = (greg_t)(uint32_t)smc->gregs[FS]; 553 dmc->gregs[REG_ES] = (greg_t)(uint32_t)smc->gregs[ES]; 554 dmc->gregs[REG_DS] = (greg_t)(uint32_t)smc->gregs[DS]; 555 dmc->gregs[REG_RDI] = (greg_t)(uint32_t)smc->gregs[EDI]; 556 dmc->gregs[REG_RSI] = (greg_t)(uint32_t)smc->gregs[ESI]; 557 dmc->gregs[REG_RBP] = (greg_t)(uint32_t)smc->gregs[EBP]; 558 dmc->gregs[REG_RBX] = (greg_t)(uint32_t)smc->gregs[EBX]; 559 dmc->gregs[REG_RDX] = (greg_t)(uint32_t)smc->gregs[EDX]; 560 dmc->gregs[REG_RCX] = (greg_t)(uint32_t)smc->gregs[ECX]; 561 dmc->gregs[REG_RAX] = (greg_t)(uint32_t)smc->gregs[EAX]; 562 dmc->gregs[REG_TRAPNO] = (greg_t)(uint32_t)smc->gregs[TRAPNO]; 563 dmc->gregs[REG_ERR] = (greg_t)(uint32_t)smc->gregs[ERR]; 564 dmc->gregs[REG_RIP] = (greg_t)(uint32_t)smc->gregs[EIP]; 565 dmc->gregs[REG_CS] = (greg_t)(uint32_t)smc->gregs[CS]; 566 dmc->gregs[REG_RFL] = (greg_t)(uint32_t)smc->gregs[EFL]; 567 dmc->gregs[REG_RSP] = (greg_t)(uint32_t)smc->gregs[UESP]; 568 dmc->gregs[REG_SS] = (greg_t)(uint32_t)smc->gregs[SS]; 569 570 /* 571 * A valid fpregs is only copied in if uc.uc_flags has UC_FPU set 572 * otherwise there is no guarantee that anything in fpregs is valid. 573 */ 574 if (src->uc_flags & UC_FPU) 575 fpregset_32ton(&src->uc_mcontext.fpregs, 576 &dst->uc_mcontext.fpregs); 577 } 578 579 #endif /* _SYSCALL32_IMPL */ 580 581 /* 582 * Return the user-level PC. 583 * If in a system call, return the address of the syscall trap. 584 */ 585 greg_t 586 getuserpc() 587 { 588 greg_t upc = lwptoregs(ttolwp(curthread))->r_pc; 589 uint32_t insn; 590 591 if (curthread->t_sysnum == 0) 592 return (upc); 593 594 /* 595 * We might've gotten here from sysenter (0xf 0x34), 596 * syscall (0xf 0x5) or lcall (0x9a 0 0 0 0 0x27 0). 597 * 598 * Go peek at the binary to figure it out.. 599 */ 600 if (fuword32((void *)(upc - 2), &insn) != -1 && 601 (insn & 0xffff) == 0x340f || (insn & 0xffff) == 0x050f) 602 return (upc - 2); 603 return (upc - 7); 604 } 605 606 /* 607 * Protect segment registers from non-user privilege levels and GDT selectors 608 * other than USER_CS, USER_DS and lwp FS and GS values. If the segment 609 * selector is non-null and not USER_CS/USER_DS, we make sure that the 610 * TI bit is set to point into the LDT and that the RPL is set to 3. 611 * 612 * Since struct regs stores each 16-bit segment register as a 32-bit greg_t, we 613 * also explicitly zero the top 16 bits since they may be coming from the 614 * user's address space via setcontext(2) or /proc. 615 * 616 * Note about null selector. When running on the hypervisor if we allow a 617 * process to set its %cs to null selector with RPL of 0 the hypervisor will 618 * crash the domain. If running on bare metal we would get a #gp fault and 619 * be able to kill the process and continue on. Therefore we make sure to 620 * force RPL to SEL_UPL even for null selector when setting %cs. 621 */ 622 623 #if defined(IS_CS) || defined(IS_NOT_CS) 624 #error "IS_CS and IS_NOT_CS already defined" 625 #endif 626 627 #define IS_CS 1 628 #define IS_NOT_CS 0 629 630 /*ARGSUSED*/ 631 static greg_t 632 fix_segreg(greg_t sr, int iscs, model_t datamodel) 633 { 634 switch (sr &= 0xffff) { 635 636 case 0: 637 if (iscs == IS_CS) 638 return (0 | SEL_UPL); 639 else 640 return (0); 641 642 #if defined(__amd64) 643 /* 644 * If lwp attempts to switch data model then force their 645 * code selector to be null selector. 646 */ 647 case U32CS_SEL: 648 if (datamodel == DATAMODEL_NATIVE) 649 return (0 | SEL_UPL); 650 else 651 return (sr); 652 653 case UCS_SEL: 654 if (datamodel == DATAMODEL_ILP32) 655 return (0 | SEL_UPL); 656 #elif defined(__i386) 657 case UCS_SEL: 658 #endif 659 /*FALLTHROUGH*/ 660 case UDS_SEL: 661 case LWPFS_SEL: 662 case LWPGS_SEL: 663 case SEL_UPL: 664 return (sr); 665 default: 666 break; 667 } 668 669 /* 670 * Force it into the LDT in ring 3 for 32-bit processes, which by 671 * default do not have an LDT, so that any attempt to use an invalid 672 * selector will reference the (non-existant) LDT, and cause a #gp 673 * fault for the process. 674 * 675 * 64-bit processes get the null gdt selector since they 676 * are not allowed to have a private LDT. 677 */ 678 #if defined(__amd64) 679 if (datamodel == DATAMODEL_ILP32) { 680 return (sr | SEL_TI_LDT | SEL_UPL); 681 } else { 682 if (iscs == IS_CS) 683 return (0 | SEL_UPL); 684 else 685 return (0); 686 } 687 688 #elif defined(__i386) 689 return (sr | SEL_TI_LDT | SEL_UPL); 690 #endif 691 } 692 693 /* 694 * Set general registers. 695 */ 696 void 697 setgregs(klwp_t *lwp, gregset_t grp) 698 { 699 struct regs *rp = lwptoregs(lwp); 700 model_t datamodel = lwp_getdatamodel(lwp); 701 702 #if defined(__amd64) 703 struct pcb *pcb = &lwp->lwp_pcb; 704 int thisthread = lwptot(lwp) == curthread; 705 706 if (datamodel == DATAMODEL_NATIVE) { 707 708 if (thisthread) 709 (void) save_syscall_args(); /* copy the args */ 710 711 rp->r_rdi = grp[REG_RDI]; 712 rp->r_rsi = grp[REG_RSI]; 713 rp->r_rdx = grp[REG_RDX]; 714 rp->r_rcx = grp[REG_RCX]; 715 rp->r_r8 = grp[REG_R8]; 716 rp->r_r9 = grp[REG_R9]; 717 rp->r_rax = grp[REG_RAX]; 718 rp->r_rbx = grp[REG_RBX]; 719 rp->r_rbp = grp[REG_RBP]; 720 rp->r_r10 = grp[REG_R10]; 721 rp->r_r11 = grp[REG_R11]; 722 rp->r_r12 = grp[REG_R12]; 723 rp->r_r13 = grp[REG_R13]; 724 rp->r_r14 = grp[REG_R14]; 725 rp->r_r15 = grp[REG_R15]; 726 rp->r_trapno = grp[REG_TRAPNO]; 727 rp->r_err = grp[REG_ERR]; 728 rp->r_rip = grp[REG_RIP]; 729 /* 730 * Setting %cs or %ss to anything else is quietly but 731 * quite definitely forbidden! 732 */ 733 rp->r_cs = UCS_SEL; 734 rp->r_ss = UDS_SEL; 735 rp->r_rsp = grp[REG_RSP]; 736 737 if (thisthread) 738 kpreempt_disable(); 739 740 pcb->pcb_ds = UDS_SEL; 741 pcb->pcb_es = UDS_SEL; 742 743 /* 744 * 64-bit processes -are- allowed to set their fsbase/gsbase 745 * values directly, but only if they're using the segment 746 * selectors that allow that semantic. 747 * 748 * (32-bit processes must use lwp_set_private().) 749 */ 750 pcb->pcb_fsbase = grp[REG_FSBASE]; 751 pcb->pcb_gsbase = grp[REG_GSBASE]; 752 pcb->pcb_fs = fix_segreg(grp[REG_FS], IS_NOT_CS, datamodel); 753 pcb->pcb_gs = fix_segreg(grp[REG_GS], IS_NOT_CS, datamodel); 754 755 /* 756 * Ensure that we go out via update_sregs 757 */ 758 pcb->pcb_rupdate = 1; 759 lwptot(lwp)->t_post_sys = 1; 760 if (thisthread) 761 kpreempt_enable(); 762 #if defined(_SYSCALL32_IMPL) 763 } else { 764 rp->r_rdi = (uint32_t)grp[REG_RDI]; 765 rp->r_rsi = (uint32_t)grp[REG_RSI]; 766 rp->r_rdx = (uint32_t)grp[REG_RDX]; 767 rp->r_rcx = (uint32_t)grp[REG_RCX]; 768 rp->r_rax = (uint32_t)grp[REG_RAX]; 769 rp->r_rbx = (uint32_t)grp[REG_RBX]; 770 rp->r_rbp = (uint32_t)grp[REG_RBP]; 771 rp->r_trapno = (uint32_t)grp[REG_TRAPNO]; 772 rp->r_err = (uint32_t)grp[REG_ERR]; 773 rp->r_rip = (uint32_t)grp[REG_RIP]; 774 775 rp->r_cs = fix_segreg(grp[REG_CS], IS_CS, datamodel); 776 rp->r_ss = fix_segreg(grp[REG_DS], IS_NOT_CS, datamodel); 777 778 rp->r_rsp = (uint32_t)grp[REG_RSP]; 779 780 if (thisthread) 781 kpreempt_disable(); 782 783 pcb->pcb_ds = fix_segreg(grp[REG_DS], IS_NOT_CS, datamodel); 784 pcb->pcb_es = fix_segreg(grp[REG_ES], IS_NOT_CS, datamodel); 785 786 /* 787 * (See fsbase/gsbase commentary above) 788 */ 789 pcb->pcb_fs = fix_segreg(grp[REG_FS], IS_NOT_CS, datamodel); 790 pcb->pcb_gs = fix_segreg(grp[REG_GS], IS_NOT_CS, datamodel); 791 792 /* 793 * Ensure that we go out via update_sregs 794 */ 795 pcb->pcb_rupdate = 1; 796 lwptot(lwp)->t_post_sys = 1; 797 if (thisthread) 798 kpreempt_enable(); 799 #endif 800 } 801 802 /* 803 * Only certain bits of the flags register can be modified. 804 */ 805 rp->r_rfl = (rp->r_rfl & ~PSL_USERMASK) | 806 (grp[REG_RFL] & PSL_USERMASK); 807 808 #elif defined(__i386) 809 810 /* 811 * Only certain bits of the flags register can be modified. 812 */ 813 grp[EFL] = (rp->r_efl & ~PSL_USERMASK) | (grp[EFL] & PSL_USERMASK); 814 815 /* 816 * Copy saved registers from user stack. 817 */ 818 bcopy(grp, &rp->r_gs, sizeof (gregset_t)); 819 820 rp->r_cs = fix_segreg(rp->r_cs, IS_CS, datamodel); 821 rp->r_ss = fix_segreg(rp->r_ss, IS_NOT_CS, datamodel); 822 rp->r_ds = fix_segreg(rp->r_ds, IS_NOT_CS, datamodel); 823 rp->r_es = fix_segreg(rp->r_es, IS_NOT_CS, datamodel); 824 rp->r_fs = fix_segreg(rp->r_fs, IS_NOT_CS, datamodel); 825 rp->r_gs = fix_segreg(rp->r_gs, IS_NOT_CS, datamodel); 826 827 #endif /* __i386 */ 828 } 829 830 /* 831 * Determine whether eip is likely to have an interrupt frame 832 * on the stack. We do this by comparing the address to the 833 * range of addresses spanned by several well-known routines. 834 */ 835 extern void _interrupt(); 836 extern void _allsyscalls(); 837 extern void _cmntrap(); 838 extern void fakesoftint(); 839 840 extern size_t _interrupt_size; 841 extern size_t _allsyscalls_size; 842 extern size_t _cmntrap_size; 843 extern size_t _fakesoftint_size; 844 845 /* 846 * Get a pc-only stacktrace. Used for kmem_alloc() buffer ownership tracking. 847 * Returns MIN(current stack depth, pcstack_limit). 848 */ 849 int 850 getpcstack(pc_t *pcstack, int pcstack_limit) 851 { 852 struct frame *fp = (struct frame *)getfp(); 853 struct frame *nextfp, *minfp, *stacktop; 854 int depth = 0; 855 int on_intr; 856 uintptr_t pc; 857 858 if ((on_intr = CPU_ON_INTR(CPU)) != 0) 859 stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME)); 860 else 861 stacktop = (struct frame *)curthread->t_stk; 862 minfp = fp; 863 864 pc = ((struct regs *)fp)->r_pc; 865 866 while (depth < pcstack_limit) { 867 nextfp = (struct frame *)fp->fr_savfp; 868 pc = fp->fr_savpc; 869 if (nextfp <= minfp || nextfp >= stacktop) { 870 if (on_intr) { 871 /* 872 * Hop from interrupt stack to thread stack. 873 */ 874 stacktop = (struct frame *)curthread->t_stk; 875 minfp = (struct frame *)curthread->t_stkbase; 876 on_intr = 0; 877 continue; 878 } 879 break; 880 } 881 pcstack[depth++] = (pc_t)pc; 882 fp = nextfp; 883 minfp = fp; 884 } 885 return (depth); 886 } 887 888 /* 889 * The following ELF header fields are defined as processor-specific 890 * in the V8 ABI: 891 * 892 * e_ident[EI_DATA] encoding of the processor-specific 893 * data in the object file 894 * e_machine processor identification 895 * e_flags processor-specific flags associated 896 * with the file 897 */ 898 899 /* 900 * The value of at_flags reflects a platform's cpu module support. 901 * at_flags is used to check for allowing a binary to execute and 902 * is passed as the value of the AT_FLAGS auxiliary vector. 903 */ 904 int at_flags = 0; 905 906 /* 907 * Check the processor-specific fields of an ELF header. 908 * 909 * returns 1 if the fields are valid, 0 otherwise 910 */ 911 /*ARGSUSED2*/ 912 int 913 elfheadcheck( 914 unsigned char e_data, 915 Elf32_Half e_machine, 916 Elf32_Word e_flags) 917 { 918 if (e_data != ELFDATA2LSB) 919 return (0); 920 #if defined(__amd64) 921 if (e_machine == EM_AMD64) 922 return (1); 923 #endif 924 return (e_machine == EM_386); 925 } 926 927 uint_t auxv_hwcap_include = 0; /* patch to enable unrecognized features */ 928 uint_t auxv_hwcap_include_2 = 0; /* second word */ 929 uint_t auxv_hwcap_exclude = 0; /* patch for broken cpus, debugging */ 930 uint_t auxv_hwcap_exclude_2 = 0; /* second word */ 931 #if defined(_SYSCALL32_IMPL) 932 uint_t auxv_hwcap32_include = 0; /* ditto for 32-bit apps */ 933 uint_t auxv_hwcap32_include_2 = 0; /* ditto for 32-bit apps */ 934 uint_t auxv_hwcap32_exclude = 0; /* ditto for 32-bit apps */ 935 uint_t auxv_hwcap32_exclude_2 = 0; /* ditto for 32-bit apps */ 936 #endif 937 938 /* 939 * Gather information about the processor and place it into auxv_hwcap 940 * so that it can be exported to the linker via the aux vector. 941 * 942 * We use this seemingly complicated mechanism so that we can ensure 943 * that /etc/system can be used to override what the system can or 944 * cannot discover for itself. 945 */ 946 void 947 bind_hwcap(void) 948 { 949 uint_t cpu_hwcap_flags[2]; 950 cpuid_pass4(NULL, cpu_hwcap_flags); 951 952 auxv_hwcap = (auxv_hwcap_include | cpu_hwcap_flags[0]) & 953 ~auxv_hwcap_exclude; 954 auxv_hwcap_2 = (auxv_hwcap_include_2 | cpu_hwcap_flags[1]) & 955 ~auxv_hwcap_exclude_2; 956 957 #if defined(__amd64) 958 /* 959 * On AMD processors, sysenter just doesn't work at all 960 * when the kernel is in long mode. On IA-32e processors 961 * it does, but there's no real point in all the alternate 962 * mechanism when syscall works on both. 963 * 964 * Besides, the kernel's sysenter handler is expecting a 965 * 32-bit lwp ... 966 */ 967 auxv_hwcap &= ~AV_386_SEP; 968 #else 969 /* 970 * 32-bit processes can -always- use the lahf/sahf instructions 971 */ 972 auxv_hwcap |= AV_386_AHF; 973 #endif 974 975 if (auxv_hwcap_include || auxv_hwcap_exclude || auxv_hwcap_include_2 || 976 auxv_hwcap_exclude_2) { 977 /* 978 * The below assignment is regrettably required to get lint 979 * to accept the validity of our format string. The format 980 * string is in fact valid, but whatever intelligence in lint 981 * understands the cmn_err()-specific %b appears to have an 982 * off-by-one error: it (mistakenly) complains about bit 983 * number 32 (even though this is explicitly permitted). 984 * Normally, one would will away such warnings with a "LINTED" 985 * directive, but for reasons unclear and unknown, lint 986 * refuses to be assuaged in this case. Fortunately, lint 987 * doesn't pretend to have solved the Halting Problem -- 988 * and as soon as the format string is programmatic, it 989 * knows enough to shut up. 990 */ 991 char *fmt = "?user ABI extensions: %b\n"; 992 cmn_err(CE_CONT, fmt, auxv_hwcap, FMT_AV_386); 993 fmt = "?user ABI extensions (word 2): %b\n"; 994 cmn_err(CE_CONT, fmt, auxv_hwcap_2, FMT_AV_386_2); 995 } 996 997 #if defined(_SYSCALL32_IMPL) 998 auxv_hwcap32 = (auxv_hwcap32_include | cpu_hwcap_flags[0]) & 999 ~auxv_hwcap32_exclude; 1000 auxv_hwcap32_2 = (auxv_hwcap32_include_2 | cpu_hwcap_flags[1]) & 1001 ~auxv_hwcap32_exclude_2; 1002 1003 #if defined(__amd64) 1004 /* 1005 * If this is an amd64 architecture machine from Intel, then 1006 * syscall -doesn't- work in compatibility mode, only sysenter does. 1007 * 1008 * Sigh. 1009 */ 1010 if (!cpuid_syscall32_insn(NULL)) 1011 auxv_hwcap32 &= ~AV_386_AMD_SYSC; 1012 1013 /* 1014 * 32-bit processes can -always- use the lahf/sahf instructions 1015 */ 1016 auxv_hwcap32 |= AV_386_AHF; 1017 #endif 1018 1019 if (auxv_hwcap32_include || auxv_hwcap32_exclude || 1020 auxv_hwcap32_include_2 || auxv_hwcap32_exclude_2) { 1021 /* 1022 * See the block comment in the cmn_err() of auxv_hwcap, above. 1023 */ 1024 char *fmt = "?32-bit user ABI extensions: %b\n"; 1025 cmn_err(CE_CONT, fmt, auxv_hwcap32, FMT_AV_386); 1026 fmt = "?32-bit user ABI extensions (word 2): %b\n"; 1027 cmn_err(CE_CONT, fmt, auxv_hwcap32_2, FMT_AV_386_2); 1028 } 1029 #endif 1030 } 1031 1032 /* 1033 * sync_icache() - this is called 1034 * in proc/fs/prusrio.c. x86 has an unified cache and therefore 1035 * this is a nop. 1036 */ 1037 /* ARGSUSED */ 1038 void 1039 sync_icache(caddr_t addr, uint_t len) 1040 { 1041 /* Do nothing for now */ 1042 } 1043 1044 /*ARGSUSED*/ 1045 void 1046 sync_data_memory(caddr_t va, size_t len) 1047 { 1048 /* Not implemented for this platform */ 1049 } 1050 1051 int 1052 __ipltospl(int ipl) 1053 { 1054 return (ipltospl(ipl)); 1055 } 1056 1057 /* 1058 * The panic code invokes panic_saveregs() to record the contents of a 1059 * regs structure into the specified panic_data structure for debuggers. 1060 */ 1061 void 1062 panic_saveregs(panic_data_t *pdp, struct regs *rp) 1063 { 1064 panic_nv_t *pnv = PANICNVGET(pdp); 1065 1066 struct cregs creg; 1067 1068 getcregs(&creg); 1069 1070 #if defined(__amd64) 1071 PANICNVADD(pnv, "rdi", rp->r_rdi); 1072 PANICNVADD(pnv, "rsi", rp->r_rsi); 1073 PANICNVADD(pnv, "rdx", rp->r_rdx); 1074 PANICNVADD(pnv, "rcx", rp->r_rcx); 1075 PANICNVADD(pnv, "r8", rp->r_r8); 1076 PANICNVADD(pnv, "r9", rp->r_r9); 1077 PANICNVADD(pnv, "rax", rp->r_rax); 1078 PANICNVADD(pnv, "rbx", rp->r_rbx); 1079 PANICNVADD(pnv, "rbp", rp->r_rbp); 1080 PANICNVADD(pnv, "r10", rp->r_r10); 1081 PANICNVADD(pnv, "r10", rp->r_r10); 1082 PANICNVADD(pnv, "r11", rp->r_r11); 1083 PANICNVADD(pnv, "r12", rp->r_r12); 1084 PANICNVADD(pnv, "r13", rp->r_r13); 1085 PANICNVADD(pnv, "r14", rp->r_r14); 1086 PANICNVADD(pnv, "r15", rp->r_r15); 1087 PANICNVADD(pnv, "fsbase", rdmsr(MSR_AMD_FSBASE)); 1088 PANICNVADD(pnv, "gsbase", rdmsr(MSR_AMD_GSBASE)); 1089 PANICNVADD(pnv, "ds", rp->r_ds); 1090 PANICNVADD(pnv, "es", rp->r_es); 1091 PANICNVADD(pnv, "fs", rp->r_fs); 1092 PANICNVADD(pnv, "gs", rp->r_gs); 1093 PANICNVADD(pnv, "trapno", rp->r_trapno); 1094 PANICNVADD(pnv, "err", rp->r_err); 1095 PANICNVADD(pnv, "rip", rp->r_rip); 1096 PANICNVADD(pnv, "cs", rp->r_cs); 1097 PANICNVADD(pnv, "rflags", rp->r_rfl); 1098 PANICNVADD(pnv, "rsp", rp->r_rsp); 1099 PANICNVADD(pnv, "ss", rp->r_ss); 1100 PANICNVADD(pnv, "gdt_hi", (uint64_t)(creg.cr_gdt._l[3])); 1101 PANICNVADD(pnv, "gdt_lo", (uint64_t)(creg.cr_gdt._l[0])); 1102 PANICNVADD(pnv, "idt_hi", (uint64_t)(creg.cr_idt._l[3])); 1103 PANICNVADD(pnv, "idt_lo", (uint64_t)(creg.cr_idt._l[0])); 1104 #elif defined(__i386) 1105 PANICNVADD(pnv, "gs", (uint32_t)rp->r_gs); 1106 PANICNVADD(pnv, "fs", (uint32_t)rp->r_fs); 1107 PANICNVADD(pnv, "es", (uint32_t)rp->r_es); 1108 PANICNVADD(pnv, "ds", (uint32_t)rp->r_ds); 1109 PANICNVADD(pnv, "edi", (uint32_t)rp->r_edi); 1110 PANICNVADD(pnv, "esi", (uint32_t)rp->r_esi); 1111 PANICNVADD(pnv, "ebp", (uint32_t)rp->r_ebp); 1112 PANICNVADD(pnv, "esp", (uint32_t)rp->r_esp); 1113 PANICNVADD(pnv, "ebx", (uint32_t)rp->r_ebx); 1114 PANICNVADD(pnv, "edx", (uint32_t)rp->r_edx); 1115 PANICNVADD(pnv, "ecx", (uint32_t)rp->r_ecx); 1116 PANICNVADD(pnv, "eax", (uint32_t)rp->r_eax); 1117 PANICNVADD(pnv, "trapno", (uint32_t)rp->r_trapno); 1118 PANICNVADD(pnv, "err", (uint32_t)rp->r_err); 1119 PANICNVADD(pnv, "eip", (uint32_t)rp->r_eip); 1120 PANICNVADD(pnv, "cs", (uint32_t)rp->r_cs); 1121 PANICNVADD(pnv, "eflags", (uint32_t)rp->r_efl); 1122 PANICNVADD(pnv, "uesp", (uint32_t)rp->r_uesp); 1123 PANICNVADD(pnv, "ss", (uint32_t)rp->r_ss); 1124 PANICNVADD(pnv, "gdt", creg.cr_gdt); 1125 PANICNVADD(pnv, "idt", creg.cr_idt); 1126 #endif /* __i386 */ 1127 1128 PANICNVADD(pnv, "ldt", creg.cr_ldt); 1129 PANICNVADD(pnv, "task", creg.cr_task); 1130 PANICNVADD(pnv, "cr0", creg.cr_cr0); 1131 PANICNVADD(pnv, "cr2", creg.cr_cr2); 1132 PANICNVADD(pnv, "cr3", creg.cr_cr3); 1133 if (creg.cr_cr4) 1134 PANICNVADD(pnv, "cr4", creg.cr_cr4); 1135 1136 PANICNVSET(pdp, pnv); 1137 } 1138 1139 #define TR_ARG_MAX 6 /* Max args to print, same as SPARC */ 1140 1141 #if !defined(__amd64) 1142 1143 /* 1144 * Given a return address (%eip), determine the likely number of arguments 1145 * that were pushed on the stack prior to its execution. We do this by 1146 * expecting that a typical call sequence consists of pushing arguments on 1147 * the stack, executing a call instruction, and then performing an add 1148 * on %esp to restore it to the value prior to pushing the arguments for 1149 * the call. We attempt to detect such an add, and divide the addend 1150 * by the size of a word to determine the number of pushed arguments. 1151 * 1152 * If we do not find such an add, we punt and return TR_ARG_MAX. It is not 1153 * possible to reliably determine if a function took no arguments (i.e. was 1154 * void) because assembler routines do not reliably perform an add on %esp 1155 * immediately upon returning (eg. _sys_call()), so returning TR_ARG_MAX is 1156 * safer than returning 0. 1157 */ 1158 static ulong_t 1159 argcount(uintptr_t eip) 1160 { 1161 const uint8_t *ins = (const uint8_t *)eip; 1162 ulong_t n; 1163 1164 enum { 1165 M_MODRM_ESP = 0xc4, /* Mod/RM byte indicates %esp */ 1166 M_ADD_IMM32 = 0x81, /* ADD imm32 to r/m32 */ 1167 M_ADD_IMM8 = 0x83 /* ADD imm8 to r/m32 */ 1168 }; 1169 1170 if (eip < KERNELBASE || ins[1] != M_MODRM_ESP) 1171 return (TR_ARG_MAX); 1172 1173 switch (ins[0]) { 1174 case M_ADD_IMM32: 1175 n = ins[2] + (ins[3] << 8) + (ins[4] << 16) + (ins[5] << 24); 1176 break; 1177 1178 case M_ADD_IMM8: 1179 n = ins[2]; 1180 break; 1181 1182 default: 1183 return (TR_ARG_MAX); 1184 } 1185 1186 n /= sizeof (long); 1187 return (MIN(n, TR_ARG_MAX)); 1188 } 1189 1190 #endif /* !__amd64 */ 1191 1192 /* 1193 * Print a stack backtrace using the specified frame pointer. We delay two 1194 * seconds before continuing, unless this is the panic traceback. 1195 * If we are in the process of panicking, we also attempt to write the 1196 * stack backtrace to a staticly assigned buffer, to allow the panic 1197 * code to find it and write it in to uncompressed pages within the 1198 * system crash dump. 1199 * Note that the frame for the starting stack pointer value is omitted because 1200 * the corresponding %eip is not known. 1201 */ 1202 1203 extern char *dump_stack_scratch; 1204 1205 #if defined(__amd64) 1206 1207 void 1208 traceback(caddr_t fpreg) 1209 { 1210 struct frame *fp = (struct frame *)fpreg; 1211 struct frame *nextfp; 1212 uintptr_t pc, nextpc; 1213 ulong_t off; 1214 char args[TR_ARG_MAX * 2 + 16], *sym; 1215 uint_t offset = 0; 1216 uint_t next_offset = 0; 1217 char stack_buffer[1024]; 1218 1219 if (!panicstr) 1220 printf("traceback: %%fp = %p\n", (void *)fp); 1221 1222 if (panicstr && !dump_stack_scratch) { 1223 printf("Warning - stack not written to the dump buffer\n"); 1224 } 1225 1226 fp = (struct frame *)plat_traceback(fpreg); 1227 if ((uintptr_t)fp < KERNELBASE) 1228 goto out; 1229 1230 pc = fp->fr_savpc; 1231 fp = (struct frame *)fp->fr_savfp; 1232 1233 while ((uintptr_t)fp >= KERNELBASE) { 1234 /* 1235 * XX64 Until port is complete tolerate 8-byte aligned 1236 * frame pointers but flag with a warning so they can 1237 * be fixed. 1238 */ 1239 if (((uintptr_t)fp & (STACK_ALIGN - 1)) != 0) { 1240 if (((uintptr_t)fp & (8 - 1)) == 0) { 1241 printf(" >> warning! 8-byte" 1242 " aligned %%fp = %p\n", (void *)fp); 1243 } else { 1244 printf( 1245 " >> mis-aligned %%fp = %p\n", (void *)fp); 1246 break; 1247 } 1248 } 1249 1250 args[0] = '\0'; 1251 nextpc = (uintptr_t)fp->fr_savpc; 1252 nextfp = (struct frame *)fp->fr_savfp; 1253 if ((sym = kobj_getsymname(pc, &off)) != NULL) { 1254 printf("%016lx %s:%s+%lx (%s)\n", (uintptr_t)fp, 1255 mod_containing_pc((caddr_t)pc), sym, off, args); 1256 (void) snprintf(stack_buffer, sizeof (stack_buffer), 1257 "%s:%s+%lx (%s) | ", 1258 mod_containing_pc((caddr_t)pc), sym, off, args); 1259 } else { 1260 printf("%016lx %lx (%s)\n", 1261 (uintptr_t)fp, pc, args); 1262 (void) snprintf(stack_buffer, sizeof (stack_buffer), 1263 "%lx (%s) | ", pc, args); 1264 } 1265 1266 if (panicstr && dump_stack_scratch) { 1267 next_offset = offset + strlen(stack_buffer); 1268 if (next_offset < STACK_BUF_SIZE) { 1269 bcopy(stack_buffer, dump_stack_scratch + offset, 1270 strlen(stack_buffer)); 1271 offset = next_offset; 1272 } else { 1273 /* 1274 * In attempting to save the panic stack 1275 * to the dumpbuf we have overflowed that area. 1276 * Print a warning and continue to printf the 1277 * stack to the msgbuf 1278 */ 1279 printf("Warning: stack in the dump buffer" 1280 " may be incomplete\n"); 1281 offset = next_offset; 1282 } 1283 } 1284 1285 pc = nextpc; 1286 fp = nextfp; 1287 } 1288 out: 1289 if (!panicstr) { 1290 printf("end of traceback\n"); 1291 DELAY(2 * MICROSEC); 1292 } else if (dump_stack_scratch) { 1293 dump_stack_scratch[offset] = '\0'; 1294 } 1295 } 1296 1297 #elif defined(__i386) 1298 1299 void 1300 traceback(caddr_t fpreg) 1301 { 1302 struct frame *fp = (struct frame *)fpreg; 1303 struct frame *nextfp, *minfp, *stacktop; 1304 uintptr_t pc, nextpc; 1305 uint_t offset = 0; 1306 uint_t next_offset = 0; 1307 char stack_buffer[1024]; 1308 1309 cpu_t *cpu; 1310 1311 /* 1312 * args[] holds TR_ARG_MAX hex long args, plus ", " or '\0'. 1313 */ 1314 char args[TR_ARG_MAX * 2 + 8], *p; 1315 1316 int on_intr; 1317 ulong_t off; 1318 char *sym; 1319 1320 if (!panicstr) 1321 printf("traceback: %%fp = %p\n", (void *)fp); 1322 1323 if (panicstr && !dump_stack_scratch) { 1324 printf("Warning - stack not written to the dumpbuf\n"); 1325 } 1326 1327 /* 1328 * If we are panicking, all high-level interrupt information in 1329 * CPU was overwritten. panic_cpu has the correct values. 1330 */ 1331 kpreempt_disable(); /* prevent migration */ 1332 1333 cpu = (panicstr && CPU->cpu_id == panic_cpu.cpu_id)? &panic_cpu : CPU; 1334 1335 if ((on_intr = CPU_ON_INTR(cpu)) != 0) 1336 stacktop = (struct frame *)(cpu->cpu_intr_stack + SA(MINFRAME)); 1337 else 1338 stacktop = (struct frame *)curthread->t_stk; 1339 1340 kpreempt_enable(); 1341 1342 fp = (struct frame *)plat_traceback(fpreg); 1343 if ((uintptr_t)fp < KERNELBASE) 1344 goto out; 1345 1346 minfp = fp; /* Baseline minimum frame pointer */ 1347 pc = fp->fr_savpc; 1348 fp = (struct frame *)fp->fr_savfp; 1349 1350 while ((uintptr_t)fp >= KERNELBASE) { 1351 ulong_t argc; 1352 long *argv; 1353 1354 if (fp <= minfp || fp >= stacktop) { 1355 if (on_intr) { 1356 /* 1357 * Hop from interrupt stack to thread stack. 1358 */ 1359 stacktop = (struct frame *)curthread->t_stk; 1360 minfp = (struct frame *)curthread->t_stkbase; 1361 on_intr = 0; 1362 continue; 1363 } 1364 break; /* we're outside of the expected range */ 1365 } 1366 1367 if ((uintptr_t)fp & (STACK_ALIGN - 1)) { 1368 printf(" >> mis-aligned %%fp = %p\n", (void *)fp); 1369 break; 1370 } 1371 1372 nextpc = fp->fr_savpc; 1373 nextfp = (struct frame *)fp->fr_savfp; 1374 argc = argcount(nextpc); 1375 argv = (long *)((char *)fp + sizeof (struct frame)); 1376 1377 args[0] = '\0'; 1378 p = args; 1379 while (argc-- > 0 && argv < (long *)stacktop) { 1380 p += snprintf(p, args + sizeof (args) - p, 1381 "%s%lx", (p == args) ? "" : ", ", *argv++); 1382 } 1383 1384 if ((sym = kobj_getsymname(pc, &off)) != NULL) { 1385 printf("%08lx %s:%s+%lx (%s)\n", (uintptr_t)fp, 1386 mod_containing_pc((caddr_t)pc), sym, off, args); 1387 (void) snprintf(stack_buffer, sizeof (stack_buffer), 1388 "%s:%s+%lx (%s) | ", 1389 mod_containing_pc((caddr_t)pc), sym, off, args); 1390 1391 } else { 1392 printf("%08lx %lx (%s)\n", 1393 (uintptr_t)fp, pc, args); 1394 (void) snprintf(stack_buffer, sizeof (stack_buffer), 1395 "%lx (%s) | ", pc, args); 1396 1397 } 1398 1399 if (panicstr && dump_stack_scratch) { 1400 next_offset = offset + strlen(stack_buffer); 1401 if (next_offset < STACK_BUF_SIZE) { 1402 bcopy(stack_buffer, dump_stack_scratch + offset, 1403 strlen(stack_buffer)); 1404 offset = next_offset; 1405 } else { 1406 /* 1407 * In attempting to save the panic stack 1408 * to the dumpbuf we have overflowed that area. 1409 * Print a warning and continue to printf the 1410 * stack to the msgbuf 1411 */ 1412 printf("Warning: stack in the dumpbuf" 1413 " may be incomplete\n"); 1414 offset = next_offset; 1415 } 1416 } 1417 1418 minfp = fp; 1419 pc = nextpc; 1420 fp = nextfp; 1421 } 1422 out: 1423 if (!panicstr) { 1424 printf("end of traceback\n"); 1425 DELAY(2 * MICROSEC); 1426 } else if (dump_stack_scratch) { 1427 dump_stack_scratch[offset] = '\0'; 1428 } 1429 1430 } 1431 1432 #endif /* __i386 */ 1433 1434 /* 1435 * Generate a stack backtrace from a saved register set. 1436 */ 1437 void 1438 traceregs(struct regs *rp) 1439 { 1440 traceback((caddr_t)rp->r_fp); 1441 } 1442 1443 void 1444 exec_set_sp(size_t stksize) 1445 { 1446 klwp_t *lwp = ttolwp(curthread); 1447 1448 lwptoregs(lwp)->r_sp = (uintptr_t)curproc->p_usrstack - stksize; 1449 } 1450 1451 hrtime_t 1452 gethrtime_waitfree(void) 1453 { 1454 return (dtrace_gethrtime()); 1455 } 1456 1457 hrtime_t 1458 gethrtime(void) 1459 { 1460 return (gethrtimef()); 1461 } 1462 1463 hrtime_t 1464 gethrtime_unscaled(void) 1465 { 1466 return (gethrtimeunscaledf()); 1467 } 1468 1469 void 1470 scalehrtime(hrtime_t *hrt) 1471 { 1472 scalehrtimef(hrt); 1473 } 1474 1475 uint64_t 1476 unscalehrtime(hrtime_t nsecs) 1477 { 1478 return (unscalehrtimef(nsecs)); 1479 } 1480 1481 void 1482 gethrestime(timespec_t *tp) 1483 { 1484 gethrestimef(tp); 1485 } 1486 1487 #if defined(__amd64) 1488 /* 1489 * Part of the implementation of hres_tick(); this routine is 1490 * easier in C than assembler .. called with the hres_lock held. 1491 * 1492 * XX64 Many of these timekeeping variables need to be extern'ed in a header 1493 */ 1494 1495 #include <sys/time.h> 1496 #include <sys/machlock.h> 1497 1498 extern int one_sec; 1499 extern int max_hres_adj; 1500 1501 void 1502 __adj_hrestime(void) 1503 { 1504 long long adj; 1505 1506 if (hrestime_adj == 0) 1507 adj = 0; 1508 else if (hrestime_adj > 0) { 1509 if (hrestime_adj < max_hres_adj) 1510 adj = hrestime_adj; 1511 else 1512 adj = max_hres_adj; 1513 } else { 1514 if (hrestime_adj < -max_hres_adj) 1515 adj = -max_hres_adj; 1516 else 1517 adj = hrestime_adj; 1518 } 1519 1520 timedelta -= adj; 1521 hrestime_adj = timedelta; 1522 hrestime.tv_nsec += adj; 1523 1524 while (hrestime.tv_nsec >= NANOSEC) { 1525 one_sec++; 1526 hrestime.tv_sec++; 1527 hrestime.tv_nsec -= NANOSEC; 1528 } 1529 } 1530 #endif 1531 1532 /* 1533 * Wrapper functions to maintain backwards compability 1534 */ 1535 int 1536 xcopyin(const void *uaddr, void *kaddr, size_t count) 1537 { 1538 return (xcopyin_nta(uaddr, kaddr, count, UIO_COPY_CACHED)); 1539 } 1540 1541 int 1542 xcopyout(const void *kaddr, void *uaddr, size_t count) 1543 { 1544 return (xcopyout_nta(kaddr, uaddr, count, UIO_COPY_CACHED)); 1545 } 1546