/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#if !defined(__lint)
#include <sys/asm_linkage.h>
#include <sys/trap.h>
#include <sys/mmu.h>
#include <sys/machasi.h>
#include <sys/intreg.h>
#define	_KERNEL
#include <sys/privregs.h>
#undef	_KERNEL
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/machparam.h>
#endif

#include <mdb/mdb_kreg_impl.h>
#include <kmdb/kaif_regs.h>
#include <kmdb/kaif_off.h>
#include <kmdb/kaif.h>
#include <kmdb/kaif_asmutil.h>

#define	KAIF_CPU_INDEX \
	set	mdb, %g1; \
	ldx	[%g1 + MDB_KDI], %g1; \
	ldx	[%g1 + MKDI_CPU_INDEX], %g1; \
	set	1f, %g7; \
	jmp	%g1; \
	nop; \
1:

#define	KAIF_CPU_GETADDR_TL1 \
	set	kaif_cpusave_getaddr, %g6; \
	sethi	%hi(1f), %g7; \
	jmp	%g6; \
	or	%g7, %lo(1f), %g7; \
1:

#define	KAIF_COPY_KREG(src, tgt, idx, tmp) \
	ldx	[src + KREG_OFF(idx)], tmp; \
	stx	tmp, [tgt + KREG_OFF(idx)]

#ifndef sun4v
/*
 * Creates a new primary context register value by copying the nucleus page
 * size bits to the primary context page size bits and setting the primary
 * context to zero. The updated value is stored in the ctx parameter.
 */
#define	KAIF_MAKE_NEW_CTXREG(ctx, tmp) \
	srlx	ctx, CTXREG_NEXT_SHIFT, ctx; \
	sllx	ctx, CTXREG_NEXT_SHIFT, ctx; \
	sllx	ctx, 3, tmp; \
	srlx	tmp, CTXREG_NEXT_SHIFT, tmp; \
	sllx	tmp, CTXREG_EXT_SHIFT, tmp; \
	or	ctx, tmp, ctx; \
	srlx	ctx, CTXREG_NEXT_SHIFT + 3, tmp; \
	sllx	tmp, CTXREG_EXT_SHIFT, tmp; \
	or	ctx, tmp, ctx
#endif /* sun4v */

#if !defined(__lint)

	/*
	 * Calculate the address of the save area for the current CPU. This
	 * would be a macro, but for the need to call platform-specific CPU ID
	 * routines. The kernel provides, via the KDI, a TL=1-safe "function"
	 * for CPU ID retrieval, which we call here. The retrieval code returns
	 * the ID in %g1, and is allowed to clobber %g2. It also assumes that
	 * the return address is in %g7.
	 *
	 * Arguments:
	 *	%g7 - return address
	 * Returns:
	 *	%g6 - address of save area
	 *
	 * %g4 will be preserved.
	 */
	ENTRY_NP(kaif_cpusave_getaddr)

	mov	%g7, %g5	! we'll need %g7 for the ID retriever
	KAIF_CPU_INDEX		! index returned in %g1, clobbers %g2, %g7

	set	KRS_SIZE, %g2
	mulx	%g1, %g2, %g2
	set	kaif_cpusave, %g6
	ldx	[%g6], %g6

	jmp	%g5		! return to caller-provided address
	add	%g6, %g2, %g6

	SET_SIZE(kaif_cpusave_getaddr)
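	/*
	 * In rough pseudocode (illustrative only), the computation above is:
	 *
	 *	save area = kaif_cpusave + cpu_index * KRS_SIZE
	 *
	 * where kaif_cpusave holds the base address of the per-CPU save-area
	 * array and KRS_SIZE is the size of one entry.
	 */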
	/*
	 * Save volatile state - state that won't be available when we switch
	 * back to TL=0. We're currently at TL=1, and are on either the
	 * alternate or interrupt globals, so we'll need to do a bit of a
	 * dance in order to save the normal globals.
	 *
	 * NOTE: This routine and kaif_trap_obp must be equivalent.
	 *
	 * Parameters:
	 *	%g7 - return address
	 *	%g6 - cpusave area
	 *	%g4 - the %pstate value to get us back to our current globals
	 *	      set (%g4 is not applicable on sun4v, which uses %gl)
	 */

	ENTRY_NP(kaif_save_tl1_state)

	add	%g6, KRS_GREGS + GREG_KREGS, %g5

	rdpr	%tstate, %g2
	stx	%g2, [%g6 + KRS_TSTATE]
	rdpr	%tpc, %g2
	stx	%g2, [%g5 + KREG_OFF(KREG_PC)]
	rdpr	%tnpc, %g2
	stx	%g2, [%g5 + KREG_OFF(KREG_NPC)]
	rdpr	%tt, %g2
	stx	%g2, [%g5 + KREG_OFF(KREG_TT)]

	/*
	 * Switch over to the normal globals so we can save them. We'll need
	 * our gregs pointer and the return %pstate value, so stash them in
	 * registers that will be available to us on both sides.
	 *
	 * NOTE: On sun4v, the global register set is selected by the %gl
	 * register; there is no PSTATE.AG bit to select a global set.
	 *	- The normal globals are the set selected when %gl = 0.
	 *	- The TL1 globals are the set selected when %gl = 1.
	 */
	SWITCH_TO_NORMAL_GLOBALS();		/* saves %o5 and %o4 */
	stx	%g1, [%o5 + KREG_OFF(KREG_G1)]
	stx	%g2, [%o5 + KREG_OFF(KREG_G2)]
	stx	%g3, [%o5 + KREG_OFF(KREG_G3)]
	stx	%g4, [%o5 + KREG_OFF(KREG_G4)]
	stx	%g5, [%o5 + KREG_OFF(KREG_G5)]
	stx	%g6, [%o5 + KREG_OFF(KREG_G6)]
	stx	%g7, [%o5 + KREG_OFF(KREG_G7)]

	/*
	 * Restore the saved %o registers and return.
	 */
	SWITCH_TO_TL1_GLOBALS_AND_RET();	/* restores %o5 and %o4 */
	SET_SIZE(kaif_save_tl1_state)
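	/*
	 * Note on the dance above: the out registers are not banked with the
	 * globals, so SWITCH_TO_NORMAL_GLOBALS() apparently parks the gregs
	 * pointer and the return %pstate/%gl value in %o5 and %o4 before
	 * switching sets; the stores above then go through %o5, and
	 * SWITCH_TO_TL1_GLOBALS_AND_RET() switches back and returns through
	 * the saved values.
	 */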
	/*
	 * Save the remaining state, and prepare to enter the debugger.
	 */

	ENTRY_NP(kaif_trap_common)

	/* Make sure the world is as it should be */
	wrpr	%g0, PTSTATE_KERN_COMMON, %pstate
	wrpr	%g0, %tl

	SET_GL(0);
	set	1f, %g7
	set	kaif_cpusave_getaddr, %g6
	jmp	%g6
	nop
1:	/* CPU save area address is now in %g6 */
	add	%g6, KRS_GREGS + GREG_KREGS, %g5

	ldx	[%g5 + KREG_OFF(KREG_PC)], %g4
	ADD_CRUMB(%g6, KRM_PC, %g4, %g1)
	ldx	[%g5 + KREG_OFF(KREG_TT)], %g4
	ADD_CRUMB(%g6, KRM_TT, %g4, %g1)

	/*
	 * The %tba is special. With normal entry, we're on the same trap table
	 * the kernel is using (this could be OBP's table if we're early enough
	 * in the boot process). We want to save it, but we don't want to
	 * switch to OBP's table just yet, as we need to ensure that only one
	 * CPU uses OBP's table at a time. We do this by waiting until we've
	 * selected the master before switching.
	 *
	 * Single-step is a bit different. Everything about the CPU's state is
	 * as it should be, with the exception of %tba. We need to step on
	 * OBP's trap table, so we didn't restore %tba during resume. The save
	 * state area still contains the real %tba value - the one we had when
	 * we first entered the debugger. We don't want to clobber that, so
	 * we'll only save %tba if we're not stepping.
	 */

	set	kaif_master_cpuid, %g1
	ld	[%g1], %g1
	ld	[%g6 + KRS_CPU_ID], %g2
	cmp	%g1, %g2
	be	1f
	nop

	rdpr	%tba, %g2
	stx	%g2, [%g5 + KREG_OFF(KREG_TBA)]

1:
	/* Update the PIL to 15 to block out most interrupts */
	rdpr	%pil, %g4
	stx	%g4, [%g5 + KREG_OFF(KREG_PIL)]
	wrpr	%g0, 15, %pil

	rd	%y, %g4
	stx	%g4, [%g5 + KREG_OFF(KREG_Y)]

	/*
	 * Save window state and windows
	 */
	rdpr	%cwp, %g4
	stx	%g4, [%g5 + KREG_OFF(KREG_CWP)]
	rdpr	%otherwin, %g4
	stx	%g4, [%g5 + KREG_OFF(KREG_OTHERWIN)]
	rdpr	%cleanwin, %g4
	stx	%g4, [%g5 + KREG_OFF(KREG_CLEANWIN)]
	rdpr	%cansave, %g4
	stx	%g4, [%g5 + KREG_OFF(KREG_CANSAVE)]
	rdpr	%canrestore, %g4
	stx	%g4, [%g5 + KREG_OFF(KREG_CANRESTORE)]
	rdpr	%wstate, %g4
	stx	%g4, [%g5 + KREG_OFF(KREG_WSTATE)]

	GET_NWIN(%g1, %g4);	! %g1 is scratch, %g4 set to nwin-1

	wrpr	%g4, %cleanwin

	sub	%g4, 1, %g1
	wrpr	%g1, %cansave
	wrpr	%g0, %otherwin
	wrpr	%g0, %canrestore
	wrpr	%g0, %cwp

	clr	%g2
	ldx	[%g6 + KRS_RWINS], %g3
1:	SAVE_V9WINDOW(%g3)
	inc	%g2
	add	%g3, RWIN_SIZE, %g3
	cmp	%g2, %g4
	ble	1b
	wrpr	%g2, %cwp

	/*
	 * Save FP state
	 */
	add	%g6, KRS_FPREGS, %g4
	rd	%fprs, %g1
	stx	%g1, [%g4 + FPU_FPRS]
	btst	FPRS_FEF, %g1		! is FP enabled?
	bz	%icc, 1f		! if not, don't save FP regs
	wr	%g0, FPRS_FEF, %fprs	! enable FP

	STORE_FPREGS(%g4)
	stx	%fsr, [%g4 + FPU_FSR]

1:	/*
	 * We're almost done saving state. Go back to the starting window, and
	 * switch to the CPU-specific stack. We'll use this stack to finish
	 * saving state, and for the next stage of debugger startup/resumption,
	 * when we designate the master. The slaves will continue to run on
	 * this stack until released or turned into masters.
	 */
	ldx	[%g5 + KREG_OFF(KREG_CWP)], %g4
	wrpr	%g4, %cwp

	set	KRS_CPUSTACK + KAIF_CPU_STKSZ - 1, %g1
	add	%g1, %g6, %g1
	and	%g1, -STACK_ALIGN64, %g1
	sub	%g1, SA64(MINFRAME) + V9BIAS64, %sp
	clr	%fp
	save	%sp, -SA64(MINFRAME64), %sp

	/*
	 * We'll need to access cpusave and gregs for our final state-saving,
	 * so stash them where they won't be clobbered by function calls.
	 */
	mov	%g6, %l6
	mov	%g5, %l5

	/*
	 * Now that we have a stack, we can save %stick. %stick isn't present
	 * on all of our target machines, so we have to use the KDI to fetch
	 * the current value (if any). We save %tick here too, because they
	 * get lonely if separated.
	 */
	rd	%tick, %g4
	stx	%g4, [%l5 + KREG_OFF(KREG_TICK)]

	call	kmdb_kdi_get_stick
	add	%l5, KREG_OFF(KREG_STICK), %o0
	brnz	%o0, 1f
	nop

	/*
	 * We found %stick. Set the %stick-found flag.
	 */
	ld	[%l5 + GREG_FLAGS], %g1
	or	%g1, MDB_V9GREG_F_STICK_VALID, %g1
	st	%g1, [%l5 + GREG_FLAGS]

1:	/*
	 * Enter the next phase of debugger startup
	 */
	call	kaif_debugger_entry
	mov	%l6, %o0

	ba,a	kaif_resume		! expects valid %l5, %l6

	/*NOTREACHED*/

	SET_SIZE(kaif_trap_common)

#endif	/* !__lint */
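	/*
	 * For reference, kaif_trap_common boils down to roughly the following
	 * pseudocode (illustrative only, not exact):
	 *
	 *	drop to TL=0 and locate this CPU's save area;
	 *	if (this CPU is not the current master)
	 *		save %tba;
	 *	save %pil (then raise it to 15), %y, the window state
	 *	    registers, every register window, and the FP state;
	 *	switch to this CPU's debugger stack;
	 *	save %tick, and %stick if the KDI can supply it;
	 *	kaif_debugger_entry(cpusave);
	 *	kaif_resume();
	 */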
	/*
	 * The primary debugger-entry routine. This routine is the trap handler
	 * for programmed entry, watchpoints, and breakpoints, and is entered
	 * at TL=1, on the kernel's trap table, with PSTATE.AG set. It is used
	 * in the following cases:
	 *
	 * 1. (common case) - intentional entry by a CPU intending to be the
	 *    master. The CPU may have encountered a watchpoint, a breakpoint,
	 *    or a programmed entry trap, and is *NOT* coming from OBP. The CPU
	 *    is allowed direct entry into the debugger.
	 *
	 * 2. A CPU was cross-called into kaif_slave_entry while executing in
	 *    OBP. The CPU was released, but a programmed entry trap was
	 *    activated, designed to be encountered when the cross-called CPU
	 *    returned from OBP. The CPU is allowed to enter the debugger. We
	 *    don't know how many other CPUs need the PROM-return trap, so
	 *    we'll leave it active until everyone arrives.
	 *
	 * The remaining cases deal with instances where OBP got in the way.
	 * We can't allow a CPU into the debugger if it is currently executing
	 * in OBP, as chaos would ensue (OBP isn't re-entrant). As such, we
	 * have to ask the CPU to come back when it has finished with OBP (or
	 * vice versa). Depending on the circumstances, we'll need to dance
	 * around it.
	 *
	 * 3. A bystander CPU runs into the PROM-return trap described above
	 *    before being cross-called. We'll let it into the debugger now,
	 *    as it would have ended up here anyway.
	 *
	 * 4. An innocent CPU encounters a watchpoint while executing in OBP.
	 *    We can't let the CPU into the debugger for the reasons given
	 *    above, so we'll need to ignore the watchpoint. We disable
	 *    watchpoints, place a programmed-entry trap at %npc, and release
	 *    the CPU.
	 *
	 * 5. The stepping CPU described in case 4 encounters the programmed-
	 *    entry trap. We'll remove the trap, re-enable watchpoints, and
	 *    send the CPU on its way.
	 *
	 * 6. Someone encounters a breakpoint or a programmed-entry trap in
	 *    OBP. We can step through watchpoints, as the text hasn't been
	 *    touched. With breakpoints and programmed-entry traps, however,
	 *    chances are high that someone replaced an instruction in the
	 *    text with the trap instruction. We don't know where they stashed
	 *    the (presumably) saved instruction, so we can't step through it.
	 *    This is a very unlikely scenario, so we're going to throw up our
	 *    hands, and will attempt to trigger a panic.
	 */

#if defined(__lint)
void
kaif_ktrap(void)
{
}
#else /* __lint */

	ENTRY_NP(kaif_ktrap)

	set	1f, %g7
	set	kaif_cpusave_getaddr, %g6
	jmp	%g6
	nop
1:	/* CPU save area address is now in %g6 */

	ADVANCE_CRUMB_POINTER(%g6, %g1, %g2)
	ADD_CRUMB_CONST(%g6, KRM_SRC, KAIF_CRUMB_SRC_MAIN, %g1, %g2)

	rdpr	%tpc, %g2
	set	OFW_START_ADDR, %g1
	cmp	%g2, %g1
	bl	main_not_in_obp
	nop

	set	OFW_END_ADDR, %g1
	cmp	%g2, %g1
	bg	main_not_in_obp
	nop

	/*
	 * The CPU was in OBP when it encountered the trap that sent it here.
	 * See cases 3-6 above.
	 */
	rdpr	%tt, %g4
	cmp	%g4, T_PA_WATCHPOINT
	be	main_obp_wapt

	cmp	%g4, T_VA_WATCHPOINT
	be	main_obp_wapt

	cmp	%g4, T_SOFTWARE_TRAP|ST_KMDB_TRAP
	be	main_obp_progent

	cmp	%g4, T_SOFTWARE_TRAP|ST_BREAKPOINT
	be	main_obp_breakpoint
	nop

	/* This shouldn't happen - all valid traps should be checked above */
1:	ldx	[%g0], %g0
	ba,a	1b
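	/*
	 * (The load through %g0 above is a deliberate dereference of address
	 * zero: if an unexpected trap type ever lands here, we would rather
	 * take an obvious fault than spin silently.)
	 */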
	/* Cases 1 and 2 - head into the debugger, via the state-saver */
main_not_in_obp:
	ADD_CRUMB_FLAG(%g6, KAIF_CRUMB_F_MAIN_NORMAL, %g1, %g2, %g3)

	/* A formality - we know we came from kernel context */
	mov	MMU_PCONTEXT, %g3
	ldxa	[%g3]ASI_MMU_CTX, %g2	! ASI_MMU_CTX == ASI_DMMU for sun4u
	stx	%g2, [%g6 + KRS_MMU_PCONTEXT]

#ifndef sun4v
	/*
	 * If OBP supports preserving the Solaris kernel context register,
	 * shift the nucleus page size bits into the primary and set the
	 * context to 0. Otherwise, flush the TLBs and clear the entire
	 * context register, since OBP will clear it without flushing on
	 * entry to OBP.
	 */
	sethi	%hi(kmdb_prom_preserve_kctx), %g4
	ld	[%g4 + %lo(kmdb_prom_preserve_kctx)], %g4
	brz	%g4, 1f
	nop
	/*
	 * Move the nucleus context page size bits into the primary context
	 * page size bits and set the context to 0. Use %g4 as a temporary.
	 */
	KAIF_MAKE_NEW_CTXREG(%g2, %g4)	! new context reg in %g2

	stxa	%g2, [%g3]ASI_MMU_CTX
	membar	#Sync
	ba	2f
	nop
1:
#endif /* sun4v */
	/*
	 * Flush TLBs and clear the primary context register.
	 */
	KAIF_DEMAP_TLB_ALL(%g4)
	stxa	%g0, [%g3]ASI_MMU_CTX	! ASI_MMU_CTX == ASI_DMMU for sun4u
	membar	#Sync
2:

	set	kaif_trap_common, %g7

	KAIF_SAVE_TL1_STATE();
	/*NOTREACHED*/

	/* Case 4 - watchpoint in OBP - step over it */
main_obp_wapt:
	ADD_CRUMB_FLAG(%g6, KAIF_CRUMB_F_MAIN_OBPWAPT, %g1, %g2, %g3)

#ifndef sun4v
	/* Turn off watchpoints */
	ldxa	[%g0]ASI_LSU, %g4
	stx	%g4, [%g6 + KRS_LSUCR_SAVE]
	setx	KAIF_LSUCTL_WAPT_MASK, %g1, %g3
	andn	%g4, %g3, %g4
	stxa	%g4, [%g0]ASI_LSU
#endif /* sun4v */

	/*
	 * SPARC only supports data watchpoints, and we know that only certain
	 * types of instructions, none of which include branches, can trigger
	 * memory reads. As such, we can simply place a breakpoint at %npc.
	 */
	rdpr	%tnpc, %g4
	ld	[%g4], %g3
	st	%g3, [%g6 + KRS_INSTR_SAVE]
	set	0x91d0207d, %g3		! ta ST_KMDB_TRAP
	st	%g3, [%g4]
	flush	%g4
	membar	#Sync

	/* Back into the pool */
	retry

	/* Case 5 - programmed entry from wapt step - restore and resume */
main_obp_progent:
	ADD_CRUMB_FLAG(%g6, KAIF_CRUMB_F_MAIN_OBPPENT, %g1, %g2, %g3)

	rdpr	%tpc, %g4
	ld	[%g6 + KRS_INSTR_SAVE], %g3
	brz	%g3, main_obp_fail	! we don't have any open wapt steps
	nop

	st	%g3, [%g4]
	membar	#Sync
	st	%g0, [%g6 + KRS_INSTR_SAVE]

	/* XXX I$ invalidate? */

#ifndef sun4v
	ldx	[%g6 + KRS_LSUCR_SAVE], %g4
	stxa	%g4, [%g0]ASI_LSU
#endif /* sun4v */

	/* Restored - throw it back */
	retry

	/* Case 6 - breakpoint or unclaimed programmed entry */
main_obp_breakpoint:
main_obp_fail:
	ldx	[%g0], %g0
	ba,a	main_obp_fail

	SET_SIZE(kaif_ktrap)

#endif /* __lint */
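	/*
	 * Illustrative summary of the kaif_ktrap dispatch above (pseudocode,
	 * not exact):
	 *
	 *	if (%tpc is outside [OFW_START_ADDR, OFW_END_ADDR])
	 *		goto main_not_in_obp;			(cases 1, 2)
	 *	switch (%tt) {
	 *	case T_PA_WATCHPOINT:
	 *	case T_VA_WATCHPOINT:
	 *		goto main_obp_wapt;			(case 4)
	 *	case T_SOFTWARE_TRAP|ST_KMDB_TRAP:
	 *		goto main_obp_progent;			(case 5)
	 *	case T_SOFTWARE_TRAP|ST_BREAKPOINT:
	 *		goto main_obp_breakpoint;		(case 6)
	 *	default:
	 *		force a fault (shouldn't happen);
	 *	}
	 */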
	/*
	 * The target for slave-stopping cross calls. This routine is entered
	 * at TL=1, on the kernel's trap table, with PSTATE.IG set. CPUs
	 * entering this handler will fall into one of the following
	 * categories:
	 *
	 * 1. (common case) - the CPU was not executing in OBP when it entered
	 *    this routine. It will be allowed direct entry into the debugger.
	 *
	 * 2. The CPU had already entered the debugger, and was spinning in
	 *    the slave loop (at TL=0) when it was cross-called by the
	 *    debugger's world-stopper. This could happen if two CPUs
	 *    encountered breakpoints simultaneously, triggering a race to
	 *    become master. One would lose, and would already be in the slave
	 *    loop when the master started trying to stop the world. The CPU
	 *    is already where it is supposed to be, so we ignore the trap.
	 *
	 * 3. The CPU was executing in OBP. We can't allow it to go directly
	 *    into the debugger (see the kaif_ktrap comment), but we want to
	 *    grab it when it leaves OBP. Arm the PROM-return programmed entry
	 *    trap and release the CPU.
	 */

#if defined(__lint)
void
kaif_slave_entry(void)
{
}
#else /* __lint */

	ENTRY_NP(kaif_slave_entry)

	/*
	 * We may have arrived from userland. We need to be in kernel context
	 * before we can save state, so we'll stash the entry MMU context in
	 * %g4 until we've calculated the save area address and have decided
	 * whether this CPU gets released or heads into the debugger.
	 */
	mov	MMU_PCONTEXT, %g3
	ldxa	[%g3]ASI_MMU_CTX, %g4

#ifndef sun4v
	/*
	 * If OBP supports preserving the Solaris kernel context register,
	 * shift the nucleus page size bits into the primary and set the
	 * context to 0. Otherwise, flush the TLBs and clear the entire
	 * context register, since OBP will clear it without flushing on
	 * entry to OBP.
	 */
	sethi	%hi(kmdb_prom_preserve_kctx), %g1
	ld	[%g1 + %lo(kmdb_prom_preserve_kctx)], %g1
	brz	%g1, 1f
	nop
	/*
	 * Move the nucleus context page size bits into the primary context
	 * page size bits and set the context to 0. Use %g1 as a temporary.
	 */
	mov	%g4, %g2
	KAIF_MAKE_NEW_CTXREG(%g2, %g1)	! new context reg in %g2

	stxa	%g2, [%g3]ASI_MMU_CTX
	membar	#Sync
	ba	2f
	nop
1:
#endif /* sun4v */
	/*
	 * Flush TLBs and clear the primary context register.
	 */
	KAIF_DEMAP_TLB_ALL(%g1)
	stxa	%g0, [%g3]ASI_MMU_CTX
	membar	#Sync
2:

	set	1f, %g7
	set	kaif_cpusave_getaddr, %g6
	jmp	%g6			! kaif_cpusave_getaddr preserves %g4
	nop
1:	/* CPU save area address is now in %g6 */

	ADVANCE_CRUMB_POINTER(%g6, %g1, %g2)
	ADD_CRUMB_CONST(%g6, KRM_SRC, KAIF_CRUMB_SRC_IVEC, %g1, %g2)

	ld	[%g6 + KRS_CPU_STATE], %g5
	cmp	%g5, KAIF_CPU_STATE_NONE
	be,a	ivec_not_already_in_debugger

	/* Case 2 - CPU was already stopped, so ignore this cross call */
	ADD_CRUMB_FLAG(%g6, KAIF_CRUMB_F_IVEC_REENTER, %g1, %g2, %g3)

	/* Restore MMU_PCONTEXT, which we set on the way in */
	mov	MMU_PCONTEXT, %g3
	KAIF_DEMAP_TLB_ALL(%g2)
	stxa	%g4, [%g3]ASI_MMU_CTX
	membar	#Sync

	retry

ivec_not_already_in_debugger:
	brnz	%g4, ivec_not_in_obp	! OBP runs in kernel context
	nop

	/* Were we in OBP's memory range? */
	rdpr	%tpc, %g2
	set	OFW_START_ADDR, %g1
	cmp	%g2, %g1
	bl	ivec_not_in_obp
	nop

	set	OFW_END_ADDR, %g1
	cmp	%g2, %g1
	bg	ivec_not_in_obp
	nop

	/* Case 3 - CPU in OBP - arm the return trap, release the CPU */
	ADD_CRUMB_FLAG(%g6, KAIF_CRUMB_F_IVEC_INOBP, %g1, %g2, %g3)

	set	kaif_promexitarmp, %g1
	ldx	[%g1], %g1
	mov	1, %g2
	st	%g2, [%g1]

	/* We were already in kernel context, so no need to restore it */

	retry

	/* Case 1 - head into the debugger, via the state-saver */
ivec_not_in_obp:
	ADD_CRUMB_FLAG(%g6, KAIF_CRUMB_F_IVEC_NORMAL, %g1, %g2, %g3)

	stx	%g4, [%g6 + KRS_MMU_PCONTEXT]

	set	kaif_trap_common, %g7

	KAIF_SAVE_TL1_STATE_SLAVE();

	/*NOTREACHED*/

	SET_SIZE(kaif_slave_entry)

#endif /* __lint */
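	/*
	 * Illustrative summary of the slave-entry dispatch above (pseudocode,
	 * not exact):
	 *
	 *	if (KRS_CPU_STATE != KAIF_CPU_STATE_NONE)
	 *		restore MMU_PCONTEXT and retry;		(case 2)
	 *	else if (entry context == 0 && %tpc is inside OBP)
	 *		*kaif_promexitarmp = 1; retry;		(case 3)
	 *	else
	 *		save state and enter the debugger;	(case 1)
	 */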
	/*
	 * The trap handler used when we're on OBP's trap table, which is used
	 * during initial system startup, while the debugger itself is
	 * executing, and when we're single-stepping. When a trap occurs that
	 * OBP can't handle, it will execute our Forth word (kmdb_callback).
	 * Our word saves TL1 state, much as kaif_save_tl1_state does for the
	 * other handlers. kmdb_callback will then cause control to be
	 * transferred to this routine.
	 *
	 * CPUs entering this routine will fall into the following categories:
	 *
	 * 1. The system is booting, and we encountered a trap that OBP
	 *    couldn't handle. We save the CPU's state, and let it into the
	 *    debugger.
	 *
	 * 2. We were single-stepping this CPU, causing it to encounter one of
	 *    the breakpoint traps we installed for stepping. We save the
	 *    CPU's state, and let it back into the debugger.
	 *
	 * 3. We took a trap while executing in the debugger. Before saving
	 *    this CPU's state in the CPU-specific save area, we will let the
	 *    debugger handle the trap. If the trap resulted from a debugger
	 *    problem, and if the user decides to use the debugger to debug
	 *    itself, we'll overwrite the existing state with the state saved
	 *    by the Forth word, after which we'll let the CPU enter the
	 *    debugger.
	 *
	 * NOTE: The Forth word and the copying code here *must* be kept in
	 * sync with kaif_save_tl1_state.
	 */

#if defined(__lint)
void
kaif_trap_obp(void)
{
}
#else /* __lint */

	ENTRY_NP(kaif_trap_obp)

	set	1f, %g7
	set	kaif_cpusave_getaddr, %g6
	jmp	%g6
	nop
1:	/* CPU save area address is now in %g6 */
	add	%g6, KRS_GREGS + GREG_KREGS, %g5

	ADVANCE_CRUMB_POINTER(%g6, %g1, %g2)
	ADD_CRUMB_CONST(%g6, KRM_SRC, KAIF_CRUMB_SRC_OBP, %g1, %g2)
	ADD_CRUMB_FLAG(%g6, KAIF_CRUMB_F_OBP_NORMAL, %g1, %g2, %g3)

	set	kaif_cb_save, %g4
	add	%g4, KRS_GREGS + GREG_KREGS, %g4
	ldx	[%g4 + KREG_OFF(KREG_PC)], %g1
	ADD_CRUMB(%g6, KRM_PC, %g1, %g2)
	ldx	[%g4 + KREG_OFF(KREG_TT)], %g1
	ADD_CRUMB(%g6, KRM_TT, %g1, %g2)

	ALTENTRY(kaif_trap_obp_saved)

	/*
	 * Are we here because of a trap we took while running the debugger,
	 * or because of one we took while executing kernel code?
	 */
	set	kaif_dseg, %g1
	ldx	[%g1], %g1
	cmp	%sp, %g1
	bl	obp_normal_entry
	nop

	set	kaif_dseg_lim, %g1
	ldx	[%g1], %g1
	cmp	%sp, %g1
	bg	obp_normal_entry
	nop

	/*
	 * The debugger fault code will need access to saved copies of the
	 * outs and %y if the user elects to panic. We'll also need the saved
	 * outs if they decide to debug the fault with the debugger, as we'll
	 * have trashed the outs while asking the user how to handle the
	 * fault.
	 */
	set	kaif_cb_save, %g4
	add	%g4, KRS_GREGS + GREG_KREGS, %g4
	rd	%y, %g2
	stx	%g2, [%g4 + KREG_OFF(KREG_Y)]
	stx	%o0, [%g4 + KREG_OFF(KREG_O0)]
	stx	%o1, [%g4 + KREG_OFF(KREG_O1)]
	stx	%o2, [%g4 + KREG_OFF(KREG_O2)]
	stx	%o3, [%g4 + KREG_OFF(KREG_O3)]
	stx	%o4, [%g4 + KREG_OFF(KREG_O4)]
	stx	%o5, [%g4 + KREG_OFF(KREG_O5)]
	stx	%o6, [%g4 + KREG_OFF(KREG_O6)]
	stx	%o7, [%g4 + KREG_OFF(KREG_O7)]

	/*
	 * Receipt of an XIR while on the debugger's stack is likely to mean
	 * that something has gone very wrong in the debugger. Our safest
	 * course of action is to bail out to OBP, thus preserving as much
	 * state as we can.
	 */
	ldx	[%g4 + KREG_OFF(KREG_TT)], %g1
	cmp	%g1, T_XIR
	bne	1f
	nop

	call	prom_enter_mon
	nop

1:
	/*
	 * We're still on the debugger's stack, as we were when we took the
	 * fault. Re-arm the Forth word and transfer control to the debugger.
	 */
	call	kaif_prom_rearm
	nop

	KAIF_CPU_INDEX			! index returned in %g1, clobbers %g2, %g7
	mov	%g1, %o4

	set	kaif_cb_save, %g5
	ldx	[%g5 + KREG_OFF(KREG_TT)], %o0
	ldx	[%g5 + KREG_OFF(KREG_PC)], %o1
	ldx	[%g5 + KREG_OFF(KREG_NPC)], %o2
	call	kmdb_dpi_handle_fault
	mov	%sp, %o3
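	/*
	 * The call above hands kmdb_dpi_handle_fault the saved trap type
	 * (%o0), %pc (%o1), and %npc (%o2), along with our current stack
	 * pointer (%o3) and this CPU's index (%o4).
	 */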
	/*
	 * If we return from kmdb_dpi_handle_fault, the trap was due to a
	 * problem in the debugger, and the user has elected to diagnose it
	 * using the debugger. When we pass back into the normal kaif_trap_obp
	 * flow, we'll save the debugger fault state over the state saved when
	 * we initially entered the debugger. Debugger fault handling trashed
	 * the out registers, so we'll need to restore them before returning
	 * to the normal flow.
	 */
	set	kaif_cb_save, %g4
	ldx	[%g4 + KREG_OFF(KREG_O0)], %o0
	ldx	[%g4 + KREG_OFF(KREG_O1)], %o1
	ldx	[%g4 + KREG_OFF(KREG_O2)], %o2
	ldx	[%g4 + KREG_OFF(KREG_O3)], %o3
	ldx	[%g4 + KREG_OFF(KREG_O4)], %o4
	ldx	[%g4 + KREG_OFF(KREG_O5)], %o5
	ldx	[%g4 + KREG_OFF(KREG_O6)], %o6
	ldx	[%g4 + KREG_OFF(KREG_O7)], %o7

obp_normal_entry:

	set	1f, %g7
	set	kaif_cpusave_getaddr, %g6
	jmp	%g6
	nop
1:	/* CPU save area address is now in %g6 */
	add	%g6, KRS_GREGS + GREG_KREGS, %g5

	/*
	 * Register state has been saved in kaif_cb_save. Now that we're sure
	 * we're going into the debugger using this state, copy it to the
	 * CPU-specific save area.
	 */
	set	kaif_cb_save, %g4
	add	%g4, KRS_GREGS + GREG_KREGS, %g3

	KAIF_COPY_KREG(%g3, %g5, KREG_PC, %g1)
	KAIF_COPY_KREG(%g3, %g5, KREG_NPC, %g1)
	KAIF_COPY_KREG(%g3, %g5, KREG_G1, %g1)
	KAIF_COPY_KREG(%g3, %g5, KREG_G2, %g1)
	KAIF_COPY_KREG(%g3, %g5, KREG_G3, %g1)
	KAIF_COPY_KREG(%g3, %g5, KREG_G4, %g1)
	KAIF_COPY_KREG(%g3, %g5, KREG_G5, %g1)
	KAIF_COPY_KREG(%g3, %g5, KREG_G6, %g1)
	KAIF_COPY_KREG(%g3, %g5, KREG_G7, %g1)
	KAIF_COPY_KREG(%g3, %g5, KREG_TT, %g1)

	ldx	[%g4 + KRS_TSTATE], %g1
	stx	%g1, [%g6 + KRS_TSTATE]

	/* A formality */
	mov	MMU_PCONTEXT, %g3
	ldxa	[%g3]ASI_MMU_CTX, %g2
	stx	%g2, [%g6 + KRS_MMU_PCONTEXT]

#ifndef sun4v
	/*
	 * If OBP supports preserving the Solaris kernel context register,
	 * shift the nucleus page size bits into the primary and set the
	 * context to 0. Otherwise, flush the TLBs and clear the entire
	 * context register, since OBP will clear it without flushing on
	 * entry to OBP.
	 */
	sethi	%hi(kmdb_prom_preserve_kctx), %g4
	ld	[%g4 + %lo(kmdb_prom_preserve_kctx)], %g4
	brz	%g4, 1f
	nop
	/*
	 * Move the nucleus context page size bits into the primary context
	 * page size bits and set the context to 0. Use %g4 as a temporary.
	 */
	KAIF_MAKE_NEW_CTXREG(%g2, %g4)	! new context reg in %g2

	stxa	%g2, [%g3]ASI_MMU_CTX
	membar	#Sync
	ba	2f
	nop
1:
#endif /* sun4v */
	/*
	 * Flush TLBs and clear the primary context register.
	 */
	KAIF_DEMAP_TLB_ALL(%g4)
	stxa	%g0, [%g3]ASI_MMU_CTX	! ASI_MMU_CTX == ASI_DMMU for sun4u
	membar	#Sync
2:

	ba,a	kaif_trap_common

	SET_SIZE(kaif_trap_obp_saved)
	SET_SIZE(kaif_trap_obp)

#endif /* __lint */
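	/*
	 * Illustrative summary of kaif_trap_obp_saved (pseudocode, not
	 * exact):
	 *
	 *	if (%sp is outside [kaif_dseg, kaif_dseg_lim])
	 *		goto obp_normal_entry;		(kernel-code fault)
	 *	save %y and the outs in kaif_cb_save;
	 *	if (saved %tt == T_XIR)
	 *		prom_enter_mon();
	 *	kaif_prom_rearm();
	 *	kmdb_dpi_handle_fault(...);		(may not return)
	 *	restore the outs and fall into obp_normal_entry, which copies
	 *	    kaif_cb_save into the per-CPU save area and joins
	 *	    kaif_trap_common.
	 */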
#if defined(lint)
void
kaif_dtrap(void)
{
}
#else /* lint */

	/*
	 * This routine is used to handle all "failed" traps. A trap is
	 * considered to have failed if it was not able to return to the code
	 * that caused the trap. A DTLB miss handler, for example, fails if it
	 * can't find a translation for a given address. Some traps always
	 * fail, because the thing that caused the trap is an actual problem
	 * that can't be resolved by the handler. Examples of these include
	 * alignment and DTLB protection faults.
	 */

	ENTRY_NP(kaif_dtrap)

	SET_PSTATE_COMMON_AG(%g1);
	SET_GL(1);			/* set %gl = 1 */

	KAIF_CPU_GETADDR_TL1	/* uses label 1, %g1, %g2, %g7, ret in %g6 */

	ADVANCE_CRUMB_POINTER(%g6, %g1, %g2)
	ADD_CRUMB_CONST(%g6, KRM_SRC, KAIF_CRUMB_SRC_OBP, %g1, %g2)
	ADD_CRUMB_FLAG(%g6, KAIF_CRUMB_F_OBP_REVECT, %g1, %g2, %g3)

	rdpr	%tt, %g1
	ADD_CRUMB(%g6, KRM_TT, %g1, %g2)
	rdpr	%tpc, %g1
	ADD_CRUMB(%g6, KRM_PC, %g1, %g2)

	set	kaif_cb_save, %g6

	set	1f, %g7
	ba	kaif_save_tl1_state
	rdpr	%pstate, %g4

1:	wrpr	%g0, PTSTATE_KERN_COMMON, %pstate
	wrpr	%g0, %tl
	SET_GL(0);

	ba	kaif_trap_obp_saved
	nop

	SET_SIZE(kaif_dtrap)

#endif /* lint */