1/* $FreeBSD$ */ 2/* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $ */ 3 4/*- 5 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 6 * Copyright (C) 1995, 1996 TooLs GmbH. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by TooLs GmbH. 20 * 4. The name of TooLs GmbH may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35/* 36 * NOTICE: This is not a standalone file. 
 to use it, #include it in
 * your port's locore.S, like so:
 *
 *	#include <powerpc/aim/trap_subr.S>
 */

/*
 * Save/restore segment registers
 */

/*
 * Restore SRs (SLB entries) for a pmap.
 *
 * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache.
 * Both routines below run with the MMU in an arbitrary state and must not
 * touch memory other than the PCPU structure reached via GET_CPUINFO.
 */

/*
 * User SLB entries are loaded through a per-CPU pointer to the current
 * pmap's SLB cache (PC_USERSLB).
 *
 * In:      nothing (PCPU located via GET_CPUINFO)
 * Clobbers: r28-r31, CR0 (cmpli), entire SLB user content
 * Returns via blr when a NULL cache entry terminates the list.
 */
restore_usersrs:
	GET_CPUINFO(%r28)
	ld	%r28,PC_USERSLB(%r28)	/* r28 = pmap's SLB cache array */
	li	%r29, 0			/* Set the slot counter to zero */

	/* Invalidate the whole SLB, then explicitly evict entry 0, which
	 * slbia leaves valid. */
	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28
	slbie	%r31
1:	ld	%r31, 0(%r28)		/* Load SLB entry pointer */
	cmpli	0, %r31, 0		/* If NULL, stop */
	beqlr

	ld	%r30, 0(%r31)		/* Load SLBV (VSID half) */
	ld	%r31, 8(%r31)		/* Load SLBE (ESID half) */
	or	%r31, %r31, %r29	/* Set SLBE slot index */
	slbmte	%r30, %r31		/* Install SLB entry */

	addi	%r28, %r28, 8		/* Advance pointer (8-byte entries) */
	addi	%r29, %r29, 1
	b	1b			/* Repeat */

/*
 * Kernel SLB entries are loaded directly from the fixed per-CPU table
 * at PC_KERNSLB (16 bytes per entry: SLBV then SLBE).
 *
 * In:      nothing (PCPU located via GET_CPUINFO)
 * Clobbers: r28-r31, CR0, entire SLB kernel content
 * Skips slot USER_SLB_SLOT so the user segment mapping survives.
 */
restore_kernsrs:
	GET_CPUINFO(%r28)
	addi	%r28,%r28,PC_KERNSLB
	li	%r29, 0			/* Set the slot counter to zero */

	/* Invalidate the whole SLB, then explicitly evict entry 0, which
	 * slbia leaves valid. */
	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28
	slbie	%r31
1:	cmpli	0, %r29, USER_SLB_SLOT	/* Skip the user slot */
	beq-	2f

	ld	%r31, 8(%r28)		/* Load SLBE */
	cmpli	0, %r31, 0		/* If SLBE is not valid, stop */
	beqlr
	ld	%r30, 0(%r28)		/* Load SLBV */
	slbmte	%r30, %r31		/* Install SLB entry */

2:	addi	%r28, %r28, 16		/* Advance pointer (16-byte entries) */
	addi	%r29, %r29, 1
	cmpli	0, %r29, 64		/* Repeat if we are not at the end */
	blt	1b
	blr

/*
 * FRAME_SETUP assumes:
 *	SPRG1		SP (r1)
 *	SPRG3		trap type
 *	savearea	r27-r31,DAR,DSISR   (DAR & DSISR only for DSI traps)
 *	r28		LR
 *	r29		CR
 *	r30		scratch
 *	r31		scratch
 *	r1		kernel stack
 *	SRR0/1		as at start of trap
 *
 * NOTE: SPRG1 is never used while the MMU is on, making it safe to reuse
 * in any real-mode fault handler, including those handling double faults.
 */
/*
 * Build a struct trapframe on the kernel stack from the registers stashed
 * in the given per-CPU savearea, turning address translation back on in
 * the process.  On exit: r1 = kernel stack with trapframe at r1+48,
 * r2 = TOC/PCPU pointer as loaded by GET_CPUINFO, r13 = curthread.
 */
#define	FRAME_SETUP(savearea)						\
/* Have to enable translation to allow access of kernel stack: */	\
	GET_CPUINFO(%r31);						\
	mfsrr0	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR0)(%r31);	/* save SRR0 */	\
	mfsrr1	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR1)(%r31);	/* save SRR1 */	\
	mfsprg1	%r31;			/* get saved SP (clears SPRG1) */ \
	mfmsr	%r30;							\
	ori	%r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */	\
	mtmsr	%r30;			/* stack can now be accessed */	\
	isync;								\
	stdu	%r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \
	std	%r0, FRAME_0+48(%r1);	/* save r0 in the trapframe */	\
	std	%r31,FRAME_1+48(%r1);	/* save SP   "  "   */		\
	std	%r2, FRAME_2+48(%r1);	/* save r2   "  "   */		\
	std	%r28,FRAME_LR+48(%r1);	/* save LR   "  "   */		\
	std	%r29,FRAME_CR+48(%r1);	/* save CR   "  "   */		\
	GET_CPUINFO(%r2);						\
	ld	%r27,(savearea+CPUSAVE_R27)(%r2);	/* get saved r27 */ \
	ld	%r28,(savearea+CPUSAVE_R28)(%r2);	/* get saved r28 */ \
	ld	%r29,(savearea+CPUSAVE_R29)(%r2);	/* get saved r29 */ \
	ld	%r30,(savearea+CPUSAVE_R30)(%r2);	/* get saved r30 */ \
	ld	%r31,(savearea+CPUSAVE_R31)(%r2);	/* get saved r31 */ \
	std	%r3, FRAME_3+48(%r1);	/* save r3-r31 */		\
	std	%r4, FRAME_4+48(%r1);					\
	std	%r5, FRAME_5+48(%r1);					\
	std	%r6, FRAME_6+48(%r1);					\
	std	%r7, FRAME_7+48(%r1);					\
	std	%r8, FRAME_8+48(%r1);					\
	std	%r9, FRAME_9+48(%r1);					\
	std	%r10, FRAME_10+48(%r1);					\
	std	%r11, FRAME_11+48(%r1);					\
	std	%r12, FRAME_12+48(%r1);					\
	std	%r13, FRAME_13+48(%r1);					\
	std	%r14, FRAME_14+48(%r1);					\
	std	%r15, FRAME_15+48(%r1);					\
	std	%r16, FRAME_16+48(%r1);					\
	std	%r17, FRAME_17+48(%r1);					\
	std	%r18, FRAME_18+48(%r1);					\
	std	%r19, FRAME_19+48(%r1);					\
	std	%r20, FRAME_20+48(%r1);					\
	std	%r21, FRAME_21+48(%r1);					\
	std	%r22, FRAME_22+48(%r1);					\
	std	%r23, FRAME_23+48(%r1);					\
	std	%r24, FRAME_24+48(%r1);					\
	std	%r25, FRAME_25+48(%r1);					\
	std	%r26, FRAME_26+48(%r1);					\
	std	%r27, FRAME_27+48(%r1);					\
	std	%r28, FRAME_28+48(%r1);					\
	std	%r29, FRAME_29+48(%r1);					\
	std	%r30, FRAME_30+48(%r1);					\
	std	%r31, FRAME_31+48(%r1);					\
	ld	%r28,(savearea+CPUSAVE_AIM_DAR)(%r2);  /* saved DAR */	\
	ld	%r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\
	ld	%r30,(savearea+CPUSAVE_SRR0)(%r2);	/* saved SRR0 */ \
	ld	%r31,(savearea+CPUSAVE_SRR1)(%r2);	/* saved SRR1 */ \
	mfxer	%r3;							\
	mfctr	%r4;							\
	mfsprg3	%r5;			/* trap vector stashed earlier */ \
	std	%r3, FRAME_XER+48(1);	/* save xer/ctr/exc */		\
	std	%r4, FRAME_CTR+48(1);					\
	std	%r5, FRAME_EXC+48(1);					\
	std	%r28,FRAME_AIM_DAR+48(1);				\
	std	%r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */	\
	std	%r30,FRAME_SRR0+48(1);					\
	std	%r31,FRAME_SRR1+48(1);					\
	ld	%r13,PC_CURTHREAD(%r2)	/* set kernel curthread */

/*
 * Unwind the trapframe built by FRAME_SETUP and prepare for rfid.
 * Disables EE/translation/ME/RI, stashes SRR0/SRR1 in the savearea
 * (they are re-read after the MMU is off), and re-installs the user
 * SLB entries if returning to user mode (PSL_PR set in saved SRR1).
 * Clobbers SPRG2/SPRG3 as scratch; leaves all GPRs restored.
 */
#define	FRAME_LEAVE(savearea)						\
/* Disable exceptions: */						\
	mfmsr	%r2;							\
	andi.	%r2,%r2,~PSL_EE@l;					\
	mtmsr	%r2;							\
	isync;								\
/* Now restore regs: */							\
	ld	%r2,FRAME_SRR0+48(%r1);					\
	ld	%r3,FRAME_SRR1+48(%r1);					\
	ld	%r4,FRAME_CTR+48(%r1);					\
	ld	%r5,FRAME_XER+48(%r1);					\
	ld	%r6,FRAME_LR+48(%r1);					\
	GET_CPUINFO(%r7);						\
	std	%r2,(savearea+CPUSAVE_SRR0)(%r7);	/* save SRR0 */	\
	std	%r3,(savearea+CPUSAVE_SRR1)(%r7);	/* save SRR1 */	\
	ld	%r7,FRAME_CR+48(%r1);					\
	mtctr	%r4;							\
	mtxer	%r5;							\
	mtlr	%r6;							\
	mtsprg2	%r7;			/* save cr */			\
	ld	%r31,FRAME_31+48(%r1);	/* restore r0-31 */		\
	ld	%r30,FRAME_30+48(%r1);					\
	ld	%r29,FRAME_29+48(%r1);					\
	ld	%r28,FRAME_28+48(%r1);					\
	ld	%r27,FRAME_27+48(%r1);					\
	ld	%r26,FRAME_26+48(%r1);					\
	ld	%r25,FRAME_25+48(%r1);					\
	ld	%r24,FRAME_24+48(%r1);					\
	ld	%r23,FRAME_23+48(%r1);					\
	ld	%r22,FRAME_22+48(%r1);					\
	ld	%r21,FRAME_21+48(%r1);					\
	ld	%r20,FRAME_20+48(%r1);					\
	ld	%r19,FRAME_19+48(%r1);					\
	ld	%r18,FRAME_18+48(%r1);					\
	ld	%r17,FRAME_17+48(%r1);					\
	ld	%r16,FRAME_16+48(%r1);					\
	ld	%r15,FRAME_15+48(%r1);					\
	ld	%r14,FRAME_14+48(%r1);					\
	ld	%r13,FRAME_13+48(%r1);					\
	ld	%r12,FRAME_12+48(%r1);					\
	ld	%r11,FRAME_11+48(%r1);					\
	ld	%r10,FRAME_10+48(%r1);					\
	ld	%r9, FRAME_9+48(%r1);					\
	ld	%r8, FRAME_8+48(%r1);					\
	ld	%r7, FRAME_7+48(%r1);					\
	ld	%r6, FRAME_6+48(%r1);					\
	ld	%r5, FRAME_5+48(%r1);					\
	ld	%r4, FRAME_4+48(%r1);					\
	ld	%r3, FRAME_3+48(%r1);					\
	ld	%r2, FRAME_2+48(%r1);					\
	ld	%r0, FRAME_0+48(%r1);					\
	ld	%r1, FRAME_1+48(%r1);					\
/* Can't touch %r1 from here on */					\
	mtsprg3	%r3;			/* save r3 */			\
/* Disable translation, machine check and recoverability: */		\
	mfmsr	%r3;							\
	andi.	%r3,%r3,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l;		\
	mtmsr	%r3;							\
	isync;								\
/* Decide whether we return to user mode: */				\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3);			\
	mtcr	%r3;							\
	bf	17,1f;			/* branch if PSL_PR is false */	\
/* Restore user SRs */							\
	GET_CPUINFO(%r3);						\
	std	%r27,(savearea+CPUSAVE_R27)(%r3);			\
	std	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	std	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	std	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	std	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	mflr	%r27;			/* preserve LR */		\
	bl	restore_usersrs;	/* uses r28-r31 */		\
	mtlr	%r27;							\
	ld	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	ld	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	ld	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	ld	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	ld	%r27,(savearea+CPUSAVE_R27)(%r3);			\
1:	mfsprg2	%r3;			/* restore cr */		\
	mtcr	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR0)(%r3); /* restore srr0 */	\
	mtsrr0	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3); /* restore srr1 */	\
	mtsrr1	%r3;							\
	mfsprg3	%r3			/* restore r3 */

#ifdef KDTRACE_HOOKS
	.data
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr, @object
	.size	dtrace_invop_calltrap_addr, 8
dtrace_invop_calltrap_addr:
	/* 8-byte slot (two 4-byte .words), patched at runtime by dtrace */
	.word	0
	.word	0

	.text
#endif

/*
 * Processor reset exception handler. These are typically
 * the first instructions the processor executes after a
 * software reset. We do this in two bits so that we are
 * not still hanging around in the trap handling region
 * once the MMU is turned on.
 */
	.globl	CNAME(rstcode), CNAME(rstcodeend)
CNAME(rstcode):
	/* Explicitly set MSR[SF] (64-bit mode) before anything else */
	mfmsr	%r9
	li	%r8,1
	insrdi	%r9,%r8,1,0
	mtmsrd	%r9
	isync
	/* Fetch the absolute address of cpu_reset via the inline .llong */
	bl	1f
	.llong	cpu_reset
1:	mflr	%r9
	ld	%r9,0(%r9)
	mtlr	%r9

	blr				/* jump out of the vector region */
CNAME(rstcodeend):

/*
 * Second stage of reset: establish TOC, a temporary stack, the MMU and
 * the per-CPU state, then hand off to the AP bootstrap (or longjmp if a
 * restore context is pending).  Never returns.
 */
cpu_reset:
	GET_TOCBASE(%r2)

	ld	%r1,TOC_REF(tmpstk)(%r2)	/* get new SP */
	addi	%r1,%r1,(TMPSTKSZ-48)

	bl	CNAME(cpudep_ap_early_bootstrap) /* Set PCPU */
	nop
	lis	%r3,1@l
	bl	CNAME(pmap_cpu_bootstrap)	/* Turn on virtual memory */
	nop
	bl	CNAME(cpudep_ap_bootstrap)	/* Set up PCPU and stack */
	nop
	mr	%r1,%r3			/* Use new stack */
	bl	CNAME(cpudep_ap_setup)
	nop
	GET_CPUINFO(%r5)
	ld	%r3,(PC_RESTORE)(%r5)
	cmpldi	%cr0,%r3,0		/* pending restore context? */
	beq	%cr0,2f
	nop
	li	%r4,1
	b	CNAME(longjmp)		/* resume the saved context */
	nop
2:
#ifdef SMP
	bl	CNAME(machdep_ap_bootstrap)	/* And away! */
	nop
#endif

	/* Should not be reached */
9:
	b	9b

/*
 * This code gets copied to all the trap vectors
 * (except ISI/DSI, ALI, and the interrupts). Has to fit in 8 instructions!
 */

	.globl	CNAME(trapcode),CNAME(trapcodeend)
	.p2align 3
CNAME(trapcode):
	mtsprg1	%r1			/* save SP */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2	%r1			/* And then in SPRG2 */
	li	%r1,TRAP_GENTRAP	/* fixed slot holding generictrap */
	ld	%r1,0(%r1)
	mtlr	%r1
	li	%r1, 0xA0		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
CNAME(trapcodeend):

/*
 * For SLB misses: do special things for the kernel
 *
 * Note: SPRG1 is always safe to overwrite any time the MMU is on, which is
 * the only time this can be called.
 */
	.globl	CNAME(slbtrap),CNAME(slbtrapend)
	.p2align 3
CNAME(slbtrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r2,(PC_SLBSAVE+16)(%r1)	/* stash r2 */
	mfcr	%r2			/* save CR */
	std	%r2,(PC_SLBSAVE+104)(%r1)
	mfsrr1	%r2			/* test kernel mode */
	mtcr	%r2
	bf	17,2f			/* branch if PSL_PR is false */
	/* User mode: treat as a regular trap */
	ld	%r2,(PC_SLBSAVE+104)(%r1)	/* Restore CR */
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+16)(%r1)	/* Restore R2 */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2	%r1			/* And then in SPRG2 */
					/* 52 bytes so far */
	bl	1f
	.llong	generictrap
1:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	li	%r1, 0x80		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
					/* 84 bytes */
2:	mflr	%r2			/* Save the old LR in r2 */
	nop
	bl	3f			/* Begin dance to jump to kern_slbtrap*/
	.llong	kern_slbtrap
3:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	GET_CPUINFO(%r1)
	blrl				/* 124 bytes -- 4 to spare */
CNAME(slbtrapend):

/*
 * Kernel-mode SLB miss handler.  Entered from slbtrap with:
 *	r1    = PCPU pointer
 *	r2    = original LR
 *	SPRG1 = original r1
 *	PC_SLBSAVE+16/104 = original r2 / CR
 * Either forwards user-segment misses to generictrap, or spills a kernel
 * SLB entry via handle_kernel_slb_spill and returns with rfid.
 */
kern_slbtrap:
	std	%r2,(PC_SLBSAVE+136)(%r1)	/* old LR */
	std	%r3,(PC_SLBSAVE+24)(%r1)	/* save R3 */

	/* Check if this needs to be handled as a regular trap (userseg miss) */
	mflr	%r2
	andi.	%r2,%r2,0xff80		/* r2 = trap vector */
	cmpwi	%r2,0x380		/* data segment miss? */
	bne	1f
	mfdar	%r2			/* DSI: fault address is DAR */
	b	2f
1:	mfsrr0	%r2			/* ISI: fault address is SRR0 */
2:	/* r2 now contains the fault address */
	lis	%r3,SEGMENT_MASK@highesta
	ori	%r3,%r3,SEGMENT_MASK@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,SEGMENT_MASK@ha
	ori	%r3,%r3,SEGMENT_MASK@l
	and	%r2,%r2,%r3		/* R2 = segment base address */
	lis	%r3,USER_ADDR@highesta
	ori	%r3,%r3,USER_ADDR@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,USER_ADDR@ha
	ori	%r3,%r3,USER_ADDR@l
	cmpd	%r2,%r3			/* Compare fault base to USER_ADDR */
	bne	3f

	/* User seg miss, handle as a regular trap */
	ld	%r2,(PC_SLBSAVE+104)(%r1)	/* Restore CR */
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+16)(%r1)	/* Restore R2,R3 */
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	ld	%r1,(PC_SLBSAVE+136)(%r1)	/* Save the old LR in r1 */
	mtsprg2	%r1			/* And then in SPRG2 */
	li	%r1, 0x80		/* How to get the vector from LR */
	b	generictrap		/* Retain old LR using b */

3:	/* Real kernel SLB miss */
	std	%r0,(PC_SLBSAVE+0)(%r1)	/* free all volatile regs */
	mfsprg1	%r2			/* Old R1 */
	std	%r2,(PC_SLBSAVE+8)(%r1)
	/* R2,R3 already saved */
	std	%r4,(PC_SLBSAVE+32)(%r1)
	std	%r5,(PC_SLBSAVE+40)(%r1)
	std	%r6,(PC_SLBSAVE+48)(%r1)
	std	%r7,(PC_SLBSAVE+56)(%r1)
	std	%r8,(PC_SLBSAVE+64)(%r1)
	std	%r9,(PC_SLBSAVE+72)(%r1)
	std	%r10,(PC_SLBSAVE+80)(%r1)
	std	%r11,(PC_SLBSAVE+88)(%r1)
	std	%r12,(PC_SLBSAVE+96)(%r1)
	/* CR already saved */
	mfxer	%r2			/* save XER */
	std	%r2,(PC_SLBSAVE+112)(%r1)
	mflr	%r2			/* save LR (SP already saved) */
	std	%r2,(PC_SLBSAVE+120)(%r1)
	mfctr	%r2			/* save CTR */
	std	%r2,(PC_SLBSAVE+128)(%r1)

	/* Call handler: switch to the per-CPU SLB spill stack, 16-aligned */
	addi	%r1,%r1,PC_SLBSTACK-48+1024
	li	%r2,~15
	and	%r1,%r1,%r2
	GET_TOCBASE(%r2)
	mflr	%r3
	andi.	%r3,%r3,0xff80		/* arg0 = trap vector */
	mfdar	%r4			/* arg1 = DAR */
	mfsrr0	%r5			/* arg2 = faulting PC */
	bl	handle_kernel_slb_spill
	nop

	/* Save r28-31 (reusing the now-free r8-r11 slots), restore r4-r12 */
	GET_CPUINFO(%r1)
	ld	%r4,(PC_SLBSAVE+32)(%r1)
	ld	%r5,(PC_SLBSAVE+40)(%r1)
	ld	%r6,(PC_SLBSAVE+48)(%r1)
	ld	%r7,(PC_SLBSAVE+56)(%r1)
	ld	%r8,(PC_SLBSAVE+64)(%r1)
	ld	%r9,(PC_SLBSAVE+72)(%r1)
	ld	%r10,(PC_SLBSAVE+80)(%r1)
	ld	%r11,(PC_SLBSAVE+88)(%r1)
	ld	%r12,(PC_SLBSAVE+96)(%r1)
	std	%r28,(PC_SLBSAVE+64)(%r1)
	std	%r29,(PC_SLBSAVE+72)(%r1)
	std	%r30,(PC_SLBSAVE+80)(%r1)
	std	%r31,(PC_SLBSAVE+88)(%r1)

	/* Restore kernel mapping */
	bl	restore_kernsrs

	/* Restore remaining registers */
	ld	%r28,(PC_SLBSAVE+64)(%r1)
	ld	%r29,(PC_SLBSAVE+72)(%r1)
	ld	%r30,(PC_SLBSAVE+80)(%r1)
	ld	%r31,(PC_SLBSAVE+88)(%r1)

	ld	%r2,(PC_SLBSAVE+104)(%r1)
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+112)(%r1)
	mtxer	%r2
	ld	%r2,(PC_SLBSAVE+120)(%r1)
	mtlr	%r2
	ld	%r2,(PC_SLBSAVE+128)(%r1)
	mtctr	%r2
	ld	%r2,(PC_SLBSAVE+136)(%r1)
	mtlr	%r2			/* final LR = pre-trap LR from entry */

	/* Restore r0-r3 */
	ld	%r0,(PC_SLBSAVE+0)(%r1)
	ld	%r2,(PC_SLBSAVE+16)(%r1)
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	mfsprg1	%r1

	/* Back to whatever we were doing */
	rfid

/*
 * For ALI: has to save DSISR and DAR
 */
	.globl	CNAME(alitrap),CNAME(aliend)
CNAME(alitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mflr	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	/* Begin dance to branch to s_trap in a bit */
	b	1f
	.p2align 3
1:	nop
	bl	1f
	.llong	s_trap
1:	mflr	%r31
	ld	%r31,0(%r31)
	mtlr	%r31

	/* Put our exception vector in SPRG3 */
	li	%r31, EXC_ALI
	mtsprg3	%r31

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31
	blrl				/* Branch to s_trap */
CNAME(aliend):

/*
 * Similar to the above for DSI
 * Has to handle standard pagetable spills
 */
	.globl	CNAME(dsitrap),CNAME(dsiend)
CNAME(dsitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_DISISAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	mfcr	%r29			/* save CR */
	mfxer	%r30			/* save XER */
	mtsprg2	%r30			/* in SPRG2 */
	mfsrr1	%r31			/* test kernel mode */
	mtcr	%r31
	mflr	%r28			/* save LR (SP already saved) */
	bl	1f			/* Begin branching to disitrap */
	.llong	disitrap
1:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	blrl				/* Branch to disitrap */
CNAME(dsiend):

/*
 * Preamble code for DSI/ISI traps:
 * copies the DISISAVE stash into TEMPSAVE (where FRAME_SETUP expects it)
 * along with DAR/DSISR, and optionally diverts kernel stack overflows
 * into KDB.
 */
disitrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	GET_CPUINFO(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)

#ifdef KDB
	/* Try to detect a kernel stack overflow */
	mfsrr1	%r31
	mtcr	%r31
	bt	17,realtrap		/* branch if user mode */
	mfsprg1	%r31			/* get old SP */
	clrrdi	%r31,%r31,12		/* Round SP down to nearest page */
	sub.	%r30,%r31,%r30		/* SP - DAR */
	bge	1f
	neg	%r30,%r30		/* modulo value */
1:	cmpldi	%cr0,%r30,4096		/* is DAR within a page of SP? */
	bge	%cr0,realtrap		/* no, too far away. */

	/* Now convert this DSI into a DDB trap. */
	GET_CPUINFO(%r1)
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)	/* get DAR */
	std	%r30,(PC_DBSAVE +CPUSAVE_AIM_DAR)(%r1)	/* save DAR */
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */
	std	%r30,(PC_DBSAVE +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)	/* get  r27 */
	std	%r31,(PC_DBSAVE +CPUSAVE_R27)(%r1)	/* save r27 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)	/* get  r28 */
	std	%r30,(PC_DBSAVE +CPUSAVE_R28)(%r1)	/* save r28 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)	/* get  r29 */
	std	%r31,(PC_DBSAVE +CPUSAVE_R29)(%r1)	/* save r29 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)	/* get  r30 */
	std	%r30,(PC_DBSAVE +CPUSAVE_R30)(%r1)	/* save r30 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)	/* get  r31 */
	std	%r31,(PC_DBSAVE +CPUSAVE_R31)(%r1)	/* save r31 */
	b	dbtrap
#endif

	/* XXX need stack probe here */
realtrap:
/* Test whether we already had PR set */
	mfsrr1	%r1
	mtcr	%r1
	mfsprg1	%r1		/* restore SP (might have been overwritten) */
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
	ld	%r1,PC_CURPCB(%r1)
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
	b	s_trap

/*
 * generictrap does some standard setup for trap handling to minimize
 * the code that needs to be installed in the actual vectors. It expects
 * the following conditions.
 *
 * R1 - Trap vector = LR & (0xff00 | R1)
 * SPRG1 - Original R1 contents
 * SPRG2 - Original LR
 */

	.globl	CNAME(trapcode2)
trapcode2:
generictrap:
	/* Save R1 for computing the exception vector */
	mtsprg3	%r1

	/* Save interesting registers */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mfsprg2	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	/* Compute the exception vector from the link register */
	mfsprg3	%r31
	ori	%r31,%r31,0xff00
	mflr	%r30
	addi	%r30,%r30,-4	/* The branch instruction, not the next */
	and	%r30,%r30,%r31
	mtsprg3	%r30

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31

s_trap:
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
u_trap:
	ld	%r1,PC_CURPCB(%r1)	/* user trap: run on kernel stack */
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27

/*
 * Now the common trap catching code.
 */
k_trap:
	FRAME_SETUP(PC_TEMPSAVE)
/* Call C interrupt dispatcher: */
trapagain:
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48		/* arg0 = trapframe pointer */
	bl	CNAME(powerpc_interrupt)
	nop

	.globl	CNAME(trapexit)	/* backtrace code sentinel */
CNAME(trapexit):
/* Disable interrupts: */
	mfmsr	%r3
	andi.	%r3,%r3,~PSL_EE@l
	mtmsr	%r3
	isync
/* Test AST pending (only when returning to user mode): */
	ld	%r5,FRAME_SRR1+48(%r1)
	mtcr	%r5
	bf	17,1f			/* branch if PSL_PR is false */

	GET_CPUINFO(%r3)		/* get per-CPU pointer */
	lwz	%r4, TD_FLAGS(%r13)	/* get thread flags value */
	lis	%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h
	ori	%r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l
	and.	%r4,%r4,%r5
	beq	1f
	mfmsr	%r3			/* re-enable interrupts */
	ori	%r3,%r3,PSL_EE@l
	mtmsr	%r3
	isync
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(ast)
	nop
	.globl	CNAME(asttrapexit)	/* backtrace code sentinel #2 */
CNAME(asttrapexit):
	b	trapexit		/* test ast ret value ? */
1:
	FRAME_LEAVE(PC_TEMPSAVE)
	rfid

#if defined(KDB)
/*
 * Deliberate entry to dbtrap: fake an EXC_BPT trap from the current
 * context (SRR0 = caller's LR, SRR1 = current MSR) with interrupts off.
 */
ASENTRY_NOPROF(breakpoint)
	mtsprg1	%r1
	mfmsr	%r3
	mtsrr1	%r3
	andi.	%r3,%r3,~(PSL_EE|PSL_ME)@l
	mtmsr	%r3			/* disable interrupts */
	isync
	GET_CPUINFO(%r3)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r3)
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r3)
	std	%r29,(PC_DBSAVE+CPUSAVE_R29)(%r3)
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r3)
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r3)
	mflr	%r28
	li	%r29,EXC_BPT
	mtlr	%r29			/* so dbtrap computes vector EXC_BPT */
	mfcr	%r29
	mtsrr0	%r28

/*
 * Now the kdb trap catching code.
 */
dbtrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	li	%r1,TRAP_TOCBASE	/* get new SP */
	ld	%r1,0(%r1)
	ld	%r1,TOC_REF(tmpstk)(%r1)
	addi	%r1,%r1,(TMPSTKSZ-48)

	FRAME_SETUP(PC_DBSAVE)
/* Call C trap code: */
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(db_trap_glue)
	nop
	or.	%r3,%r3,%r3		/* nonzero => KDB handled it */
	bne	dbleave
/* This wasn't for KDB, so switch to real trap: */
	ld	%r3,FRAME_EXC+48(%r1)	/* save exception */
	GET_CPUINFO(%r4)
	std	%r3,(PC_DBSAVE+CPUSAVE_R31)(%r4)
	FRAME_LEAVE(PC_DBSAVE)
	mtsprg1	%r1			/* prepare for entrance to realtrap */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mflr	%r28
	mfcr	%r29
	ld	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)
	mtsprg3	%r31		/* SPRG3 was clobbered by FRAME_LEAVE */
	mfsprg1	%r1
	b	realtrap
dbleave:
	FRAME_LEAVE(PC_DBSAVE)
	rfid

/*
 * In case of KDB we want a separate trap catcher for it
 */
	.globl	CNAME(dblow),CNAME(dbend)
CNAME(dblow):
	mtsprg1	%r1			/* save SP */
	mtsprg2	%r29			/* save r29 */
	mfcr	%r29			/* save CR in r29 */
	mfsrr1	%r1
	mtcr	%r1
	bf	17,1f			/* branch if privileged */

	/* Unprivileged case */
	mtcr	%r29			/* put the condition register back */
	mfsprg2	%r29			/* ... and r29 */
	mflr	%r1			/* save LR */
	mtsprg2	%r1			/* And then in SPRG2 */

	nop				/* Begin branching to generictrap */
	bl	9f
	.llong	generictrap
9:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	li	%r1, 0			/* How to get the vector from LR */
	blrl				/* Branch to generictrap */

1:
	GET_CPUINFO(%r1)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r1)	/* free r27 */
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r1)	/* free r28 */
	mfsprg2	%r28			/* r29 holds cr... */
	std	%r28,(PC_DBSAVE+CPUSAVE_R29)(%r1)	/* free r29 */
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1)	/* free r30 */
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)	/* free r31 */
	mflr	%r28			/* save LR */
	bl	9f			/* Begin branch */
	.llong	dbtrap
9:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	blrl				/* Branch to dbtrap */
CNAME(dbend):
#endif /* KDB */