1/* $FreeBSD$ */ 2/* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $ */ 3 4/*- 5 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 6 * Copyright (C) 1995, 1996 TooLs GmbH. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by TooLs GmbH. 20 * 4. The name of TooLs GmbH may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35/* 36 * NOTICE: This is not a standalone file. to use it, #include it in 37 * your port's locore.S, like so: 38 * 39 * #include <powerpc/aim/trap_subr.S> 40 */ 41 42/* Locate the per-CPU data structure */ 43#define GET_CPUINFO(r) \ 44 mfsprg0 r 45#define GET_TOCBASE(r) \ 46 li r,TRAP_TOCBASE; /* Magic address for TOC */ \ 47 ld r,0(r) 48 49/* 50 * Restore SRs for a pmap 51 * 52 * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache 53 */ 54 55/* 56 * User SRs are loaded through a pointer to the current pmap. 57 */ 58restore_usersrs: 59 GET_CPUINFO(%r28) 60 ld %r28,PC_USERSLB(%r28) 61 li %r29, 0 /* Set the counter to zero */ 62 63 slbia 64 slbmfee %r31,%r29 65 clrrdi %r31,%r31,28 66 slbie %r31 671: ld %r31, 0(%r28) /* Load SLB entry pointer */ 68 cmpdi %r31, 0 /* If NULL, stop */ 69 beqlr 70 71 ld %r30, 0(%r31) /* Load SLBV */ 72 ld %r31, 8(%r31) /* Load SLBE */ 73 or %r31, %r31, %r29 /* Set SLBE slot */ 74 slbmte %r30, %r31 /* Install SLB entry */ 75 76 addi %r28, %r28, 8 /* Advance pointer */ 77 addi %r29, %r29, 1 78 b 1b /* Repeat */ 79 80/* 81 * Kernel SRs are loaded directly from the PCPU fields 82 */ 83restore_kernsrs: 84 GET_CPUINFO(%r28) 85 addi %r28,%r28,PC_KERNSLB 86 li %r29, 0 /* Set the counter to zero */ 87 88 slbia 89 slbmfee %r31,%r29 90 clrrdi %r31,%r31,28 91 slbie %r31 921: cmpdi %r29, USER_SLB_SLOT /* Skip the user slot */ 93 beq- 2f 94 95 ld %r31, 8(%r28) /* Load SLBE */ 96 cmpdi %r31, 0 /* If SLBE is not valid, stop */ 97 beqlr 98 ld %r30, 0(%r28) /* Load SLBV */ 99 slbmte %r30, %r31 /* Install SLB entry */ 100 1012: addi %r28, %r28, 16 /* Advance pointer */ 102 addi %r29, %r29, 1 103 cmpdi %r29, 64 /* Repeat if we are not at the end */ 104 blt 1b 105 blr 106 107/* 108 * FRAME_SETUP assumes: 109 * SPRG1 SP (1) 110 * SPRG3 trap type 111 * savearea r27-r31,DAR,DSISR (DAR & DSISR only for DSI traps) 112 * r28 LR 113 * r29 CR 114 * r30 scratch 115 * r31 scratch 116 * r1 kernel stack 117 * SRR0/1 as at start of trap 118 * 119 * NOTE: SPRG1 is never used while the MMU is on, making it safe to reuse 120 * in any real-mode fault handler, including those handling double faults. 121 */ 122#define FRAME_SETUP(savearea) \ 123/* Have to enable translation to allow access of kernel stack: */ \ 124 GET_CPUINFO(%r31); \ 125 mfsrr0 %r30; \ 126 std %r30,(savearea+CPUSAVE_SRR0)(%r31); /* save SRR0 */ \ 127 mfsrr1 %r30; \ 128 std %r30,(savearea+CPUSAVE_SRR1)(%r31); /* save SRR1 */ \ 129 mfsprg1 %r31; /* get saved SP (clears SPRG1) */ \ 130 mfmsr %r30; \ 131 ori %r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */ \ 132 mtmsr %r30; /* stack can now be accessed */ \ 133 isync; \ 134 stdu %r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \ 135 std %r0, FRAME_0+48(%r1); /* save r0 in the trapframe */ \ 136 std %r31,FRAME_1+48(%r1); /* save SP " " */ \ 137 std %r2, FRAME_2+48(%r1); /* save r2 " " */ \ 138 std %r28,FRAME_LR+48(%r1); /* save LR " " */ \ 139 std %r29,FRAME_CR+48(%r1); /* save CR " " */ \ 140 GET_CPUINFO(%r2); \ 141 ld %r27,(savearea+CPUSAVE_R27)(%r2); /* get saved r27 */ \ 142 ld %r28,(savearea+CPUSAVE_R28)(%r2); /* get saved r28 */ \ 143 ld %r29,(savearea+CPUSAVE_R29)(%r2); /* get saved r29 */ \ 144 ld %r30,(savearea+CPUSAVE_R30)(%r2); /* get saved r30 */ \ 145 ld %r31,(savearea+CPUSAVE_R31)(%r2); /* get saved r31 */ \ 146 std %r3, FRAME_3+48(%r1); /* save r3-r31 */ \ 147 std %r4, FRAME_4+48(%r1); \ 148 std %r5, FRAME_5+48(%r1); \ 149 std %r6, FRAME_6+48(%r1); \ 150 std %r7, FRAME_7+48(%r1); \ 151 std %r8, FRAME_8+48(%r1); \ 152 std %r9, FRAME_9+48(%r1); \ 153 std %r10, FRAME_10+48(%r1); \ 154 std %r11, FRAME_11+48(%r1); \ 155 std %r12, FRAME_12+48(%r1); \ 156 std %r13, FRAME_13+48(%r1); \ 157 std %r14, FRAME_14+48(%r1); \ 158 std %r15, FRAME_15+48(%r1); \ 159 std %r16, FRAME_16+48(%r1); \ 160 std %r17, FRAME_17+48(%r1); \ 161 std %r18, FRAME_18+48(%r1); \ 162 std %r19, FRAME_19+48(%r1); \ 163 std %r20, FRAME_20+48(%r1); \ 164 std %r21, FRAME_21+48(%r1); \ 165 std %r22, FRAME_22+48(%r1); \ 166 std %r23, FRAME_23+48(%r1); \ 167 std %r24, FRAME_24+48(%r1); \ 168 std %r25, FRAME_25+48(%r1); \ 169 std %r26, FRAME_26+48(%r1); \ 170 std %r27, FRAME_27+48(%r1); \ 171 std %r28, FRAME_28+48(%r1); \ 172 std %r29, FRAME_29+48(%r1); \ 173 std %r30, FRAME_30+48(%r1); \ 174 std %r31, FRAME_31+48(%r1); \ 175 ld %r28,(savearea+CPUSAVE_AIM_DAR)(%r2); /* saved DAR */ \ 176 ld %r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\ 177 ld %r30,(savearea+CPUSAVE_SRR0)(%r2); /* saved SRR0 */ \ 178 ld %r31,(savearea+CPUSAVE_SRR1)(%r2); /* saved SRR1 */ \ 179 mfxer %r3; \ 180 mfctr %r4; \ 181 mfsprg3 %r5; \ 182 std %r3, FRAME_XER+48(1); /* save xer/ctr/exc */ \ 183 std %r4, FRAME_CTR+48(1); \ 184 std %r5, FRAME_EXC+48(1); \ 185 std %r28,FRAME_AIM_DAR+48(1); \ 186 std %r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */ \ 187 std %r30,FRAME_SRR0+48(1); \ 188 std %r31,FRAME_SRR1+48(1); \ 189 ld %r13,PC_CURTHREAD(%r2) /* set kernel curthread */ 190 191#define FRAME_LEAVE(savearea) \ 192/* Disable exceptions: */ \ 193 mfmsr %r2; \ 194 andi. %r2,%r2,~PSL_EE@l; \ 195 mtmsr %r2; \ 196 isync; \ 197/* Now restore regs: */ \ 198 ld %r2,FRAME_SRR0+48(%r1); \ 199 ld %r3,FRAME_SRR1+48(%r1); \ 200 ld %r4,FRAME_CTR+48(%r1); \ 201 ld %r5,FRAME_XER+48(%r1); \ 202 ld %r6,FRAME_LR+48(%r1); \ 203 GET_CPUINFO(%r7); \ 204 std %r2,(savearea+CPUSAVE_SRR0)(%r7); /* save SRR0 */ \ 205 std %r3,(savearea+CPUSAVE_SRR1)(%r7); /* save SRR1 */ \ 206 ld %r7,FRAME_CR+48(%r1); \ 207 mtctr %r4; \ 208 mtxer %r5; \ 209 mtlr %r6; \ 210 mtsprg2 %r7; /* save cr */ \ 211 ld %r31,FRAME_31+48(%r1); /* restore r0-31 */ \ 212 ld %r30,FRAME_30+48(%r1); \ 213 ld %r29,FRAME_29+48(%r1); \ 214 ld %r28,FRAME_28+48(%r1); \ 215 ld %r27,FRAME_27+48(%r1); \ 216 ld %r26,FRAME_26+48(%r1); \ 217 ld %r25,FRAME_25+48(%r1); \ 218 ld %r24,FRAME_24+48(%r1); \ 219 ld %r23,FRAME_23+48(%r1); \ 220 ld %r22,FRAME_22+48(%r1); \ 221 ld %r21,FRAME_21+48(%r1); \ 222 ld %r20,FRAME_20+48(%r1); \ 223 ld %r19,FRAME_19+48(%r1); \ 224 ld %r18,FRAME_18+48(%r1); \ 225 ld %r17,FRAME_17+48(%r1); \ 226 ld %r16,FRAME_16+48(%r1); \ 227 ld %r15,FRAME_15+48(%r1); \ 228 ld %r14,FRAME_14+48(%r1); \ 229 ld %r13,FRAME_13+48(%r1); \ 230 ld %r12,FRAME_12+48(%r1); \ 231 ld %r11,FRAME_11+48(%r1); \ 232 ld %r10,FRAME_10+48(%r1); \ 233 ld %r9, FRAME_9+48(%r1); \ 234 ld %r8, FRAME_8+48(%r1); \ 235 ld %r7, FRAME_7+48(%r1); \ 236 ld %r6, FRAME_6+48(%r1); \ 237 ld %r5, FRAME_5+48(%r1); \ 238 ld %r4, FRAME_4+48(%r1); \ 239 ld %r3, FRAME_3+48(%r1); \ 240 ld %r2, FRAME_2+48(%r1); \ 241 ld %r0, FRAME_0+48(%r1); \ 242 ld %r1, FRAME_1+48(%r1); \ 243/* Can't touch %r1 from here on */ \ 244 mtsprg3 %r3; /* save r3 */ \ 245/* Disable translation, machine check and recoverability: */ \ 246 mfmsr %r3; \ 247 andi. %r3,%r3,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l; \ 248 mtmsr %r3; \ 249 isync; \ 250/* Decide whether we return to user mode: */ \ 251 GET_CPUINFO(%r3); \ 252 ld %r3,(savearea+CPUSAVE_SRR1)(%r3); \ 253 mtcr %r3; \ 254 bf 17,1f; /* branch if PSL_PR is false */ \ 255/* Restore user SRs */ \ 256 GET_CPUINFO(%r3); \ 257 std %r27,(savearea+CPUSAVE_R27)(%r3); \ 258 std %r28,(savearea+CPUSAVE_R28)(%r3); \ 259 std %r29,(savearea+CPUSAVE_R29)(%r3); \ 260 std %r30,(savearea+CPUSAVE_R30)(%r3); \ 261 std %r31,(savearea+CPUSAVE_R31)(%r3); \ 262 mflr %r27; /* preserve LR */ \ 263 bl restore_usersrs; /* uses r28-r31 */ \ 264 mtlr %r27; \ 265 ld %r31,(savearea+CPUSAVE_R31)(%r3); \ 266 ld %r30,(savearea+CPUSAVE_R30)(%r3); \ 267 ld %r29,(savearea+CPUSAVE_R29)(%r3); \ 268 ld %r28,(savearea+CPUSAVE_R28)(%r3); \ 269 ld %r27,(savearea+CPUSAVE_R27)(%r3); \ 2701: mfsprg2 %r3; /* restore cr */ \ 271 mtcr %r3; \ 272 GET_CPUINFO(%r3); \ 273 ld %r3,(savearea+CPUSAVE_SRR0)(%r3); /* restore srr0 */ \ 274 mtsrr0 %r3; \ 275 GET_CPUINFO(%r3); \ 276 ld %r3,(savearea+CPUSAVE_SRR1)(%r3); /* restore srr1 */ \ 277 mtsrr1 %r3; \ 278 mfsprg3 %r3 /* restore r3 */ 279 280#ifdef KDTRACE_HOOKS 281 .data 282 .globl dtrace_invop_calltrap_addr 283 .align 8 284 .type dtrace_invop_calltrap_addr, @object 285 .size dtrace_invop_calltrap_addr, 8 286dtrace_invop_calltrap_addr: 287 .word 0 288 .word 0 289 290 .text 291#endif 292 293/* 294 * Processor reset exception handler. These are typically 295 * the first instructions the processor executes after a 296 * software reset. We do this in two bits so that we are 297 * not still hanging around in the trap handling region 298 * once the MMU is turned on. 299 */ 300 .globl CNAME(rstcode), CNAME(rstcodeend) 301CNAME(rstcode): 302 /* Explicitly set MSR[SF] */ 303 mfmsr %r9 304 li %r8,1 305 insrdi %r9,%r8,1,0 306 mtmsrd %r9 307 isync 308 bl 1f 309 .llong cpu_reset 3101: mflr %r9 311 ld %r9,0(%r9) 312 mtlr %r9 313 314 blr 315CNAME(rstcodeend): 316 317cpu_reset: 318 GET_TOCBASE(%r2) 319 320 ld %r1,TOC_REF(tmpstk)(%r2) /* get new SP */ 321 addi %r1,%r1,(TMPSTKSZ-48) 322 323 bl CNAME(cpudep_ap_early_bootstrap) /* Set PCPU */ 324 nop 325 lis %r3,1@l 326 bl CNAME(pmap_cpu_bootstrap) /* Turn on virtual memory */ 327 nop 328 bl CNAME(cpudep_ap_bootstrap) /* Set up PCPU and stack */ 329 nop 330 mr %r1,%r3 /* Use new stack */ 331 bl CNAME(cpudep_ap_setup) 332 nop 333 GET_CPUINFO(%r5) 334 ld %r3,(PC_RESTORE)(%r5) 335 cmpldi %cr0,%r3,0 336 beq %cr0,2f 337 nop 338 li %r4,1 339 b CNAME(longjmp) 340 nop 3412: 342#ifdef SMP 343 bl CNAME(machdep_ap_bootstrap) /* And away! */ 344 nop 345#endif 346 347 /* Should not be reached */ 3489: 349 b 9b 350 351/* 352 * This code gets copied to all the trap vectors 353 * (except ISI/DSI, ALI, and the interrupts). Has to fit in 8 instructions! 354 */ 355 356 .globl CNAME(trapcode),CNAME(trapcodeend) 357 .p2align 3 358CNAME(trapcode): 359 mtsprg1 %r1 /* save SP */ 360 mflr %r1 /* Save the old LR in r1 */ 361 mtsprg2 %r1 /* And then in SPRG2 */ 362 ld %r1,TRAP_GENTRAP(0) 363 mtlr %r1 364 li %r1, 0xe0 /* How to get the vector from LR */ 365 blrl /* Branch to generictrap */ 366CNAME(trapcodeend): 367 368/* 369 * For SLB misses: do special things for the kernel 370 * 371 * Note: SPRG1 is always safe to overwrite any time the MMU is on, which is 372 * the only time this can be called. 373 */ 374 .globl CNAME(slbtrap),CNAME(slbtrapend) 375 .p2align 3 376CNAME(slbtrap): 377 mtsprg1 %r1 /* save SP */ 378 GET_CPUINFO(%r1) 379 std %r2,(PC_SLBSAVE+16)(%r1) 380 mfcr %r2 /* save CR */ 381 std %r2,(PC_SLBSAVE+104)(%r1) 382 mfsrr1 %r2 /* test kernel mode */ 383 mtcr %r2 384 bf 17,2f /* branch if PSL_PR is false */ 385 /* User mode */ 386 ld %r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */ 387 mtcr %r2 388 ld %r2,(PC_SLBSAVE+16)(%r1) /* Restore R2 */ 389 mflr %r1 /* Save the old LR in r1 */ 390 mtsprg2 %r1 /* And then in SPRG2 */ 391 /* 52 bytes so far */ 392 bl 1f 393 .llong generictrap 3941: mflr %r1 395 ld %r1,0(%r1) 396 mtlr %r1 397 li %r1, 0x80 /* How to get the vector from LR */ 398 blrl /* Branch to generictrap */ 399 /* 84 bytes */ 4002: mflr %r2 /* Save the old LR in r2 */ 401 nop 402 bl 3f /* Begin dance to jump to kern_slbtrap*/ 403 .llong kern_slbtrap 4043: mflr %r1 405 ld %r1,0(%r1) 406 mtlr %r1 407 GET_CPUINFO(%r1) 408 blrl /* 124 bytes -- 4 to spare */ 409CNAME(slbtrapend): 410 411kern_slbtrap: 412 std %r2,(PC_SLBSAVE+136)(%r1) /* old LR */ 413 std %r3,(PC_SLBSAVE+24)(%r1) /* save R3 */ 414 415 /* Check if this needs to be handled as a regular trap (userseg miss) */ 416 mflr %r2 417 andi. %r2,%r2,0xff80 418 cmpwi %r2,0x380 419 bne 1f 420 mfdar %r2 421 b 2f 4221: mfsrr0 %r2 4232: /* r2 now contains the fault address */ 424 lis %r3,SEGMENT_MASK@highesta 425 ori %r3,%r3,SEGMENT_MASK@highera 426 sldi %r3,%r3,32 427 oris %r3,%r3,SEGMENT_MASK@ha 428 ori %r3,%r3,SEGMENT_MASK@l 429 and %r2,%r2,%r3 /* R2 = segment base address */ 430 lis %r3,USER_ADDR@highesta 431 ori %r3,%r3,USER_ADDR@highera 432 sldi %r3,%r3,32 433 oris %r3,%r3,USER_ADDR@ha 434 ori %r3,%r3,USER_ADDR@l 435 cmpd %r2,%r3 /* Compare fault base to USER_ADDR */ 436 bne 3f 437 438 /* User seg miss, handle as a regular trap */ 439 ld %r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */ 440 mtcr %r2 441 ld %r2,(PC_SLBSAVE+16)(%r1) /* Restore R2,R3 */ 442 ld %r3,(PC_SLBSAVE+24)(%r1) 443 ld %r1,(PC_SLBSAVE+136)(%r1) /* Save the old LR in r1 */ 444 mtsprg2 %r1 /* And then in SPRG2 */ 445 li %r1, 0x80 /* How to get the vector from LR */ 446 b generictrap /* Retain old LR using b */ 447 4483: /* Real kernel SLB miss */ 449 std %r0,(PC_SLBSAVE+0)(%r1) /* free all volatile regs */ 450 mfsprg1 %r2 /* Old R1 */ 451 std %r2,(PC_SLBSAVE+8)(%r1) 452 /* R2,R3 already saved */ 453 std %r4,(PC_SLBSAVE+32)(%r1) 454 std %r5,(PC_SLBSAVE+40)(%r1) 455 std %r6,(PC_SLBSAVE+48)(%r1) 456 std %r7,(PC_SLBSAVE+56)(%r1) 457 std %r8,(PC_SLBSAVE+64)(%r1) 458 std %r9,(PC_SLBSAVE+72)(%r1) 459 std %r10,(PC_SLBSAVE+80)(%r1) 460 std %r11,(PC_SLBSAVE+88)(%r1) 461 std %r12,(PC_SLBSAVE+96)(%r1) 462 /* CR already saved */ 463 mfxer %r2 /* save XER */ 464 std %r2,(PC_SLBSAVE+112)(%r1) 465 mflr %r2 /* save LR (SP already saved) */ 466 std %r2,(PC_SLBSAVE+120)(%r1) 467 mfctr %r2 /* save CTR */ 468 std %r2,(PC_SLBSAVE+128)(%r1) 469 470 /* Call handler */ 471 addi %r1,%r1,PC_SLBSTACK-48+1024 472 li %r2,~15 473 and %r1,%r1,%r2 474 GET_TOCBASE(%r2) 475 mflr %r3 476 andi. %r3,%r3,0xff80 477 mfdar %r4 478 mfsrr0 %r5 479 bl handle_kernel_slb_spill 480 nop 481 482 /* Save r28-31, restore r4-r12 */ 483 GET_CPUINFO(%r1) 484 ld %r4,(PC_SLBSAVE+32)(%r1) 485 ld %r5,(PC_SLBSAVE+40)(%r1) 486 ld %r6,(PC_SLBSAVE+48)(%r1) 487 ld %r7,(PC_SLBSAVE+56)(%r1) 488 ld %r8,(PC_SLBSAVE+64)(%r1) 489 ld %r9,(PC_SLBSAVE+72)(%r1) 490 ld %r10,(PC_SLBSAVE+80)(%r1) 491 ld %r11,(PC_SLBSAVE+88)(%r1) 492 ld %r12,(PC_SLBSAVE+96)(%r1) 493 std %r28,(PC_SLBSAVE+64)(%r1) 494 std %r29,(PC_SLBSAVE+72)(%r1) 495 std %r30,(PC_SLBSAVE+80)(%r1) 496 std %r31,(PC_SLBSAVE+88)(%r1) 497 498 /* Restore kernel mapping */ 499 bl restore_kernsrs 500 501 /* Restore remaining registers */ 502 ld %r28,(PC_SLBSAVE+64)(%r1) 503 ld %r29,(PC_SLBSAVE+72)(%r1) 504 ld %r30,(PC_SLBSAVE+80)(%r1) 505 ld %r31,(PC_SLBSAVE+88)(%r1) 506 507 ld %r2,(PC_SLBSAVE+104)(%r1) 508 mtcr %r2 509 ld %r2,(PC_SLBSAVE+112)(%r1) 510 mtxer %r2 511 ld %r2,(PC_SLBSAVE+120)(%r1) 512 mtlr %r2 513 ld %r2,(PC_SLBSAVE+128)(%r1) 514 mtctr %r2 515 ld %r2,(PC_SLBSAVE+136)(%r1) 516 mtlr %r2 517 518 /* Restore r0-r3 */ 519 ld %r0,(PC_SLBSAVE+0)(%r1) 520 ld %r2,(PC_SLBSAVE+16)(%r1) 521 ld %r3,(PC_SLBSAVE+24)(%r1) 522 mfsprg1 %r1 523 524 /* Back to whatever we were doing */ 525 rfid 526 527/* 528 * For ALI: has to save DSISR and DAR 529 */ 530 .globl CNAME(alitrap),CNAME(aliend) 531CNAME(alitrap): 532 mtsprg1 %r1 /* save SP */ 533 GET_CPUINFO(%r1) 534 std %r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1) /* free r27-r31 */ 535 std %r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1) 536 std %r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1) 537 std %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1) 538 std %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1) 539 mfdar %r30 540 mfdsisr %r31 541 std %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) 542 std %r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) 543 mfsprg1 %r1 /* restore SP, in case of branch */ 544 mflr %r28 /* save LR */ 545 mfcr %r29 /* save CR */ 546 547 /* Begin dance to branch to s_trap in a bit */ 548 b 1f 549 .p2align 3 5501: nop 551 bl 1f 552 .llong s_trap 5531: mflr %r31 554 ld %r31,0(%r31) 555 mtlr %r31 556 557 /* Put our exception vector in SPRG3 */ 558 li %r31, EXC_ALI 559 mtsprg3 %r31 560 561 /* Test whether we already had PR set */ 562 mfsrr1 %r31 563 mtcr %r31 564 blrl 565CNAME(aliend): 566 567/* 568 * Similar to the above for DSI 569 * Has to handle standard pagetable spills 570 */ 571 .globl CNAME(dsitrap),CNAME(dsiend) 572CNAME(dsitrap): 573 mtsprg1 %r1 /* save SP */ 574 GET_CPUINFO(%r1) 575 std %r27,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* free r27-r31 */ 576 std %r28,(PC_DISISAVE+CPUSAVE_R28)(%r1) 577 std %r29,(PC_DISISAVE+CPUSAVE_R29)(%r1) 578 std %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) 579 std %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) 580 mfcr %r29 /* save CR */ 581 mfxer %r30 /* save XER */ 582 mtsprg2 %r30 /* in SPRG2 */ 583 mfsrr1 %r31 /* test kernel mode */ 584 mtcr %r31 585 mflr %r28 /* save LR (SP already saved) */ 586 bl 1f /* Begin branching to disitrap */ 587 .llong disitrap 5881: mflr %r1 589 ld %r1,0(%r1) 590 mtlr %r1 591 blrl /* Branch to generictrap */ 592CNAME(dsiend): 593 594/* 595 * Preamble code for DSI/ISI traps 596 */ 597disitrap: 598 /* Write the trap vector to SPRG3 by computing LR & 0xff00 */ 599 mflr %r1 600 andi. %r1,%r1,0xff00 601 mtsprg3 %r1 602 603 GET_CPUINFO(%r1) 604 ld %r31,(PC_DISISAVE+CPUSAVE_R27)(%r1) 605 std %r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1) 606 ld %r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) 607 std %r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1) 608 ld %r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) 609 std %r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1) 610 ld %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) 611 std %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1) 612 ld %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) 613 std %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1) 614 mfdar %r30 615 mfdsisr %r31 616 std %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) 617 std %r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) 618 619#ifdef KDB 620 /* Try to detect a kernel stack overflow */ 621 mfsrr1 %r31 622 mtcr %r31 623 bt 17,realtrap /* branch is user mode */ 624 mfsprg1 %r31 /* get old SP */ 625 clrrdi %r31,%r31,12 /* Round SP down to nearest page */ 626 sub. %r30,%r31,%r30 /* SP - DAR */ 627 bge 1f 628 neg %r30,%r30 /* modulo value */ 6291: cmpldi %cr0,%r30,4096 /* is DAR within a page of SP? */ 630 bge %cr0,realtrap /* no, too far away. */ 631 632 /* Now convert this DSI into a DDB trap. */ 633 GET_CPUINFO(%r1) 634 ld %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) /* get DAR */ 635 std %r30,(PC_DBSAVE +CPUSAVE_AIM_DAR)(%r1) /* save DAR */ 636 ld %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */ 637 std %r30,(PC_DBSAVE +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */ 638 ld %r31,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* get r27 */ 639 std %r31,(PC_DBSAVE +CPUSAVE_R27)(%r1) /* save r27 */ 640 ld %r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* get r28 */ 641 std %r30,(PC_DBSAVE +CPUSAVE_R28)(%r1) /* save r28 */ 642 ld %r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) /* get r29 */ 643 std %r31,(PC_DBSAVE +CPUSAVE_R29)(%r1) /* save r29 */ 644 ld %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) /* get r30 */ 645 std %r30,(PC_DBSAVE +CPUSAVE_R30)(%r1) /* save r30 */ 646 ld %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) /* get r31 */ 647 std %r31,(PC_DBSAVE +CPUSAVE_R31)(%r1) /* save r31 */ 648 b dbtrap 649#endif 650 651 /* XXX need stack probe here */ 652realtrap: 653/* Test whether we already had PR set */ 654 mfsrr1 %r1 655 mtcr %r1 656 mfsprg1 %r1 /* restore SP (might have been 657 overwritten) */ 658 bf 17,k_trap /* branch if PSL_PR is false */ 659 GET_CPUINFO(%r1) 660 ld %r1,PC_CURPCB(%r1) 661 mr %r27,%r28 /* Save LR, r29 */ 662 mtsprg2 %r29 663 bl restore_kernsrs /* enable kernel mapping */ 664 mfsprg2 %r29 665 mr %r28,%r27 666 b s_trap 667 668/* 669 * generictrap does some standard setup for trap handling to minimize 670 * the code that need be installed in the actual vectors. It expects 671 * the following conditions. 672 * 673 * R1 - Trap vector = LR & (0xff00 | R1) 674 * SPRG1 - Original R1 contents 675 * SPRG2 - Original LR 676 */ 677 678 .globl CNAME(generictrap) 679generictrap: 680 /* Save R1 for computing the exception vector */ 681 mtsprg3 %r1 682 683 /* Save interesting registers */ 684 GET_CPUINFO(%r1) 685 std %r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1) /* free r27-r31 */ 686 std %r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1) 687 std %r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1) 688 std %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1) 689 std %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1) 690 mfdar %r30 691 std %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) 692 mfsprg1 %r1 /* restore SP, in case of branch */ 693 mfsprg2 %r28 /* save LR */ 694 mfcr %r29 /* save CR */ 695 696 /* Compute the exception vector from the link register */ 697 mfsprg3 %r31 698 ori %r31,%r31,0xff00 699 mflr %r30 700 addi %r30,%r30,-4 /* The branch instruction, not the next */ 701 and %r30,%r30,%r31 702 mtsprg3 %r30 703 704 /* Test whether we already had PR set */ 705 mfsrr1 %r31 706 mtcr %r31 707 708s_trap: 709 bf 17,k_trap /* branch if PSL_PR is false */ 710 GET_CPUINFO(%r1) 711u_trap: 712 ld %r1,PC_CURPCB(%r1) 713 mr %r27,%r28 /* Save LR, r29 */ 714 mtsprg2 %r29 715 bl restore_kernsrs /* enable kernel mapping */ 716 mfsprg2 %r29 717 mr %r28,%r27 718 719/* 720 * Now the common trap catching code. 721 */ 722k_trap: 723 FRAME_SETUP(PC_TEMPSAVE) 724/* Call C interrupt dispatcher: */ 725trapagain: 726 GET_TOCBASE(%r2) 727 addi %r3,%r1,48 728 bl CNAME(powerpc_interrupt) 729 nop 730 731 .globl CNAME(trapexit) /* backtrace code sentinel */ 732CNAME(trapexit): 733/* Disable interrupts: */ 734 mfmsr %r3 735 andi. %r3,%r3,~PSL_EE@l 736 mtmsr %r3 737 isync 738/* Test AST pending: */ 739 ld %r5,FRAME_SRR1+48(%r1) 740 mtcr %r5 741 bf 17,1f /* branch if PSL_PR is false */ 742 743 GET_CPUINFO(%r3) /* get per-CPU pointer */ 744 lwz %r4, TD_FLAGS(%r13) /* get thread flags value */ 745 lis %r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h 746 ori %r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l 747 and. %r4,%r4,%r5 748 beq 1f 749 mfmsr %r3 /* re-enable interrupts */ 750 ori %r3,%r3,PSL_EE@l 751 mtmsr %r3 752 isync 753 GET_TOCBASE(%r2) 754 addi %r3,%r1,48 755 bl CNAME(ast) 756 nop 757 .globl CNAME(asttrapexit) /* backtrace code sentinel #2 */ 758CNAME(asttrapexit): 759 b trapexit /* test ast ret value ? */ 7601: 761 FRAME_LEAVE(PC_TEMPSAVE) 762 rfid 763 764#if defined(KDB) 765/* 766 * Deliberate entry to dbtrap 767 */ 768ASENTRY_NOPROF(breakpoint) 769 mtsprg1 %r1 770 mfmsr %r3 771 mtsrr1 %r3 772 andi. %r3,%r3,~(PSL_EE|PSL_ME)@l 773 mtmsr %r3 /* disable interrupts */ 774 isync 775 GET_CPUINFO(%r3) 776 std %r27,(PC_DBSAVE+CPUSAVE_R27)(%r3) 777 std %r28,(PC_DBSAVE+CPUSAVE_R28)(%r3) 778 std %r29,(PC_DBSAVE+CPUSAVE_R29)(%r3) 779 std %r30,(PC_DBSAVE+CPUSAVE_R30)(%r3) 780 std %r31,(PC_DBSAVE+CPUSAVE_R31)(%r3) 781 mflr %r28 782 li %r29,EXC_BPT 783 mtlr %r29 784 mfcr %r29 785 mtsrr0 %r28 786 787/* 788 * Now the kdb trap catching code. 789 */ 790dbtrap: 791 /* Write the trap vector to SPRG3 by computing LR & 0xff00 */ 792 mflr %r1 793 andi. %r1,%r1,0xff00 794 mtsprg3 %r1 795 796 ld %r1,TRAP_TOCBASE(0) /* get new SP */ 797 ld %r1,TOC_REF(tmpstk)(%r1) 798 addi %r1,%r1,(TMPSTKSZ-48) 799 800 FRAME_SETUP(PC_DBSAVE) 801/* Call C trap code: */ 802 GET_TOCBASE(%r2) 803 addi %r3,%r1,48 804 bl CNAME(db_trap_glue) 805 nop 806 or. %r3,%r3,%r3 807 bne dbleave 808/* This wasn't for KDB, so switch to real trap: */ 809 ld %r3,FRAME_EXC+48(%r1) /* save exception */ 810 GET_CPUINFO(%r4) 811 std %r3,(PC_DBSAVE+CPUSAVE_R31)(%r4) 812 FRAME_LEAVE(PC_DBSAVE) 813 mtsprg1 %r1 /* prepare for entrance to realtrap */ 814 GET_CPUINFO(%r1) 815 std %r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1) 816 std %r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1) 817 std %r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1) 818 std %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1) 819 std %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1) 820 mflr %r28 821 mfcr %r29 822 ld %r31,(PC_DBSAVE+CPUSAVE_R31)(%r1) 823 mtsprg3 %r31 /* SPRG3 was clobbered by FRAME_LEAVE */ 824 mfsprg1 %r1 825 b realtrap 826dbleave: 827 FRAME_LEAVE(PC_DBSAVE) 828 rfid 829 830/* 831 * In case of KDB we want a separate trap catcher for it 832 */ 833 .globl CNAME(dblow),CNAME(dbend) 834CNAME(dblow): 835 mtsprg1 %r1 /* save SP */ 836 mtsprg2 %r29 /* save r29 */ 837 mfcr %r29 /* save CR in r29 */ 838 mfsrr1 %r1 839 mtcr %r1 840 bf 17,1f /* branch if privileged */ 841 842 /* Unprivileged case */ 843 mtcr %r29 /* put the condition register back */ 844 mfsprg2 %r29 /* ... and r29 */ 845 mflr %r1 /* save LR */ 846 mtsprg2 %r1 /* And then in SPRG2 */ 847 848 ld %r1, TRAP_GENTRAP(0) /* Get branch address */ 849 mtlr %r1 850 li %r1, 0 /* How to get the vector from LR */ 851 blrl /* Branch to generictrap */ 852 8531: 854 GET_CPUINFO(%r1) 855 std %r27,(PC_DBSAVE+CPUSAVE_R27)(%r1) /* free r27 */ 856 std %r28,(PC_DBSAVE+CPUSAVE_R28)(%r1) /* free r28 */ 857 mfsprg2 %r28 /* r29 holds cr... */ 858 std %r28,(PC_DBSAVE+CPUSAVE_R29)(%r1) /* free r29 */ 859 std %r30,(PC_DBSAVE+CPUSAVE_R30)(%r1) /* free r30 */ 860 std %r31,(PC_DBSAVE+CPUSAVE_R31)(%r1) /* free r31 */ 861 mflr %r28 /* save LR */ 862 bl 9f /* Begin branch */ 863 .llong dbtrap 8649: mflr %r1 865 ld %r1,0(%r1) 866 mtlr %r1 867 blrl /* Branch to generictrap */ 868CNAME(dbend): 869#endif /* KDB */ 870