/*
 *  arch/ppc64/kernel/head.S
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC-64 platform, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iseries/lpar_map.h>
#include <asm/thread_info.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * SPRG Usage
 *
 * Register	Definition
 *
 * SPRG0	reserved for hypervisor
 * SPRG1	temp - used to save gpr
 * SPRG2	temp - used to save gpr
 * SPRG3	virt addr of paca
 */

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */

	.text
	.globl _stext
_stext:
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* Catch branch to 0 in real mode */
	trap

#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE

	/*
	 * At offset 0x28 and 0x30 are offsets to the mschunks_map
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger)
	 */
	.llong mschunks_map-KERNELBASE
	.llong 0		/* pidhash-KERNELBASE SFRXXX */

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl	embedded_sysmap_start
embedded_sysmap_start:
	.llong	0
	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl	embedded_sysmap_end
embedded_sysmap_end:
	.llong	0

#endif /* CONFIG_PPC_ISERIES */

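/*
 * Roughly, the hold/release handshake implemented by the spinloop words
 * and __secondary_hold below behaves like the following C sketch; the
 * name my_phys_cpu_id is just a stand-in for the value passed in r3 and
 * kept in r24, and the release side is whoever stores 1 to the spinloop
 * word once the kernel has been relocated:
 *
 *	secondary:	__secondary_hold_acknowledge = my_phys_cpu_id;
 *			while (__secondary_hold_spinloop != 1)
 *				;
 *			pSeries_secondary_smp_init(my_phys_cpu_id);
 *
 * Only a sketch of the flow; the real code is the assembly that follows.
 */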
	/* Secondary processors spin on this value until it goes to 1. */
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	   */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

	. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our linux cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.	  */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

#ifdef CONFIG_HMT
	SET_REG_IMMEDIATE(r4, .hmt_init)
	mtctr	r4
	bctr
#else
#ifdef CONFIG_SMP
	LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init)
	mtctr	r4
	mr	r3,r24
	bctr
#else
	BUG_OPCODE
#endif
#endif

/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
	.text

/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_DAR		48
#define EX_DSISR	56
#define EX_CCR		60
#define EX_R3		64
#define EX_LR		72

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal SET_REG_IMMEDIATE macro. Normally we just need the
 * low halfword of the address, but for Kdump we need the whole low
 * word.
 */
#ifdef CONFIG_CRASH_DUMP
#define LOAD_HANDLER(reg, label)				\
	oris	reg,reg,(label)@h;	/* virt addr of handler ... */ \
	ori	reg,reg,(label)@l;	/* .. and the rest */
#else
#define LOAD_HANDLER(reg, label)				\
	ori	reg,reg,(label)@l;	/* virt addr of handler ... */
#endif

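/*
 * A hedged summary of what the pSeries prolog below does, in pseudo-code;
 * the real sequence interleaves the steps to hide SPR access latency, and
 * SPRG1 is assumed to already hold the interrupted r13:
 *
 *	r13 = paca (from SPRG3);
 *	save r9-r12 and the old r13 into paca->area;
 *	r9 = old CR;  r11 = old SRR0;  r12 = old SRR1;
 *	SRR0 = virtual address of label;
 *	SRR1 = current MSR with IR, DR and RI set;
 *	rfid;		turns the MMU on and jumps to the common handler
 */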
#define EXCEPTION_PROLOG_PSERIES(area, label)				\
	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRN_SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9;							\
	clrrdi	r12,r13,32;		/* get high part of &label */	\
	mfmsr	r10;							\
	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
	LOAD_HANDLER(r12,label)						\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SPRN_SRR0,r12;						\
	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
	mtspr	SPRN_SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */

/*
 * This is the start of the interrupt handlers for iSeries
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES_1(area)				\
	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRN_SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9

#define EXCEPTION_PROLOG_ISERIES_2					\
	mfmsr	r10;							\
	ld	r12,PACALPPACAPTR(r13);					\
	ld	r11,LPPACASRR0(r12);					\
	ld	r12,LPPACASRR1(r12);					\
	ori	r10,r10,MSR_RI;						\
	mtmsrd	r10,1

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				\
	andi.	r10,r12,MSR_PR;		/* See if coming from user */	\
	mr	r10,r1;			/* Save r1 */			\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */ \
	beq-	1f;							\
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use */	\
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace */ \
	bge-	cr1,bad_stack;		/* abort if it is */		\
	std	r9,_CCR(r1);		/* save CR in stackframe */	\
	std	r11,_NIP(r1);		/* save SRR0 in stackframe */	\
	std	r12,_MSR(r1);		/* save SRR1 in stackframe */	\
	std	r10,0(r1);		/* make stack chain pointer */	\
	std	r0,GPR0(r1);		/* save r0 in stackframe */	\
	std	r10,GPR1(r1);		/* save r1 in stackframe */	\
	std	r2,GPR2(r1);		/* save r2 in stackframe */	\
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	\
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe */ \
	ld	r10,area+EX_R10(r13);					\
	std	r9,GPR9(r1);						\
	std	r10,GPR10(r1);						\
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe */ \
	ld	r10,area+EX_R12(r13);					\
	ld	r11,area+EX_R13(r13);					\
	std	r9,GPR11(r1);						\
	std	r10,GPR12(r1);						\
	std	r11,GPR13(r1);						\
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	\
	mflr	r9;			/* save LR in stackframe */	\
	std	r9,_LINK(r1);						\
	mfctr	r10;			/* save CTR in stackframe */	\
	std	r10,_CTR(r1);						\
	mfspr	r11,SPRN_XER;		/* save XER in stackframe */	\
	std	r11,_XER(r1);						\
	li	r9,(n)+1;						\
	std	r9,_TRAP(r1);		/* set trap number */		\
	li	r10,0;							\
	ld	r11,exception_marker@toc(r2);				\
	std	r10,RESULT(r1);		/* clear regs->result */	\
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */

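/*
 * For reference, a sketch of the frame the macro above leaves behind;
 * the offsets are the usual asm-offsets constants (generated from
 * asm-offsets.c), not literal numbers, and this only restates what the
 * stores above do:
 *
 *	0(r1)			back chain -> old r1
 *	GPR0..GPR13		saved GPRs (r3-r8 via SAVE_4GPRS/SAVE_2GPRS)
 *	_NIP / _MSR		old SRR0 / SRR1
 *	_CCR/_LINK/_CTR/_XER	other saved state
 *	_TRAP			vector number + 1
 *	RESULT			cleared
 *	STACK_FRAME_OVERHEAD-16(r1)	"regshere" marker from the TOC
 */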
/*
 * Exception vectors.
 */
#define STD_EXCEPTION_PSERIES(n, label)			\
	. = n;						\
	.globl label##_pSeries;				\
label##_pSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)

#define STD_EXCEPTION_ISERIES(n, label, area)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(area);		\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common

#define MASKABLE_EXCEPTION_ISERIES(n, label)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);		\
	lbz	r10,PACAPROCENABLED(r13);		\
	cmpwi	0,r10,0;				\
	beq-	label##_iSeries_masked;			\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common;				\

#ifdef DO_SOFT_DISABLE
#define DISABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	li	r11,0;				\
	std	r10,SOFTE(r1);			\
	mfmsr	r10;				\
	stb	r11,PACAPROCENABLED(r13);	\
	ori	r10,r10,MSR_EE;			\
	mtmsrd	r10,1

#define ENABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	mfmsr	r11;				\
	std	r10,SOFTE(r1);			\
	ori	r11,r11,MSR_EE;			\
	mtmsrd	r11,1

#else	/* hard enable/disable interrupts */
#define DISABLE_INTS

#define ENABLE_INTS				\
	ld	r12,_MSR(r1);			\
	mfmsr	r11;				\
	rlwimi	r11,r12,0,MSR_EE;		\
	mtmsrd	r11,1

#endif

#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except_lite

/*
 * Start of pSeries system interrupt routines
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRN_SPRG2,r12
	mfspr	r13,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

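/*
 * A note on the feature section above: on CPUs without an SLB (i.e. ones
 * that use the segment table), the 0x300 prolog peeks at DAR and DSISR
 * first.  Roughly, if the faulting address is in the kernel linear region
 * (DAR >> 60 == 0xc) and DSISR flags a missing segment, it branches
 * straight to .do_stab_bolted_pSeries to bolt a kernel segment entry;
 * otherwise it falls through to the normal data_access path.  This is a
 * paraphrase of the test, not an exhaustive decode of the DSISR bits.
 */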
	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	b	.slb_miss_realmode	/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	b	.slb_miss_realmode	/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	STD_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	RUNLATCH_ON(r9)
	mr	r9,r13
	mfmsr	r10
	mfspr	r13,SPRN_SPRG3
	mfspr	r11,SPRN_SRR0
	clrrdi	r12,r13,32
	oris	r12,r12,system_call_common@h
	ori	r12,r12,system_call_common@l
	mtspr	SPRN_SRR0,r12
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SPRN_SRR1
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception here,
	 * which is at 0xf20 and thus in the middle of the prolog code
	 * of the PerformanceMonitor one.  A little trickery is thus
	 * necessary.
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)

	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., performance_monitor)

	.align	7
_GLOBAL(do_stab_bolted_pSeries)
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

/*
 * We have some room here; we use it to put the pSeries SLB miss user
 * trampoline code, so that it is reasonably far away from
 * slb_miss_user_common to avoid problems with rfid.
 *
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now but will once we re-implement
 * dynamic VSIDs for shared page tables.
 */
#ifdef __DISABLED__
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r10,SPRG1
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
	.align 7
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

#ifdef CONFIG_PPC_ISERIES
/***  ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

	.globl data_access_iSeries
data_access_iSeries:
	mtspr	SPRN_SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRN_SPRG2,r12
	mfspr	r13,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_iSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b	data_access_common

.do_stab_bolted_iSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2
	b	.do_stab_bolted

	.globl data_access_slb_iSeries
data_access_slb_iSeries:
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)
	mfcr	r9
#ifdef __DISABLED__
	cmpdi	r3,0
	bge	slb_miss_user_iseries
#endif
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	ld	r12,PACALPPACAPTR(r13)
	ld	r12,LPPACASRR1(r12)
	b	.slb_miss_realmode

	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

	.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACASRR0(r3)	/* get SRR0 value */
	std	r9,PACA_EXSLB+EX_R9(r13)
	mfcr	r9
#ifdef __DISABLED__
	cmpdi	r3,0
	bge	.slb_miss_user_iseries
#endif
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	ld	r12,PACALPPACAPTR(r13)
	ld	r12,LPPACASRR1(r12)
	b	.slb_miss_realmode

#ifdef __DISABLED__
slb_miss_user_iseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r10,SPRG1
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	EXCEPTION_PROLOG_ISERIES_2
	b	slb_miss_user_common
#endif

	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

	.globl	system_call_iSeries
system_call_iSeries:
	mr	r9,r13
	mfspr	r13,SPRN_SPRG3
	EXCEPTION_PROLOG_ISERIES_2
	b	system_call_common

	STD_EXCEPTION_ISERIES(0xd00, single_step, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xe00, trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xf00, performance_monitor, PACA_EXGEN)

	.globl system_reset_iSeries
system_reset_iSeries:
	mfspr	r13,SPRN_SPRG3		/* Get paca address */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,SPRN_CTRLF
	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	SPRN_CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOAD_REG_IMMEDIATE(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,THREAD_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
	bne	.__secondary_start		/* Loop until told to go */
iSeries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
	   which are running on multi-threaded machines. */
	lis	r3,0x8000
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0			/* "yield timed" */
	li	r5,-1			/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
	sc				/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRN_SPRG3		/* Put r13 back ???? */
	b	1b			/* If SMP not configured, secondaries
					 * loop forever */

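/*
 * The *_iSeries_masked stubs below handle the case where an interrupt
 * arrives while PACAPROCENABLED says interrupts are soft-disabled.  In
 * outline (hedging the lppaca details, which belong to the hypervisor
 * interface): the decrementer case notes the pending tick in the lppaca
 * and re-arms DEC, then both cases restore the scratch registers from
 * the PACA and rfid straight back to the interrupted code.  The pending
 * work is picked up later when local_irq_restore() re-enables interrupts.
 */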
	.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
	/* We may not have a valid TOC pointer in here. */
	li	r11,1
	ld	r12,PACALPPACAPTR(r13)
	stb	r11,LPPACADECRINT(r12)
	LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy)
	lwz	r12,0(r12)
	mtspr	SPRN_DEC,r12
	/* fall through */

	.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
	mtcrf	0x80,r9		/* Restore regs */
	ld	r12,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r12)
	ld	r12,LPPACASRR1(r12)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_ISERIES */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r12,_MSR(r1)
	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SPRN_SRR1,r12
	mtspr	SPRN_SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	RUNLATCH_ON(r10)	/* It won't fit in the 0x300 handler */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

/*
 * Here is the common SLB miss user handler that is used when going to
 * virtual mode for SLB misses.  It is currently not used.
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	.handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */

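/*
 * A word on why slb_miss_realmode runs with relocation off: until the
 * missing segment is installed, an access through the kernel's virtual
 * mappings (including the kernel stack) could itself fault, so the
 * handler stays in real mode, keeps its state in the PACA save area, and
 * only calls slb_allocate_realmode, which is written to be safe in that
 * context.  This is a summary of the constraints implied by the code
 * below, not a statement of the full SLB miss design.
 */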
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return,
 * r3 has the faulting address,
 * r9 - r13 are saved in paca->exslb,
 * r3 is saved in paca->slb_r3.
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
	ld	r11,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	unrecov_slb

.machine push
.machine "power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine pop

#ifdef CONFIG_PPC_ISERIES
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	.load_up_fpu		/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */

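/*
 * The permission mask that do_hash_page builds from DSISR (or the SRR1
 * bits for an ISI) is, roughly, the following C; store_fault, user_mode
 * and kernel_address are just descriptive names here, and the real bit
 * shuffling is the rlwinm/rlwimi sequence below:
 *
 *	access  = _PAGE_PRESENT;
 *	access |= store_fault ? _PAGE_RW : 0;
 *	access |= (user_mode || !kernel_address) ? _PAGE_USER : 0;
 *	access |= (trap == 0x400) ? _PAGE_EXEC : 0;
 *
 * hash_page() then checks this mask against the Linux PTE before it
 * inserts a hardware PTE.
 */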
/*
 * Hash table stuff
 */
	.align	7
_GLOBAL(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa450		/* weird error? */
	bne-	.handle_page_fault	/* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	.do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

#ifdef DO_SOFT_DISABLE
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	.ret_from_except_lite
	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	bl	.local_irq_restore
	b	11f
#else
	beq	fast_exception_return	/* Return from exception on success */
	ble-	12f			/* Failure return from hash_page */

	/* fall through */
#endif

/* Here we have a page fault that hash_page can't handle. */
_GLOBAL(handle_page_fault)
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	.ret_from_except_lite
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

	/* here we have a segment miss */
_GLOBAL(do_ste_alloc)
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	beq+	fast_exception_return
	b	.handle_page_fault

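/*
 * Putting the pieces above together, the fault path is roughly:
 *
 *	do_hash_page:
 *		unusual DSISR bits	-> handle_page_fault
 *		segment table fault	-> do_ste_alloc
 *		hash_page() == 0	-> return to the interrupted code
 *		hash_page() >  0	-> handle_page_fault
 *		hash_page() <  0	-> low_hash_fault (HV refused the PTE)
 *
 *	handle_page_fault -> do_page_fault(), then bad_page_fault() on failure
 *
 * This is a reading of the branches above, not a separate specification;
 * the >0 / <0 split follows the cmpdi / ble- pair after the .hash_page call.
 */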
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick to searching only the primary group for now. */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number; OR in 1 to avoid entry 0 */
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			  */
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio

	mfspr	r11,SPRN_DAR	/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap in lpardata.c), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_OFFSET	/* 0x6000 */
	.globl initial_stab
initial_stab:
	.space	4096

/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	. = LPARMAP_PHYS
#include "lparmap.s"
/*
 * This ".text" is here for old compilers that generate a trailing
 * .note section when compiling .c files to .s
 */
	.text
#endif /* CONFIG_PPC_ISERIES */

	. = 0x8000

/*
 * On pSeries, secondary processors spin in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(pSeries_secondary_smp_init)
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

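/*
 * The search that follows is, in C terms, roughly the loop below; paca[]
 * and hw_cpu_id are the real structures, while phys_id is just the name
 * used here for the value held in r24:
 *
 *	for (i = 0; i < NR_CPUS; i++)
 *		if (paca[i].hw_cpu_id == phys_id)
 *			break;			// r13 = &paca[i], r24 = i
 *	if (i == NR_CPUS)
 *		kexec_wait();			// no match: park this cpu
 *
 * i.e. the physical id handed to us by firmware is mapped back to the
 * logical cpu number that the rest of the kernel uses.
 */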
1423 */ 1424 LOAD_REG_IMMEDIATE(r13, paca) /* Get base vaddr of paca array */ 1425 li r5,0 /* logical cpu id */ 14261: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ 1427 cmpw r6,r24 /* Compare to our id */ 1428 beq 2f 1429 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ 1430 addi r5,r5,1 1431 cmpwi r5,NR_CPUS 1432 blt 1b 1433 1434 mr r3,r24 /* not found, copy phys to r3 */ 1435 b .kexec_wait /* next kernel might do better */ 1436 14372: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1438 /* From now on, r24 is expected to be logical cpuid */ 1439 mr r24,r5 14403: HMT_LOW 1441 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ 1442 /* start. */ 1443 sync 1444 1445 /* Create a temp kernel stack for use before relocation is on. */ 1446 ld r1,PACAEMERGSP(r13) 1447 subi r1,r1,STACK_FRAME_OVERHEAD 1448 1449 cmpwi 0,r23,0 1450#ifdef CONFIG_SMP 1451 bne .__secondary_start 1452#endif 1453 b 3b /* Loop until told to go */ 1454 1455#ifdef CONFIG_PPC_ISERIES 1456_STATIC(__start_initialization_iSeries) 1457 /* Clear out the BSS */ 1458 LOAD_REG_IMMEDIATE(r11,__bss_stop) 1459 LOAD_REG_IMMEDIATE(r8,__bss_start) 1460 sub r11,r11,r8 /* bss size */ 1461 addi r11,r11,7 /* round up to an even double word */ 1462 rldicl. r11,r11,61,3 /* shift right by 3 */ 1463 beq 4f 1464 addi r8,r8,-8 1465 li r0,0 1466 mtctr r11 /* zero this many doublewords */ 14673: stdu r0,8(r8) 1468 bdnz 3b 14694: 1470 LOAD_REG_IMMEDIATE(r1,init_thread_union) 1471 addi r1,r1,THREAD_SIZE 1472 li r0,0 1473 stdu r0,-STACK_FRAME_OVERHEAD(r1) 1474 1475 LOAD_REG_IMMEDIATE(r3,cpu_specs) 1476 LOAD_REG_IMMEDIATE(r4,cur_cpu_spec) 1477 li r5,0 1478 bl .identify_cpu 1479 1480 LOAD_REG_IMMEDIATE(r2,__toc_start) 1481 addi r2,r2,0x4000 1482 addi r2,r2,0x4000 1483 1484 bl .iSeries_early_setup 1485 bl .early_setup 1486 1487 /* relocation is on at this point */ 1488 1489 b .start_here_common 1490#endif /* CONFIG_PPC_ISERIES */ 1491 1492#ifdef CONFIG_PPC_MULTIPLATFORM 1493 1494_STATIC(__mmu_off) 1495 mfmsr r3 1496 andi. r0,r3,MSR_IR|MSR_DR 1497 beqlr 1498 andc r3,r3,r0 1499 mtspr SPRN_SRR0,r4 1500 mtspr SPRN_SRR1,r3 1501 sync 1502 rfid 1503 b . /* prevent speculative execution */ 1504 1505 1506/* 1507 * Here is our main kernel entry point. We support currently 2 kind of entries 1508 * depending on the value of r5. 1509 * 1510 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content 1511 * in r3...r7 1512 * 1513 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the 1514 * DT block, r4 is a physical pointer to the kernel itself 1515 * 1516 */ 1517_GLOBAL(__start_initialization_multiplatform) 1518#ifdef CONFIG_PPC_MULTIPLATFORM 1519 /* 1520 * Are we booted from a PROM Of-type client-interface ? 
1521 */ 1522 cmpldi cr0,r5,0 1523 bne .__boot_from_prom /* yes -> prom */ 1524#endif 1525 1526 /* Save parameters */ 1527 mr r31,r3 1528 mr r30,r4 1529 1530 /* Make sure we are running in 64 bits mode */ 1531 bl .enable_64b_mode 1532 1533 /* Setup some critical 970 SPRs before switching MMU off */ 1534 bl .__970_cpu_preinit 1535 1536 /* cpu # */ 1537 li r24,0 1538 1539 /* Switch off MMU if not already */ 1540 LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE) 1541 add r4,r4,r30 1542 bl .__mmu_off 1543 b .__after_prom_start 1544 1545#ifdef CONFIG_PPC_MULTIPLATFORM 1546_STATIC(__boot_from_prom) 1547 /* Save parameters */ 1548 mr r31,r3 1549 mr r30,r4 1550 mr r29,r5 1551 mr r28,r6 1552 mr r27,r7 1553 1554 /* Make sure we are running in 64 bits mode */ 1555 bl .enable_64b_mode 1556 1557 /* put a relocation offset into r3 */ 1558 bl .reloc_offset 1559 1560 LOAD_REG_IMMEDIATE(r2,__toc_start) 1561 addi r2,r2,0x4000 1562 addi r2,r2,0x4000 1563 1564 /* Relocate the TOC from a virt addr to a real addr */ 1565 add r2,r2,r3 1566 1567 /* Restore parameters */ 1568 mr r3,r31 1569 mr r4,r30 1570 mr r5,r29 1571 mr r6,r28 1572 mr r7,r27 1573 1574 /* Do all of the interaction with OF client interface */ 1575 bl .prom_init 1576 /* We never return */ 1577 trap 1578#endif 1579 1580/* 1581 * At this point, r3 contains the physical address we are running at, 1582 * returned by prom_init() 1583 */ 1584_STATIC(__after_prom_start) 1585 1586/* 1587 * We need to run with __start at physical address PHYSICAL_START. 1588 * This will leave some code in the first 256B of 1589 * real memory, which are reserved for software use. 1590 * The remainder of the first page is loaded with the fixed 1591 * interrupt vectors. The next two pages are filled with 1592 * unknown exception placeholders. 1593 * 1594 * Note: This process overwrites the OF exception vectors. 1595 * r26 == relocation offset 1596 * r27 == KERNELBASE 1597 */ 1598 bl .reloc_offset 1599 mr r26,r3 1600 LOAD_REG_IMMEDIATE(r27, KERNELBASE) 1601 1602 LOAD_REG_IMMEDIATE(r3, PHYSICAL_START) /* target addr */ 1603 1604 // XXX FIXME: Use phys returned by OF (r30) 1605 add r4,r27,r26 /* source addr */ 1606 /* current address of _start */ 1607 /* i.e. where we are running */ 1608 /* the source addr */ 1609 1610 LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */ 1611 sub r5,r5,r27 1612 1613 li r6,0x100 /* Start offset, the first 0x100 */ 1614 /* bytes were copied earlier. */ 1615 1616 bl .copy_and_flush /* copy the first n bytes */ 1617 /* this includes the code being */ 1618 /* executed here. */ 1619 1620 LOAD_REG_IMMEDIATE(r0, 4f) /* Jump to the copy of this code */ 1621 mtctr r0 /* that we just made/relocated */ 1622 bctr 1623 16244: LOAD_REG_IMMEDIATE(r5,klimit) 1625 add r5,r5,r26 1626 ld r5,0(r5) /* get the value of klimit */ 1627 sub r5,r5,r27 1628 bl .copy_and_flush /* copy the rest */ 1629 b .start_here_multiplatform 1630 1631#endif /* CONFIG_PPC_MULTIPLATFORM */ 1632 1633/* 1634 * Copy routine used to copy the kernel to start at physical address 0 1635 * and flush and invalidate the caches as needed. 1636 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset 1637 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. 1638 * 1639 * Note: this routine *only* clobbers r0, r6 and lr 1640 */ 1641_GLOBAL(copy_and_flush) 1642 addi r5,r5,-8 1643 addi r6,r6,-8 16444: li r0,16 /* Use the least common */ 1645 /* denominator cache line */ 1646 /* size. 
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,16			/* Use the least common		*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr

.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* pSeries does that early, though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_IMMEDIATE(r4, paca)	/* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	*/

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	.__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	   = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL(__secondary_start)
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/* Load TOC */
	ld	r2,PACATOC(r13)

	/* Do early setup for that CPU (stab, slb, hash table pointer) */
	bl	.early_setup_secondary

	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
	LOAD_REG_ADDR(r3, current_set)
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOAD_REG_ADDR(r3, .start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
	ori	r4,r4,MSR_EE
#endif
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
1767 */ 1768_GLOBAL(start_secondary_prolog) 1769 li r3,0 1770 std r3,0(r1) /* Zero the stack frame pointer */ 1771 bl .start_secondary 1772 b . 1773#endif 1774 1775/* 1776 * This subroutine clobbers r11 and r12 1777 */ 1778_GLOBAL(enable_64b_mode) 1779 mfmsr r11 /* grab the current MSR */ 1780 li r12,1 1781 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) 1782 or r11,r11,r12 1783 li r12,1 1784 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) 1785 or r11,r11,r12 1786 mtmsrd r11 1787 isync 1788 blr 1789 1790#ifdef CONFIG_PPC_MULTIPLATFORM 1791/* 1792 * This is where the main kernel code starts. 1793 */ 1794_STATIC(start_here_multiplatform) 1795 /* get a new offset, now that the kernel has moved. */ 1796 bl .reloc_offset 1797 mr r26,r3 1798 1799 /* Clear out the BSS. It may have been done in prom_init, 1800 * already but that's irrelevant since prom_init will soon 1801 * be detached from the kernel completely. Besides, we need 1802 * to clear it now for kexec-style entry. 1803 */ 1804 LOAD_REG_IMMEDIATE(r11,__bss_stop) 1805 LOAD_REG_IMMEDIATE(r8,__bss_start) 1806 sub r11,r11,r8 /* bss size */ 1807 addi r11,r11,7 /* round up to an even double word */ 1808 rldicl. r11,r11,61,3 /* shift right by 3 */ 1809 beq 4f 1810 addi r8,r8,-8 1811 li r0,0 1812 mtctr r11 /* zero this many doublewords */ 18133: stdu r0,8(r8) 1814 bdnz 3b 18154: 1816 1817 mfmsr r6 1818 ori r6,r6,MSR_RI 1819 mtmsrd r6 /* RI on */ 1820 1821#ifdef CONFIG_HMT 1822 /* Start up the second thread on cpu 0 */ 1823 mfspr r3,SPRN_PVR 1824 srwi r3,r3,16 1825 cmpwi r3,0x34 /* Pulsar */ 1826 beq 90f 1827 cmpwi r3,0x36 /* Icestar */ 1828 beq 90f 1829 cmpwi r3,0x37 /* SStar */ 1830 beq 90f 1831 b 91f /* HMT not supported */ 183290: li r3,0 1833 bl .hmt_start_secondary 183491: 1835#endif 1836 1837 /* The following gets the stack and TOC set up with the regs */ 1838 /* pointing to the real addr of the kernel stack. This is */ 1839 /* all done to support the C function call below which sets */ 1840 /* up the htab. This is done because we have relocated the */ 1841 /* kernel but are still running in real mode. */ 1842 1843 LOAD_REG_IMMEDIATE(r3,init_thread_union) 1844 add r3,r3,r26 1845 1846 /* set up a stack pointer (physical address) */ 1847 addi r1,r3,THREAD_SIZE 1848 li r0,0 1849 stdu r0,-STACK_FRAME_OVERHEAD(r1) 1850 1851 /* set up the TOC (physical address) */ 1852 LOAD_REG_IMMEDIATE(r2,__toc_start) 1853 addi r2,r2,0x4000 1854 addi r2,r2,0x4000 1855 add r2,r2,r26 1856 1857 LOAD_REG_IMMEDIATE(r3, cpu_specs) 1858 add r3,r3,r26 1859 LOAD_REG_IMMEDIATE(r4,cur_cpu_spec) 1860 add r4,r4,r26 1861 mr r5,r26 1862 bl .identify_cpu 1863 1864 /* Save some low level config HIDs of CPU0 to be copied to 1865 * other CPUs later on, or used for suspend/resume 1866 */ 1867 bl .__save_cpu_setup 1868 sync 1869 1870 /* Setup a valid physical PACA pointer in SPRG3 for early_setup 1871 * note that boot_cpuid can always be 0 nowadays since there is 1872 * nowhere it can be initialized differently before we reach this 1873 * code 1874 */ 1875 LOAD_REG_IMMEDIATE(r27, boot_cpuid) 1876 add r27,r27,r26 1877 lwz r27,0(r27) 1878 1879 LOAD_REG_IMMEDIATE(r24, paca) /* Get base vaddr of paca array */ 1880 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ 1881 add r13,r13,r24 /* for this processor. */ 1882 add r13,r13,r26 /* convert to physical addr */ 1883 mtspr SPRN_SPRG3,r13 1884 1885 /* Do very early kernel initializations, including initial hash table, 1886 * stab and slb setup before we turn on relocation. 
	 */

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup

	LOAD_REG_IMMEDIATE(r3, .start_here_common)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */

	LOAD_REG_IMMEDIATE(r3,init_thread_union)

	/* set up the stack */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Apply the CPU-specific fixups (nop out sections not relevant
	 * to this CPU)
	 */
	li	r3,0
	bl	.do_cpu_ftr_fixups

	LOAD_REG_IMMEDIATE(r26, boot_cpuid)
	lwz	r26,0(r26)

	LOAD_REG_IMMEDIATE(r24, paca)	/* Get base vaddr of paca array	 */
	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	mtspr	SPRN_SPRG3,r13

	/* ptr to current */
	LOAD_REG_IMMEDIATE(r4, init_task)
	std	r4,PACACURRENT(r13)

	/* Load the TOC */
	ld	r2,PACATOC(r13)
	std	r1,PACAKSAVE(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
#ifdef DO_SOFT_DISABLE
	li	r5,0
	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled */
	mtmsrd	r5
#endif

	bl	.start_kernel

_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
	LOAD_REG_IMMEDIATE(r5, hmt_thread_data)
	mfspr	r7,SPRN_PVR
	srwi	r7,r7,16
	cmpwi	r7,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r7,0x36			/* Icestar */
	beq	91f
	cmpwi	r7,0x37			/* SStar   */
	beq	91f
	b	101f
90:	mfspr	r6,SPRN_PIR
	andi.	r6,r6,0x1f
	b	92f
91:	mfspr	r6,SPRN_PIR
	andi.	r6,r6,0x3ff
92:	sldi	r4,r24,3
	stwx	r6,r5,r4
	bl	.hmt_start_secondary
	b	101f

__hmt_secondary_hold:
	LOAD_REG_IMMEDIATE(r5, hmt_thread_data)
	clrldi	r5,r5,4
	li	r7,0
	mfspr	r6,SPRN_PIR
	mfspr	r8,SPRN_PVR
	srwi	r8,r8,16
	cmpwi	r8,0x34
	bne	93f
	andi.	r6,r6,0x1f
	b	103f
93:	andi.	r6,r6,0x3f

103:	lwzx	r8,r5,r7
	cmpw	r8,r6
	beq	104f
	addi	r7,r7,8
	b	103b

104:	addi	r7,r7,4
	lwzx	r9,r5,r7
	mr	r24,r9
101:
#endif
	mr	r3,r24
	b	.pSeries_secondary_smp_init

#ifdef CONFIG_HMT
_GLOBAL(hmt_start_secondary)
	LOAD_REG_IMMEDIATE(r4,__hmt_secondary_hold)
	clrldi	r4,r4,4
	mtspr	SPRN_NIADORM, r4
	mfspr	r4, SPRN_MSRDORM
	li	r5, -65
	and	r4, r4, r5
	mtspr	SPRN_MSRDORM, r4
	lis	r4,0xffef
	ori	r4,r4,0x7403
	mtspr	SPRN_TSC, r4
	li	r4,0x1f4
	mtspr	SPRN_TST, r4
	mfspr	r4, SPRN_HID0
	ori	r4, r4, 0x1
	mtspr	SPRN_HID0, r4
	mfspr	r4, SPRN_CTRLF
	oris	r4, r4, 0x40
	mtspr	SPRN_CTRLT, r4
	blr
#endif

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"

	.align	PAGE_SHIFT

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PAGE_SIZE

/*
 * This space gets a copy of optional info passed to us by the bootstrap.
 * It is used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	COMMAND_LINE_SIZE