1/* 2 * arch/ppc64/kernel/head.S 3 * 4 * PowerPC version 5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 6 * 7 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP 8 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> 9 * Adapted for Power Macintosh by Paul Mackerras. 10 * Low-level exception handlers and MMU support 11 * rewritten by Paul Mackerras. 12 * Copyright (C) 1996 Paul Mackerras. 13 * 14 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and 15 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com 16 * 17 * This file contains the low-level support and setup for the 18 * PowerPC-64 platform, including trap and interrupt dispatch. 19 * 20 * This program is free software; you can redistribute it and/or 21 * modify it under the terms of the GNU General Public License 22 * as published by the Free Software Foundation; either version 23 * 2 of the License, or (at your option) any later version. 24 */ 25 26#include <linux/config.h> 27#include <linux/threads.h> 28#include <asm/reg.h> 29#include <asm/page.h> 30#include <asm/mmu.h> 31#include <asm/systemcfg.h> 32#include <asm/ppc_asm.h> 33#include <asm/asm-offsets.h> 34#include <asm/bug.h> 35#include <asm/cputable.h> 36#include <asm/setup.h> 37#include <asm/hvcall.h> 38#include <asm/iSeries/LparMap.h> 39#include <asm/thread_info.h> 40 41#ifdef CONFIG_PPC_ISERIES 42#define DO_SOFT_DISABLE 43#endif 44 45/* 46 * We layout physical memory as follows: 47 * 0x0000 - 0x00ff : Secondary processor spin code 48 * 0x0100 - 0x2fff : pSeries Interrupt prologs 49 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs 50 * 0x6000 - 0x6fff : Initial (CPU0) segment table 51 * 0x7000 - 0x7fff : FWNMI data area 52 * 0x8000 - : Early init and support code 53 */ 54 55/* 56 * SPRG Usage 57 * 58 * Register Definition 59 * 60 * SPRG0 reserved for hypervisor 61 * SPRG1 temp - used to save gpr 62 * SPRG2 temp - used to save gpr 63 * SPRG3 virt addr of paca 64 */ 65 66/* 67 * Entering into this code we make the following assumptions: 68 * For pSeries: 69 * 1. The MMU is off & open firmware is running in real mode. 70 * 2. The kernel is entered at __start 71 * 72 * For iSeries: 73 * 1. The MMU is on (as it always is for iSeries) 74 * 2. The kernel is entered at system_reset_iSeries 75 */ 76 77 .text 78 .globl _stext 79_stext: 80#ifdef CONFIG_PPC_MULTIPLATFORM 81_GLOBAL(__start) 82 /* NOP this out unconditionally */ 83BEGIN_FTR_SECTION 84 b .__start_initialization_multiplatform 85END_FTR_SECTION(0, 1) 86#endif /* CONFIG_PPC_MULTIPLATFORM */ 87 88 /* Catch branch to 0 in real mode */ 89 trap 90 91#ifdef CONFIG_PPC_ISERIES 92 /* 93 * At offset 0x20, there is a pointer to iSeries LPAR data. 94 * This is required by the hypervisor 95 */ 96 . = 0x20 97 .llong hvReleaseData-KERNELBASE 98 99 /* 100 * At offset 0x28 and 0x30 are offsets to the mschunks_map 101 * array (used by the iSeries LPAR debugger to do translation 102 * between physical addresses and absolute addresses) and 103 * to the pidhash table (also used by the debugger) 104 */ 105 .llong mschunks_map-KERNELBASE 106 .llong 0 /* pidhash-KERNELBASE SFRXXX */ 107 108 /* Offset 0x38 - Pointer to start of embedded System.map */ 109 .globl embedded_sysmap_start 110embedded_sysmap_start: 111 .llong 0 112 /* Offset 0x40 - Pointer to end of embedded System.map */ 113 .globl embedded_sysmap_end 114embedded_sysmap_end: 115 .llong 0 116 117#endif /* CONFIG_PPC_ISERIES */ 118 119 /* Secondary processors spin on this value until it goes to 1. */ 120 .globl __secondary_hold_spinloop 121__secondary_hold_spinloop: 122 .llong 0x0 123 124 /* Secondary processors write this value with their cpu # */ 125 /* after they enter the spin loop immediately below. */ 126 .globl __secondary_hold_acknowledge 127__secondary_hold_acknowledge: 128 .llong 0x0 129 130 . = 0x60 131/* 132 * The following code is used on pSeries to hold secondary processors 133 * in a spin loop after they have been freed from OpenFirmware, but 134 * before the bulk of the kernel has been relocated. This code 135 * is relocated to physical address 0x60 before prom_init is run. 136 * All of it must fit below the first exception vector at 0x100. 137 */ 138_GLOBAL(__secondary_hold) 139 mfmsr r24 140 ori r24,r24,MSR_RI 141 mtmsrd r24 /* RI on */ 142 143 /* Grab our linux cpu number */ 144 mr r24,r3 145 146 /* Tell the master cpu we're here */ 147 /* Relocation is off & we are located at an address less */ 148 /* than 0x100, so only need to grab low order offset. */ 149 std r24,__secondary_hold_acknowledge@l(0) 150 sync 151 152 /* All secondary cpus wait here until told to start. */ 153100: ld r4,__secondary_hold_spinloop@l(0) 154 cmpdi 0,r4,1 155 bne 100b 156 157#ifdef CONFIG_HMT 158 b .hmt_init 159#else 160#ifdef CONFIG_SMP 161 mr r3,r24 162 b .pSeries_secondary_smp_init 163#else 164 BUG_OPCODE 165#endif 166#endif 167 168/* This value is used to mark exception frames on the stack. */ 169 .section ".toc","aw" 170exception_marker: 171 .tc ID_72656773_68657265[TC],0x7265677368657265 172 .text 173 174/* 175 * The following macros define the code that appears as 176 * the prologue to each of the exception handlers. They 177 * are split into two parts to allow a single kernel binary 178 * to be used for pSeries and iSeries. 179 * LOL. One day... - paulus 180 */ 181 182/* 183 * We make as much of the exception code common between native 184 * exception handlers (including pSeries LPAR) and iSeries LPAR 185 * implementations as possible. 186 */ 187 188/* 189 * This is the start of the interrupt handlers for pSeries 190 * This code runs with relocation off. 191 */ 192#define EX_R9 0 193#define EX_R10 8 194#define EX_R11 16 195#define EX_R12 24 196#define EX_R13 32 197#define EX_SRR0 40 198#define EX_R3 40 /* SLB miss saves R3, but not SRR0 */ 199#define EX_DAR 48 200#define EX_LR 48 /* SLB miss saves LR, but not DAR */ 201#define EX_DSISR 56 202#define EX_CCR 60 203 204#define EXCEPTION_PROLOG_PSERIES(area, label) \ 205 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ 206 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 207 std r10,area+EX_R10(r13); \ 208 std r11,area+EX_R11(r13); \ 209 std r12,area+EX_R12(r13); \ 210 mfspr r9,SPRN_SPRG1; \ 211 std r9,area+EX_R13(r13); \ 212 mfcr r9; \ 213 clrrdi r12,r13,32; /* get high part of &label */ \ 214 mfmsr r10; \ 215 mfspr r11,SPRN_SRR0; /* save SRR0 */ \ 216 ori r12,r12,(label)@l; /* virt addr of handler */ \ 217 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ 218 mtspr SPRN_SRR0,r12; \ 219 mfspr r12,SPRN_SRR1; /* and SRR1 */ \ 220 mtspr SPRN_SRR1,r10; \ 221 rfid; \ 222 b . /* prevent speculative execution */ 223 224/* 225 * This is the start of the interrupt handlers for iSeries 226 * This code runs with relocation on. 227 */ 228#define EXCEPTION_PROLOG_ISERIES_1(area) \ 229 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ 230 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 231 std r10,area+EX_R10(r13); \ 232 std r11,area+EX_R11(r13); \ 233 std r12,area+EX_R12(r13); \ 234 mfspr r9,SPRN_SPRG1; \ 235 std r9,area+EX_R13(r13); \ 236 mfcr r9 237 238#define EXCEPTION_PROLOG_ISERIES_2 \ 239 mfmsr r10; \ 240 ld r11,PACALPPACA+LPPACASRR0(r13); \ 241 ld r12,PACALPPACA+LPPACASRR1(r13); \ 242 ori r10,r10,MSR_RI; \ 243 mtmsrd r10,1 244 245/* 246 * The common exception prolog is used for all except a few exceptions 247 * such as a segment miss on a kernel address. We have to be prepared 248 * to take another exception from the point where we first touch the 249 * kernel stack onwards. 250 * 251 * On entry r13 points to the paca, r9-r13 are saved in the paca, 252 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and 253 * SRR1, and relocation is on. 254 */ 255#define EXCEPTION_PROLOG_COMMON(n, area) \ 256 andi. r10,r12,MSR_PR; /* See if coming from user */ \ 257 mr r10,r1; /* Save r1 */ \ 258 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ 259 beq- 1f; \ 260 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ 2611: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ 262 bge- cr1,bad_stack; /* abort if it is */ \ 263 std r9,_CCR(r1); /* save CR in stackframe */ \ 264 std r11,_NIP(r1); /* save SRR0 in stackframe */ \ 265 std r12,_MSR(r1); /* save SRR1 in stackframe */ \ 266 std r10,0(r1); /* make stack chain pointer */ \ 267 std r0,GPR0(r1); /* save r0 in stackframe */ \ 268 std r10,GPR1(r1); /* save r1 in stackframe */ \ 269 std r2,GPR2(r1); /* save r2 in stackframe */ \ 270 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ 271 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ 272 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ 273 ld r10,area+EX_R10(r13); \ 274 std r9,GPR9(r1); \ 275 std r10,GPR10(r1); \ 276 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \ 277 ld r10,area+EX_R12(r13); \ 278 ld r11,area+EX_R13(r13); \ 279 std r9,GPR11(r1); \ 280 std r10,GPR12(r1); \ 281 std r11,GPR13(r1); \ 282 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ 283 mflr r9; /* save LR in stackframe */ \ 284 std r9,_LINK(r1); \ 285 mfctr r10; /* save CTR in stackframe */ \ 286 std r10,_CTR(r1); \ 287 mfspr r11,SPRN_XER; /* save XER in stackframe */ \ 288 std r11,_XER(r1); \ 289 li r9,(n)+1; \ 290 std r9,_TRAP(r1); /* set trap number */ \ 291 li r10,0; \ 292 ld r11,exception_marker@toc(r2); \ 293 std r10,RESULT(r1); /* clear regs->result */ \ 294 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ 295 296/* 297 * Exception vectors. 298 */ 299#define STD_EXCEPTION_PSERIES(n, label) \ 300 . = n; \ 301 .globl label##_pSeries; \ 302label##_pSeries: \ 303 HMT_MEDIUM; \ 304 mtspr SPRN_SPRG1,r13; /* save r13 */ \ 305 RUNLATCH_ON(r13); \ 306 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) 307 308#define STD_EXCEPTION_ISERIES(n, label, area) \ 309 .globl label##_iSeries; \ 310label##_iSeries: \ 311 HMT_MEDIUM; \ 312 mtspr SPRN_SPRG1,r13; /* save r13 */ \ 313 RUNLATCH_ON(r13); \ 314 EXCEPTION_PROLOG_ISERIES_1(area); \ 315 EXCEPTION_PROLOG_ISERIES_2; \ 316 b label##_common 317 318#define MASKABLE_EXCEPTION_ISERIES(n, label) \ 319 .globl label##_iSeries; \ 320label##_iSeries: \ 321 HMT_MEDIUM; \ 322 mtspr SPRN_SPRG1,r13; /* save r13 */ \ 323 RUNLATCH_ON(r13); \ 324 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ 325 lbz r10,PACAPROCENABLED(r13); \ 326 cmpwi 0,r10,0; \ 327 beq- label##_iSeries_masked; \ 328 EXCEPTION_PROLOG_ISERIES_2; \ 329 b label##_common; \ 330 331#ifdef DO_SOFT_DISABLE 332#define DISABLE_INTS \ 333 lbz r10,PACAPROCENABLED(r13); \ 334 li r11,0; \ 335 std r10,SOFTE(r1); \ 336 mfmsr r10; \ 337 stb r11,PACAPROCENABLED(r13); \ 338 ori r10,r10,MSR_EE; \ 339 mtmsrd r10,1 340 341#define ENABLE_INTS \ 342 lbz r10,PACAPROCENABLED(r13); \ 343 mfmsr r11; \ 344 std r10,SOFTE(r1); \ 345 ori r11,r11,MSR_EE; \ 346 mtmsrd r11,1 347 348#else /* hard enable/disable interrupts */ 349#define DISABLE_INTS 350 351#define ENABLE_INTS \ 352 ld r12,_MSR(r1); \ 353 mfmsr r11; \ 354 rlwimi r11,r12,0,MSR_EE; \ 355 mtmsrd r11,1 356 357#endif 358 359#define STD_EXCEPTION_COMMON(trap, label, hdlr) \ 360 .align 7; \ 361 .globl label##_common; \ 362label##_common: \ 363 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ 364 DISABLE_INTS; \ 365 bl .save_nvgprs; \ 366 addi r3,r1,STACK_FRAME_OVERHEAD; \ 367 bl hdlr; \ 368 b .ret_from_except 369 370#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \ 371 .align 7; \ 372 .globl label##_common; \ 373label##_common: \ 374 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ 375 DISABLE_INTS; \ 376 addi r3,r1,STACK_FRAME_OVERHEAD; \ 377 bl hdlr; \ 378 b .ret_from_except_lite 379 380/* 381 * Start of pSeries system interrupt routines 382 */ 383 . = 0x100 384 .globl __start_interrupts 385__start_interrupts: 386 387 STD_EXCEPTION_PSERIES(0x100, system_reset) 388 389 . = 0x200 390_machine_check_pSeries: 391 HMT_MEDIUM 392 mtspr SPRN_SPRG1,r13 /* save r13 */ 393 RUNLATCH_ON(r13) 394 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 395 396 . = 0x300 397 .globl data_access_pSeries 398data_access_pSeries: 399 HMT_MEDIUM 400 mtspr SPRN_SPRG1,r13 401BEGIN_FTR_SECTION 402 mtspr SPRN_SPRG2,r12 403 mfspr r13,SPRN_DAR 404 mfspr r12,SPRN_DSISR 405 srdi r13,r13,60 406 rlwimi r13,r12,16,0x20 407 mfcr r12 408 cmpwi r13,0x2c 409 beq .do_stab_bolted_pSeries 410 mtcrf 0x80,r12 411 mfspr r12,SPRN_SPRG2 412END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 413 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) 414 415 . = 0x380 416 .globl data_access_slb_pSeries 417data_access_slb_pSeries: 418 HMT_MEDIUM 419 mtspr SPRN_SPRG1,r13 420 RUNLATCH_ON(r13) 421 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ 422 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 423 std r10,PACA_EXSLB+EX_R10(r13) 424 std r11,PACA_EXSLB+EX_R11(r13) 425 std r12,PACA_EXSLB+EX_R12(r13) 426 std r3,PACA_EXSLB+EX_R3(r13) 427 mfspr r9,SPRN_SPRG1 428 std r9,PACA_EXSLB+EX_R13(r13) 429 mfcr r9 430 mfspr r12,SPRN_SRR1 /* and SRR1 */ 431 mfspr r3,SPRN_DAR 432 b .do_slb_miss /* Rel. branch works in real mode */ 433 434 STD_EXCEPTION_PSERIES(0x400, instruction_access) 435 436 . = 0x480 437 .globl instruction_access_slb_pSeries 438instruction_access_slb_pSeries: 439 HMT_MEDIUM 440 mtspr SPRN_SPRG1,r13 441 RUNLATCH_ON(r13) 442 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ 443 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 444 std r10,PACA_EXSLB+EX_R10(r13) 445 std r11,PACA_EXSLB+EX_R11(r13) 446 std r12,PACA_EXSLB+EX_R12(r13) 447 std r3,PACA_EXSLB+EX_R3(r13) 448 mfspr r9,SPRN_SPRG1 449 std r9,PACA_EXSLB+EX_R13(r13) 450 mfcr r9 451 mfspr r12,SPRN_SRR1 /* and SRR1 */ 452 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ 453 b .do_slb_miss /* Rel. branch works in real mode */ 454 455 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) 456 STD_EXCEPTION_PSERIES(0x600, alignment) 457 STD_EXCEPTION_PSERIES(0x700, program_check) 458 STD_EXCEPTION_PSERIES(0x800, fp_unavailable) 459 STD_EXCEPTION_PSERIES(0x900, decrementer) 460 STD_EXCEPTION_PSERIES(0xa00, trap_0a) 461 STD_EXCEPTION_PSERIES(0xb00, trap_0b) 462 463 . = 0xc00 464 .globl system_call_pSeries 465system_call_pSeries: 466 HMT_MEDIUM 467 RUNLATCH_ON(r9) 468 mr r9,r13 469 mfmsr r10 470 mfspr r13,SPRN_SPRG3 471 mfspr r11,SPRN_SRR0 472 clrrdi r12,r13,32 473 oris r12,r12,system_call_common@h 474 ori r12,r12,system_call_common@l 475 mtspr SPRN_SRR0,r12 476 ori r10,r10,MSR_IR|MSR_DR|MSR_RI 477 mfspr r12,SPRN_SRR1 478 mtspr SPRN_SRR1,r10 479 rfid 480 b . /* prevent speculative execution */ 481 482 STD_EXCEPTION_PSERIES(0xd00, single_step) 483 STD_EXCEPTION_PSERIES(0xe00, trap_0e) 484 485 /* We need to deal with the Altivec unavailable exception 486 * here which is at 0xf20, thus in the middle of the 487 * prolog code of the PerformanceMonitor one. A little 488 * trickery is thus necessary 489 */ 490 . = 0xf00 491 b performance_monitor_pSeries 492 493 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable) 494 495 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) 496 STD_EXCEPTION_PSERIES(0x1700, altivec_assist) 497 498 . = 0x3000 499 500/*** pSeries interrupt support ***/ 501 502 /* moved from 0xf00 */ 503 STD_EXCEPTION_PSERIES(., performance_monitor) 504 505 .align 7 506_GLOBAL(do_stab_bolted_pSeries) 507 mtcrf 0x80,r12 508 mfspr r12,SPRN_SPRG2 509 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) 510 511/* 512 * Vectors for the FWNMI option. Share common code. 513 */ 514 .globl system_reset_fwnmi 515system_reset_fwnmi: 516 HMT_MEDIUM 517 mtspr SPRN_SPRG1,r13 /* save r13 */ 518 RUNLATCH_ON(r13) 519 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) 520 521 .globl machine_check_fwnmi 522machine_check_fwnmi: 523 HMT_MEDIUM 524 mtspr SPRN_SPRG1,r13 /* save r13 */ 525 RUNLATCH_ON(r13) 526 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 527 528#ifdef CONFIG_PPC_ISERIES 529/*** ISeries-LPAR interrupt handlers ***/ 530 531 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC) 532 533 .globl data_access_iSeries 534data_access_iSeries: 535 mtspr SPRN_SPRG1,r13 536BEGIN_FTR_SECTION 537 mtspr SPRN_SPRG2,r12 538 mfspr r13,SPRN_DAR 539 mfspr r12,SPRN_DSISR 540 srdi r13,r13,60 541 rlwimi r13,r12,16,0x20 542 mfcr r12 543 cmpwi r13,0x2c 544 beq .do_stab_bolted_iSeries 545 mtcrf 0x80,r12 546 mfspr r12,SPRN_SPRG2 547END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 548 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN) 549 EXCEPTION_PROLOG_ISERIES_2 550 b data_access_common 551 552.do_stab_bolted_iSeries: 553 mtcrf 0x80,r12 554 mfspr r12,SPRN_SPRG2 555 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 556 EXCEPTION_PROLOG_ISERIES_2 557 b .do_stab_bolted 558 559 .globl data_access_slb_iSeries 560data_access_slb_iSeries: 561 mtspr SPRN_SPRG1,r13 /* save r13 */ 562 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 563 std r3,PACA_EXSLB+EX_R3(r13) 564 ld r12,PACALPPACA+LPPACASRR1(r13) 565 mfspr r3,SPRN_DAR 566 b .do_slb_miss 567 568 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN) 569 570 .globl instruction_access_slb_iSeries 571instruction_access_slb_iSeries: 572 mtspr SPRN_SPRG1,r13 /* save r13 */ 573 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 574 std r3,PACA_EXSLB+EX_R3(r13) 575 ld r12,PACALPPACA+LPPACASRR1(r13) 576 ld r3,PACALPPACA+LPPACASRR0(r13) 577 b .do_slb_miss 578 579 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt) 580 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN) 581 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN) 582 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN) 583 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer) 584 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN) 585 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN) 586 587 .globl system_call_iSeries 588system_call_iSeries: 589 mr r9,r13 590 mfspr r13,SPRN_SPRG3 591 EXCEPTION_PROLOG_ISERIES_2 592 b system_call_common 593 594 STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN) 595 STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN) 596 STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN) 597 598 .globl system_reset_iSeries 599system_reset_iSeries: 600 mfspr r13,SPRN_SPRG3 /* Get paca address */ 601 mfmsr r24 602 ori r24,r24,MSR_RI 603 mtmsrd r24 /* RI on */ 604 lhz r24,PACAPACAINDEX(r13) /* Get processor # */ 605 cmpwi 0,r24,0 /* Are we processor 0? */ 606 beq .__start_initialization_iSeries /* Start up the first processor */ 607 mfspr r4,SPRN_CTRLF 608 li r5,CTRL_RUNLATCH /* Turn off the run light */ 609 andc r4,r4,r5 610 mtspr SPRN_CTRLT,r4 611 6121: 613 HMT_LOW 614#ifdef CONFIG_SMP 615 lbz r23,PACAPROCSTART(r13) /* Test if this processor 616 * should start */ 617 sync 618 LOADADDR(r3,current_set) 619 sldi r28,r24,3 /* get current_set[cpu#] */ 620 ldx r3,r3,r28 621 addi r1,r3,THREAD_SIZE 622 subi r1,r1,STACK_FRAME_OVERHEAD 623 624 cmpwi 0,r23,0 625 beq iSeries_secondary_smp_loop /* Loop until told to go */ 626 bne .__secondary_start /* Loop until told to go */ 627iSeries_secondary_smp_loop: 628 /* Let the Hypervisor know we are alive */ 629 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ 630 lis r3,0x8002 631 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ 632#else /* CONFIG_SMP */ 633 /* Yield the processor. This is required for non-SMP kernels 634 which are running on multi-threaded machines. */ 635 lis r3,0x8000 636 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ 637 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ 638 li r4,0 /* "yield timed" */ 639 li r5,-1 /* "yield forever" */ 640#endif /* CONFIG_SMP */ 641 li r0,-1 /* r0=-1 indicates a Hypervisor call */ 642 sc /* Invoke the hypervisor via a system call */ 643 mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */ 644 b 1b /* If SMP not configured, secondaries 645 * loop forever */ 646 647 .globl decrementer_iSeries_masked 648decrementer_iSeries_masked: 649 li r11,1 650 stb r11,PACALPPACA+LPPACADECRINT(r13) 651 lwz r12,PACADEFAULTDECR(r13) 652 mtspr SPRN_DEC,r12 653 /* fall through */ 654 655 .globl hardware_interrupt_iSeries_masked 656hardware_interrupt_iSeries_masked: 657 mtcrf 0x80,r9 /* Restore regs */ 658 ld r11,PACALPPACA+LPPACASRR0(r13) 659 ld r12,PACALPPACA+LPPACASRR1(r13) 660 mtspr SPRN_SRR0,r11 661 mtspr SPRN_SRR1,r12 662 ld r9,PACA_EXGEN+EX_R9(r13) 663 ld r10,PACA_EXGEN+EX_R10(r13) 664 ld r11,PACA_EXGEN+EX_R11(r13) 665 ld r12,PACA_EXGEN+EX_R12(r13) 666 ld r13,PACA_EXGEN+EX_R13(r13) 667 rfid 668 b . /* prevent speculative execution */ 669#endif /* CONFIG_PPC_ISERIES */ 670 671/*** Common interrupt handlers ***/ 672 673 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) 674 675 /* 676 * Machine check is different because we use a different 677 * save area: PACA_EXMC instead of PACA_EXGEN. 678 */ 679 .align 7 680 .globl machine_check_common 681machine_check_common: 682 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) 683 DISABLE_INTS 684 bl .save_nvgprs 685 addi r3,r1,STACK_FRAME_OVERHEAD 686 bl .machine_check_exception 687 b .ret_from_except 688 689 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) 690 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) 691 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) 692 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) 693 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) 694 STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception) 695 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) 696#ifdef CONFIG_ALTIVEC 697 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) 698#else 699 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) 700#endif 701 702/* 703 * Here we have detected that the kernel stack pointer is bad. 704 * R9 contains the saved CR, r13 points to the paca, 705 * r10 contains the (bad) kernel stack pointer, 706 * r11 and r12 contain the saved SRR0 and SRR1. 707 * We switch to using an emergency stack, save the registers there, 708 * and call kernel_bad_stack(), which panics. 709 */ 710bad_stack: 711 ld r1,PACAEMERGSP(r13) 712 subi r1,r1,64+INT_FRAME_SIZE 713 std r9,_CCR(r1) 714 std r10,GPR1(r1) 715 std r11,_NIP(r1) 716 std r12,_MSR(r1) 717 mfspr r11,SPRN_DAR 718 mfspr r12,SPRN_DSISR 719 std r11,_DAR(r1) 720 std r12,_DSISR(r1) 721 mflr r10 722 mfctr r11 723 mfxer r12 724 std r10,_LINK(r1) 725 std r11,_CTR(r1) 726 std r12,_XER(r1) 727 SAVE_GPR(0,r1) 728 SAVE_GPR(2,r1) 729 SAVE_4GPRS(3,r1) 730 SAVE_2GPRS(7,r1) 731 SAVE_10GPRS(12,r1) 732 SAVE_10GPRS(22,r1) 733 addi r11,r1,INT_FRAME_SIZE 734 std r11,0(r1) 735 li r12,0 736 std r12,0(r11) 737 ld r2,PACATOC(r13) 7381: addi r3,r1,STACK_FRAME_OVERHEAD 739 bl .kernel_bad_stack 740 b 1b 741 742/* 743 * Return from an exception with minimal checks. 744 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. 745 * If interrupts have been enabled, or anything has been 746 * done that might have changed the scheduling status of 747 * any task or sent any task a signal, you should use 748 * ret_from_except or ret_from_except_lite instead of this. 749 */ 750 .globl fast_exception_return 751fast_exception_return: 752 ld r12,_MSR(r1) 753 ld r11,_NIP(r1) 754 andi. r3,r12,MSR_RI /* check if RI is set */ 755 beq- unrecov_fer 756 ld r3,_CCR(r1) 757 ld r4,_LINK(r1) 758 ld r5,_CTR(r1) 759 ld r6,_XER(r1) 760 mtcr r3 761 mtlr r4 762 mtctr r5 763 mtxer r6 764 REST_GPR(0, r1) 765 REST_8GPRS(2, r1) 766 767 mfmsr r10 768 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */ 769 mtmsrd r10,1 770 771 mtspr SPRN_SRR1,r12 772 mtspr SPRN_SRR0,r11 773 REST_4GPRS(10, r1) 774 ld r1,GPR1(r1) 775 rfid 776 b . /* prevent speculative execution */ 777 778unrecov_fer: 779 bl .save_nvgprs 7801: addi r3,r1,STACK_FRAME_OVERHEAD 781 bl .unrecoverable_exception 782 b 1b 783 784/* 785 * Here r13 points to the paca, r9 contains the saved CR, 786 * SRR0 and SRR1 are saved in r11 and r12, 787 * r9 - r13 are saved in paca->exgen. 788 */ 789 .align 7 790 .globl data_access_common 791data_access_common: 792 RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */ 793 mfspr r10,SPRN_DAR 794 std r10,PACA_EXGEN+EX_DAR(r13) 795 mfspr r10,SPRN_DSISR 796 stw r10,PACA_EXGEN+EX_DSISR(r13) 797 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) 798 ld r3,PACA_EXGEN+EX_DAR(r13) 799 lwz r4,PACA_EXGEN+EX_DSISR(r13) 800 li r5,0x300 801 b .do_hash_page /* Try to handle as hpte fault */ 802 803 .align 7 804 .globl instruction_access_common 805instruction_access_common: 806 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) 807 ld r3,_NIP(r1) 808 andis. r4,r12,0x5820 809 li r5,0x400 810 b .do_hash_page /* Try to handle as hpte fault */ 811 812 .align 7 813 .globl hardware_interrupt_common 814 .globl hardware_interrupt_entry 815hardware_interrupt_common: 816 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) 817hardware_interrupt_entry: 818 DISABLE_INTS 819 addi r3,r1,STACK_FRAME_OVERHEAD 820 bl .do_IRQ 821 b .ret_from_except_lite 822 823 .align 7 824 .globl alignment_common 825alignment_common: 826 mfspr r10,SPRN_DAR 827 std r10,PACA_EXGEN+EX_DAR(r13) 828 mfspr r10,SPRN_DSISR 829 stw r10,PACA_EXGEN+EX_DSISR(r13) 830 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) 831 ld r3,PACA_EXGEN+EX_DAR(r13) 832 lwz r4,PACA_EXGEN+EX_DSISR(r13) 833 std r3,_DAR(r1) 834 std r4,_DSISR(r1) 835 bl .save_nvgprs 836 addi r3,r1,STACK_FRAME_OVERHEAD 837 ENABLE_INTS 838 bl .alignment_exception 839 b .ret_from_except 840 841 .align 7 842 .globl program_check_common 843program_check_common: 844 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) 845 bl .save_nvgprs 846 addi r3,r1,STACK_FRAME_OVERHEAD 847 ENABLE_INTS 848 bl .program_check_exception 849 b .ret_from_except 850 851 .align 7 852 .globl fp_unavailable_common 853fp_unavailable_common: 854 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) 855 bne .load_up_fpu /* if from user, just load it up */ 856 bl .save_nvgprs 857 addi r3,r1,STACK_FRAME_OVERHEAD 858 ENABLE_INTS 859 bl .kernel_fp_unavailable_exception 860 BUG_OPCODE 861 862 .align 7 863 .globl altivec_unavailable_common 864altivec_unavailable_common: 865 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) 866#ifdef CONFIG_ALTIVEC 867BEGIN_FTR_SECTION 868 bne .load_up_altivec /* if from user, just load it up */ 869END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 870#endif 871 bl .save_nvgprs 872 addi r3,r1,STACK_FRAME_OVERHEAD 873 ENABLE_INTS 874 bl .altivec_unavailable_exception 875 b .ret_from_except 876 877#ifdef CONFIG_ALTIVEC 878/* 879 * load_up_altivec(unused, unused, tsk) 880 * Disable VMX for the task which had it previously, 881 * and save its vector registers in its thread_struct. 882 * Enables the VMX for use in the kernel on return. 883 * On SMP we know the VMX is free, since we give it up every 884 * switch (ie, no lazy save of the vector registers). 885 * On entry: r13 == 'current' && last_task_used_altivec != 'current' 886 */ 887_STATIC(load_up_altivec) 888 mfmsr r5 /* grab the current MSR */ 889 oris r5,r5,MSR_VEC@h 890 mtmsrd r5 /* enable use of VMX now */ 891 isync 892 893/* 894 * For SMP, we don't do lazy VMX switching because it just gets too 895 * horrendously complex, especially when a task switches from one CPU 896 * to another. Instead we call giveup_altvec in switch_to. 897 * VRSAVE isn't dealt with here, that is done in the normal context 898 * switch code. Note that we could rely on vrsave value to eventually 899 * avoid saving all of the VREGs here... 900 */ 901#ifndef CONFIG_SMP 902 ld r3,last_task_used_altivec@got(r2) 903 ld r4,0(r3) 904 cmpdi 0,r4,0 905 beq 1f 906 /* Save VMX state to last_task_used_altivec's THREAD struct */ 907 addi r4,r4,THREAD 908 SAVE_32VRS(0,r5,r4) 909 mfvscr vr0 910 li r10,THREAD_VSCR 911 stvx vr0,r10,r4 912 /* Disable VMX for last_task_used_altivec */ 913 ld r5,PT_REGS(r4) 914 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 915 lis r6,MSR_VEC@h 916 andc r4,r4,r6 917 std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 9181: 919#endif /* CONFIG_SMP */ 920 /* Hack: if we get an altivec unavailable trap with VRSAVE 921 * set to all zeros, we assume this is a broken application 922 * that fails to set it properly, and thus we switch it to 923 * all 1's 924 */ 925 mfspr r4,SPRN_VRSAVE 926 cmpdi 0,r4,0 927 bne+ 1f 928 li r4,-1 929 mtspr SPRN_VRSAVE,r4 9301: 931 /* enable use of VMX after return */ 932 ld r4,PACACURRENT(r13) 933 addi r5,r4,THREAD /* Get THREAD */ 934 oris r12,r12,MSR_VEC@h 935 std r12,_MSR(r1) 936 li r4,1 937 li r10,THREAD_VSCR 938 stw r4,THREAD_USED_VR(r5) 939 lvx vr0,r10,r5 940 mtvscr vr0 941 REST_32VRS(0,r4,r5) 942#ifndef CONFIG_SMP 943 /* Update last_task_used_math to 'current' */ 944 subi r4,r5,THREAD /* Back to 'current' */ 945 std r4,0(r3) 946#endif /* CONFIG_SMP */ 947 /* restore registers and return */ 948 b fast_exception_return 949#endif /* CONFIG_ALTIVEC */ 950 951/* 952 * Hash table stuff 953 */ 954 .align 7 955_GLOBAL(do_hash_page) 956 std r3,_DAR(r1) 957 std r4,_DSISR(r1) 958 959 andis. r0,r4,0xa450 /* weird error? */ 960 bne- .handle_page_fault /* if not, try to insert a HPTE */ 961BEGIN_FTR_SECTION 962 andis. r0,r4,0x0020 /* Is it a segment table fault? */ 963 bne- .do_ste_alloc /* If so handle it */ 964END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 965 966 /* 967 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are 968 * accessing a userspace segment (even from the kernel). We assume 969 * kernel addresses always have the high bit set. 970 */ 971 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ 972 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ 973 orc r0,r12,r0 /* MSR_PR | ~high_bit */ 974 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ 975 ori r4,r4,1 /* add _PAGE_PRESENT */ 976 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ 977 978 /* 979 * On iSeries, we soft-disable interrupts here, then 980 * hard-enable interrupts so that the hash_page code can spin on 981 * the hash_table_lock without problems on a shared processor. 982 */ 983 DISABLE_INTS 984 985 /* 986 * r3 contains the faulting address 987 * r4 contains the required access permissions 988 * r5 contains the trap number 989 * 990 * at return r3 = 0 for success 991 */ 992 bl .hash_page /* build HPTE if possible */ 993 cmpdi r3,0 /* see if hash_page succeeded */ 994 995#ifdef DO_SOFT_DISABLE 996 /* 997 * If we had interrupts soft-enabled at the point where the 998 * DSI/ISI occurred, and an interrupt came in during hash_page, 999 * handle it now. 1000 * We jump to ret_from_except_lite rather than fast_exception_return 1001 * because ret_from_except_lite will check for and handle pending 1002 * interrupts if necessary. 1003 */ 1004 beq .ret_from_except_lite 1005 /* For a hash failure, we don't bother re-enabling interrupts */ 1006 ble- 12f 1007 1008 /* 1009 * hash_page couldn't handle it, set soft interrupt enable back 1010 * to what it was before the trap. Note that .local_irq_restore 1011 * handles any interrupts pending at this point. 1012 */ 1013 ld r3,SOFTE(r1) 1014 bl .local_irq_restore 1015 b 11f 1016#else 1017 beq fast_exception_return /* Return from exception on success */ 1018 ble- 12f /* Failure return from hash_page */ 1019 1020 /* fall through */ 1021#endif 1022 1023/* Here we have a page fault that hash_page can't handle. */ 1024_GLOBAL(handle_page_fault) 1025 ENABLE_INTS 102611: ld r4,_DAR(r1) 1027 ld r5,_DSISR(r1) 1028 addi r3,r1,STACK_FRAME_OVERHEAD 1029 bl .do_page_fault 1030 cmpdi r3,0 1031 beq+ .ret_from_except_lite 1032 bl .save_nvgprs 1033 mr r5,r3 1034 addi r3,r1,STACK_FRAME_OVERHEAD 1035 lwz r4,_DAR(r1) 1036 bl .bad_page_fault 1037 b .ret_from_except 1038 1039/* We have a page fault that hash_page could handle but HV refused 1040 * the PTE insertion 1041 */ 104212: bl .save_nvgprs 1043 addi r3,r1,STACK_FRAME_OVERHEAD 1044 lwz r4,_DAR(r1) 1045 bl .low_hash_fault 1046 b .ret_from_except 1047 1048 /* here we have a segment miss */ 1049_GLOBAL(do_ste_alloc) 1050 bl .ste_allocate /* try to insert stab entry */ 1051 cmpdi r3,0 1052 beq+ fast_exception_return 1053 b .handle_page_fault 1054 1055/* 1056 * r13 points to the PACA, r9 contains the saved CR, 1057 * r11 and r12 contain the saved SRR0 and SRR1. 1058 * r9 - r13 are saved in paca->exslb. 1059 * We assume we aren't going to take any exceptions during this procedure. 1060 * We assume (DAR >> 60) == 0xc. 1061 */ 1062 .align 7 1063_GLOBAL(do_stab_bolted) 1064 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1065 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ 1066 1067 /* Hash to the primary group */ 1068 ld r10,PACASTABVIRT(r13) 1069 mfspr r11,SPRN_DAR 1070 srdi r11,r11,28 1071 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1072 1073 /* Calculate VSID */ 1074 /* This is a kernel address, so protovsid = ESID */ 1075 ASM_VSID_SCRAMBLE(r11, r9) 1076 rldic r9,r11,12,16 /* r9 = vsid << 12 */ 1077 1078 /* Search the primary group for a free entry */ 10791: ld r11,0(r10) /* Test valid bit of the current ste */ 1080 andi. r11,r11,0x80 1081 beq 2f 1082 addi r10,r10,16 1083 andi. r11,r10,0x70 1084 bne 1b 1085 1086 /* Stick for only searching the primary group for now. */ 1087 /* At least for now, we use a very simple random castout scheme */ 1088 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */ 1089 mftb r11 1090 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ 1091 ori r11,r11,0x10 1092 1093 /* r10 currently points to an ste one past the group of interest */ 1094 /* make it point to the randomly selected entry */ 1095 subi r10,r10,128 1096 or r10,r10,r11 /* r10 is the entry to invalidate */ 1097 1098 isync /* mark the entry invalid */ 1099 ld r11,0(r10) 1100 rldicl r11,r11,56,1 /* clear the valid bit */ 1101 rotldi r11,r11,8 1102 std r11,0(r10) 1103 sync 1104 1105 clrrdi r11,r11,28 /* Get the esid part of the ste */ 1106 slbie r11 1107 11082: std r9,8(r10) /* Store the vsid part of the ste */ 1109 eieio 1110 1111 mfspr r11,SPRN_DAR /* Get the new esid */ 1112 clrrdi r11,r11,28 /* Permits a full 32b of ESID */ 1113 ori r11,r11,0x90 /* Turn on valid and kp */ 1114 std r11,0(r10) /* Put new entry back into the stab */ 1115 1116 sync 1117 1118 /* All done -- return from exception. */ 1119 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 1120 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ 1121 1122 andi. r10,r12,MSR_RI 1123 beq- unrecov_slb 1124 1125 mtcrf 0x80,r9 /* restore CR */ 1126 1127 mfmsr r10 1128 clrrdi r10,r10,2 1129 mtmsrd r10,1 1130 1131 mtspr SPRN_SRR0,r11 1132 mtspr SPRN_SRR1,r12 1133 ld r9,PACA_EXSLB+EX_R9(r13) 1134 ld r10,PACA_EXSLB+EX_R10(r13) 1135 ld r11,PACA_EXSLB+EX_R11(r13) 1136 ld r12,PACA_EXSLB+EX_R12(r13) 1137 ld r13,PACA_EXSLB+EX_R13(r13) 1138 rfid 1139 b . /* prevent speculative execution */ 1140 1141/* 1142 * r13 points to the PACA, r9 contains the saved CR, 1143 * r11 and r12 contain the saved SRR0 and SRR1. 1144 * r3 has the faulting address 1145 * r9 - r13 are saved in paca->exslb. 1146 * r3 is saved in paca->slb_r3 1147 * We assume we aren't going to take any exceptions during this procedure. 1148 */ 1149_GLOBAL(do_slb_miss) 1150 mflr r10 1151 1152 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1153 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 1154 1155 bl .slb_allocate /* handle it */ 1156 1157 /* All done -- return from exception. */ 1158 1159 ld r10,PACA_EXSLB+EX_LR(r13) 1160 ld r3,PACA_EXSLB+EX_R3(r13) 1161 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 1162#ifdef CONFIG_PPC_ISERIES 1163 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */ 1164#endif /* CONFIG_PPC_ISERIES */ 1165 1166 mtlr r10 1167 1168 andi. r10,r12,MSR_RI /* check for unrecoverable exception */ 1169 beq- unrecov_slb 1170 1171.machine push 1172.machine "power4" 1173 mtcrf 0x80,r9 1174 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ 1175.machine pop 1176 1177#ifdef CONFIG_PPC_ISERIES 1178 mtspr SPRN_SRR0,r11 1179 mtspr SPRN_SRR1,r12 1180#endif /* CONFIG_PPC_ISERIES */ 1181 ld r9,PACA_EXSLB+EX_R9(r13) 1182 ld r10,PACA_EXSLB+EX_R10(r13) 1183 ld r11,PACA_EXSLB+EX_R11(r13) 1184 ld r12,PACA_EXSLB+EX_R12(r13) 1185 ld r13,PACA_EXSLB+EX_R13(r13) 1186 rfid 1187 b . /* prevent speculative execution */ 1188 1189unrecov_slb: 1190 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) 1191 DISABLE_INTS 1192 bl .save_nvgprs 11931: addi r3,r1,STACK_FRAME_OVERHEAD 1194 bl .unrecoverable_exception 1195 b 1b 1196 1197/* 1198 * Space for CPU0's segment table. 1199 * 1200 * On iSeries, the hypervisor must fill in at least one entry before 1201 * we get control (with relocate on). The address is give to the hv 1202 * as a page number (see xLparMap in lpardata.c), so this must be at a 1203 * fixed address (the linker can't compute (u64)&initial_stab >> 1204 * PAGE_SHIFT). 1205 */ 1206 . = STAB0_PHYS_ADDR /* 0x6000 */ 1207 .globl initial_stab 1208initial_stab: 1209 .space 4096 1210 1211/* 1212 * Data area reserved for FWNMI option. 1213 * This address (0x7000) is fixed by the RPA. 1214 */ 1215 .= 0x7000 1216 .globl fwnmi_data_area 1217fwnmi_data_area: 1218 1219 /* iSeries does not use the FWNMI stuff, so it is safe to put 1220 * this here, even if we later allow kernels that will boot on 1221 * both pSeries and iSeries */ 1222#ifdef CONFIG_PPC_ISERIES 1223 . = LPARMAP_PHYS 1224#include "lparmap.s" 1225/* 1226 * This ".text" is here for old compilers that generate a trailing 1227 * .note section when compiling .c files to .s 1228 */ 1229 .text 1230#endif /* CONFIG_PPC_ISERIES */ 1231 1232 . = 0x8000 1233 1234/* 1235 * On pSeries, secondary processors spin in the following code. 1236 * At entry, r3 = this processor's number (physical cpu id) 1237 */ 1238_GLOBAL(pSeries_secondary_smp_init) 1239 mr r24,r3 1240 1241 /* turn on 64-bit mode */ 1242 bl .enable_64b_mode 1243 isync 1244 1245 /* Copy some CPU settings from CPU 0 */ 1246 bl .__restore_cpu_setup 1247 1248 /* Set up a paca value for this processor. Since we have the 1249 * physical cpu id in r24, we need to search the pacas to find 1250 * which logical id maps to our physical one. 1251 */ 1252 LOADADDR(r13, paca) /* Get base vaddr of paca array */ 1253 li r5,0 /* logical cpu id */ 12541: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ 1255 cmpw r6,r24 /* Compare to our id */ 1256 beq 2f 1257 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ 1258 addi r5,r5,1 1259 cmpwi r5,NR_CPUS 1260 blt 1b 1261 1262 mr r3,r24 /* not found, copy phys to r3 */ 1263 b .kexec_wait /* next kernel might do better */ 1264 12652: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1266 /* From now on, r24 is expected to be logical cpuid */ 1267 mr r24,r5 12683: HMT_LOW 1269 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ 1270 /* start. */ 1271 sync 1272 1273 /* Create a temp kernel stack for use before relocation is on. */ 1274 ld r1,PACAEMERGSP(r13) 1275 subi r1,r1,STACK_FRAME_OVERHEAD 1276 1277 cmpwi 0,r23,0 1278#ifdef CONFIG_SMP 1279 bne .__secondary_start 1280#endif 1281 b 3b /* Loop until told to go */ 1282 1283#ifdef CONFIG_PPC_ISERIES 1284_STATIC(__start_initialization_iSeries) 1285 /* Clear out the BSS */ 1286 LOADADDR(r11,__bss_stop) 1287 LOADADDR(r8,__bss_start) 1288 sub r11,r11,r8 /* bss size */ 1289 addi r11,r11,7 /* round up to an even double word */ 1290 rldicl. r11,r11,61,3 /* shift right by 3 */ 1291 beq 4f 1292 addi r8,r8,-8 1293 li r0,0 1294 mtctr r11 /* zero this many doublewords */ 12953: stdu r0,8(r8) 1296 bdnz 3b 12974: 1298 LOADADDR(r1,init_thread_union) 1299 addi r1,r1,THREAD_SIZE 1300 li r0,0 1301 stdu r0,-STACK_FRAME_OVERHEAD(r1) 1302 1303 LOADADDR(r3,cpu_specs) 1304 LOADADDR(r4,cur_cpu_spec) 1305 li r5,0 1306 bl .identify_cpu 1307 1308 LOADADDR(r2,__toc_start) 1309 addi r2,r2,0x4000 1310 addi r2,r2,0x4000 1311 1312 bl .iSeries_early_setup 1313 bl .early_setup 1314 1315 /* relocation is on at this point */ 1316 1317 b .start_here_common 1318#endif /* CONFIG_PPC_ISERIES */ 1319 1320#ifdef CONFIG_PPC_MULTIPLATFORM 1321 1322_STATIC(__mmu_off) 1323 mfmsr r3 1324 andi. r0,r3,MSR_IR|MSR_DR 1325 beqlr 1326 andc r3,r3,r0 1327 mtspr SPRN_SRR0,r4 1328 mtspr SPRN_SRR1,r3 1329 sync 1330 rfid 1331 b . /* prevent speculative execution */ 1332 1333 1334/* 1335 * Here is our main kernel entry point. We support currently 2 kind of entries 1336 * depending on the value of r5. 1337 * 1338 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content 1339 * in r3...r7 1340 * 1341 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the 1342 * DT block, r4 is a physical pointer to the kernel itself 1343 * 1344 */ 1345_GLOBAL(__start_initialization_multiplatform) 1346 /* 1347 * Are we booted from a PROM Of-type client-interface ? 1348 */ 1349 cmpldi cr0,r5,0 1350 bne .__boot_from_prom /* yes -> prom */ 1351 1352 /* Save parameters */ 1353 mr r31,r3 1354 mr r30,r4 1355 1356 /* Make sure we are running in 64 bits mode */ 1357 bl .enable_64b_mode 1358 1359 /* Setup some critical 970 SPRs before switching MMU off */ 1360 bl .__970_cpu_preinit 1361 1362 /* cpu # */ 1363 li r24,0 1364 1365 /* Switch off MMU if not already */ 1366 LOADADDR(r4, .__after_prom_start - KERNELBASE) 1367 add r4,r4,r30 1368 bl .__mmu_off 1369 b .__after_prom_start 1370 1371_STATIC(__boot_from_prom) 1372 /* Save parameters */ 1373 mr r31,r3 1374 mr r30,r4 1375 mr r29,r5 1376 mr r28,r6 1377 mr r27,r7 1378 1379 /* Make sure we are running in 64 bits mode */ 1380 bl .enable_64b_mode 1381 1382 /* put a relocation offset into r3 */ 1383 bl .reloc_offset 1384 1385 LOADADDR(r2,__toc_start) 1386 addi r2,r2,0x4000 1387 addi r2,r2,0x4000 1388 1389 /* Relocate the TOC from a virt addr to a real addr */ 1390 add r2,r2,r3 1391 1392 /* Restore parameters */ 1393 mr r3,r31 1394 mr r4,r30 1395 mr r5,r29 1396 mr r6,r28 1397 mr r7,r27 1398 1399 /* Do all of the interaction with OF client interface */ 1400 bl .prom_init 1401 /* We never return */ 1402 trap 1403 1404/* 1405 * At this point, r3 contains the physical address we are running at, 1406 * returned by prom_init() 1407 */ 1408_STATIC(__after_prom_start) 1409 1410/* 1411 * We need to run with __start at physical address 0. 1412 * This will leave some code in the first 256B of 1413 * real memory, which are reserved for software use. 1414 * The remainder of the first page is loaded with the fixed 1415 * interrupt vectors. The next two pages are filled with 1416 * unknown exception placeholders. 1417 * 1418 * Note: This process overwrites the OF exception vectors. 1419 * r26 == relocation offset 1420 * r27 == KERNELBASE 1421 */ 1422 bl .reloc_offset 1423 mr r26,r3 1424 SET_REG_TO_CONST(r27,KERNELBASE) 1425 1426 li r3,0 /* target addr */ 1427 1428 // XXX FIXME: Use phys returned by OF (r30) 1429 add r4,r27,r26 /* source addr */ 1430 /* current address of _start */ 1431 /* i.e. where we are running */ 1432 /* the source addr */ 1433 1434 LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */ 1435 sub r5,r5,r27 1436 1437 li r6,0x100 /* Start offset, the first 0x100 */ 1438 /* bytes were copied earlier. */ 1439 1440 bl .copy_and_flush /* copy the first n bytes */ 1441 /* this includes the code being */ 1442 /* executed here. */ 1443 1444 LOADADDR(r0, 4f) /* Jump to the copy of this code */ 1445 mtctr r0 /* that we just made/relocated */ 1446 bctr 1447 14484: LOADADDR(r5,klimit) 1449 add r5,r5,r26 1450 ld r5,0(r5) /* get the value of klimit */ 1451 sub r5,r5,r27 1452 bl .copy_and_flush /* copy the rest */ 1453 b .start_here_multiplatform 1454 1455#endif /* CONFIG_PPC_MULTIPLATFORM */ 1456 1457/* 1458 * Copy routine used to copy the kernel to start at physical address 0 1459 * and flush and invalidate the caches as needed. 1460 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset 1461 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. 1462 * 1463 * Note: this routine *only* clobbers r0, r6 and lr 1464 */ 1465_GLOBAL(copy_and_flush) 1466 addi r5,r5,-8 1467 addi r6,r6,-8 14684: li r0,16 /* Use the least common */ 1469 /* denominator cache line */ 1470 /* size. This results in */ 1471 /* extra cache line flushes */ 1472 /* but operation is correct. */ 1473 /* Can't get cache line size */ 1474 /* from NACA as it is being */ 1475 /* moved too. */ 1476 1477 mtctr r0 /* put # words/line in ctr */ 14783: addi r6,r6,8 /* copy a cache line */ 1479 ldx r0,r6,r4 1480 stdx r0,r6,r3 1481 bdnz 3b 1482 dcbst r6,r3 /* write it to memory */ 1483 sync 1484 icbi r6,r3 /* flush the icache line */ 1485 cmpld 0,r6,r5 1486 blt 4b 1487 sync 1488 addi r5,r5,8 1489 addi r6,r6,8 1490 blr 1491 1492.align 8 1493copy_to_here: 1494 1495#ifdef CONFIG_SMP 1496#ifdef CONFIG_PPC_PMAC 1497/* 1498 * On PowerMac, secondary processors starts from the reset vector, which 1499 * is temporarily turned into a call to one of the functions below. 1500 */ 1501 .section ".text"; 1502 .align 2 ; 1503 1504 .globl __secondary_start_pmac_0 1505__secondary_start_pmac_0: 1506 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ 1507 li r24,0 1508 b 1f 1509 li r24,1 1510 b 1f 1511 li r24,2 1512 b 1f 1513 li r24,3 15141: 1515 1516_GLOBAL(pmac_secondary_start) 1517 /* turn on 64-bit mode */ 1518 bl .enable_64b_mode 1519 isync 1520 1521 /* Copy some CPU settings from CPU 0 */ 1522 bl .__restore_cpu_setup 1523 1524 /* pSeries do that early though I don't think we really need it */ 1525 mfmsr r3 1526 ori r3,r3,MSR_RI 1527 mtmsrd r3 /* RI on */ 1528 1529 /* Set up a paca value for this processor. */ 1530 LOADADDR(r4, paca) /* Get base vaddr of paca array */ 1531 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ 1532 add r13,r13,r4 /* for this processor. */ 1533 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1534 1535 /* Create a temp kernel stack for use before relocation is on. */ 1536 ld r1,PACAEMERGSP(r13) 1537 subi r1,r1,STACK_FRAME_OVERHEAD 1538 1539 b .__secondary_start 1540 1541#endif /* CONFIG_PPC_PMAC */ 1542 1543/* 1544 * This function is called after the master CPU has released the 1545 * secondary processors. The execution environment is relocation off. 1546 * The paca for this processor has the following fields initialized at 1547 * this point: 1548 * 1. Processor number 1549 * 2. Segment table pointer (virtual address) 1550 * On entry the following are set: 1551 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries 1552 * r24 = cpu# (in Linux terms) 1553 * r13 = paca virtual address 1554 * SPRG3 = paca virtual address 1555 */ 1556_GLOBAL(__secondary_start) 1557 1558 HMT_MEDIUM /* Set thread priority to MEDIUM */ 1559 1560 ld r2,PACATOC(r13) 1561 li r6,0 1562 stb r6,PACAPROCENABLED(r13) 1563 1564#ifndef CONFIG_PPC_ISERIES 1565 /* Initialize the page table pointer register. */ 1566 LOADADDR(r6,_SDR1) 1567 ld r6,0(r6) /* get the value of _SDR1 */ 1568 mtspr SPRN_SDR1,r6 /* set the htab location */ 1569#endif 1570 /* Initialize the first segment table (or SLB) entry */ 1571 ld r3,PACASTABVIRT(r13) /* get addr of segment table */ 1572 bl .stab_initialize 1573 1574 /* Initialize the kernel stack. Just a repeat for iSeries. */ 1575 LOADADDR(r3,current_set) 1576 sldi r28,r24,3 /* get current_set[cpu#] */ 1577 ldx r1,r3,r28 1578 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD 1579 std r1,PACAKSAVE(r13) 1580 1581 ld r3,PACASTABREAL(r13) /* get raddr of segment table */ 1582 ori r4,r3,1 /* turn on valid bit */ 1583 1584#ifdef CONFIG_PPC_ISERIES 1585 li r0,-1 /* hypervisor call */ 1586 li r3,1 1587 sldi r3,r3,63 /* 0x8000000000000000 */ 1588 ori r3,r3,4 /* 0x8000000000000004 */ 1589 sc /* HvCall_setASR */ 1590#else 1591 /* set the ASR */ 1592 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ 1593 ld r3,0(r3) 1594 lwz r3,PLATFORM(r3) /* r3 = platform flags */ 1595 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ 1596 beq 98f /* branch if result is 0 */ 1597 mfspr r3,SPRN_PVR 1598 srwi r3,r3,16 1599 cmpwi r3,0x37 /* SStar */ 1600 beq 97f 1601 cmpwi r3,0x36 /* IStar */ 1602 beq 97f 1603 cmpwi r3,0x34 /* Pulsar */ 1604 bne 98f 160597: li r3,H_SET_ASR /* hcall = H_SET_ASR */ 1606 HVSC /* Invoking hcall */ 1607 b 99f 160898: /* !(rpa hypervisor) || !(star) */ 1609 mtasr r4 /* set the stab location */ 161099: 1611#endif 1612 li r7,0 1613 mtlr r7 1614 1615 /* enable MMU and jump to start_secondary */ 1616 LOADADDR(r3,.start_secondary_prolog) 1617 SET_REG_TO_CONST(r4, MSR_KERNEL) 1618#ifdef DO_SOFT_DISABLE 1619 ori r4,r4,MSR_EE 1620#endif 1621 mtspr SPRN_SRR0,r3 1622 mtspr SPRN_SRR1,r4 1623 rfid 1624 b . /* prevent speculative execution */ 1625 1626/* 1627 * Running with relocation on at this point. All we want to do is 1628 * zero the stack back-chain pointer before going into C code. 1629 */ 1630_GLOBAL(start_secondary_prolog) 1631 li r3,0 1632 std r3,0(r1) /* Zero the stack frame pointer */ 1633 bl .start_secondary 1634#endif 1635 1636/* 1637 * This subroutine clobbers r11 and r12 1638 */ 1639_GLOBAL(enable_64b_mode) 1640 mfmsr r11 /* grab the current MSR */ 1641 li r12,1 1642 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) 1643 or r11,r11,r12 1644 li r12,1 1645 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) 1646 or r11,r11,r12 1647 mtmsrd r11 1648 isync 1649 blr 1650 1651#ifdef CONFIG_PPC_MULTIPLATFORM 1652/* 1653 * This is where the main kernel code starts. 1654 */ 1655_STATIC(start_here_multiplatform) 1656 /* get a new offset, now that the kernel has moved. */ 1657 bl .reloc_offset 1658 mr r26,r3 1659 1660 /* Clear out the BSS. It may have been done in prom_init, 1661 * already but that's irrelevant since prom_init will soon 1662 * be detached from the kernel completely. Besides, we need 1663 * to clear it now for kexec-style entry. 1664 */ 1665 LOADADDR(r11,__bss_stop) 1666 LOADADDR(r8,__bss_start) 1667 sub r11,r11,r8 /* bss size */ 1668 addi r11,r11,7 /* round up to an even double word */ 1669 rldicl. r11,r11,61,3 /* shift right by 3 */ 1670 beq 4f 1671 addi r8,r8,-8 1672 li r0,0 1673 mtctr r11 /* zero this many doublewords */ 16743: stdu r0,8(r8) 1675 bdnz 3b 16764: 1677 1678 mfmsr r6 1679 ori r6,r6,MSR_RI 1680 mtmsrd r6 /* RI on */ 1681 1682#ifdef CONFIG_HMT 1683 /* Start up the second thread on cpu 0 */ 1684 mfspr r3,SPRN_PVR 1685 srwi r3,r3,16 1686 cmpwi r3,0x34 /* Pulsar */ 1687 beq 90f 1688 cmpwi r3,0x36 /* Icestar */ 1689 beq 90f 1690 cmpwi r3,0x37 /* SStar */ 1691 beq 90f 1692 b 91f /* HMT not supported */ 169390: li r3,0 1694 bl .hmt_start_secondary 169591: 1696#endif 1697 1698 /* The following gets the stack and TOC set up with the regs */ 1699 /* pointing to the real addr of the kernel stack. This is */ 1700 /* all done to support the C function call below which sets */ 1701 /* up the htab. This is done because we have relocated the */ 1702 /* kernel but are still running in real mode. */ 1703 1704 LOADADDR(r3,init_thread_union) 1705 add r3,r3,r26 1706 1707 /* set up a stack pointer (physical address) */ 1708 addi r1,r3,THREAD_SIZE 1709 li r0,0 1710 stdu r0,-STACK_FRAME_OVERHEAD(r1) 1711 1712 /* set up the TOC (physical address) */ 1713 LOADADDR(r2,__toc_start) 1714 addi r2,r2,0x4000 1715 addi r2,r2,0x4000 1716 add r2,r2,r26 1717 1718 LOADADDR(r3,cpu_specs) 1719 add r3,r3,r26 1720 LOADADDR(r4,cur_cpu_spec) 1721 add r4,r4,r26 1722 mr r5,r26 1723 bl .identify_cpu 1724 1725 /* Save some low level config HIDs of CPU0 to be copied to 1726 * other CPUs later on, or used for suspend/resume 1727 */ 1728 bl .__save_cpu_setup 1729 sync 1730 1731 /* Setup a valid physical PACA pointer in SPRG3 for early_setup 1732 * note that boot_cpuid can always be 0 nowadays since there is 1733 * nowhere it can be initialized differently before we reach this 1734 * code 1735 */ 1736 LOADADDR(r27, boot_cpuid) 1737 add r27,r27,r26 1738 lwz r27,0(r27) 1739 1740 LOADADDR(r24, paca) /* Get base vaddr of paca array */ 1741 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ 1742 add r13,r13,r24 /* for this processor. */ 1743 add r13,r13,r26 /* convert to physical addr */ 1744 mtspr SPRN_SPRG3,r13 /* PPPBBB: Temp... -Peter */ 1745 1746 /* Do very early kernel initializations, including initial hash table, 1747 * stab and slb setup before we turn on relocation. */ 1748 1749 /* Restore parameters passed from prom_init/kexec */ 1750 mr r3,r31 1751 bl .early_setup 1752 1753 /* set the ASR */ 1754 ld r3,PACASTABREAL(r13) 1755 ori r4,r3,1 /* turn on valid bit */ 1756 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ 1757 ld r3,0(r3) 1758 lwz r3,PLATFORM(r3) /* r3 = platform flags */ 1759 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ 1760 beq 98f /* branch if result is 0 */ 1761 mfspr r3,SPRN_PVR 1762 srwi r3,r3,16 1763 cmpwi r3,0x37 /* SStar */ 1764 beq 97f 1765 cmpwi r3,0x36 /* IStar */ 1766 beq 97f 1767 cmpwi r3,0x34 /* Pulsar */ 1768 bne 98f 176997: li r3,H_SET_ASR /* hcall = H_SET_ASR */ 1770 HVSC /* Invoking hcall */ 1771 b 99f 177298: /* !(rpa hypervisor) || !(star) */ 1773 mtasr r4 /* set the stab location */ 177499: 1775 /* Set SDR1 (hash table pointer) */ 1776 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ 1777 ld r3,0(r3) 1778 lwz r3,PLATFORM(r3) /* r3 = platform flags */ 1779 /* Test if bit 0 is set (LPAR bit) */ 1780 andi. r3,r3,PLATFORM_LPAR 1781 bne 98f /* branch if result is !0 */ 1782 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */ 1783 add r6,r6,r26 1784 ld r6,0(r6) /* get the value of _SDR1 */ 1785 mtspr SPRN_SDR1,r6 /* set the htab location */ 178698: 1787 LOADADDR(r3,.start_here_common) 1788 SET_REG_TO_CONST(r4, MSR_KERNEL) 1789 mtspr SPRN_SRR0,r3 1790 mtspr SPRN_SRR1,r4 1791 rfid 1792 b . /* prevent speculative execution */ 1793#endif /* CONFIG_PPC_MULTIPLATFORM */ 1794 1795 /* This is where all platforms converge execution */ 1796_STATIC(start_here_common) 1797 /* relocation is on at this point */ 1798 1799 /* The following code sets up the SP and TOC now that we are */ 1800 /* running with translation enabled. */ 1801 1802 LOADADDR(r3,init_thread_union) 1803 1804 /* set up the stack */ 1805 addi r1,r3,THREAD_SIZE 1806 li r0,0 1807 stdu r0,-STACK_FRAME_OVERHEAD(r1) 1808 1809 /* Apply the CPUs-specific fixups (nop out sections not relevant 1810 * to this CPU 1811 */ 1812 li r3,0 1813 bl .do_cpu_ftr_fixups 1814 1815 LOADADDR(r26, boot_cpuid) 1816 lwz r26,0(r26) 1817 1818 LOADADDR(r24, paca) /* Get base vaddr of paca array */ 1819 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */ 1820 add r13,r13,r24 /* for this processor. */ 1821 mtspr SPRN_SPRG3,r13 1822 1823 /* ptr to current */ 1824 LOADADDR(r4,init_task) 1825 std r4,PACACURRENT(r13) 1826 1827 /* Load the TOC */ 1828 ld r2,PACATOC(r13) 1829 std r1,PACAKSAVE(r13) 1830 1831 bl .setup_system 1832 1833 /* Load up the kernel context */ 18345: 1835#ifdef DO_SOFT_DISABLE 1836 li r5,0 1837 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */ 1838 mfmsr r5 1839 ori r5,r5,MSR_EE /* Hard Enabled */ 1840 mtmsrd r5 1841#endif 1842 1843 bl .start_kernel 1844 1845_GLOBAL(hmt_init) 1846#ifdef CONFIG_HMT 1847 LOADADDR(r5, hmt_thread_data) 1848 mfspr r7,SPRN_PVR 1849 srwi r7,r7,16 1850 cmpwi r7,0x34 /* Pulsar */ 1851 beq 90f 1852 cmpwi r7,0x36 /* Icestar */ 1853 beq 91f 1854 cmpwi r7,0x37 /* SStar */ 1855 beq 91f 1856 b 101f 185790: mfspr r6,SPRN_PIR 1858 andi. r6,r6,0x1f 1859 b 92f 186091: mfspr r6,SPRN_PIR 1861 andi. r6,r6,0x3ff 186292: sldi r4,r24,3 1863 stwx r6,r5,r4 1864 bl .hmt_start_secondary 1865 b 101f 1866 1867__hmt_secondary_hold: 1868 LOADADDR(r5, hmt_thread_data) 1869 clrldi r5,r5,4 1870 li r7,0 1871 mfspr r6,SPRN_PIR 1872 mfspr r8,SPRN_PVR 1873 srwi r8,r8,16 1874 cmpwi r8,0x34 1875 bne 93f 1876 andi. r6,r6,0x1f 1877 b 103f 187893: andi. r6,r6,0x3f 1879 1880103: lwzx r8,r5,r7 1881 cmpw r8,r6 1882 beq 104f 1883 addi r7,r7,8 1884 b 103b 1885 1886104: addi r7,r7,4 1887 lwzx r9,r5,r7 1888 mr r24,r9 1889101: 1890#endif 1891 mr r3,r24 1892 b .pSeries_secondary_smp_init 1893 1894#ifdef CONFIG_HMT 1895_GLOBAL(hmt_start_secondary) 1896 LOADADDR(r4,__hmt_secondary_hold) 1897 clrldi r4,r4,4 1898 mtspr SPRN_NIADORM, r4 1899 mfspr r4, SPRN_MSRDORM 1900 li r5, -65 1901 and r4, r4, r5 1902 mtspr SPRN_MSRDORM, r4 1903 lis r4,0xffef 1904 ori r4,r4,0x7403 1905 mtspr SPRN_TSC, r4 1906 li r4,0x1f4 1907 mtspr SPRN_TST, r4 1908 mfspr r4, SPRN_HID0 1909 ori r4, r4, 0x1 1910 mtspr SPRN_HID0, r4 1911 mfspr r4, SPRN_CTRLF 1912 oris r4, r4, 0x40 1913 mtspr SPRN_CTRLT, r4 1914 blr 1915#endif 1916 1917#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP) 1918_GLOBAL(smp_release_cpus) 1919 /* All secondary cpus are spinning on a common 1920 * spinloop, release them all now so they can start 1921 * to spin on their individual paca spinloops. 1922 * For non SMP kernels, the secondary cpus never 1923 * get out of the common spinloop. 1924 * XXX This does nothing useful on iSeries, secondaries are 1925 * already waiting on their paca. 1926 */ 1927 li r3,1 1928 LOADADDR(r5,__secondary_hold_spinloop) 1929 std r3,0(r5) 1930 sync 1931 blr 1932#endif /* CONFIG_SMP */ 1933 1934 1935/* 1936 * We put a few things here that have to be page-aligned. 1937 * This stuff goes at the beginning of the bss, which is page-aligned. 1938 */ 1939 .section ".bss" 1940 1941 .align PAGE_SHIFT 1942 1943 .globl empty_zero_page 1944empty_zero_page: 1945 .space PAGE_SIZE 1946 1947 .globl swapper_pg_dir 1948swapper_pg_dir: 1949 .space PAGE_SIZE 1950 1951/* 1952 * This space gets a copy of optional info passed to us by the bootstrap 1953 * Used to pass parameters into the kernel like root=/dev/sda1, etc. 1954 */ 1955 .globl cmd_line 1956cmd_line: 1957 .space COMMAND_LINE_SIZE 1958