/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iseries/lpar_map.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/exception.h>
#include <asm/irqflags.h>

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * SPRG Usage
 *
 * Register	Definition
 *
 * SPRG0	reserved for hypervisor
 * SPRG1	temp - used to save gpr
 * SPRG2	temp - used to save gpr
 * SPRG3	virt addr of paca
 */

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */

	.text
	.globl	_stext
_stext:
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)

	/* Catch branch to 0 in real mode */
	trap

	/* Secondary processors spin on this value until it becomes nonzero.
	 * When it does, it contains the real address of the descriptor
	 * of the function that the cpu should jump to in order to continue
	 * initialization.
	 */
	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	   */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

	/* This flag is set by purgatory if we should be a kdump kernel. */
	/* Do not move this variable as purgatory knows about it. */
	.globl	__kdump_flag
__kdump_flag:
	.llong	0x0

#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong	hvReleaseData-KERNELBASE
#endif /* CONFIG_PPC_ISERIES */

	. = 0x60
/*
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
 */
	.globl	__secondary_hold
__secondary_hold:
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our physical cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.	  */
	std	r24,__secondary_hold_acknowledge-_stext(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop-_stext(0)
	cmpdi	0,r4,0
	beq	100b

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
	ld	r4,0(r4)		/* deref function descriptor */
	mtctr	r4
	mr	r3,r24
	bctr
#else
	BUG_OPCODE
#endif

/* This value is used to mark exception frames on the stack. */
/* (0x7265677368657265 is "regshere" in ASCII.) */
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
	.text

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRN_SPRG2,r12
	mfspr	r13,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	mr	r9,r13
	mfspr	r13,SPRN_SPRG3
	mfspr	r11,SPRN_SRR0
	ld	r12,PACAKBASE(r13)
	ld	r10,PACAKMSR(r13)
	LOAD_HANDLER(r12, system_call_entry)
	mtspr	SPRN_SRR0,r12
	mfspr	r12,SPRN_SRR1
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	. = 0xf20
	b	altivec_unavailable_pSeries

	. = 0xf40
	b	vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
#endif /* CONFIG_CBE_RAS */

	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., performance_monitor)
	STD_EXCEPTION_PSERIES(., altivec_unavailable)
	STD_EXCEPTION_PSERIES(., vsx_unavailable)

/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
masked_interrupt:
	stb	r10,PACAHARDIRQEN(r13)
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_SRR1
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
	mtspr	SPRN_SRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	mfspr	r13,SPRN_SPRG1
	rfid
	b	.

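/*
 * Branched to from the 0x300 data access prolog above on CPUs without
 * an SLB, when DAR/DSISR indicate a segment table miss on a kernel
 * (0xC...) address.  That prolog saved CR in r12 and r12 in SPRG2;
 * restore them here and enter the common prolog with the EXSLB save area.
 */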
	.align	7
do_stab_bolted_pSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
	.align 7
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r10,SPRG1
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

	.align	7
	.globl	__end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	.align	7
system_call_entry:
	b	system_call_common

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

/*
 * Here is the common SLB miss handler used when going to virtual
 * mode for SLB misses; it is currently unused.
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return,
 * r3 has the faulting address,
 * r9 - r13 are saved in paca->exslb,
 * r3 is saved in paca->slb_r3.
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	ld	r11,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	b	unrecov_slb
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
	FINISH_NAP
hardware_interrupt_entry:
	DISABLE_INTS
BEGIN_FTR_SECTION
	bl	.ppc64_runlatch_on
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:	bl	.load_up_fpu
	b	fast_exception_return

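/*
 * AltiVec unavailable exception.  If it came from user mode and the
 * CPU has AltiVec, just load the vector state and take the fast path
 * back; otherwise fall through to the generic C handler.
 */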
	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	bne	.load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl	__end_handlers
__end_handlers:

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:			/* restores irq state too */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ(r3);
	ld	r12,_MSR(r1)
	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
	b	1f

	.globl	fast_exception_return
fast_exception_return:
	ld	r12,_MSR(r1)
1:	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	andi.	r3,r12,MSR_PR
	beq	2f
	ACCOUNT_CPU_USER_EXIT(r3, r4)
2:
#endif

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	rldicl	r10,r10,48,1		/* clear EE */
	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SPRN_SRR1,r12
	mtspr	SPRN_SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	blr
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 * On entry: r13 == 'current' && last_task_used_vsx != 'current'
 */
_STATIC(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

#ifndef CONFIG_SMP
	ld	r3,last_task_used_vsx@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Disable VSX for last_task_used_vsx */
	addi	r4,r4,THREAD
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VSX@h
	andc	r6,r4,r6
	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
#ifndef CONFIG_SMP
	/* Update last_task_used_vsx to 'current' */
	ld	r4,PACACURRENT(r13)
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	b	fast_exception_return
#endif /* CONFIG_VSX */

/*
 * Hash table stuff
 */
	.align	7
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa450		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
	 * and will clobber volatile registers when irq tracing is enabled,
	 * so we need to reload them. It may be possible to be smarter here
	 * and move the irq tracing elsewhere, but let's keep it simple for
	 * now.
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r3,_DAR(r1)
	ld	r4,_DSISR(r1)
	ld	r5,_TRAP(r1)
	ld	r12,_MSR(r1)
	clrrdi	r5,r5,4
#endif /* CONFIG_TRACE_IRQFLAGS */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

BEGIN_FW_FTR_SECTION
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	13f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)

BEGIN_FW_FTR_SECTION
	/*
	 * Here we have interrupts hard-disabled, so it is sufficient
	 * to restore paca->{soft,hard}_enable and get out.
	 */
	beq	fast_exc_return_irq	/* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .raw_local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
	bl	.raw_local_irq_restore
	b	11f

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	13f
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

13:	b	.ret_from_except_lite

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick to searching only the primary group for now.		*/
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ; OR in 1 to avoid entry 0	*/
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			*/
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate	*/

	isync			/* mark the entry invalid		*/
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
	eieio

	mfspr	r11,SPRN_DAR		/* Get the new esid		*/
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
	ori	r11,r11,0x90	/* Turn on valid and kp			*/
	std	r11,0(r10)	/* Put new entry back into the stab	*/

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap below), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_OFFSET	/* 0x6000 */
	.globl	initial_stab
initial_stab:
	.space	4096

#ifdef CONFIG_PPC_PSERIES
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:
#endif /* CONFIG_PPC_PSERIES */

	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	. = LPARMAP_PHYS
	.globl xLparMap
xLparMap:
	.quad	HvEsidsToMap		/* xNumberEsids */
	.quad	HvRangesToMap		/* xNumberRanges */
	.quad	STAB0_PAGE		/* xSegmentTableOffs */
	.zero	40			/* xRsvd */
	/* xEsids (HvEsidsToMap entries of 2 quads) */
	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
	.quad	VMALLOC_START_ESID	/* xKernelEsid */
	.quad	VMALLOC_START_VSID	/* xKernelVsid */
	/* xRanges (HvRangesToMap entries of 3 quads) */
	.quad	HvPagesToMap		/* xPages */
	.quad	0			/* xOffset */
	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */

#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_PSERIES
	. = 0x8000
#endif /* CONFIG_PPC_PSERIES */

/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(generic_secondary_smp_init)
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	.enable_64b_mode

	/* get the TOC pointer (real address) */
	bl	.relative_toc

	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOAD_REG_ADDR(r13, paca)	/* Get base vaddr of paca array	*/
	li	r5,0			/* logical cpu id		*/
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca	*/
	cmpw	r6,r24			/* Compare to our id		*/
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss	*/
	addi	r5,r5,1
	cmpwi	r5,NR_CPUS
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	*/
	b	.kexec_wait		/* next kernel might do better	*/

2:	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	*/
	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5
3:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */

#ifndef CONFIG_SMP
	b	3b			/* Never go on non-SMP		*/
#else
	cmpwi	0,r23,0
	beq	3b			/* Loop until told to go	*/

	sync				/* order paca.run and cur_cpu_spec */

	/* See if we need to call a cpu state restore handler */
	LOAD_REG_ADDR(r23, cur_cpu_spec)
	ld	r23,0(r23)
	ld	r23,CPU_SPEC_RESTORE(r23)
	cmpdi	0,r23,0
	beq	4f
	ld	r23,0(r23)
	mtctr	r23
	bctrl

4:	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start
#endif

/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 */
_STATIC(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr
	mflr	r4
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.	/* prevent speculative execution */


/*
 * Here is our main kernel entry point. We currently support two kinds of
 * entries depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
_GLOBAL(__start_initialization_multiplatform)
	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* Get TOC pointer (current runtime address) */
	bl	.relative_toc

	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */

	/*
	 * Are we booted from a PROM OF-type client interface?
	 */
	cmpldi	cr0,r5,0
	beq	1f
	b	.__boot_from_prom	/* yes -> prom */
1:
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4

	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16
	cmpwi	r0,0x39			/* 970 */
	beq	1f
	cmpwi	r0,0x3c			/* 970FX */
	beq	1f
	cmpwi	r0,0x44			/* 970MP */
	beq	1f
	cmpwi	r0,0x45			/* 970GX */
	bne	2f
1:	bl	.__cpu_preinit_ppc970
2:

	/* Switch off MMU if not already off */
	bl	.__mmu_off
	b	.__after_prom_start

_INIT_STATIC(__boot_from_prom)
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/*
	 * Align the stack to a 16-byte boundary.
	 * Depending on the size and layout of the ELF sections in the initial
	 * boot binary, the stack pointer may be unaligned on PowerMac.
	 */
	rldicr	r1,r1,0,59

#ifdef CONFIG_RELOCATABLE
	/* Relocate code for where we are now */
	mr	r3,r26
	bl	.relocate
#endif

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	mr	r8,r26
	bl	.prom_init
	/* We never return */
	trap

_STATIC(__after_prom_start)
#ifdef CONFIG_RELOCATABLE
	/* process relocations for the final address of the kernel */
	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
	sldi	r25,r25,32
#ifdef CONFIG_CRASH_DUMP
	ld	r7,__kdump_flag-_stext(r26)
	cmpldi	cr0,r7,1	/* kdump kernel ? - stay where we are */
	bne	1f
	add	r25,r25,r26
#endif
1:	mr	r3,r25
	bl	.relocate
#endif

/*
 * We need to run with _stext at physical address PHYSICAL_START.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 *
 * Note: This process overwrites the OF exception vectors.
 */
	li	r3,0			/* target addr */
	mr.	r4,r26			/* In some cases the loader may  */
	beq	9f			/* have already put us at zero   */
	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

#ifdef CONFIG_CRASH_DUMP
/*
 * Check whether the kernel has to run as a relocatable kernel, based on
 * the variable __kdump_flag: if it is set, the kernel is treated as
 * relocatable; otherwise it will be moved to PHYSICAL_START.
 */
	ld	r7,__kdump_flag-_stext(r26)
	cmpldi	cr0,r7,1
	bne	3f

	li	r5,__end_interrupts - _stext	/* just copy interrupts */
	b	5f
3:
#endif
	lis	r5,(copy_to_here - _stext)@ha
	addi	r5,r5,(copy_to_here - _stext)@l	/* # bytes of memory to copy */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */
	addis	r8,r3,(4f - _stext)@ha	/* Jump to the copy of this code */
	addi	r8,r8,(4f - _stext)@l	/* that we just made		 */
	mtctr	r8
	bctr

p_end:	.llong	_end - _stext

4:	/* Now copy the rest of the kernel up to _end */
	addis	r5,r26,(p_end - _stext)@ha
	ld	r5,(p_end - _stext)@l(r5)	/* get _end */
5:	bl	.copy_and_flush		/* copy the rest */

9:	b	.start_here_multiplatform

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,8			/* Use the smallest common	*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr

.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode

	/* get TOC pointer (real address) */
	bl	.relative_toc

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_ppc970

	/* pSeries does that early though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_ADDR(r4,paca)		/* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	*/

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	   = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
	.globl	__secondary_start
__secondary_start:
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/* Do early setup for that CPU (stab, slb, hash table pointer) */
	bl	.early_setup_secondary

	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
	LOAD_REG_ADDR(r3, current_set)
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOAD_REG_ADDR(r3, .start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	ori	r4,r4,MSR_EE
	li	r8,1
	stb	r8,PACAHARDIRQEN(r13)
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
BEGIN_FW_FTR_SECTION
	stb	r7,PACAHARDIRQEN(r13)
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
	stb	r7,PACASOFTIRQEN(r13)

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer and get the TOC virtual address
 * before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	ld	r2,PACATOC(r13)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
	b	.
#endif

/*
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,(MSR_SF | MSR_ISF)@highest
	sldi	r12,r12,48
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr

/*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain).  It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
 */
_GLOBAL(relative_toc)
	mflr	r0
	bcl	20,31,$+4
0:	mflr	r9
	ld	r2,(p_toc - 0b)(r9)
	add	r2,r2,r9
	mtlr	r0
	blr

p_toc:	.llong	__toc_start + 0x8000 - 0b

/*
 * This is where the main kernel code starts.
 */
_INIT_STATIC(start_here_multiplatform)
	/* set up the TOC (real address) */
	bl	.relative_toc

	/* Clear out the BSS. It may have been done in prom_init
	 * already, but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOAD_REG_ADDR(r11,__bss_stop)
	LOAD_REG_ADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	srdi.	r11,r11,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:

	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

#ifdef CONFIG_RELOCATABLE
	/* Save the physical address we're running at in kernstart_addr */
	LOAD_REG_ADDR(r4, kernstart_addr)
	clrldi	r0,r25,2
	std	r0,0(r4)
#endif

	/* The following gets the stack set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOAD_REG_ADDR(r3,init_thread_union)

	/* set up a stack pointer */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation.	*/

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup		/* also sets r13 and SPRG3 */

	LOAD_REG_ADDR(r3, .start_here_common)
	ld	r4,PACAKMSR(r13)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	/* This is where all platforms converge execution */
_INIT_GLOBAL(start_here_common)
	/* relocation is on at this point */
	std	r1,PACAKSAVE(r13)

	/* Load the TOC (virtual address) */
	ld	r2,PACATOC(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
	li	r5,0
	stb	r5,PACASOFTIRQEN(r13)	/* Soft Disabled */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled on iSeries */
	mtmsrd	r5
	li	r5,1
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	stb	r5,PACAHARDIRQEN(r13)	/* Hard Disabled on others */

	bl	.start_kernel

	/* Not reached */
	BUG_OPCODE

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"

	.align	PAGE_SHIFT

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE