1/* 2 * PowerPC version 3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 4 * 5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP 6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> 7 * Adapted for Power Macintosh by Paul Mackerras. 8 * Low-level exception handlers and MMU support 9 * rewritten by Paul Mackerras. 10 * Copyright (C) 1996 Paul Mackerras. 11 * 12 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and 13 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com 14 * 15 * This file contains the entry point for the 64-bit kernel along 16 * with some early initialization code common to all 64-bit powerpc 17 * variants. 18 * 19 * This program is free software; you can redistribute it and/or 20 * modify it under the terms of the GNU General Public License 21 * as published by the Free Software Foundation; either version 22 * 2 of the License, or (at your option) any later version. 23 */ 24 25#include <linux/threads.h> 26#include <linux/init.h> 27#include <asm/reg.h> 28#include <asm/page.h> 29#include <asm/mmu.h> 30#include <asm/ppc_asm.h> 31#include <asm/head-64.h> 32#include <asm/asm-offsets.h> 33#include <asm/bug.h> 34#include <asm/cputable.h> 35#include <asm/setup.h> 36#include <asm/hvcall.h> 37#include <asm/thread_info.h> 38#include <asm/firmware.h> 39#include <asm/page_64.h> 40#include <asm/irqflags.h> 41#include <asm/kvm_book3s_asm.h> 42#include <asm/ptrace.h> 43#include <asm/hw_irq.h> 44#include <asm/cputhreads.h> 45#include <asm/ppc-opcode.h> 46 47/* The physical memory is laid out such that the secondary processor 48 * spin code sits at 0x0000...0x00ff. On server, the vectors follow 49 * using the layout described in exceptions-64s.S 50 */ 51 52/* 53 * Entering into this code we make the following assumptions: 54 * 55 * For pSeries or server processors: 56 * 1. The MMU is off & open firmware is running in real mode. 57 * 2. The kernel is entered at __start 58 * -or- For OPAL entry: 59 * 1. 
 * 1. The MMU is off, processor in HV mode, primary CPU enters at 0
 *    with device-tree in gpr3. We also get OPAL base in r8 and
 *    entry in r9 for debugging purposes
 * 2. Secondary processors enter at 0x60 with PIR in gpr3
 *
 * For Book3E processors:
 * 1. The MMU is on running in AS0 in a state defined in ePAPR
 * 2. The kernel is entered at __start
 */

OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
USE_FIXED_SECTION(first_256B)
	/*
	 * Offsets are relative from the start of fixed section, and
	 * first_256B starts at 0. Offsets are a bit easier to use here
	 * than the fixed section entry macros.
	 */
	. = 0x0
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	FIXUP_ENDIAN
	b	__start_initialization_multiplatform
END_FTR_SECTION(0, 1)

	/* Catch branch to 0 in real mode */
	trap

	/* Secondary processors spin on this value until it becomes non-zero.
	 * When non-zero, it contains the real address of the function the cpu
	 * should jump to.
	 */
	.balign 8
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.      */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

#ifdef CONFIG_RELOCATABLE
	/* This flag is set to 1 by a loader if the kernel should run
	 * at the loaded address instead of the linked address. This
	 * is used by kexec-tools to keep the kdump kernel in the
	 * crash_kernel region. The loader is responsible for
	 * observing the alignment requirement.
	 */
	/* Do not move this variable as kexec-tools knows about it. */
	. = 0x5c
	.globl	__run_at_load
__run_at_load:
DEFINE_FIXED_SYMBOL(__run_at_load)
	.long	0x72756e30	/* "run0" -- relocate to 0 by default */
#endif

	. = 0x60
/*
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
 * before the bulk of the kernel has been relocated. This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
 *
 * On entry: r3 = this cpu's physical id (r4 additionally carries a
 * platform value that Book3E needs — see the stash below).
 */
	.globl	__secondary_hold
__secondary_hold:
	FIXUP_ENDIAN
#ifndef CONFIG_PPC_BOOK3E
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
#endif
	/* Grab our physical cpu number */
	mr	r24,r3
	/* stash r4 for book3e */
	mr	r25,r4

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,(ABS_ADDR(__secondary_hold_acknowledge))(0)
	sync

	li	r26,0
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r26,r26)
#endif
	/* All secondary cpus wait here until told to start. */
100:	ld	r12,(ABS_ADDR(__secondary_hold_spinloop))(r26)
	cmpdi	0,r12,0
	beq	100b

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r12,r12)
#endif
	/* Jump to the address the master wrote into the spinloop word,
	 * passing our physical cpu id back in r3. */
	mtctr	r12
	mr	r3,r24
	/*
	 * it may be the case that other platforms have r4 right to
	 * begin with, this gives us some safety in case it is not
	 */
#ifdef CONFIG_PPC_BOOK3E
	mr	r4,r25
#else
	li	r4,0
#endif
	/* Make sure that patched code is visible */
	isync
	bctr
#else
	BUG_OPCODE
#endif
CLOSE_FIXED_SECTION(first_256B)

/* This value is used to mark exception frames on the stack.
 */
	.section ".toc","aw"
exception_marker:
	/* TOC entry spelling "regshere" in ASCII — recognizable marker value */
	.tc	ID_72656773_68657265[TC],0x7265677368657265
	.previous

/*
 * On server, we include the exception vectors code here as it
 * relies on absolute addressing which is only possible within
 * this compilation unit
 */
#ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S"
#else
OPEN_TEXT_SECTION(0x100)
#endif

USE_TEXT_SECTION()

#ifdef CONFIG_PPC_BOOK3E
/*
 * The booting_thread_hwid holds the thread id we want to boot in cpu
 * hotplug case. It is set by cpu hotplug code, and is invalid by default.
 * The thread id is the same as the initial value of SPRN_PIR[THREAD_ID]
 * bit field.
 */
	.globl	booting_thread_hwid
booting_thread_hwid:
	.long	INVALID_THREAD_HWID
	.align	3
/*
 * start a thread in the same core
 * input parameters:
 * r3 = the thread physical id
 * r4 = the entry point where thread starts
 * Clobbers r5, r6; releases the thread via SPRN_TENS.
 */
_GLOBAL(book3e_start_thread)
	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
	cmpi	0, r3, 0
	beq	10f
	cmpi	0, r3, 1
	beq	11f
	/* If the thread id is invalid, just exit. */
	b	13f
10:
	/* Thread 0: program its initial MSR and entry address */
	MTTMR(TMRN_IMSR0, 5)
	MTTMR(TMRN_INIA0, 4)
	b	12f
11:
	/* Thread 1: program its initial MSR and entry address */
	MTTMR(TMRN_IMSR1, 5)
	MTTMR(TMRN_INIA1, 4)
12:
	isync
	/* Set bit (1 << thread id) in the thread-enable-set register */
	li	r6, 1
	sld	r6, r6, r3
	mtspr	SPRN_TENS, r6
13:
	blr

/*
 * stop a thread in the same core
 * input parameter:
 * r3 = the thread physical id
 * Clobbers r4; disables the thread via SPRN_TENC.
 */
_GLOBAL(book3e_stop_thread)
	cmpi	0, r3, 0
	beq	10f
	cmpi	0, r3, 1
	beq	10f
	/* If the thread id is invalid, just exit. */
	b	13f
10:
	/* Set bit (1 << thread id) in the thread-enable-clear register */
	li	r4, 1
	sld	r4, r4, r3
	mtspr	SPRN_TENC, r4
13:
	blr

_GLOBAL(fsl_secondary_thread_init)
	mfspr	r4,SPRN_BUCSR

	/* Enable branch prediction */
	lis	r3,BUCSR_INIT@h
	ori	r3,r3,BUCSR_INIT@l
	mtspr	SPRN_BUCSR,r3
	isync

	/*
	 * Fix PIR to match the linear numbering in the device tree.
	 *
	 * On e6500, the reset value of PIR uses the low three bits for
	 * the thread within a core, and the upper bits for the core
	 * number.  There are two threads per core, so shift everything
	 * but the low bit right by two bits so that the cpu numbering is
	 * continuous.
	 *
	 * If the old value of BUCSR is non-zero, this thread has run
	 * before.  Thus, we assume we are coming from kexec or a similar
	 * scenario, and PIR is already set to the correct value.  This
	 * is a bit of a hack, but there are limited opportunities for
	 * getting information into the thread and the alternatives
	 * seemed like they'd be overkill.  We can't tell just by looking
	 * at the old PIR value which state it's in, since the same value
	 * could be valid for one thread out of reset and for a different
	 * thread in Linux.
	 */

	mfspr	r3, SPRN_PIR
	cmpwi	r4,0
	bne	1f			/* BUCSR was non-zero: PIR already fixed */
	rlwimi	r3, r3, 30, 2, 30	/* compact core/thread bits into linear id */
	mtspr	SPRN_PIR, r3
1:
#endif

_GLOBAL(generic_secondary_thread_init)
	mr	r24,r3			/* r24 = physical cpu id, kept live below */

	/* turn on 64-bit mode */
	bl	enable_64b_mode

	/* get a valid TOC pointer, wherever we're mapped at */
	bl	relative_toc
	tovirt(r2,r2)

#ifdef CONFIG_PPC_BOOK3E
	/* Book3E initialization */
	mr	r3,r24
	bl	book3e_secondary_thread_init
#endif
	b	generic_secondary_common_init

/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 *
 * On Book3E, r4 = 1 to indicate that the initial TLB entry for
 * this core already exists (setup via some other mechanism such
 * as SCOM before entry).
 */
_GLOBAL(generic_secondary_smp_init)
	FIXUP_ENDIAN
	mr	r24,r3			/* r24 = physical cpu id */
	mr	r25,r4			/* r25 = Book3E TLB-entry flag (see above) */

	/* turn on 64-bit mode */
	bl	enable_64b_mode

	/* get a valid TOC pointer, wherever we're mapped at */
	bl	relative_toc
	tovirt(r2,r2)

#ifdef CONFIG_PPC_BOOK3E
	/* Book3E initialization */
	mr	r3,r24
	mr	r4,r25
	bl	book3e_secondary_core_init

/*
 * After common core init has finished, check if the current thread is the
 * one we wanted to boot. If not, start the specified thread and stop the
 * current thread.
 */
	LOAD_REG_ADDR(r4, booting_thread_hwid)
	lwz     r3, 0(r4)
	li	r5, INVALID_THREAD_HWID
	cmpw	r3, r5
	beq	20f			/* no specific thread requested */

	/*
	 * The value of booting_thread_hwid has been stored in r3,
	 * so make it invalid.
	 */
	stw	r5, 0(r4)

	/*
	 * Get the current thread id and check if it is the one we wanted.
	 * If not, start the one specified in booting_thread_hwid and stop
	 * the current thread.
	 */
	mfspr	r8, SPRN_TIR
	cmpw	r3, r8
	beq	20f

	/* start the specified thread */
	LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
	ld	r4, 0(r5)
	bl	book3e_start_thread

	/* stop the current thread */
	mr	r3, r8
	bl	book3e_stop_thread
10:
	b	10b			/* should not get here once stopped */
20:
#endif

generic_secondary_common_init:
	/* Set up a paca value for this processor.  Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOAD_REG_ADDR(r13, paca)	/* Load paca pointer		 */
	ld	r13,0(r13)		/* Get base vaddr of paca array	 */
#ifndef CONFIG_SMP
	addi	r13,r13,PACA_SIZE	/* know r13 if used accidentally */
	b	kexec_wait		/* wait for next kernel if !SMP	 */
#else
	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address       */
	lwz	r7,0(r7)		/* also the max paca allocated	 */
	li	r5,0			/* logical cpu id                */
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
	cmpw	r6,r24			/* Compare to our id             */
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
	addi	r5,r5,1
	cmpw	r5,r7			/* Check if more pacas exist     */
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	kexec_wait		/* next kernel might do better	 */

2:	SET_PACA(r13)
#ifdef CONFIG_PPC_BOOK3E
	addi	r12,r13,PACA_EXTLB	/* and TLB exc frame in another  */
	mtspr	SPRN_SPRG_TLB_EXFRAME,r12
#endif

	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5

	/* See if we need to call a cpu state restore handler */
	LOAD_REG_ADDR(r23, cur_cpu_spec)
	ld	r23,0(r23)
	ld	r12,CPU_SPEC_RESTORE(r23)
	cmpdi	0,r12,0
	beq	3f
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)		/* ABIv1: dereference function descriptor */
#endif
	mtctr	r12
	bctrl

3:	LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
	lwarx	r4,0,r3			/* atomic decrement via lwarx/stwcx. */
	subi	r4,r4,1
	stwcx.	r4,0,r3
	bne	3b
	isync

4:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	cmpwi	0,r23,0
	beq	4b			/* Loop until told to go	 */

	sync				/* order paca.run and cur_cpu_spec */
	isync				/* In case code patching happened */

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start
#endif /* SMP */

/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 */
#ifdef CONFIG_PPC_BOOK3S
__mmu_off:
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR	/* translation already off? */
	beqlr
	mflr	r4
	andc	r3,r3,r0		/* clear IR/DR in the MSR image */
	mtspr	SPRN_SRR0,r4		/* return to caller ... */
	mtspr	SPRN_SRR1,r3		/* ... with translation disabled */
	sync
	rfid
	b	.	/* prevent speculative execution */
#endif


/*
 * Here is our main kernel entry point. We support currently 2 kind of entries
 * depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
__start_initialization_multiplatform:
	/* Make sure we are running in 64 bits mode */
	bl	enable_64b_mode

	/* Get TOC pointer (current runtime address) */
	bl	relative_toc

	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */

	/*
	 * Are we booted from a PROM Of-type client-interface ?
	 */
	cmpldi	cr0,r5,0
	beq	1f
	b	__boot_from_prom		/* yes -> prom */
1:	/* Save parameters */
	mr	r31,r3			/* r31 = device-tree pointer */
	mr	r30,r4			/* r30 = kernel physical load addr */
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Save OPAL entry */
	mr	r28,r8
	mr	r29,r9
#endif

#ifdef CONFIG_PPC_BOOK3E
	bl	start_initialization_book3e
	b	__after_prom_start
#else
	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16		/* keep the PVR version field */
	cmpwi	r0,0x39			/* 970 */
	beq	1f
	cmpwi	r0,0x3c			/* 970FX */
	beq	1f
	cmpwi	r0,0x44			/* 970MP */
	beq	1f
	cmpwi	r0,0x45			/* 970GX */
	bne	2f
1:	bl	__cpu_preinit_ppc970
2:

	/* Switch off MMU if not already off */
	bl	__mmu_off
	b	__after_prom_start
#endif /* CONFIG_PPC_BOOK3E */

__boot_from_prom:
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/*
	 * Align the stack to 16-byte boundary
	 * Depending on the size and layout of the ELF sections in the initial
	 * boot binary, the stack pointer may be unaligned on PowerMac
	 */
	rldicr	r1,r1,0,59

#ifdef CONFIG_RELOCATABLE
	/* Relocate code for where we are now */
	mr	r3,r26
	bl	relocate
#endif

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	mr	r8,r26
	bl	prom_init
#endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap

__after_prom_start:
#ifdef CONFIG_RELOCATABLE
	/* process relocations for the final address of the kernel */
	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
	sldi	r25,r25,32
#if defined(CONFIG_PPC_BOOK3E)
	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
#endif
	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
#if defined(CONFIG_PPC_BOOK3E)
	tophys(r26,r26)
#endif
	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
	bne	1f
	add	r25,r25,r26	/* run at load address: rebase target */
1:	mr	r3,r25
	bl	relocate
#if defined(CONFIG_PPC_BOOK3E)
	/* IVPR needs to be set after relocation. */
	bl	init_core_book3e
#endif
#endif

/*
 * We need to run with _stext at physical address PHYSICAL_START.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 *
 * Note: This process overwrites the OF exception vectors.
 */
	li	r3,0			/* target addr */
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r3,r3)		/* on booke, we already run at PAGE_OFFSET */
#endif
	mr.	r4,r26			/* In some cases the loader may  */
#if defined(CONFIG_PPC_BOOK3E)
	tovirt(r4,r4)
#endif
	beq	9f			/* have already put us at zero */
	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

#ifdef CONFIG_RELOCATABLE
/*
 * Check if the kernel has to be running as relocatable kernel based on the
 * variable __run_at_load, if it is set the kernel is treated as relocatable
 * kernel, otherwise it will be moved to PHYSICAL_START
 */
#if defined(CONFIG_PPC_BOOK3E)
	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
#endif
	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
	cmplwi	cr0,r7,1
	bne	3f

#ifdef CONFIG_PPC_BOOK3E
	LOAD_REG_ADDR(r5, __end_interrupts)
	LOAD_REG_ADDR(r11, _stext)
	sub	r5,r5,r11	/* copy size = interrupt vectors only */
#else
	/* just copy interrupts */
	LOAD_REG_IMMEDIATE(r5, FIXED_SYMBOL_ABS_ADDR(__end_interrupts))
#endif
	b	5f
3:
#endif
	/* # bytes of memory to copy */
	lis	r5,(ABS_ADDR(copy_to_here))@ha
	addi	r5,r5,(ABS_ADDR(copy_to_here))@l

	bl	copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */
	/* Jump to the copy of this code that we just made */
	addis	r8,r3,(ABS_ADDR(4f))@ha
	addi	r12,r8,(ABS_ADDR(4f))@l
	mtctr	r12
	bctr

.balign 8
p_end: .llong _end - copy_to_here	/* size of the tail still to copy */

4:
	/*
	 * Now copy the rest of the kernel up to _end, add
	 * _end - copy_to_here to the copy limit and run again.
	 */
	addis	r8,r26,(ABS_ADDR(p_end))@ha
	ld	r8,(ABS_ADDR(p_end))@l(r8)
	add	r5,r5,r8
5:	bl	copy_and_flush		/* copy the rest */

9:	b	start_here_multiplatform

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,8			/* Use the smallest common	*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	isync
	blr

.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors starts from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	enable_64b_mode

	li	r0,0
	mfspr	r3,SPRN_HID4
	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
	sync
	mtspr	SPRN_HID4,r3
	isync
	sync
	slbia

	/* get TOC pointer (real address) */
	bl	relative_toc
	tovirt(r2,r2)

	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_ppc970

	/* pSeries do that early though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_ADDR(r4,paca)		/* Load paca pointer		*/
	ld	r4,0(r4)		/* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	SET_PACA(r13)			/* Save vaddr of paca in an SPRG*/

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	= stack pointer (real addr of temp stack)
 *   r24	= cpu# (in Linux terms)
 *   r13	= paca virtual address
 *   SPRG_PACA	= paca virtual address
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start
__secondary_start:
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/* Initialize the kernel stack */
	LOAD_REG_ADDR(r3, current_set)
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
	ldx	r14,r3,r28
	addi	r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r14,PACAKSAVE(r13)

	/* Do early setup for that CPU (SLB and hash table pointer) */
	bl	early_setup_secondary

	/*
	 * setup the new stack pointer, but *don't* use this until
	 * translation is on.
	 */
	mr	r1, r14

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	stb	r7,PACASOFTIRQEN(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* enable MMU and jump to start_secondary */
	LOAD_REG_ADDR(r3, start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer and get the TOC virtual address
 * before going into C code.
 */
start_secondary_prolog:
	ld	r2,PACATOC(r13)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	start_secondary
	b	.
/*
 * Reset stack pointer and call start_secondary
 * to continue with online operation when woken up
 * from cede in cpu offline.
 */
_GLOBAL(start_secondary_resume)
	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	start_secondary
	b	.
#endif

/*
 * This subroutine clobbers r11 and r12
 */
enable_64b_mode:
	mfmsr	r11			/* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3E
	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
	li	r12,(MSR_64BIT | MSR_ISF)@highest
	sldi	r12,r12,48
	or	r11,r11,r12
	mtmsrd	r11
	isync
#endif
	blr

/*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain).  It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
 *
 * Note: The compiler constructs pointers using offsets from the
 * TOC in -mcmodel=medium mode.
 * After we relocate to 0 but before the MMU is on we need our TOC
 * to be a virtual address otherwise these pointers will be real
 * addresses which may get stored and accessed later with the MMU
 * on.  We use tovirt() at the call sites to handle this.
 */
_GLOBAL(relative_toc)
	mflr	r0			/* preserve caller's LR */
	bcl	20,31,$+4		/* set LR to the next instruction */
0:	mflr	r11			/* r11 = our current runtime address */
	ld	r2,(p_toc - 0b)(r11)
	add	r2,r2,r11		/* r2 = runtime TOC (+0x8000 bias) */
	mtlr	r0
	blr

.balign 8
p_toc:	.llong	__toc_start + 0x8000 - 0b	/* link-time TOC offset from 0b */

/*
 * This is where the main kernel code starts.
 */
start_here_multiplatform:
	/* set up the TOC */
	bl	relative_toc
	tovirt(r2,r2)

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOAD_REG_ADDR(r11,__bss_stop)
	LOAD_REG_ADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	srdi.	r11,r11,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:

#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Setup OPAL entry */
	LOAD_REG_ADDR(r11, opal)
	std	r28,0(r11);
	std	r29,8(r11);
#endif

#ifndef CONFIG_PPC_BOOK3E
	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */
#endif

#ifdef CONFIG_RELOCATABLE
	/* Save the physical address we're running at in kernstart_addr */
	LOAD_REG_ADDR(r4, kernstart_addr)
	clrldi	r0,r25,2
	std	r0,0(r4)
#endif

	/* The following gets the stack set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOAD_REG_ADDR(r3,init_thread_union)

	/* set up a stack pointer */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/*
	 * Do very early kernel initializations, including initial hash table
	 * and SLB setup before we turn on relocation.
	 */

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	early_setup		/* also sets r13 and SPRG_PACA */

	LOAD_REG_ADDR(r3, start_here_common)
	ld	r4,PACAKMSR(r13)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI
	b	.	/* prevent speculative execution */

	/* This is where all platforms converge execution */

start_here_common:
	/* relocation is on at this point */
	std	r1,PACAKSAVE(r13)

	/* Load the TOC (virtual address) */
	ld	r2,PACATOC(r13)

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Generic kernel entry */
	bl	start_kernel

	/* Not reached */
	BUG_OPCODE

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"
/*
 * pgd dir should be aligned to PGD_TABLE_SIZE which is 64K.
 * We will need to find a better way to fix this
 */
	.align	16

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE