/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.inc"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define	TMPSTACKSZ	16384

#ifdef __powerpc64__
#define GET_TOCBASE(r)  \
	mfspr	r, SPR_SPRG8
#define	TOC_RESTORE	nop
#define	CMPI	cmpdi
#define	CMPL	cmpld
#define	LOAD	ld
#define	LOADX	ldarx
#define	STORE	std
#define	STOREX	stdcx.
#define	STU	stdu
#define	CALLSIZE	48
#define	REDZONE		288
#define	THREAD_REG	%r13
#define	ADDR(x)	\
	.llong	x
#define	WORD_SIZE	8
#else
#define	GET_TOCBASE(r)
#define	TOC_RESTORE
#define	CMPI	cmpwi
#define	CMPL	cmplw
#define	LOAD	lwz
#define	LOADX	lwarx
#define	STOREX	stwcx.
#define	STORE	stw
#define	STU	stwu
#define	CALLSIZE	8
#define	REDZONE		0
#define	THREAD_REG	%r2
#define	ADDR(x)	\
	.long	x
#define	WORD_SIZE	4
#endif
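/*
 * A quick example of the width-neutral macros above (illustrative only):
 * a sequence such as
 *
 *	LOAD	%r5, 0(%r4)
 *	CMPI	%r5, 0
 *
 * assembles to ld/cmpdi under __powerpc64__ and to lwz/cmpwi on 32-bit,
 * so the code below can be written once for both ABIs.
 */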
	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry. Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create temp entry in the second AS (make sure it's not TLB1[1])
 *  - Switch to temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to kernel load address
 *  - Switch to TLB1[1] mapping
 *  - Invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync
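/*
 * The PVR check above dispatches on the processor version, i.e. the
 * upper 16 bits of the PVR that rlwinm extracts. In rough C (a sketch
 * only; the FSL_* and HID0_* constants come from machine/spr.h and
 * machine/hid.h):
 *
 *	ver = mfpvr() >> 16;
 *	hid0 = HID0_E500_DEFAULT_SET;
 *	if (ver == FSL_E500mc)
 *		hid0 = HID0_E500MC_DEFAULT_SET;
 *	else if (ver == FSL_E5500)
 *		hid0 = HID0_E5500_DEFAULT_SET;
 *	mtspr(SPR_HID0, hid0);
 */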
/*
 * E500mc and E5500 do not have the HID1 register, so skip HID1 setup on
 * these cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f
	cmpli	0, 0, %r3, FSL_E6500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
#ifdef __powerpc64__
	clrldi	%r4, %r4, 38
	clrrdi	%r3, %r3, 12
#else
	rlwinm	%r4, %r4, 0, 6, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2
	nop

	/* Get load offset */
	ld	%r31,-0x8000(%r2)	/* First TOC entry is TOC base */
	subf	%r31,%r31,%r2		/* Subtract from real TOC base to get base */

	/* Set up the stack pointer */
	bl	1f
	.llong	tmpstack + TMPSTACKSZ - 96 - .
1:	mflr	%r3
	ld	%r1,0(%r3)
	add	%r1,%r1,%r3
	bl	1f
	.llong	_DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3
	mr	%r4,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)
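/*
 * Note on the recurring "bl 1f; .long sym-.; 1: mflr" pattern used
 * above: bl deposits the runtime address of the embedded data word in
 * LR, and the word itself holds the link-time offset (sym minus the
 * word's own address), so loading the word and adding it to the mflr
 * result yields sym's actual address no matter where the image was
 * loaded.
 */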
/*
 * Relocate kernel
 */
	bl	1f
	.long	_DYNAMIC-.
	.long	_GLOBAL_OFFSET_TABLE_-.
1:	mflr	%r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
#endif
	bl	CNAME(elf_reloc_self)
	TOC_RESTORE

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	CNAME(booke_init)
	TOC_RESTORE

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	STORE	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	CNAME(mi_startup)
	TOC_RESTORE
	/* NOT REACHED */
5:	b	5b
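/*
 * The hand-off above, in rough C (a sketch; booke_init() returns the
 * top of thread0's kernel stack, as the comment above notes):
 *
 *	sp = booke_init(r30, r31);	- the saved loader arguments
 *	*(uintptr_t *)sp = 0;		- terminate the frame chain
 *	mi_startup();			- never returns
 */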
#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0

/*
 * Initial configuration
 */
1:
	mflr	%r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
#ifdef __powerpc64__
	b	0f
	.align	3
0:
	nop
#endif
	bl	5f
	ADDR(bp_kernload)
	ADDR(__boot_page)
5:	mflr	%r3
#ifdef __powerpc64__
	ld	%r4, 0(%r3)
	ld	%r5, 8(%r3)
	clrrdi	%r3, %r3, 12
#else
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	isync
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, (7f - 6b)
#ifdef __powerpc64__
	lis	%r4, PSL_CM@h		/* Note AS=0 */
#else
	li	%r4, 0			/* Note AS=0 */
#endif
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS and
 * beyond, so we can directly access all locations the kernel was linked
 * against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry
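/*
 * bp_kernload was fetched above without relying on relocation: the boot
 * page is 4 KB aligned, so masking the mflr result down to a 4 KB
 * boundary gives the page's physical base, and the embedded link-time
 * addresses give bp_kernload's offset within the page. In rough C
 * (a sketch):
 *
 *	off = bp_kernload_linkaddr - __boot_page_linkaddr;
 *	kernload = *(uint32_t *)(page_base + off);
 */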
#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Set up the stack pointer */
	addis	%r1,%r2,TOC_REF(tmpstack)@ha
	ld	%r1,TOC_REF(tmpstack)@l(%r1)
	addi	%r1,%r1,TMPSTACKSZ-96
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)
#endif

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long	ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	LOAD	%r3, 0(%r3)
	mtsprg0	%r3

	bl	CNAME(pmap_bootstrap_ap)
	TOC_RESTORE

	bl	CNAME(cpudep_ap_bootstrap)
	TOC_RESTORE
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	CNAME(machdep_ap_bootstrap)
	TOC_RESTORE

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31	/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME this is not robust against overflow i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	mtlr	%r3
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr
#endif
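/*
 * The TLB1 walk in tlb1_inval_all_but_current above is, in rough C
 * (a sketch; the MAS register layouts live in machine/tlb.h):
 *
 *	n = mfspr(SPR_TLB1CFG) & TLBCFG_NENTRY_MASK;
 *	for (i = 0; i < n; i++) {
 *		mtspr(SPR_MAS0, MAS0_TLBSEL1 | i << 16);  - ESEL = i
 *		tlbre();		- read entry into MAS1..MAS3
 *		if (i == current)	- r29, the mapping in use
 *			continue;
 *		mtspr(SPR_MAS1,
 *		    mfspr(SPR_MAS1) & ~(MAS1_VALID | MAS1_IPROT));
 *		tlbwe();		- write back the invalidated entry
 *	}
 */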
#ifdef SMP
.globl __boot_tlb1
	/*
	 * The __boot_tlb1 table is used to hold BSP TLB1 entries
	 * marked with _TLB_ENTRY_SHARED flag during AP bootstrap.
	 * The BSP fills in the table in tlb_ap_prep() function. Next,
	 * AP loads its contents to TLB1 hardware in pmap_bootstrap_ap().
	 */
__boot_tlb1:
	.space	TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * Boot page needs to be exactly 4K, with the last word of this page
	 * acting as the reset vector, so we need to stuff the remainder.
	 * Upon release from holdoff the CPU fetches the last word of the boot
	 * page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	mfspr	%r3, 0
	blr

/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	ADDR(begin)
GLOBAL(__endkernel)
	ADDR(end)
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space	10240	/* XXX: this really should not be necessary */
#ifdef __powerpc64__
TOC_ENTRY(tmpstack)
TOC_ENTRY(bp_kernload)
#endif

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>