/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.inc"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define	TMPSTACKSZ	16384
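
/*
 * Word-size-dependent instruction mnemonics and ABI constants, so the
 * code below assembles for both 32-bit and 64-bit (powerpc64) kernels.
 */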
#ifdef __powerpc64__
#define	GET_TOCBASE(r)	\
	mfspr	r, SPR_SPRG8
#define	TOC_RESTORE	nop
#define	CMPI	cmpdi
#define	CMPL	cmpld
#define	LOAD	ld
#define	LOADX	ldarx
#define	STORE	std
#define	STOREX	stdcx.
#define	STU	stdu
#define	CALLSIZE	48
#define	REDZONE	288
#define	THREAD_REG	%r13
#define	ADDR(x)	\
	.llong	x
#define	WORD_SIZE	8
#else
#define	GET_TOCBASE(r)
#define	TOC_RESTORE
#define	CMPI	cmpwi
#define	CMPL	cmplw
#define	LOAD	lwz
#define	LOADX	lwarx
#define	STOREX	stwcx.
#define	STORE	stw
#define	STU	stwu
#define	CALLSIZE	8
#define	REDZONE	0
#define	THREAD_REG	%r2
#define	ADDR(x)	\
	.long	x
#define	WORD_SIZE	4
#endif

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create temp entry in the second AS (make sure it's not TLB[1])
 *  - Switch to temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to kernel load address
 *  - Switch to TLB1[1] mapping
 *  - Invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * The e500mc, e5500 and e6500 do not have a HID1 register, so skip
 * HID1 setup on these cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f
	cmpli	0, 0, %r3, FSL_E6500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1
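
	/*
	 * rfi below restores MSR from SRR1 and resumes execution at SRR0,
	 * so setting PSL_IS | PSL_DS in SRR1 switches both instruction and
	 * data fetches to address space 1 atomically with the jump.
	 */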
	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	bl	zero_mas8
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
#ifdef __powerpc64__
	clrldi	%r4, %r4, 38
	clrrdi	%r3, %r3, 12
#else
	rlwinm	%r4, %r4, 0, 6, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:
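
/*
 * All addresses below are computed position-independently: a "bl 1f" /
 * "mflr" pair obtains the current address, and the inline .long/.llong
 * constants hold link-time offsets from that spot, so adding the two
 * yields the run-time address.  This locates the TOC (64-bit), the
 * temporary stack and _DYNAMIC before the kernel relocates itself.
 */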
#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2
	nop

	/* Get load offset */
	ld	%r31,-0x8000(%r2)	/* First TOC entry is TOC base */
	subf	%r31,%r31,%r2		/* Subtract from real TOC base to get base */

	/* Set up the stack pointer */
	bl	1f
	.llong	tmpstack + TMPSTACKSZ - 96 - .
1:	mflr	%r3
	ld	%r1,0(%r3)
	add	%r1,%r1,%r3
	bl	1f
	.llong	_DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3
	mr	%r4,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
	bl	1f
	.long	_DYNAMIC-.
	.long	_GLOBAL_OFFSET_TABLE_-.
1:	mflr	%r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
#endif
	bl	CNAME(elf_reloc_self)
	TOC_RESTORE

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	CNAME(booke_init)
	TOC_RESTORE

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	STORE	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	CNAME(mi_startup)
	TOC_RESTORE
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
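/*
 * The AP bootstrap code below must fit within a single 4KB page (hence
 * the .align 12 above and the padding at __boot_page_padding): a core
 * released from holdoff fetches its first instruction from the last
 * word of this page, which branches back to __boot_page.
 */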
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0

/*
 * Initial configuration
 */
1:
	mflr	%r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
#ifdef __powerpc64__
	b	0f
	.align	3
0:
	nop
#endif
	bl	5f
	ADDR(bp_kernload)
	ADDR(__boot_page)
5:	mflr	%r3
#ifdef __powerpc64__
	ld	%r4, 0(%r3)
	ld	%r5, 8(%r3)
	clrrdi	%r3, %r3, 12
#else
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	bl	zero_mas8
	isync
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, (7f - 6b)
#ifdef __powerpc64__
	lis	%r4, PSL_CM@h		/* Note AS=0 */
#else
	li	%r4, 0			/* Note AS=0 */
#endif
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS and
 * beyond, so we can directly access all locations the kernel was linked
 * against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Set up the stack pointer */
	addis	%r1,%r2,TOC_REF(tmpstack)@ha
	ld	%r1,TOC_REF(tmpstack)@l(%r1)
	addi	%r1,%r1,TMPSTACKSZ-96
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)
#endif

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long	ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	LOAD	%r3, 0(%r3)
	mtsprg0	%r3

	bl	CNAME(pmap_bootstrap_ap)
	TOC_RESTORE

	bl	CNAME(cpudep_ap_bootstrap)
	TOC_RESTORE
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	CNAME(machdep_ap_bootstrap)
	TOC_RESTORE

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

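/*
 * TLB helper routines.  These program the e500 MMU through the MAS
 * "MMU assist" registers: MAS0 selects the TLB array and entry (ESEL),
 * MAS1 holds valid/IPROT/TID/TS/TSIZE, MAS2 the effective page number
 * and WIMGE attributes, and MAS3/MAS7 the real page number and access
 * permissions.
 */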
#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31	/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME this is not robust against overflow i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	bl	zero_mas8
	mtlr	%r3
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS8 conditional zeroing.
 */
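/*
 * MAS8 (hypervisor TLB attributes) is implemented only on the
 * virtualization-capable cores, so it is cleared on the e500mc and
 * e5500 and the write is skipped on other cores.
 */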
.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
	/*
	 * The __boot_tlb1 table is used to hold BSP TLB1 entries
	 * marked with _TLB_ENTRY_SHARED flag during AP bootstrap.
	 * The BSP fills in the table in tlb_ap_prep() function. Next,
	 * AP loads its contents to TLB1 hardware in pmap_bootstrap_ap().
	 */
__boot_tlb1:
	.space	TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * Boot page needs to be exactly 4K, with the last word of this page
	 * acting as the reset vector, so we need to stuff the remainder.
	 * Upon release from holdoff, the CPU fetches the last word of the
	 * boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	mfspr	%r3, 0
	blr

/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align	3
GLOBAL(__startkernel)
	ADDR(begin)
GLOBAL(__endkernel)
	ADDR(end)
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space	10240	/* XXX: this really should not be necessary */
#ifdef __powerpc64__
TOC_ENTRY(tmpstack)
TOC_ENTRY(bp_kernload)
#endif

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>