/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.inc"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define	TMPSTACKSZ	16384

#ifdef __powerpc64__
#define	GET_TOCBASE(r)	\
	mfspr	r, SPR_SPRG8
#define	TOC_RESTORE	nop
#define	CMPI	cmpdi
#define	CMPL	cmpld
#define	LOAD	ld
#define	LOADX	ldarx
#define	STORE	std
#define	STOREX	stdcx.
#define	STU	stdu
#define	CALLSIZE	48
#define	REDZONE		288
#define	THREAD_REG	%r13
#define	ADDR(x)	\
	.llong	x
#define	WORD_SIZE	8
#else
#define	GET_TOCBASE(r)
#define	TOC_RESTORE
#define	CMPI	cmpwi
#define	CMPL	cmplw
#define	LOAD	lwz
#define	LOADX	lwarx
#define	STOREX	stwcx.
#define	STORE	stw
#define	STU	stwu
#define	CALLSIZE	8
#define	REDZONE		0
#define	THREAD_REG	%r2
#define	ADDR(x)	\
	.long	x
#define	WORD_SIZE	4
#endif

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create temp entry in the second AS (make sure it's not TLB1[1])
 *  - Switch to temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to kernel load address
 *  - Switch to TLB1[1] mapping
 *  - Invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */
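/*
 * Editor's note, an illustrative restatement of the sequence below
 * (comment only, nothing here is executed).  On Book-E, rfi loads the PC
 * from SRR0 and the MSR from SRR1, so setting PSL_IS|PSL_DS in SRR1
 * atomically moves execution into address space 1.  Roughly:
 *
 *	tlb1[r28] = copy of tlb1[r29] with TS=1	(tlb1_temp_mapping_as1)
 *	rfi -> same PC, MSR[IS|DS]=1		(fetching via the temp entry)
 *	tlb1[r29].V = 0				(safe: no longer running from it)
 *	tlb1[final] = 64MB, TS=0, EPN=VM_MIN_KERNEL_ADDRESS, RPN=load addr
 *	rfi -> kernel VA, MSR[IS|DS]=0		(now at linked addresses)
 *	tlb1[r28].V = 0				(drop the temp entry)
 */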
/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31
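	/*
	 * %r3 now holds the 16-bit processor version (the upper half of
	 * the PVR).  For illustration, an e500mc reports a PVR of roughly
	 * 0x8023xxxx, so the compare below matches FSL_E500mc; the FSL_*
	 * constants from <machine/spr.h> are authoritative.
	 */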

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * E500mc and E5500 do not have HID1 register, so skip HID1 setup on
 * this core.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f
	cmpli	0, 0, %r3, FSL_E6500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
#ifdef __powerpc64__
	clrldi	%r4, %r4, 38
	clrrdi	%r3, %r3, 12
#else
	rlwinm	%r4, %r4, 0, 6, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Get load offset */
	ld	%r31,-0x8000(%r2)	/* First TOC entry is TOC base */
	subf	%r31,%r31,%r2		/* Subtract from real TOC base to get base */

	/* Set up the stack pointer */
	ld	%r1,TOC_REF(tmpstack)(%r2)
	addi	%r1,%r1,TMPSTACKSZ-96
	add	%r1,%r1,%r31
	bl	1f
	.llong	_DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3
	mr	%r4,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
	bl	1f
	.long	_DYNAMIC-.
	.long	_GLOBAL_OFFSET_TABLE_-.
1:	mflr	%r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
#endif
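/*
 * Editor's note on the bl/mflr idiom used above: the assembler stores a
 * link-time offset (e.g. _DYNAMIC - .) as data, and mflr recovers the
 * run-time address of that data word, so adding the two yields the
 * run-time address of the symbol with no relocations required.  The
 * difference between got[0] (the link-time address of _DYNAMIC) and the
 * run-time _DYNAMIC address is the relocation base handed to
 * elf_reloc_self() below.
 */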
	bl	CNAME(elf_reloc_self)
	TOC_RESTORE

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	CNAME(booke_init)
	TOC_RESTORE

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	STORE	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	CNAME(mi_startup)
	TOC_RESTORE
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0

/*
 * Initial configuration
 */
1:
	mflr	%r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
#ifdef __powerpc64__
	b	0f
	.align	3
0:
	nop
#endif
	bl	5f
	ADDR(bp_kernload)
	ADDR(__boot_page)
5:	mflr	%r3
#ifdef __powerpc64__
	ld	%r4, 0(%r3)
	ld	%r5, 8(%r3)
	clrrdi	%r3, %r3, 12
#else
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3
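	/*
	 * Editor's note on the fetch above: "bl 5f" skips the two
	 * pointer-sized literals, and mflr yields their run-time address
	 * inside the copy of the boot page the AP is executing from.
	 * Subtracting the two link-time values gives bp_kernload's offset
	 * within the page, which is then applied to the page's run-time
	 * base (%r3 rounded down to 4KB).  This is how the AP reads a
	 * BSP-initialized variable before its final translation is set up.
	 */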
	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, (7f - 6b)
#ifdef __powerpc64__
	lis	%r4, PSL_CM@h		/* Note AS=0 */
#else
	li	%r4, 0			/* Note AS=0 */
#endif
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS and
 * beyond, so we can directly access all locations the kernel was linked
 * against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Set up the stack pointer */
	ld	%r1,TOC_REF(tmpstack)(%r2)
	addi	%r1,%r1,TMPSTACKSZ-96
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)
#endif

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long	ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	LOAD	%r3, 0(%r3)
	mtsprg0	%r3

	bl	CNAME(pmap_bootstrap_ap)
	TOC_RESTORE

	bl	CNAME(cpudep_ap_bootstrap)
	TOC_RESTORE
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	CNAME(machdep_ap_bootstrap)
	TOC_RESTORE

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31	/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr
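/*
 * Editor's note, one possible shape for the FIXME above (an untested
 * sketch, not part of the original flow): derive SAS from the current
 * MSR instead of assuming AS=0, e.g.
 *
 *	mfmsr	%r18
 *	rlwinm	%r18, %r18, 27, 31, 31	(MSR[IS] -> MAS6[SAS])
 *	or	%r17, %r17, %r18	(merge with SPID before mtspr)
 */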
/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME this is not robust against overflow i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	bl	zero_mas7
	bl	zero_mas8
	mtlr	%r3
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS7 and MAS8 conditional zeroing.
 */
.globl zero_mas7
zero_mas7:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500v1
	beq	1f

	li	%r20, 0
	mtspr	SPR_MAS7, %r20
	isync
1:
	blr

.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	isync
	blr
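/*
 * Editor's note on the PVR checks above: MAS7 carries the upper bits of
 * the RPN (physical addresses above 4GB) and is not implemented on
 * e500v1, while MAS8 (hypervisor TLB attributes) exists only on the
 * e500mc/e5500 class of cores.  Touching an unimplemented SPR is
 * undefined and may trap, hence the conditional zeroing.
 */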
#endif

#ifdef SMP
.globl __boot_tlb1
	/*
	 * The __boot_tlb1 table is used to hold BSP TLB1 entries
	 * marked with _TLB_ENTRY_SHARED flag during AP bootstrap.
	 * The BSP fills in the table in tlb_ap_prep() function. Next,
	 * AP loads its contents to TLB1 hardware in pmap_bootstrap_ap().
	 */
__boot_tlb1:
	.space	TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * Boot page needs to be exactly 4K, with the last word of this page
	 * acting as the reset vector, so we need to stuff the remainder.
	 * Upon release from holdoff CPU fetches the last word of the boot
	 * page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	mfspr	%r3, 0
	blr

/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align	3
GLOBAL(__startkernel)
	ADDR(begin)
GLOBAL(__endkernel)
	ADDR(end)
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space	10240	/* XXX: this really should not be necessary */
#ifdef __powerpc64__
TOC_ENTRY(tmpstack)
TOC_ENTRY(bp_kernload)
#endif

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>