/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.s"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define	TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at a 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find the TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create a temp entry in the second AS (make sure it's not TLB1[0])
 *  - Switch to the temp mapping
 *  - Map 64MB of RAM in TLB1[0]
 *  - Use AS=1, set EPN to KERNBASE and RPN to the kernel load address
 *  - Switch to the TLB1[0] mapping
 *  - Invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */
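/*
 * A note on the AS=1 step listed above: the TLB1 entry that is currently
 * translating the instruction stream cannot be safely rewritten from under
 * our feet, so the code first hops to a scratch translation in address
 * space 1, rebuilds the AS=0 kernel mapping, and then hops back.  As a
 * purely illustrative example, a kernel loaded at physical 0x04000000
 * (any 64MB boundary will do) ends up mapped at virtual KERNBASE by the
 * final TLB1[0] entry.
 */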
/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * E500mc and E5500 do not have the HID1 register, so skip HID1 setup on
 * these cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20		/* First instruction past the rfi below */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[0] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[0] mapping */
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36		/* First instruction past the rfi below */
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)
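/*
 * The "bl 1f; .long sym-.; 1: mflr" sequences above and below are a
 * position-independent addressing idiom: the bl deposits the address of
 * the .long word in LR, and the word itself holds the link-time distance
 * from that word to sym, so LR + distance yields sym's run-time address
 * no matter where the kernel was actually loaded.
 */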
/*
 * Relocate kernel
 */
	bl	1f
	.long	_DYNAMIC-.
	.long	_GLOBAL_OFFSET_TABLE_-.
1:	mflr	%r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
	bl	elf_reloc_self

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	booke_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0

/*
 * Initial configuration
 */
1:
	mflr	%r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find the TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */

	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, 20		/* First instruction past the rfi below */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[0] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
	bl	4f
	.long	bp_kernload
	.long	__boot_page
4:	mflr	%r3
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19	/* 4K-align to get the boot page base */
	sub	%r4, %r4, %r5		/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	tlbwe
	isync
	msync
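/*
 * The bp_kernload fetch above works as follows: the two .long words hold
 * the link-time addresses of bp_kernload and __boot_page, whose difference
 * is the offset of bp_kernload within the boot page.  Masking the current
 * address down to a 4KB boundary gives the base of the copy of the boot
 * page we are executing from, so base + offset addresses the relocated
 * variable regardless of where the page was copied.
 */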
	/* Switch to the final mapping */
	bl	5f
5:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, 32		/* First instruction past the rfi below */
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long	ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined(BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is that we are now running in AS=0, but we
 * should retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31	/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr
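/*
 * An illustration of the MAS0 arithmetic used by the routines here
 * (values are an example only): with an entry number of 2 in r3,
 *
 *	lis	%r4, MAS0_TLBSEL1@h	# r4 = 0x10000000 (TLBSEL = 1)
 *	rlwimi	%r4, %r3, 16, 10, 15	# r4 = 0x10020000 (ESEL = 2)
 *
 * rlwimi rotates the entry number up by 16 bits and inserts only the
 * six-bit ESEL field, leaving the rest of MAS0 untouched.
 */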
/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched (zero_mas7/zero_mas8 also clobber r20)
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME: this is not robust against overflow, i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	bl	zero_mas7
	bl	zero_mas8
	mtlr	%r3
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r6	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS7 and MAS8 conditional zeroing.
 */
.globl zero_mas7
zero_mas7:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500v1
	beq	1f

	li	%r20, 0
	mtspr	SPR_MAS7, %r20
	isync
1:
	blr

.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	isync
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
	/*
	 * The __boot_tlb1 table is used to hold BSP TLB1 entries
	 * marked with the _TLB_ENTRY_SHARED flag during AP bootstrap.
	 * The BSP fills in the table in the tlb_ap_prep() function.  Next,
	 * the AP loads its contents into the TLB1 hardware in
	 * pmap_bootstrap_ap().
	 */
__boot_tlb1:
	.space	TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * The boot page needs to be exactly 4K, with the last word of the
	 * page acting as the reset vector, so we need to stuff the
	 * remainder.  Upon release from holdoff the CPU fetches the last
	 * word of the boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/
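/*
 * The ENTRY() routines below follow the standard calling convention and
 * take no arguments, so from C they behave roughly as:
 *
 *	void dcache_inval(void);	void icache_inval(void);
 *	void dcache_disable(void);	void icache_disable(void);
 *	void dcache_enable(void);	void icache_enable(void);
 *
 * (Prototype sketch for illustration only.)
 */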
/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of the E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b			/* Wait for hardware to clear DCFI */
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI	/* Flash-invalidate the branch buffer */
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN	/* ... then enable prediction */
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr
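/*
 * dataloss_erratum_access(addr, val) below has the shape of a silicon
 * erratum workaround: it performs a single word store (r3 = address,
 * r4 = value, per the usual argument registers) bracketed by long
 * timebase-measured delays, with the surrounding code locked into the
 * I-cache so that no instruction fetches touch the bus while the store
 * is in flight.
 */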
ENTRY(dataloss_erratum_access)
	/* Lock two cache lines into I-Cache */
	sync
	mfspr	%r11, SPR_L1CSR1
	rlwinm	%r11, %r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPR_L1CSR1, %r11
	isync

	lis	%r8, 2f@h
	ori	%r8, %r8, 2f@l
	icbtls	0, 0, %r8
	addi	%r9, %r8, 64

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	icbtls	0, 0, %r9

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	stw	%r4, 0(%r3)

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept	19
	nop
	.endr

	icblc	0, 0, %r8
	icblc	0, 0, %r9

	blr

/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align	3
GLOBAL(__startkernel)
	.long	begin
GLOBAL(__endkernel)
	.long	end
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space	10240	/* XXX: this really should not be necessary */

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>