/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.s"

#include <sys/mutex.h>

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>
#include <machine/bootinfo.h>

#define	TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 * - system memory starts from physical address 0
 * - it's mapped by a single TLB1 entry
 * - TLB1 mapping is 1:1 pa to va
 * - kernel is loaded at 16MB boundary
 * - all PID registers are set to the same value
 * - CPU is running in AS=0
 *
 * Register contents provided by loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 * - find TLB1 entry we started in
 * - make sure it's protected, invalidate other entries
 * - create temp entry in the second AS (make sure it's not TLB1[1])
 * - switch to temp mapping
 * - map 16MB of RAM in TLB1[1]
 * - use AS=1, set EPN to KERNBASE and RPN to kernel load address
 * - switch to the TLB1[1] mapping
 * - invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: kernload
 *	r29	: temp TLB1 entry
 *	r30	: initial TLB1 entry we started in
 *	r31	: metadata pointer
 */

/*
 * Keep metadata ptr in r31 for later use.
 */
	mr	%r31, %r3

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is returned in r30 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1
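/*
 * The switch below is done with an rfi: SRR0 holds the address to resume
 * at and SRR1 the new MSR value, so setting MSR[IS|DS] (PSL_IS | PSL_DS)
 * in SRR1 atomically moves instruction and data fetches into address
 * space 1, where only the temp TLB1 entry translates.  The constant added
 * to the mflr result (20 here) is the byte distance from the instruction
 * at the label to the first instruction after the rfi, i.e. five
 * 4-byte instructions.
 */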
	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r30
	bl	tlb1_inval_entry
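/*
 * The MAS registers describe the entry that the next tlbwe will write:
 * MAS0 selects the TLB array and the entry slot (ESEL), MAS1 holds the
 * valid, protect and page size bits plus the translation space (TS),
 * MAS2 the effective page number and WIMGE attributes, and MAS3 the
 * real page number and access permissions.  Here TLB1[1] is programmed
 * to translate 16MB at KERNBASE onto the physical load address, in AS=0,
 * so the rfi that clears MSR[IS|DS] lands directly in this mapping.
 */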
/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 1			/* Entry 1 */
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 7	/* 16MB alignment mask */
	mr	%r28, %r4		/* Keep kernel load address */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Save kernel load address for later use.
 */
	lis	%r3, kernload@ha
	addi	%r3, %r3, kernload@l
	stw	%r28, 0(%r3)
#ifdef SMP
	/*
	 * APs need a separate copy of kernload info within the __boot_page
	 * area so they can access this value very early, before their TLBs
	 * are fully set up and the kernload global location is available.
	 */
	lis	%r3, kernload_ap@ha
	addi	%r3, %r3, kernload_ap@l
	stw	%r28, 0(%r3)
	msync
#endif

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	lis	%r3, kernel_text@ha
	addi	%r3, %r3, kernel_text@l
	lis	%r4, _end@ha
	addi	%r4, %r4, _end@l
	mr	%r5, %r31		/* metadata ptr */

	/* Prepare e500 core */
	bl	e500_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine-independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

kernload_ap:
	.long	0

/*
 * Initial configuration
 */
1:
	/* Set HIDs */
	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r30 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r30
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 1			/* Entry 1 */
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync
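/*
 * kernload_ap lives inside __boot_page, so it can be read before the
 * kernel mapping exists.  The sequence below rediscovers the page's
 * physical base from the current PC (the boot page is 4K aligned, hence
 * the clearing of the low 12 bits), adds the link-time offset of
 * kernload_ap within __boot_page, and loads the physical load address
 * the BSP stored there.
 */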
	/* Retrieve kernel load [physical] address from kernload_ap */
	bl	4f
4:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0, 19
	lis	%r4, kernload_ap@h
	ori	%r4, %r4, kernload_ap@l
	lis	%r5, __boot_page@h
	ori	%r5, %r5, __boot_page@l
	sub	%r4, %r4, %r5	/* offset of kernload_ap within __boot_page */
	lwzx	%r3, %r4, %r3

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	5f
5:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, 32
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi

/*
 * At this point we are running at virtual addresses KERNBASE and beyond,
 * so we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	lis	%r3, ap_pcpu@h
	ori	%r3, %r3, ap_pcpu@l
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, 0x18	/* TLBSEL */
	ori	%r3, %r3, 0x4		/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r30
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r30, %r17, 16, 20, 31	/* MAS0[ESEL] -> r30 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r30		current entry number
 * r29		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r30, 16, 12, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME this is not robust against overflow, i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r29, %r30, 1		/* Use next entry. */
	li	%r4, 1
	cmpw	%r4, %r29		/* Skip TLB1[1], reserved for the kernel */
	bne	1f
	addi	%r29, %r29, 1
1:	rlwimi	%r3, %r29, 16, 12, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r30		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r30		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

#ifdef SMP
__boot_page_padding:
	/*
	 * Boot page needs to be exactly 4K, with the last word of this page
	 * acting as the reset vector, so we need to stuff the remainder.
	 * Upon release from holdoff the CPU fetches the last word of the
	 * boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/
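/*
 * On Book-E the vector base is split between two registers: IVPR holds
 * the upper 16 bits of the handlers' address and each IVORn holds the
 * low-order offset of one handler.  This is why interrupt_vector_base
 * is loaded with @h only and the individual handlers with @l: all of
 * them must reside within the same 64KB-aligned region.
 */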
ivor_setup:
	/* Set base address of interrupt handler routines */
	lis	%r3, interrupt_vector_base@h
	mtspr	SPR_IVPR, %r3

	/* Assign interrupt handler routines offsets */
	li	%r3, int_critical_input@l
	mtspr	SPR_IVOR0, %r3
	li	%r3, int_machine_check@l
	mtspr	SPR_IVOR1, %r3
	li	%r3, int_data_storage@l
	mtspr	SPR_IVOR2, %r3
	li	%r3, int_instr_storage@l
	mtspr	SPR_IVOR3, %r3
	li	%r3, int_external_input@l
	mtspr	SPR_IVOR4, %r3
	li	%r3, int_alignment@l
	mtspr	SPR_IVOR5, %r3
	li	%r3, int_program@l
	mtspr	SPR_IVOR6, %r3
	li	%r3, int_syscall@l
	mtspr	SPR_IVOR8, %r3
	li	%r3, int_decrementer@l
	mtspr	SPR_IVOR10, %r3
	li	%r3, int_fixed_interval_timer@l
	mtspr	SPR_IVOR11, %r3
	li	%r3, int_watchdog@l
	mtspr	SPR_IVOR12, %r3
	li	%r3, int_data_tlb_error@l
	mtspr	SPR_IVOR13, %r3
	li	%r3, int_inst_tlb_error@l
	mtspr	SPR_IVOR14, %r3
	li	%r3, int_debug@l
	mtspr	SPR_IVOR15, %r3
	blr

/*
 * void tid_flush(tlbtid_t tid);
 *
 * Invalidate all TLB0 entries which match the given TID.  Note this is
 * dedicated for cases when invalidation(s) should NOT be propagated to other
 * CPUs.
 *
 * Global vars tlb0_ways, tlb0_entries_per_way are assumed to have been set up
 * correctly (by tlb0_get_tlbconf()).
 */
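/*
 * TLB0 is set-associative: an entry is addressed by its way, written
 * into MAS0[ESEL], and by its set index, which the hardware takes from
 * the low-order EPN bits in MAS2.  The nested loops below therefore
 * sweep all ways x entries_per_way combinations, reading each entry
 * with tlbre and rewriting it with the VALID bit cleared whenever its
 * TID matches the argument.
 */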
ENTRY(tid_flush)
	cmpwi	%r3, TID_KERNEL
	beq	tid_flush_end	/* don't evict kernel translations */

	/* Number of TLB0 ways */
	lis	%r4, tlb0_ways@h
	ori	%r4, %r4, tlb0_ways@l
	lwz	%r4, 0(%r4)

	/* Number of entries / way */
	lis	%r5, tlb0_entries_per_way@h
	ori	%r5, %r5, tlb0_entries_per_way@l
	lwz	%r5, 0(%r5)

	/* Disable interrupts */
	mfmsr	%r10
	wrteei	0

	li	%r6, 0		/* ways counter */
loop_ways:
	li	%r7, 0		/* entries [per way] counter */
loop_entries:
	/* Select TLB0 and ESEL (way) */
	lis	%r8, MAS0_TLBSEL0@h
	rlwimi	%r8, %r6, 16, 14, 15
	mtspr	SPR_MAS0, %r8
	isync

	/* Select EPN (entry within the way) */
	rlwinm	%r8, %r7, 12, 13, 19
	mtspr	SPR_MAS2, %r8
	isync
	tlbre

	/* Check if valid entry */
	mfspr	%r8, SPR_MAS1
	andis.	%r9, %r8, MAS1_VALID@h
	beq	next_entry	/* invalid entry */

	/* Check if this is our TID */
	rlwinm	%r9, %r8, 16, 24, 31

	cmplw	%r9, %r3
	bne	next_entry	/* not our TID */

	/* Clear VALID bit */
	rlwinm	%r8, %r8, 0, 1, 31
	mtspr	SPR_MAS1, %r8
	isync
	tlbwe
	isync
	msync

next_entry:
	addi	%r7, %r7, 1
	cmpw	%r7, %r5
	bne	loop_entries

	/* Next way */
	addi	%r6, %r6, 1
	cmpw	%r6, %r4
	bne	loop_ways

	/* Restore MSR (possibly re-enable interrupts) */
	mtmsr	%r10
	isync

tid_flush_end:
	blr

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b	/* loop until the flash-invalidate bit self-clears */
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp: set up for handling faults on accesses to user memory.
 * Any routine using this may only call bcopy, either the form below, or the
 * (currently used) optimized C code, so that no non-volatile registers are
 * used.
 */
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, PC_CURTHREAD(%r4)
	lwz	%r4, TD_PCB(%r4)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	mfctr	%r11
	mfxer	%r12
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r10, 12(%r3)	/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0		/* return FALSE */
	blr

/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align	4
tmpstack:
	.space	TMPSTACKSZ

/*
 * Compiled KERNBASE location.
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * Globals
 */
#define	INTRCNT_COUNT	256	/* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

GLOBAL(kernload)
	.long	0
GLOBAL(intrnames)
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(eintrnames)
	.align	4
GLOBAL(intrcnt)
	.space	INTRCNT_COUNT * 4 * 2
GLOBAL(eintrcnt)

#include <powerpc/booke/trap_subr.S>