/*
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>

	.text

/*
 * void call_do_softirq(void *sp)
 *
 * Run __do_softirq() on the stack whose top is in r3.  We save our LR
 * in our caller's frame, push a back-chain word onto the new stack with
 * stdu (so 0(r1) points back at the old r1), switch r1, make the call,
 * then walk the back chain to restore the original stack and LR.
 */
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)		/* save LR in caller's frame */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)	/* back-chain on new stack */
	mr	r1,r3			/* switch to the softirq stack */
	bl	__do_softirq
	ld	r1,0(r1)		/* follow back chain to old stack */
	ld	r0,16(r1)
	mtlr	r0
	blr

/*
 * void call_do_irq(struct pt_regs *regs, void *sp)
 *
 * Same stack-switch dance as call_do_softirq, but r3 (regs) is passed
 * through to __do_irq and the new stack pointer arrives in r4.
 */
_GLOBAL(call_do_irq)
	mflr	r0
	std	r0,16(r1)		/* save LR in caller's frame */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)	/* back-chain on new stack */
	mr	r1,r4			/* switch to the irq stack */
	bl	__do_irq
	ld	r1,0(r1)		/* follow back chain to old stack */
	ld	r0,16(r1)
	mtlr	r0
	blr

	/* TOC entry so the cache-flush code below can find ppc64_caches */
	.section	".toc","aw"
PPC64_CACHES:
	.tc		ppc64_caches[TC],ppc64_caches
	.section	".text"

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 *
 *   flush all bytes from start through stop-1 inclusive
 *
 * In: r3 = start, r4 = stop.  Clobbers r5-r10, ctr, cr0.
 * _KPROBE places this where kprobes won't be planted on it.
 */
_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
	/* I-cache snoops the D-cache on this CPU: just drop prefetched
	 * instructions and return — no per-line flushing needed. */
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 * and in some cases i-cache and d-cache line sizes differ from
 * each other.
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get cache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of cache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6			/* store D-cache line to memory */
	add	r6,r6,r7
	bdnz	1b
	sync				/* ensure stores reach memory first */

/* Now invalidate the instruction cache */

	lwz	r7,ICACHEL1LINESIZE(r10)	/* Get Icache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,ICACHEL1LOGLINESIZE(r10)	/* Get log-2 of Icache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
2:	icbi	0,r6			/* invalidate I-cache line */
	add	r6,r6,r7
	bdnz	2b
	isync				/* discard prefetched instructions */
	blr
	.previous .text			/* back out of the _KPROBE section */
/*
 * Like above, but only do the D-cache.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 *
 * In: r3 = start, r4 = stop.  Clobbers r5-r10, ctr, cr0.
 */
_GLOBAL(flush_dcache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
0:	dcbst	0,r6			/* store D-cache line to memory */
	add	r6,r6,r7
	bdnz	0b
	sync				/* order the dcbst stores */
	blr

/*
 * Like above, but works on non-mapped physical addresses.
 * Use only for non-LPAR setups ! It also assumes real mode
 * is cacheable. Used for flushing out the DART before using
 * it as uncacheable memory
 *
 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 *
 * In: r3 = physical start, r4 = physical stop.
 * Temporarily clears MSR_DR so the dcbst loop runs on real addresses,
 * then restores the original MSR.  Clobbers r0, r5-r10, ctr, cr0.
 */
_GLOBAL(flush_dcache_phys_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mfmsr	r5			/* Disable MMU Data Relocation */
	ori	r0,r5,MSR_DR
	xori	r0,r0,MSR_DR		/* ori+xori = clear MSR_DR bit */
	sync
	mtmsr	r0
	sync
	isync
	mtctr	r8
0:	dcbst	0,r6			/* store D-cache line (real address) */
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	mtmsr	r5			/* Re-enable MMU Data Relocation */
	sync
	isync
	blr

/*
 * flush_inval_dcache_range(unsigned long start, unsigned long stop)
 *
 * Flush AND invalidate (dcbf) the D-cache lines covering start..stop-1,
 * so subsequent loads refetch from memory.  Same line-count computation
 * as flush_dcache_range.  Clobbers r5-r10, ctr, cr0.
 */
_GLOBAL(flush_inval_dcache_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	sync
	isync
	mtctr	r8
0:	dcbf	0,r6			/* flush + invalidate the line */
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	blr


/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 *	void __flush_dcache_icache(void *page)
 *
 * In: r3 = any address within the page.  Clobbers r3-r7, ctr.
 */
_GLOBAL(__flush_dcache_icache)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */

BEGIN_FTR_SECTION
	/* coherent I-cache: nothing to flush, just purge prefetch */
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)

/* Flush the dcache */
	ld	r7,PPC64_CACHES@toc(r2)
	clrrdi	r3,r3,PAGE_SHIFT		/* Page align */
	lwz	r4,DCACHEL1LINESPERPAGE(r7)	/* Get # dcache lines per page */
	lwz	r5,DCACHEL1LINESIZE(r7)		/* Get dcache line size */
	mr	r6,r3
	mtctr	r4
0:	dcbst	0,r6
	add	r6,r6,r5
	bdnz	0b
	sync				/* ensure dcbst stores complete */

/* Now invalidate the icache */

	lwz	r4,ICACHEL1LINESPERPAGE(r7)	/* Get # icache lines per page */
	lwz	r5,ICACHEL1LINESIZE(r7)		/* Get icache line size */
	mtctr	r4
1:	icbi	0,r3
	add	r3,r3,r5
	bdnz	1b
	isync
	blr

/*
 * unsigned long __bswapdi2(unsigned long x)
 *
 * GCC runtime helper: byte-reverse the 64-bit value in r3.
 * r8/r9 handle the high word, r7 the low word; the rlwinm/rlwimi
 * pairs place each byte at its mirrored position.  Clobbers r7-r9.
 */
_GLOBAL(__bswapdi2)
	srdi	r8,r3,32		/* r8 = high 32 bits */
	rlwinm	r7,r3,8,0xffffffff
	rlwimi	r7,r3,24,0,7
	rlwinm	r9,r8,8,0xffffffff
	rlwimi	r7,r3,24,16,23		/* r7 = bswap(low word) */
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23		/* r9 = bswap(high word) */
	sldi	r7,r7,32
	or	r3,r7,r9		/* swap the word halves too */
	blr


#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
/*
 * rmci_on / rmci_off: set/clear bit 0x100 (shifted into the upper word)
 * in HID4 — per the function names, the real-mode cache-inhibit control;
 * TODO confirm against the 970 HID4 register description.  The SLB is
 * invalidated (slbia) after the change.  Clobbers r3, r5.
 */
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0		/* mask = 0x100 << 32 */
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia				/* SLB contents depend on HID4 mode */
	isync
	sync
	blr

_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0		/* mask = 0x100 << 32 */
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3		/* clear the bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)

/*
 * Do an IO access in real mode
 *
 * u8 real_readb(volatile u8 __iomem *addr)
 * In: r3 = real address.  Out: r3 = byte read.
 * Turns MSR_DR off and sets the HID4 0x100<<32 bit around the access,
 * restoring both afterwards.  Clobbers r0, r5-r7.
 */
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* clear MSR_DR */
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4		/* r6 = saved HID4 */
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0		/* set bit 0x100<<32 */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)		/* the actual real-mode load */
	sync
	mtspr	SPRN_HID4,r6		/* restore HID4 */
	isync
	slbia
	isync
	mtmsrd	r7			/* restore MSR (DR back on) */
	sync
	isync
	blr

	/*
 * Do an IO access in real mode
 *
 * void real_writeb(u8 data, volatile u8 __iomem *addr)
 * In: r3 = byte to store, r4 = real address.  Clobbers r0, r5-r7.
 * Same HID4/MSR_DR bracketing as real_readb.
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* clear MSR_DR */
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4		/* r6 = saved HID4 */
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0		/* set bit 0x100<<32 */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)		/* the actual real-mode store */
	sync
	mtspr	SPRN_HID4,r6		/* restore HID4 */
	isync
	slbia
	isync
	mtmsrd	r7			/* restore MSR */
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI

/*
 * real_205_readb / real_205_writeb: cache-inhibited real-mode byte
 * access using the lbzcix/stbcix instructions, so no HID4 fiddling is
 * needed — only MSR_DR is turned off around the access.
 * readb:  r3 = real address in, byte out in r3.
 * writeb: r3 = byte, r4 = real address.  Both clobber r0, r7.
 */
_GLOBAL(real_205_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* clear MSR_DR */
	sync
	mtmsrd	r0
	sync
	isync
	LBZCIX(R3,R0,R3)		/* cache-inhibited load */
	isync
	mtmsrd	r7			/* restore MSR */
	sync
	isync
	blr

_GLOBAL(real_205_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* clear MSR_DR */
	sync
	mtmsrd	r0
	sync
	isync
	STBCIX(R3,R0,R4)		/* cache-inhibited store */
	isync
	mtmsrd	r7			/* restore MSR */
	sync
	isync
	blr

#endif /* CONFIG_PPC_PASEMI */


#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24 bits register address. This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
 */
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE		/* clear MSR_EE */
	mtmsrd	r0,1

	/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
	 * (including parity). On current CPUs they must be 0'd,
	 * and finally or in RW bit
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000		/* RW = read */

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD		/* result */
	isync
	mfspr	r0,SPRN_SCOMC		/* status read (discarded) */
	isync

	/* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
	 * that's the best we can do). Not implemented yet as we don't use
	 * the scom on any of the bogus CPUs yet, but may have to be done
	 * ultimately
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr


_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE		/* clear MSR_EE */
	mtmsrd	r0,1

	/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
	 * (including parity). On current CPUs they must be 0'd.
	 */

	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4      /* write data */
	isync
	mtspr	SPRN_SCOMC,r3      /* write command */
	isync
	mfspr	3,SPRN_SCOMC	/* NOTE(review): bare "3" is r3 — GAS accepts
				 * raw GPR numbers; consider writing r3 */
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */

/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 100.
 *
 * This is used by all slaves, even those that did not find a matching
 * paca in the secondary startup code.
 *
 * Physical (hardware) cpu id should be in r3.
 */
_GLOBAL(kexec_wait)
	bl	1f			/* PC-relative: get our own address */
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b	/* r5 = &kexec_flag */

99:	HMT_LOW				/* low priority while spinning */
#ifdef CONFIG_KEXEC		/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b			/* spin until kexec_flag != 0 */
#ifdef CONFIG_PPC_BOOK3S_64
	/* enter the new kernel's slave entry (0x60) via rfid so we can
	 * force big-endian mode (clear MSR_LE) at the same time */
	li	r10,0x60
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid
#else
	ba	0x60			/* absolute branch to slave entry */
#endif
#endif

/* this can be in text because we won't change it until we are
 * running in real anyways
 */
kexec_flag:
	.long	0


#ifdef CONFIG_KEXEC

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)	/* r3 stays live into kexec_wait */
	bl	real_mode

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)
	SYNC

	b	kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
real_mode:	/* assume normal blr return */
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9	/* current MSR with RI cleared */
	andc	r10,r12,r10	/* current MSR with DR|IR cleared */

	mtmsrd	r9,1		/* RI off: no recoverable interrupts */
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid			/* "return" to LR with MMU off */


/*
 * kexec_sequence(newstack, start, image, control, clear_all())
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 *
 * In: r3 = newstack, r4 = start (real entry), r5 = image (virt),
 *     r6 = control (unused), r7 = clear_all function (descriptor on
 *     ELFv1, entry on ELFv2), r8 = spare.  Does not return.
 */

_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)		/* zero saved-LR slot on new stack */

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-STACK_FRAME_OVERHEAD-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* spare */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
	mfmsr	r3
	rlwinm	r3,r3,0,17,15		/* clear MSR_EE */
	mtmsrd	r3,1

	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	kexec_copy_flush	/* (image) */

	/* turn off mmu */
	bl	real_mode

	/* copy 0x100 bytes starting at start to 0 */
	li	r3,0
	mr	r4,r30		/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(5)	/* NOTE(review): bare "5" is r5 —
					 * GAS accepts raw GPR numbers;
					 * consider writing (r5) */

	/* clear out hardware hash page table and tlb */
#if !defined(_CALL_ELF) || _CALL_ELF != 2
	ld	r12,0(r27)		/* deref function descriptor */
#else
	mr	r12,r27			/* ELFv2: r27 is already the entry */
#endif
	mtctr	r12
	bctrl				/* ppc_md.hpte_clear_all(void); */

/*
 * kexec image calling is:
 *    the first 0x100 bytes of the entry point are copied to 0
 *
 *    all slaves branch to slave = 0x60 (absolute)
 *            slave(phys_cpu_id);
 *
 *    master goes to start = entry point
 *            start(phys_cpu_id, start, 0);
 *
 *
 * a wrapper is needed to call existing kernels, here is an approximate
 * description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	mtlr	4	# NOTE(review): bare "4" is r4 (GAS raw GPR number)
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC */

#ifdef CONFIG_MODULES
#if defined(_CALL_ELF) && _CALL_ELF == 2

#ifdef CONFIG_MODVERSIONS
.weak __crc_TOC.
.section "___kcrctab+TOC.","a"
.globl __kcrctab_TOC.
__kcrctab_TOC.:
	.llong	__crc_TOC.
#endif

/*
 * Export a fake .TOC. since both modpost and depmod will complain otherwise.
 * Both modpost and depmod strip the leading . so we do the same here.
 */
.section "__ksymtab_strings","a"
__kstrtab_TOC.:
	.asciz "TOC."

.section "___ksymtab+TOC.","a"
/* This symbol name is important: it's used by modpost to find exported syms */
.globl __ksymtab_TOC.
__ksymtab_TOC.:
	.llong 0 /* .value */
	.llong __kstrtab_TOC.
#endif /* ELFv2 */
#endif /* MODULES */