1/* SPDX-License-Identifier: GPL-2.0-or-later */ 2/* 3 * This file contains miscellaneous low-level functions. 4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 5 * 6 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) 7 * and Paul Mackerras. 8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) 9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) 10 */ 11 12#include <linux/export.h> 13#include <linux/linkage.h> 14#include <linux/sys.h> 15#include <asm/unistd.h> 16#include <asm/errno.h> 17#include <asm/processor.h> 18#include <asm/page.h> 19#include <asm/cache.h> 20#include <asm/ppc_asm.h> 21#include <asm/asm-offsets.h> 22#include <asm/cputable.h> 23#include <asm/thread_info.h> 24#include <asm/kexec.h> 25#include <asm/ptrace.h> 26#include <asm/mmu.h> 27#include <asm/feature-fixups.h> 28 29 .text 30 31_GLOBAL(__bswapdi2) 32EXPORT_SYMBOL(__bswapdi2) 33 srdi r8,r3,32 34 rlwinm r7,r3,8,0xffffffff 35 rlwimi r7,r3,24,0,7 36 rlwinm r9,r8,8,0xffffffff 37 rlwimi r7,r3,24,16,23 38 rlwimi r9,r8,24,0,7 39 rlwimi r9,r8,24,16,23 40 sldi r7,r7,32 41 or r3,r7,r9 42 blr 43 44 45#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX 46_GLOBAL(rmci_on) 47 sync 48 isync 49 li r3,0x100 50 rldicl r3,r3,32,0 51 mfspr r5,SPRN_HID4 52 or r5,r5,r3 53 sync 54 mtspr SPRN_HID4,r5 55 isync 56 slbia 57 isync 58 sync 59 blr 60 61_GLOBAL(rmci_off) 62 sync 63 isync 64 li r3,0x100 65 rldicl r3,r3,32,0 66 mfspr r5,SPRN_HID4 67 andc r5,r5,r3 68 sync 69 mtspr SPRN_HID4,r5 70 isync 71 slbia 72 isync 73 sync 74 blr 75#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */ 76 77#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) 78 79/* 80 * Do an IO access in real mode 81 */ 82_GLOBAL(real_readb) 83 mfmsr r7 84 ori r0,r7,MSR_DR 85 xori r0,r0,MSR_DR 86 sync 87 mtmsrd r0 88 sync 89 isync 90 mfspr r6,SPRN_HID4 91 rldicl r5,r6,32,0 92 ori r5,r5,0x100 93 rldicl r5,r5,32,0 94 sync 95 mtspr SPRN_HID4,r5 96 isync 97 slbia 98 isync 99 lbz r3,0(r3) 100 sync 101 mtspr SPRN_HID4,r6 102 isync 103 slbia 104 isync 105 mtmsrd r7 106 sync 107 isync 108 blr 109 110 /* 111 * Do an IO access in real mode 112 */ 113_GLOBAL(real_writeb) 114 mfmsr r7 115 ori r0,r7,MSR_DR 116 xori r0,r0,MSR_DR 117 sync 118 mtmsrd r0 119 sync 120 isync 121 mfspr r6,SPRN_HID4 122 rldicl r5,r6,32,0 123 ori r5,r5,0x100 124 rldicl r5,r5,32,0 125 sync 126 mtspr SPRN_HID4,r5 127 isync 128 slbia 129 isync 130 stb r3,0(r4) 131 sync 132 mtspr SPRN_HID4,r6 133 isync 134 slbia 135 isync 136 mtmsrd r7 137 sync 138 isync 139 blr 140#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */ 141 142#ifdef CONFIG_PPC_PASEMI 143 144_GLOBAL(real_205_readb) 145 mfmsr r7 146 ori r0,r7,MSR_DR 147 xori r0,r0,MSR_DR 148 sync 149 mtmsrd r0 150 sync 151 isync 152 LBZCIX(R3,R0,R3) 153 isync 154 mtmsrd r7 155 sync 156 isync 157 blr 158 159_GLOBAL(real_205_writeb) 160 mfmsr r7 161 ori r0,r7,MSR_DR 162 xori r0,r0,MSR_DR 163 sync 164 mtmsrd r0 165 sync 166 isync 167 STBCIX(R3,R0,R4) 168 isync 169 mtmsrd r7 170 sync 171 isync 172 blr 173 174#endif /* CONFIG_PPC_PASEMI */ 175 176 177#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE) 178/* 179 * SCOM access functions for 970 (FX only for now) 180 * 181 * unsigned long scom970_read(unsigned int address); 182 * void scom970_write(unsigned int address, unsigned long value); 183 * 184 * The address passed in is the 24 bits register address. This code 185 * is 970 specific and will not check the status bits, so you should 186 * know what you are doing. 187 */ 188_GLOBAL(scom970_read) 189 /* interrupts off */ 190 mfmsr r4 191 ori r0,r4,MSR_EE 192 xori r0,r0,MSR_EE 193 mtmsrd r0,1 194 195 /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits 196 * (including parity). On current CPUs they must be 0'd, 197 * and finally or in RW bit 198 */ 199 rlwinm r3,r3,8,0,15 200 ori r3,r3,0x8000 201 202 /* do the actual scom read */ 203 sync 204 mtspr SPRN_SCOMC,r3 205 isync 206 mfspr r3,SPRN_SCOMD 207 isync 208 mfspr r0,SPRN_SCOMC 209 isync 210 211 /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah 212 * that's the best we can do). Not implemented yet as we don't use 213 * the scom on any of the bogus CPUs yet, but may have to be done 214 * ultimately 215 */ 216 217 /* restore interrupts */ 218 mtmsrd r4,1 219 blr 220 221 222_GLOBAL(scom970_write) 223 /* interrupts off */ 224 mfmsr r5 225 ori r0,r5,MSR_EE 226 xori r0,r0,MSR_EE 227 mtmsrd r0,1 228 229 /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits 230 * (including parity). On current CPUs they must be 0'd. 231 */ 232 233 rlwinm r3,r3,8,0,15 234 235 sync 236 mtspr SPRN_SCOMD,r4 /* write data */ 237 isync 238 mtspr SPRN_SCOMC,r3 /* write command */ 239 isync 240 mfspr 3,SPRN_SCOMC 241 isync 242 243 /* restore interrupts */ 244 mtmsrd r5,1 245 blr 246#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */ 247 248/* kexec_wait(phys_cpu) 249 * 250 * wait for the flag to change, indicating this kernel is going away but 251 * the slave code for the next one is at addresses 0 to 100. 252 * 253 * This is used by all slaves, even those that did not find a matching 254 * paca in the secondary startup code. 255 * 256 * Physical (hardware) cpu id should be in r3. 257 */ 258_GLOBAL(kexec_wait) 259 bcl 20,31,$+4 2601: mflr r5 261 addi r5,r5,kexec_flag-1b 262 26399: HMT_LOW 264#ifdef CONFIG_KEXEC_CORE /* use no memory without kexec */ 265 lwz r4,0(r5) 266 cmpwi 0,r4,0 267 beq 99b 268#ifdef CONFIG_PPC_BOOK3S_64 269 li r10,0x60 270 mfmsr r11 271 clrrdi r11,r11,1 /* Clear MSR_LE */ 272 mtsrr0 r10 273 mtsrr1 r11 274 rfid 275#else 276 /* Create TLB entry in book3e_secondary_core_init */ 277 li r4,0 278 ba 0x60 279#endif 280#endif 281 282/* this can be in text because we won't change it until we are 283 * running in real anyways 284 */ 285kexec_flag: 286 .long 0 287 288 289#ifdef CONFIG_KEXEC_CORE 290#ifdef CONFIG_PPC_BOOK3E_64 291/* 292 * BOOK3E has no real MMU mode, so we have to setup the initial TLB 293 * for a core to identity map v:0 to p:0. This current implementation 294 * assumes that 1G is enough for kexec. 295 */ 296kexec_create_tlb: 297 /* 298 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict. 299 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict. 300 */ 301 PPC_TLBILX_ALL(0,R0) 302 sync 303 isync 304 305 mfspr r10,SPRN_TLB1CFG 306 andi. r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */ 307 subi r10,r10,1 /* Last entry: no conflict with kernel text */ 308 lis r9,MAS0_TLBSEL(1)@h 309 rlwimi r9,r10,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r9) */ 310 311/* Set up a temp identity mapping v:0 to p:0 and return to it. */ 312 mtspr SPRN_MAS0,r9 313 314 lis r9,(MAS1_VALID|MAS1_IPROT)@h 315 ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l 316 mtspr SPRN_MAS1,r9 317 318 LOAD_REG_IMMEDIATE(r9, 0x0 | MAS2_M_IF_NEEDED) 319 mtspr SPRN_MAS2,r9 320 321 LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX) 322 mtspr SPRN_MAS3,r9 323 li r9,0 324 mtspr SPRN_MAS7,r9 325 326 tlbwe 327 isync 328 blr 329#endif 330 331/* kexec_smp_wait(void) 332 * 333 * call with interrupts off 334 * note: this is a terminal routine, it does not save lr 335 * 336 * get phys id from paca 337 * switch to real mode 338 * mark the paca as no longer used 339 * join other cpus in kexec_wait(phys_id) 340 */ 341_GLOBAL(kexec_smp_wait) 342 lhz r3,PACAHWCPUID(r13) 343 bl real_mode 344 345 li r4,KEXEC_STATE_REAL_MODE 346 stb r4,PACAKEXECSTATE(r13) 347 348 b kexec_wait 349 350/* 351 * switch to real mode (turn mmu off) 352 * we use the early kernel trick that the hardware ignores bits 353 * 0 and 1 (big endian) of the effective address in real mode 354 * 355 * don't overwrite r3 here, it is live for kexec_wait above. 356 */ 357SYM_FUNC_START_LOCAL(real_mode) /* assume normal blr return */ 358#ifdef CONFIG_PPC_BOOK3E_64 359 /* Create an identity mapping. */ 360 b kexec_create_tlb 361#else 3621: li r9,MSR_RI 363 li r10,MSR_DR|MSR_IR 364 mflr r11 /* return address to SRR0 */ 365 mfmsr r12 366 andc r9,r12,r9 367 andc r10,r12,r10 368 369 mtmsrd r9,1 370 mtspr SPRN_SRR1,r10 371 mtspr SPRN_SRR0,r11 372 rfid 373#endif 374SYM_FUNC_END(real_mode) 375 376/* 377 * kexec_sequence(newstack, start, image, control, clear_all(), 378 copy_with_mmu_off) 379 * 380 * does the grungy work with stack switching and real mode switches 381 * also does simple calls to other code 382 */ 383 384_GLOBAL(kexec_sequence) 385 mflr r0 386 std r0,16(r1) 387 388 /* switch stacks to newstack -- &kexec_stack.stack */ 389 stdu r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r3) 390 mr r1,r3 391 392 li r0,0 393 std r0,16(r1) 394 395 /* save regs for local vars on new stack. 396 * yes, we won't go back, but ... 397 */ 398 std r31,-8(r1) 399 std r30,-16(r1) 400 std r29,-24(r1) 401 std r28,-32(r1) 402 std r27,-40(r1) 403 std r26,-48(r1) 404 std r25,-56(r1) 405 406 stdu r1,-STACK_FRAME_MIN_SIZE-64(r1) 407 408 /* save args into preserved regs */ 409 mr r31,r3 /* newstack (both) */ 410 mr r30,r4 /* start (real) */ 411 mr r29,r5 /* image (virt) */ 412 mr r28,r6 /* control, unused */ 413 mr r27,r7 /* clear_all() fn desc */ 414 mr r26,r8 /* copy_with_mmu_off */ 415 lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ 416 417 /* disable interrupts, we are overwriting kernel data next */ 418#ifdef CONFIG_PPC_BOOK3E_64 419 wrteei 0 420#else 421 mfmsr r3 422 rlwinm r3,r3,0,17,15 423 mtmsrd r3,1 424#endif 425 426 /* We need to turn the MMU off unless we are in hash mode 427 * under a hypervisor 428 */ 429 cmpdi r26,0 430 beq 1f 431 bl real_mode 4321: 433 /* copy dest pages, flush whole dest image */ 434 mr r3,r29 435 bl CFUNC(kexec_copy_flush) /* (image) */ 436 437 /* turn off mmu now if not done earlier */ 438 cmpdi r26,0 439 bne 1f 440 bl real_mode 441 442 /* copy 0x100 bytes starting at start to 0 */ 4431: li r3,0 444 mr r4,r30 /* start, aka phys mem offset */ 445 li r5,0x100 446 li r6,0 447 bl copy_and_flush /* (dest, src, copy limit, start offset) */ 4481: /* assume normal blr return */ 449 450 /* release other cpus to the new kernel secondary start at 0x60 */ 451 mflr r5 452 li r6,1 453 stw r6,kexec_flag-1b(5) 454 455 cmpdi r27,0 456 beq 1f 457 458 /* clear out hardware hash page table and tlb */ 459#ifdef CONFIG_PPC64_ELF_ABI_V1 460 ld r12,0(r27) /* deref function descriptor */ 461#else 462 mr r12,r27 463#endif 464 mtctr r12 465 bctrl /* mmu_hash_ops.hpte_clear_all(void); */ 466 467/* 468 * kexec image calling is: 469 * the first 0x100 bytes of the entry point are copied to 0 470 * 471 * all slaves branch to slave = 0x60 (absolute) 472 * slave(phys_cpu_id); 473 * 474 * master goes to start = entry point 475 * start(phys_cpu_id, start, 0); 476 * 477 * 478 * a wrapper is needed to call existing kernels, here is an approximate 479 * description of one method: 480 * 481 * v2: (2.6.10) 482 * start will be near the boot_block (maybe 0x100 bytes before it?) 483 * it will have a 0x60, which will b to boot_block, where it will wait 484 * and 0 will store phys into struct boot-block and load r3 from there, 485 * copy kernel 0-0x100 and tell slaves to back down to 0x60 again 486 * 487 * v1: (2.6.9) 488 * boot block will have all cpus scanning device tree to see if they 489 * are the boot cpu ????? 490 * other device tree differences (prop sizes, va vs pa, etc)... 491 */ 4921: mr r3,r25 # my phys cpu 493 mr r4,r30 # start, aka phys mem offset 494 mtlr 4 495 li r5,0 496 blr /* image->start(physid, image->start, 0); */ 497#endif /* CONFIG_KEXEC_CORE */ 498