/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

#ifdef __powerpc64__
#include "mmu_oea64.h"
#endif

#ifndef __powerpc64__
struct bat	battable[16];
#endif

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void	*testppc64, *testppc64size;
extern void	*restorebridge, *restorebridgesize;
extern void	*rfid_patch, *rfi_patch1, *rfi_patch2;
extern void	*trapcode64;

extern Elf_Addr	_GLOBAL_OFFSET_TABLE_[];
#endif

extern void	*rstcode, *rstcodeend;
extern void	*trapcode, *trapcodeend;
extern void	*hypertrapcode, *hypertrapcodeend;
extern void	*generictrap, *generictrap64;
extern void	*alitrap, *aliend;
extern void	*dsitrap, *dsiend;
extern void	*decrint, *decrsize;
extern void	*extint, *extsize;
extern void	*dblow, *dbend;
extern void	*imisstrap, *imisssize;
extern void	*dlmisstrap, *dlmisssize;
extern void	*dsmisstrap, *dsmisssize;

extern void *ap_pcpu;
extern void __restartkernel(vm_offset_t, vm_offset_t, vm_offset_t, void *,
    uint32_t, register_t offset, register_t msr);

void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
    void *mdp, uint32_t mdp_cookie);
void aim_cpu_init(vm_offset_t toc);

void
aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
    uint32_t mdp_cookie)
{
	register_t	scratch;

	/*
	 * If running from an FDT, make sure we are in real mode to avoid
	 * tromping on firmware page tables.
	 * Everything in the kernel assumes 1:1 mappings out of firmware,
	 * so this won't break anything not already broken. This doesn't
	 * work if there is live OF, since OF may internally use non-1:1
	 * mappings.
	 */
	if (ofentry == 0)
		mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));

#ifdef __powerpc64__
	/*
	 * If in real mode, relocate to high memory so that the kernel
	 * can execute from the direct map.
	 */
	if (!(mfmsr() & PSL_DR) &&
	    (vm_offset_t)&aim_early_init < DMAP_BASE_ADDRESS)
		__restartkernel(fdt, 0, ofentry, mdp, mdp_cookie,
		    DMAP_BASE_ADDRESS, mfmsr());
#endif

	/* Various very early CPU fix ups */
	switch (mfpvr() >> 16) {
	/*
	 * PowerPC 970 CPUs have a misfeature requested by Apple that
	 * makes them pretend they have a 32-byte cacheline. Turn this
	 * off before we measure the cacheline size.
	 */
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
		scratch = mfspr(SPR_HID5);
		scratch &= ~HID5_970_DCBZ_SIZE_HI;
		mtspr(SPR_HID5, scratch);
		break;
#ifdef __powerpc64__
	case IBMPOWER7:
	case IBMPOWER7PLUS:
	case IBMPOWER8:
	case IBMPOWER8E:
	case IBMPOWER8NVL:
	case IBMPOWER9:
		/* XXX: get from ibm,slb-size in device tree */
		n_slbs = 32;
		break;
#endif
	}
}

void
aim_cpu_init(vm_offset_t toc)
{
	size_t		trap_offset, trapsize;
	vm_offset_t	trap;
	register_t	msr;
	uint8_t		*cache_check;
	int		cacheline_warn;
#ifndef __powerpc64__
	register_t	scratch;
	int		ppc64;
#endif

	trap_offset = 0;
	cacheline_warn = 0;

	/* General setup for AIM CPUs */
	psl_kernset = PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI;

#ifdef __powerpc64__
	psl_kernset |= PSL_SF;
	if (mfmsr() & PSL_HV)
		psl_kernset |= PSL_HV;
#endif
	psl_userset = psl_kernset | PSL_PR;
#ifdef __powerpc64__
	psl_userset32 = psl_userset & ~PSL_SF;
#endif

	/* Bits that users aren't allowed to change */
	psl_userstatic = ~(PSL_VEC | PSL_FP | PSL_FE0 | PSL_FE1);
	/*
	 * Mask bits from the SRR1 that aren't really the MSR:
	 * Bits 1-4, 10-15 (ppc32), 33-36, 42-47 (ppc64)
	 */
	psl_userstatic &= ~0x783f0000UL;

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5). Note that no OFW calls can be made until
	 * translation is re-enabled.
	 */

	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

	/*
	 * Measure the cacheline size using dcbz.
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */

	cache_check = (void *)EXC_PGM;

	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++)
		;

	/* Work around psim bug */
	if (cacheline_size == 0) {
		cacheline_warn = 1;
		cacheline_size = 32;
	}

#ifndef __powerpc64__
	/*
	 * Figure out whether we need to use the 64-bit PMAP.
	 * This works by executing an instruction that is only legal on
	 * 64-bit PPC (mtmsrd), and setting ppc64 = 0 if that causes a trap.
	 */

	ppc64 = 1;

	bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
		: "=r"(scratch), "=r"(ppc64));

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */

	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch,&rfi_patch1,4);
#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch,&rfi_patch2,4);
#endif
	}
#else /* powerpc64 */
	cpu_features |= PPC_FEATURE_64;
#endif

	trapsize = (size_t)&trapcodeend - (size_t)&trapcode;

	/*
	 * Copy generic handler into every possible trap. Special cases
	 * will get different ones in a minute.
	 */
	for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
		bcopy(&trapcode, (void *)trap, trapsize);

#ifndef __powerpc64__
	if (cpu_features & PPC_FEATURE_64) {
		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler
		 */

		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
	}
#else
	trapsize = (size_t)&hypertrapcodeend - (size_t)&hypertrapcode;
	bcopy(&hypertrapcode, (void *)(EXC_HEA + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_HMI + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_HVI + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_SOFT_PATCH + trap_offset), trapsize);
#endif

	bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -
	    (size_t)&rstcode);

#ifdef KDB
	bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
#endif
	bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend -
	    (size_t)&alitrap);
	bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend -
	    (size_t)&dsitrap);

	/* Set address of generictrap for self-reloc calculations */
	*((void **)TRAP_GENTRAP) = &generictrap;
#ifdef __powerpc64__
	/* Set TOC base so that the interrupt code can get at it */
	*((void **)TRAP_ENTRY) = &generictrap;
	*((register_t *)TRAP_TOCBASE) = toc;
#else
	/* Set branch address for trap code */
	if (cpu_features & PPC_FEATURE_64)
		*((void **)TRAP_ENTRY) = &generictrap64;
	else
		*((void **)TRAP_ENTRY) = &generictrap;
	*((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;

	/* G2-specific TLB miss helper handlers */
	bcopy(&imisstrap, (void *)EXC_IMISS,
	    (size_t)&imisssize);
	bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
	bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);
#endif
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Restore MSR
	 */
	mtmsr(msr);

	/* Warn if cacheline size was not determined */
	if (cacheline_warn == 1) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
	}

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
}

/*
 * Shut down the CPU as much as possible.
 */
void
cpu_halt(void)
{

	OF_exit();
}

int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 |= PSL_SE;

	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 &= ~PSL_SE;

	return (0);
}

void
kdb_cpu_clear_singlestep(void)
{

	kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

	kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
	/* Copy the SLB contents from the current CPU */
	memcpy(pcpu->pc_aim.slb, PCPU_GET(aim.slb), sizeof(pcpu->pc_aim.slb));
#endif
}

#ifndef __powerpc64__
/* Return the VSID for a virtual address from the 32-bit segment registers. */
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}

#endif

/*
 * These functions need to provide addresses that both (a) work in real mode
 * (or whatever mode/circumstances the kernel is in during early boot (now))
 * and (b) can still, in principle, work once the kernel is going. Because
 * these rely on existing mappings/real mode, unmap is a no-op.
 */
vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{
	KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));

	/*
	 * If we have the MMU up in early boot, assume it is 1:1. Otherwise,
	 * try to get the address in a memory region compatible with the
	 * direct map for efficiency later.
	 */
	if (mfmsr() & PSL_DR)
		return (pa);
	else
		return (DMAP_BASE_ADDRESS + pa);
}

void
pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
{

	KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));
}

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
	register_t msr;
	register_t msscr0;
	register_t cache_reg;
	volatile uint32_t *memp;
	uint32_t temp;
	int i;
	int x;

	msr = mfmsr();
	powerpc_sync();
	mtmsr(msr & ~(PSL_EE | PSL_DR));
	msscr0 = mfspr(SPR_MSSCR0);
	msscr0 &= ~MSSCR0_L2PFE;
	mtspr(SPR_MSSCR0, msscr0);
	powerpc_sync();
	isync();
	__asm__ __volatile__("dssall; sync");
	powerpc_sync();
	isync();
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));

	/*
	 * Lock the L1 data cache: setting the low eight bits of LDSTCR
	 * locks all eight ways of the 32 KB L1 data cache.
	 */
	mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
	powerpc_sync();
	isync();

	mtspr(SPR_LDSTCR, 0);

	/*
	 * Perform this in two stages: Flush the cache starting in RAM
	 * (128 K loads at a 32-byte stride, i.e. 4 MB), then do it from
	 * ROM.
	 */
	memp = (volatile uint32_t *)0x00000000;
	for (i = 0; i < 128 * 1024; i++) {
		temp = *memp;
		__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
		memp += 32/sizeof(*memp);
	}

	memp = (volatile uint32_t *)0xfff00000;
	x = 0xfe;

	/*
	 * Walk a single unlocked way through the lock mask: each pass
	 * leaves exactly one way unlocked, loads and flushes 128 cache
	 * lines (one way's worth) from the ROM region, then moves on
	 * until every way has been covered (mask 0xff).
	 */
	for (; x != 0xff;) {
		mtspr(SPR_LDSTCR, x);
		for (i = 0; i < 128; i++) {
			temp = *memp;
			__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
			memp += 32/sizeof(*memp);
		}
		x = ((x << 1) | 1) & 0xff;
	}
	mtspr(SPR_LDSTCR, 0);

	cache_reg = mfspr(SPR_L2CR);
	if (cache_reg & L2CR_L2E) {
		cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
		while (mfspr(SPR_L2CR) & L2CR_L2HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L2CR_L2E;
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
		powerpc_sync();
		while (mfspr(SPR_L2CR) & L2CR_L2I)
			; /* Busy wait for L2 cache invalidate */
		powerpc_sync();
	}

	cache_reg = mfspr(SPR_L3CR);
	if (cache_reg & L3CR_L3E) {
		cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
		while (mfspr(SPR_L3CR) & L3CR_L3HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L3CR_L3E;
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
		powerpc_sync();
		while (mfspr(SPR_L3CR) & L3CR_L3I)
			; /* Busy wait for L3 cache invalidate */
		powerpc_sync();
	}

	mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
	powerpc_sync();
	isync();

	mtmsr(msr);
}

void
cpu_sleep(void)
{
	static u_quad_t timebase = 0;
	static register_t sprgs[4];
	static register_t srrs[2];

	jmp_buf resetjb;
	struct thread *fputd;
	struct thread *vectd;
	register_t hid0;
	register_t msr;
	register_t saved_msr;

	ap_pcpu = pcpup;

	PCPU_SET(restore, &resetjb);

	saved_msr = mfmsr();
	fputd = PCPU_GET(fputhread);
	vectd = PCPU_GET(vecthread);
	if (fputd != NULL)
		save_fpu(fputd);
	if (vectd != NULL)
		save_vec(vectd);
	if (setjmp(resetjb) == 0) {
		sprgs[0] = mfspr(SPR_SPRG0);
		sprgs[1] = mfspr(SPR_SPRG1);
		sprgs[2] = mfspr(SPR_SPRG2);
		sprgs[3] = mfspr(SPR_SPRG3);
		srrs[0] = mfspr(SPR_SRR0);
		srrs[1] = mfspr(SPR_SRR1);
		timebase = mftb();
		powerpc_sync();
		flush_disable_caches();
		hid0 = mfspr(SPR_HID0);
		hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
		powerpc_sync();
		isync();
		msr = mfmsr() | PSL_POW;
		mtspr(SPR_HID0, hid0);
		powerpc_sync();

		/*
		 * With HID0[SLEEP] set, writing PSL_POW into the MSR
		 * stops the core; wakeup re-enters through the reset
		 * vector, which returns here via the saved resetjb.
		 */
		while (1)
			mtmsr(msr);
	}
	platform_smp_timebase_sync(timebase, 0);
	PCPU_SET(curthread, curthread);
	PCPU_SET(curpcb, curthread->td_pcb);
	pmap_activate(curthread);
	powerpc_sync();
	mtspr(SPR_SPRG0, sprgs[0]);
	mtspr(SPR_SPRG1, sprgs[1]);
	mtspr(SPR_SPRG2, sprgs[2]);
	mtspr(SPR_SPRG3, sprgs[3]);
	mtspr(SPR_SRR0, srrs[0]);
	mtspr(SPR_SRR1, srrs[1]);
	mtmsr(saved_msr);
	if (fputd == curthread)
		enable_fpu(curthread);
	if (vectd == curthread)
		enable_vec(curthread);
	powerpc_sync();
}