/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

#ifdef __powerpc64__
#include "mmu_oea64.h"
#endif

#ifndef __powerpc64__
struct bat battable[16];
#endif

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void *testppc64, *testppc64size;
extern void *restorebridge, *restorebridgesize;
extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
extern void *trapcode64;

extern Elf_Addr _GLOBAL_OFFSET_TABLE_[];
#endif

extern void *rstcode, *rstcodeend;
extern void *trapcode, *trapcodeend;
extern void *hypertrapcode, *hypertrapcodeend;
extern void *generictrap, *generictrap64;
extern void *alitrap, *aliend;
extern void *dsitrap, *dsiend;
extern void *decrint, *decrsize;
extern void *extint, *extsize;
extern void *dblow, *dbend;
extern void *imisstrap, *imisssize;
extern void *dlmisstrap, *dlmisssize;
extern void *dsmisstrap, *dsmisssize;

extern void *ap_pcpu;
extern void __restartkernel(vm_offset_t, vm_offset_t, vm_offset_t, void *,
    uint32_t, register_t offset, register_t msr);

void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
    void *mdp, uint32_t mdp_cookie);
void aim_cpu_init(vm_offset_t toc);
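
/*
 * Early (pre-VM) AIM setup: drop to real mode if there is no live Open
 * Firmware, relocate a real-mode powerpc64 kernel into the direct map,
 * and apply the earliest per-model CPU fix-ups.
 */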
void
aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
    uint32_t mdp_cookie)
{
        register_t scratch;

        /*
         * If running from an FDT, make sure we are in real mode to avoid
         * tromping on firmware page tables. Everything in the kernel assumes
         * 1:1 mappings out of firmware, so this won't break anything not
         * already broken. This doesn't work if there is live OF, since OF
         * may internally use non-1:1 mappings.
         */
        if (ofentry == 0)
                mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));

#ifdef __powerpc64__
        /*
         * If in real mode, relocate to high memory so that the kernel
         * can execute from the direct map.
         */
        if (!(mfmsr() & PSL_DR) &&
            (vm_offset_t)&aim_early_init < DMAP_BASE_ADDRESS)
                __restartkernel(fdt, 0, ofentry, mdp, mdp_cookie,
                    DMAP_BASE_ADDRESS, mfmsr());
#endif

        /* Various very early CPU fix-ups */
        switch (mfpvr() >> 16) {
        /*
         * PowerPC 970 CPUs have a misfeature requested by Apple that
         * makes them pretend they have a 32-byte cacheline. Turn this
         * off before we measure the cacheline size.
         */
        case IBM970:
        case IBM970FX:
        case IBM970MP:
        case IBM970GX:
                scratch = mfspr(SPR_HID5);
                scratch &= ~HID5_970_DCBZ_SIZE_HI;
                mtspr(SPR_HID5, scratch);
                break;
#ifdef __powerpc64__
        case IBMPOWER7:
        case IBMPOWER7PLUS:
        case IBMPOWER8:
        case IBMPOWER8E:
        case IBMPOWER9:
                /* XXX: get from ibm,slb-size in device tree */
                n_slbs = 32;
                break;
#endif
        }
}
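
/*
 * Later-stage AIM CPU setup: compute the kernel/user MSR templates,
 * measure the cache line size, install the exception vectors in low
 * memory and select a pmap implementation.
 */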
void
aim_cpu_init(vm_offset_t toc)
{
        size_t trap_offset, trapsize;
        vm_offset_t trap;
        register_t msr;
        uint8_t *cache_check;
        int cacheline_warn;
#ifndef __powerpc64__
        register_t scratch;
        int ppc64;
#endif

        trap_offset = 0;
        cacheline_warn = 0;

        /* General setup for AIM CPUs */
        psl_kernset = PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI;

#ifdef __powerpc64__
        psl_kernset |= PSL_SF;
        if (mfmsr() & PSL_HV)
                psl_kernset |= PSL_HV;
#endif
        psl_userset = psl_kernset | PSL_PR;
#ifdef __powerpc64__
        psl_userset32 = psl_userset & ~PSL_SF;
#endif

        /* Bits that users aren't allowed to change */
        psl_userstatic = ~(PSL_VEC | PSL_FP | PSL_FE0 | PSL_FE1);
        /*
         * Mask bits from the SRR1 that aren't really the MSR:
         * Bits 1-4, 10-15 (ppc32), 33-36, 42-47 (ppc64)
         */
        psl_userstatic &= ~0x783f0000UL;

        /*
         * Initialize the interrupt tables and figure out our cache line
         * size and whether or not we need the 64-bit bridge code.
         */

        /*
         * Disable translation in case the vector area hasn't been
         * mapped (G5). Note that no OFW calls can be made until
         * translation is re-enabled.
         */

        msr = mfmsr();
        mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

        /*
         * Measure the cacheline size using dcbz.
         *
         * Use EXC_PGM as a playground. We are about to overwrite it
         * anyway, we know it exists, and we know it is cache-aligned.
         */

        cache_check = (void *)EXC_PGM;

        for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
                cache_check[cacheline_size] = 0xff;

        /* dcbz zeroes exactly one cache line at the target address. */
        __asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

        /* Find the first byte dcbz did not zero to get the cache line size */
        for (cacheline_size = 0; cacheline_size < 0x100 &&
            cache_check[cacheline_size] == 0; cacheline_size++);

        /* Work around psim bug */
        if (cacheline_size == 0) {
                cacheline_warn = 1;
                cacheline_size = 32;
        }

#ifndef __powerpc64__
        /*
         * Figure out whether we need to use the 64-bit PMAP. This works by
         * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
         * and setting ppc64 = 0 if that causes a trap.
         */

        ppc64 = 1;

        bcopy(&testppc64, (void *)EXC_PGM,  (size_t)&testppc64size);
        __syncicache((void *)EXC_PGM, (size_t)&testppc64size);

        /*
         * testppc64, installed at EXC_PGM above, handles the program
         * exception that mtmsrd raises on 32-bit CPUs: it clears SPRG2
         * and resumes past the faulting instruction, so a zero readback
         * indicates the trap fired.
         */
        __asm __volatile("\
                mfmsr %0;       \
                mtsprg2 %1;     \
                                \
                mtmsrd %0;      \
                mfsprg2 %1;"
            : "=r"(scratch), "=r"(ppc64));

        if (ppc64)
                cpu_features |= PPC_FEATURE_64;

        /*
         * Now copy restorebridge into all the handlers, if necessary,
         * and set up the trap tables.
         */

        if (cpu_features & PPC_FEATURE_64) {
                /* Patch the two instances of rfi -> rfid */
                bcopy(&rfid_patch, &rfi_patch1, 4);
#ifdef KDB
                /* rfi_patch2 is at the end of dbleave */
                bcopy(&rfid_patch, &rfi_patch2, 4);
#endif
        }
#else /* powerpc64 */
        cpu_features |= PPC_FEATURE_64;
#endif

        trapsize = (size_t)&trapcodeend - (size_t)&trapcode;

        /*
         * Copy the generic handler into every possible trap. Special cases
         * will get different ones in a minute.
         */
        for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
                bcopy(&trapcode, (void *)trap, trapsize);

#ifndef __powerpc64__
        if (cpu_features & PPC_FEATURE_64) {
                /*
                 * Copy a code snippet to restore 32-bit bridge mode
                 * to the top of every non-generic trap handler
                 */

                trap_offset += (size_t)&restorebridgesize;
                bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
                bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
                bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
                bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
                bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
                bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
                bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
        }
#else
        trapsize = (size_t)&hypertrapcodeend - (size_t)&hypertrapcode;
        bcopy(&hypertrapcode, (void *)(EXC_HEA + trap_offset), trapsize);
        bcopy(&hypertrapcode, (void *)(EXC_HMI + trap_offset), trapsize);
        bcopy(&hypertrapcode, (void *)(EXC_HVI + trap_offset), trapsize);
        bcopy(&hypertrapcode, (void *)(EXC_SOFT_PATCH + trap_offset), trapsize);
#endif

        bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -
            (size_t)&rstcode);

#ifdef KDB
        bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend -
            (size_t)&dblow);
        bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend -
            (size_t)&dblow);
        bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend -
            (size_t)&dblow);
        bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend -
            (size_t)&dblow);
#endif
        bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend -
            (size_t)&alitrap);
        bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend -
            (size_t)&dsitrap);

#ifdef __powerpc64__
        /* Set TOC base so that the interrupt code can get at it */
        *((void **)TRAP_GENTRAP) = &generictrap;
        *((register_t *)TRAP_TOCBASE) = toc;
#else
        /* Set branch address for trap code */
        if (cpu_features & PPC_FEATURE_64)
                *((void **)TRAP_GENTRAP) = &generictrap64;
        else
                *((void **)TRAP_GENTRAP) = &generictrap;
        *((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;

        /* G2-specific TLB miss helper handlers */
        bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
        bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
        bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);
#endif
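
        /*
         * The vectors were installed with ordinary stores; flush them from
         * the data cache and invalidate the instruction cache so the new
         * handlers are fetched.
         */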
        __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

        /*
         * Restore MSR
         */
        mtmsr(msr);

        /* Warn if the cacheline size could not be determined */
        if (cacheline_warn == 1) {
                printf("WARNING: cacheline size undetermined, setting to 32\n");
        }

        /*
         * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
         * in case the platform module had a better idea of what we
         * should do.
         */
        if (cpu_features2 & PPC_FEATURE2_ARCH_3_00)
                pmap_mmu_install(MMU_TYPE_P9H, BUS_PROBE_GENERIC);
        else if (cpu_features & PPC_FEATURE_64)
                pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
        else
                pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
}

/*
 * Shut down the CPU as much as possible.
 */
void
cpu_halt(void)
{

        OF_exit();
}

int
ptrace_single_step(struct thread *td)
{
        struct trapframe *tf;

        tf = td->td_frame;
        tf->srr1 |= PSL_SE;

        return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
        struct trapframe *tf;

        tf = td->td_frame;
        tf->srr1 &= ~PSL_SE;

        return (0);
}

void
kdb_cpu_clear_singlestep(void)
{

        kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

        kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
        /* Copy the SLB contents from the current CPU */
        memcpy(pcpu->pc_aim.slb, PCPU_GET(aim.slb), sizeof(pcpu->pc_aim.slb));
#endif
}

#ifndef __powerpc64__
/* Look up the VSID for a virtual address in the pmap's segment registers. */
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
        return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}
#endif

/*
 * These functions need to provide addresses that both (a) work in real mode
 * (or whatever mode/circumstances the kernel is in during early boot) and
 * (b) can still, in principle, work once the kernel is going. Because they
 * rely on existing mappings/real mode, unmap is a no-op.
 */
vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{
        KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));

        /*
         * If we have the MMU up in early boot, assume it is 1:1. Otherwise,
         * try to get the address in a memory region compatible with the
         * direct map for efficiency later.
         */
        if (mfmsr() & PSL_DR)
                return (pa);
        else
                return (DMAP_BASE_ADDRESS + pa);
}

void
pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
{

        KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));
}
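
/*
 * Flush the contents of the data caches to memory and disable them, as
 * is required before the MPC7450 family enters sleep.
 */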
/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
        register_t msr;
        register_t msscr0;
        register_t cache_reg;
        volatile uint32_t *memp;
        uint32_t temp;
        int i;
        int x;

        msr = mfmsr();
        powerpc_sync();
        mtmsr(msr & ~(PSL_EE | PSL_DR));
        msscr0 = mfspr(SPR_MSSCR0);
        msscr0 &= ~MSSCR0_L2PFE;
        mtspr(SPR_MSSCR0, msscr0);
        powerpc_sync();
        isync();
        __asm__ __volatile__("dssall; sync");
        powerpc_sync();
        isync();
        __asm__ __volatile__("dcbf 0,%0" :: "r"(0));
        __asm__ __volatile__("dcbf 0,%0" :: "r"(0));
        __asm__ __volatile__("dcbf 0,%0" :: "r"(0));

        /* Lock the L1 data cache. */
        mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
        powerpc_sync();
        isync();

        mtspr(SPR_LDSTCR, 0);

        /*
         * Perform this in two stages: flush the cache starting in RAM,
         * then do it from ROM.
         */
        memp = (volatile uint32_t *)0x00000000;
        for (i = 0; i < 128 * 1024; i++) {
                temp = *memp;
                __asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
                memp += 32/sizeof(*memp);
        }

        memp = (volatile uint32_t *)0xfff00000;
        x = 0xfe;

        for (; x != 0xff;) {
                mtspr(SPR_LDSTCR, x);
                for (i = 0; i < 128; i++) {
                        temp = *memp;
                        __asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
                        memp += 32/sizeof(*memp);
                }
                x = ((x << 1) | 1) & 0xff;
        }
        mtspr(SPR_LDSTCR, 0);

        cache_reg = mfspr(SPR_L2CR);
        if (cache_reg & L2CR_L2E) {
                cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
                mtspr(SPR_L2CR, cache_reg);
                powerpc_sync();
                mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
                while (mfspr(SPR_L2CR) & L2CR_L2HWF)
                        ; /* Busy wait for cache to flush */
                powerpc_sync();
                cache_reg &= ~L2CR_L2E;
                mtspr(SPR_L2CR, cache_reg);
                powerpc_sync();
                mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
                powerpc_sync();
                while (mfspr(SPR_L2CR) & L2CR_L2I)
                        ; /* Busy wait for L2 cache invalidate */
                powerpc_sync();
        }

        cache_reg = mfspr(SPR_L3CR);
        if (cache_reg & L3CR_L3E) {
                cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
                mtspr(SPR_L3CR, cache_reg);
                powerpc_sync();
                mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
                while (mfspr(SPR_L3CR) & L3CR_L3HWF)
                        ; /* Busy wait for cache to flush */
                powerpc_sync();
                cache_reg &= ~L3CR_L3E;
                mtspr(SPR_L3CR, cache_reg);
                powerpc_sync();
                mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
                powerpc_sync();
                while (mfspr(SPR_L3CR) & L3CR_L3I)
                        ; /* Busy wait for L3 cache invalidate */
                powerpc_sync();
        }

        mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
        powerpc_sync();
        isync();

        mtmsr(msr);
}
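
/*
 * Put the CPU to sleep: save FPU/AltiVec state and assorted SPRs, flush
 * and disable the caches, then set HID0_SLEEP and wait with PSL_POW.
 * Wakeup re-enters through the reset vector, whose handler longjmps back
 * through the jump buffer registered in the PCPU restore field.
 */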
void
cpu_sleep(void)
{
        static u_quad_t timebase = 0;
        static register_t sprgs[4];
        static register_t srrs[2];

        jmp_buf resetjb;
        struct thread *fputd;
        struct thread *vectd;
        register_t hid0;
        register_t msr;
        register_t saved_msr;

        ap_pcpu = pcpup;

        PCPU_SET(restore, &resetjb);

        saved_msr = mfmsr();
        fputd = PCPU_GET(fputhread);
        vectd = PCPU_GET(vecthread);
        if (fputd != NULL)
                save_fpu(fputd);
        if (vectd != NULL)
                save_vec(vectd);
        if (setjmp(resetjb) == 0) {
                sprgs[0] = mfspr(SPR_SPRG0);
                sprgs[1] = mfspr(SPR_SPRG1);
                sprgs[2] = mfspr(SPR_SPRG2);
                sprgs[3] = mfspr(SPR_SPRG3);
                srrs[0] = mfspr(SPR_SRR0);
                srrs[1] = mfspr(SPR_SRR1);
                timebase = mftb();
                powerpc_sync();
                flush_disable_caches();
                hid0 = mfspr(SPR_HID0);
                hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
                powerpc_sync();
                isync();
                msr = mfmsr() | PSL_POW;
                mtspr(SPR_HID0, hid0);
                powerpc_sync();

                while (1)
                        mtmsr(msr);
        }
        /* Resumed here via the reset vector; restore the saved state. */
        platform_smp_timebase_sync(timebase, 0);
        PCPU_SET(curthread, curthread);
        PCPU_SET(curpcb, curthread->td_pcb);
        pmap_activate(curthread);
        powerpc_sync();
        mtspr(SPR_SPRG0, sprgs[0]);
        mtspr(SPR_SPRG1, sprgs[1]);
        mtspr(SPR_SPRG2, sprgs[2]);
        mtspr(SPR_SPRG3, sprgs[3]);
        mtspr(SPR_SRR0, srrs[0]);
        mtspr(SPR_SRR1, srrs[1]);
        mtmsr(saved_msr);
        if (fputd == curthread)
                enable_fpu(curthread);
        if (vectd == curthread)
                enable_vec(curthread);
        powerpc_sync();
}