/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

#ifdef __powerpc64__
#include "mmu_oea64.h"
#endif

#ifndef __powerpc64__
struct bat battable[16];
#endif

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void *testppc64, *testppc64size;
extern void *restorebridge, *restorebridgesize;
extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
extern void *trapcode64;

extern Elf_Addr _GLOBAL_OFFSET_TABLE_[];
#endif

extern void *rstcode, *rstcodeend;
extern void *trapcode, *trapcodeend;
extern void *generictrap, *generictrap64;
extern void *slbtrap, *slbtrapend;
extern void *alitrap, *aliend;
extern void *dsitrap, *dsiend;
extern void *decrint, *decrsize;
extern void *extint, *extsize;
extern void *dblow, *dbend;
extern void *imisstrap, *imisssize;
extern void *dlmisstrap, *dlmisssize;
extern void *dsmisstrap, *dsmisssize;

extern void *ap_pcpu;

void aim_cpu_init(vm_offset_t toc);

void
aim_cpu_init(vm_offset_t toc)
{
	size_t trap_offset, trapsize;
	vm_offset_t trap;
	register_t msr, scratch;
	uint8_t *cache_check;
	int cacheline_warn;
#ifndef __powerpc64__
	int ppc64;
#endif

	trap_offset = 0;
	cacheline_warn = 0;

	/* Various very early CPU fix ups */
	switch (mfpvr() >> 16) {
	/*
	 * PowerPC 970 CPUs have a misfeature requested by Apple that
	 * makes them pretend they have a 32-byte cacheline. Turn this
	 * off before we measure the cacheline size.
	 */
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
		scratch = mfspr(SPR_HID5);
		scratch &= ~HID5_970_DCBZ_SIZE_HI;
		mtspr(SPR_HID5, scratch);
		break;
#ifdef __powerpc64__
	case IBMPOWER7:
	case IBMPOWER7PLUS:
	case IBMPOWER8:
	case IBMPOWER8E:
		/* XXX: get from ibm,slb-size in device tree */
		n_slbs = 32;
		break;
#endif
	}

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5). Note that no OFW calls can be made until
	 * translation is re-enabled.
	 */

	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

	/*
	 * Measure the cacheline size using dcbz.
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */

	cache_check = (void *)EXC_PGM;

	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++);

	/* Work around psim bug */
	if (cacheline_size == 0) {
		cacheline_warn = 1;
		cacheline_size = 32;
	}

#ifndef __powerpc64__
	/*
	 * Figure out whether we need to use the 64-bit PMAP. This works by
	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
	 * and setting ppc64 = 0 if that causes a trap.
	 */

	ppc64 = 1;

	bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
	    : "=r"(scratch), "=r"(ppc64));

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */

	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch, &rfi_patch1, 4);
#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch, &rfi_patch2, 4);
#endif
	}
#else /* powerpc64 */
	cpu_features |= PPC_FEATURE_64;
#endif

	trapsize = (size_t)&trapcodeend - (size_t)&trapcode;
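
	/*
	 * Translation is still off here, so the bcopy() calls below
	 * write the architected low-memory exception vectors
	 * (EXC_RST through EXC_LAST) directly at their physical
	 * addresses.
	 */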

	/*
	 * Copy generic handler into every possible trap. Special cases will get
	 * different ones in a minute.
	 */
	for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
		bcopy(&trapcode, (void *)trap, trapsize);

#ifndef __powerpc64__
	if (cpu_features & PPC_FEATURE_64) {
		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler
		 */

		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
	}
#endif

	bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -
	    (size_t)&rstcode);

#ifdef KDB
	bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
#endif
	bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend -
	    (size_t)&alitrap);
	bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend -
	    (size_t)&dsitrap);

#ifdef __powerpc64__
	/* Set TOC base so that the interrupt code can get at it */
	*((void **)TRAP_GENTRAP) = &generictrap;
	*((register_t *)TRAP_TOCBASE) = toc;

	bcopy(&slbtrap, (void *)EXC_DSE, (size_t)&slbtrapend - (size_t)&slbtrap);
	bcopy(&slbtrap, (void *)EXC_ISE, (size_t)&slbtrapend - (size_t)&slbtrap);
#else
	/* Set branch address for trap code */
	if (cpu_features & PPC_FEATURE_64)
		*((void **)TRAP_GENTRAP) = &generictrap64;
	else
		*((void **)TRAP_GENTRAP) = &generictrap;
	*((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;

	/* G2-specific TLB miss helper handlers */
	bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
	bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
	bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);
#endif
	__syncicache((void *)EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Restore MSR
	 */
	mtmsr(msr);

	/* Warn if cacheline size was not determined */
	if (cacheline_warn == 1) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
	}

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
}

/*
 * Shutdown the CPU as much as possible.
 */
void
cpu_halt(void)
{

	OF_exit();
}

int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 |= PSL_SE;

	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 &= ~PSL_SE;

	return (0);
}

void
kdb_cpu_clear_singlestep(void)
{

	kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

	kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
	/* Copy the SLB contents from the current CPU */
	memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb));
#endif
}

#ifndef __powerpc64__
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{

	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}
#endif

vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{

	return (pa);
}

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
	register_t msr;
	register_t msscr0;
	register_t cache_reg;
	volatile uint32_t *memp;
	uint32_t temp;
	int i;
	int x;

	msr = mfmsr();
	powerpc_sync();
	mtmsr(msr & ~(PSL_EE | PSL_DR));
	msscr0 = mfspr(SPR_MSSCR0);
	msscr0 &= ~MSSCR0_L2PFE;
	mtspr(SPR_MSSCR0, msscr0);
	powerpc_sync();
	isync();
	__asm__ __volatile__("dssall; sync");
	powerpc_sync();
	isync();
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));

	/* Lock the L1 Data cache. */
	mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
	powerpc_sync();
	isync();

	mtspr(SPR_LDSTCR, 0);

	/*
	 * Perform this in two stages: flush the cache starting in RAM,
	 * then do it from ROM.
	 */
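	/* Stage 1: load and flush 128K 32-byte lines (4 MB) of RAM. */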
	memp = (volatile uint32_t *)0x00000000;
	for (i = 0; i < 128 * 1024; i++) {
		temp = *memp;
		__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
		memp += 32/sizeof(*memp);
	}

	/*
	 * Stage 2: repeat from ROM, unlocking one L1 way at a time
	 * through LDSTCR so that every way is displaced in turn.
	 */
	memp = (volatile uint32_t *)0xfff00000;
	x = 0xfe;

	for (; x != 0xff;) {
		mtspr(SPR_LDSTCR, x);
		for (i = 0; i < 128; i++) {
			temp = *memp;
			__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
			memp += 32/sizeof(*memp);
		}
		x = ((x << 1) | 1) & 0xff;
	}
	mtspr(SPR_LDSTCR, 0);

	cache_reg = mfspr(SPR_L2CR);
	if (cache_reg & L2CR_L2E) {
		cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
		while (mfspr(SPR_L2CR) & L2CR_L2HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L2CR_L2E;
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
		powerpc_sync();
		while (mfspr(SPR_L2CR) & L2CR_L2I)
			; /* Busy wait for L2 cache invalidate */
		powerpc_sync();
	}

	cache_reg = mfspr(SPR_L3CR);
	if (cache_reg & L3CR_L3E) {
		cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
		while (mfspr(SPR_L3CR) & L3CR_L3HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L3CR_L3E;
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
		powerpc_sync();
		while (mfspr(SPR_L3CR) & L3CR_L3I)
			; /* Busy wait for L3 cache invalidate */
		powerpc_sync();
	}

	mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
	powerpc_sync();
	isync();

	mtmsr(msr);
}

void
cpu_sleep(void)
{
	static u_quad_t timebase = 0;
	static register_t sprgs[4];
	static register_t srrs[2];

	jmp_buf resetjb;
	struct thread *fputd;
	struct thread *vectd;
	register_t hid0;
	register_t msr;
	register_t saved_msr;

	ap_pcpu = pcpup;

	PCPU_SET(restore, &resetjb);

	saved_msr = mfmsr();
	fputd = PCPU_GET(fputhread);
	vectd = PCPU_GET(vecthread);
	if (fputd != NULL)
		save_fpu(fputd);
	if (vectd != NULL)
		save_vec(vectd);
	if (setjmp(resetjb) == 0) {
		sprgs[0] = mfspr(SPR_SPRG0);
		sprgs[1] = mfspr(SPR_SPRG1);
		sprgs[2] = mfspr(SPR_SPRG2);
		sprgs[3] = mfspr(SPR_SPRG3);
		srrs[0] = mfspr(SPR_SRR0);
		srrs[1] = mfspr(SPR_SRR1);
		timebase = mftb();
		powerpc_sync();
		flush_disable_caches();
		hid0 = mfspr(SPR_HID0);
		hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
		powerpc_sync();
		isync();
		msr = mfmsr() | PSL_POW;
		mtspr(SPR_HID0, hid0);
		powerpc_sync();

		while (1)
			mtmsr(msr);
	}
	/* The reset handler longjmps back here after wakeup. */
	platform_smp_timebase_sync(timebase, 0);
	PCPU_SET(curthread, curthread);
	PCPU_SET(curpcb, curthread->td_pcb);
	pmap_activate(curthread);
	powerpc_sync();
	mtspr(SPR_SPRG0, sprgs[0]);
	mtspr(SPR_SPRG1, sprgs[1]);
	mtspr(SPR_SPRG2, sprgs[2]);
	mtspr(SPR_SPRG3, sprgs[3]);
	mtspr(SPR_SRR0, srrs[0]);
	mtspr(SPR_SRR1, srrs[1]);
	mtmsr(saved_msr);
	if (fputd == curthread)
		enable_fpu(curthread);
	if (vectd == curthread)
		enable_vec(curthread);
	powerpc_sync();
}