/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */
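
/*
 * Machine-dependent bits for AIM-class (PowerPC 6xx/7xx/970 and POWER)
 * CPUs: very early CPU fixups, trap vector installation, cache control,
 * and CPU halt/sleep.
 */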

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

#ifdef __powerpc64__
extern int	n_slbs;
#endif

#ifndef __powerpc64__
struct bat	battable[16];
#endif

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void	*testppc64, *testppc64size;
extern void	*restorebridge, *restorebridgesize;
extern void	*rfid_patch, *rfi_patch1, *rfi_patch2;
extern void	*trapcode64;

extern Elf_Addr	_GLOBAL_OFFSET_TABLE_[];
#endif

extern void	*rstcode, *rstcodeend;
extern void	*trapcode, *trapcodeend;
extern void	*generictrap, *generictrap64;
extern void	*slbtrap, *slbtrapend;
extern void	*alitrap, *aliend;
extern void	*dsitrap, *dsiend;
extern void	*decrint, *decrsize;
extern void	*extint, *extsize;
extern void	*dblow, *dbend;
extern void	*imisstrap, *imisssize;
extern void	*dlmisstrap, *dlmisssize;
extern void	*dsmisstrap, *dsmisssize;

extern void	*ap_pcpu;

void aim_cpu_init(vm_offset_t toc);

void
aim_cpu_init(vm_offset_t toc)
{
	size_t		trap_offset, trapsize;
	vm_offset_t	trap;
	register_t	msr, scratch;
	uint8_t		*cache_check;
	int		cacheline_warn;
#ifndef __powerpc64__
	int		ppc64;
#endif

	trap_offset = 0;
	cacheline_warn = 0;

#ifdef __powerpc64__
	/*
	 * Switch to 64-bit mode, if the bootloader didn't, before we start
	 * using memory beyond what the bootloader might have set up.
	 * Guaranteed not to cause an implicit branch since we either (a)
	 * started with a 32-bit bootloader below 4 GB or (b) were already in
	 * 64-bit mode, making this a no-op.
	 */
	mtmsrd(mfmsr() | PSL_SF);
#endif

	/* Various very early CPU fixups */
	switch (mfpvr() >> 16) {
	/*
	 * PowerPC 970 CPUs have a misfeature requested by Apple that
	 * makes them pretend they have a 32-byte cacheline. Turn this
	 * off before we measure the cacheline size.
	 */
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
		scratch = mfspr(SPR_HID5);
		scratch &= ~HID5_970_DCBZ_SIZE_HI;
		mtspr(SPR_HID5, scratch);
		break;
#ifdef __powerpc64__
	case IBMPOWER7:
	case IBMPOWER7PLUS:
	case IBMPOWER8:
	case IBMPOWER8E:
		/* XXX: get from ibm,slb-size in device tree */
		n_slbs = 32;
		break;
#endif
	}

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5). Note that no OFW calls can be made until
	 * translation is re-enabled.
	 */

	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

	/*
	 * Measure the cacheline size using dcbz.
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */
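	/*
	 * dcbz zeroes one full cache line, so after the window below is
	 * filled with 0xff a single dcbz leaves exactly one line of zero
	 * bytes; counting the leading zeroes recovers the line size
	 * (typically 32 bytes on 32-bit parts and 128 on the 970).
	 */
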
	cache_check = (void *)EXC_PGM;

	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++)
		;

	/* Work around psim bug */
	if (cacheline_size == 0) {
		cacheline_warn = 1;
		cacheline_size = 32;
	}

#ifndef __powerpc64__
	/*
	 * Figure out whether we need to use the 64 bit PMAP. This works by
	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
	 * and setting ppc64 = 0 if that causes a trap.
	 */

	ppc64 = 1;

	bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
	    : "=r"(scratch), "=r"(ppc64));

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */

	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch, &rfi_patch1, 4);
#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch, &rfi_patch2, 4);
#endif
	}
#else /* powerpc64 */
	cpu_features |= PPC_FEATURE_64;
#endif

	trapsize = (size_t)&trapcodeend - (size_t)&trapcode;

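	/*
	 * On AIM CPUs the exception vectors live at fixed offsets in low
	 * physical memory, so handlers are installed by copying
	 * position-independent stubs directly into place.
	 */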
	/*
	 * Copy generic handler into every possible trap. Special cases will
	 * get different ones in a minute.
	 */
	for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
		bcopy(&trapcode, (void *)trap, trapsize);

#ifndef __powerpc64__
	if (cpu_features & PPC_FEATURE_64) {
		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler
		 */

		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
	}
#endif

	bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -
	    (size_t)&rstcode);

#ifdef KDB
	bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
#endif
	bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend -
	    (size_t)&alitrap);
	bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend -
	    (size_t)&dsitrap);

#ifdef __powerpc64__
	/* Set TOC base so that the interrupt code can get at it */
	*((void **)TRAP_GENTRAP) = &generictrap;
	*((register_t *)TRAP_TOCBASE) = toc;

	bcopy(&slbtrap, (void *)EXC_DSE, (size_t)&slbtrapend - (size_t)&slbtrap);
	bcopy(&slbtrap, (void *)EXC_ISE, (size_t)&slbtrapend - (size_t)&slbtrap);
#else
	/* Set branch address for trap code */
	if (cpu_features & PPC_FEATURE_64)
		*((void **)TRAP_GENTRAP) = &generictrap64;
	else
		*((void **)TRAP_GENTRAP) = &generictrap;
	*((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;

	/* G2-specific TLB miss helper handlers */
	bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
	bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
	bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);
#endif
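	/*
	 * The vectors were written via the data cache; sync the instruction
	 * cache across the whole vector region before any of them can be
	 * taken.
	 */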
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Restore MSR
	 */
	mtmsr(msr);

	/* Warn if the cacheline size could not be determined */
	if (cacheline_warn == 1) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
	}

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
}

/*
 * Shut down the CPU as much as possible.
 */
void
cpu_halt(void)
{

	OF_exit();
}

int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 |= PSL_SE;

	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 &= ~PSL_SE;

	return (0);
}

void
kdb_cpu_clear_singlestep(void)
{

	kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

	kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
	/* Copy the SLB contents from the current CPU */
	memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb));
#endif
}

#ifndef __powerpc64__
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{

	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}
#endif

vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{

	return (pa);
}

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
	register_t msr;
	register_t msscr0;
	register_t cache_reg;
	volatile uint32_t *memp;
	uint32_t temp;
	int i;
	int x;

	msr = mfmsr();
	powerpc_sync();
	mtmsr(msr & ~(PSL_EE | PSL_DR));
	msscr0 = mfspr(SPR_MSSCR0);
	msscr0 &= ~MSSCR0_L2PFE;
	mtspr(SPR_MSSCR0, msscr0);
	powerpc_sync();
	isync();
	__asm__ __volatile__("dssall; sync");
	powerpc_sync();
	isync();
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));

	/* Lock the L1 Data cache. */
	mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
	powerpc_sync();
	isync();

	mtspr(SPR_LDSTCR, 0);

	/*
	 * Perform this in two stages: Flush the cache starting in RAM, then
	 * do it from ROM.
	 */
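	/*
	 * Per the 7450 manual sequence cited above, the low bits of LDSTCR
	 * lock individual ways of the 8-way L1 data cache; the ROM-space
	 * loop below unlocks one way per pass and touches a way's worth of
	 * lines so that every way in turn is displaced and flushed.
	 */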
	memp = (volatile uint32_t *)0x00000000;
	for (i = 0; i < 128 * 1024; i++) {
		temp = *memp;
		__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
		memp += 32/sizeof(*memp);
	}

	memp = (volatile uint32_t *)0xfff00000;
	x = 0xfe;

	for (; x != 0xff;) {
		mtspr(SPR_LDSTCR, x);
		for (i = 0; i < 128; i++) {
			temp = *memp;
			__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
			memp += 32/sizeof(*memp);
		}
		x = ((x << 1) | 1) & 0xff;
	}
	mtspr(SPR_LDSTCR, 0);

	cache_reg = mfspr(SPR_L2CR);
	if (cache_reg & L2CR_L2E) {
		cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
		while (mfspr(SPR_L2CR) & L2CR_L2HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L2CR_L2E;
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
		powerpc_sync();
		while (mfspr(SPR_L2CR) & L2CR_L2I)
			; /* Busy wait for L2 cache invalidate */
		powerpc_sync();
	}

	cache_reg = mfspr(SPR_L3CR);
	if (cache_reg & L3CR_L3E) {
		cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
		while (mfspr(SPR_L3CR) & L3CR_L3HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L3CR_L3E;
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
		powerpc_sync();
		while (mfspr(SPR_L3CR) & L3CR_L3I)
			; /* Busy wait for L3 cache invalidate */
		powerpc_sync();
	}

	mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
	powerpc_sync();
	isync();

	mtmsr(msr);
}

void
cpu_sleep(void)
{
	static u_quad_t timebase = 0;
	static register_t sprgs[4];
	static register_t srrs[2];

	jmp_buf resetjb;
	struct thread *fputd;
	struct thread *vectd;
	register_t hid0;
	register_t msr;
	register_t saved_msr;

	ap_pcpu = pcpup;

	PCPU_SET(restore, &resetjb);

	saved_msr = mfmsr();
	fputd = PCPU_GET(fputhread);
	vectd = PCPU_GET(vecthread);
	if (fputd != NULL)
		save_fpu(fputd);
	if (vectd != NULL)
		save_vec(vectd);
	if (setjmp(resetjb) == 0) {
		sprgs[0] = mfspr(SPR_SPRG0);
		sprgs[1] = mfspr(SPR_SPRG1);
		sprgs[2] = mfspr(SPR_SPRG2);
		sprgs[3] = mfspr(SPR_SPRG3);
		srrs[0] = mfspr(SPR_SRR0);
		srrs[1] = mfspr(SPR_SRR1);
		timebase = mftb();
		powerpc_sync();
		flush_disable_caches();
		hid0 = mfspr(SPR_HID0);
		hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
		powerpc_sync();
		isync();
		msr = mfmsr() | PSL_POW;
		mtspr(SPR_HID0, hid0);
		powerpc_sync();

		while (1)
			mtmsr(msr);
	}
	mttb(timebase);
	PCPU_SET(curthread, curthread);
	PCPU_SET(curpcb, curthread->td_pcb);
	pmap_activate(curthread);
	powerpc_sync();
	mtspr(SPR_SPRG0, sprgs[0]);
	mtspr(SPR_SPRG1, sprgs[1]);
	mtspr(SPR_SPRG2, sprgs[2]);
	mtspr(SPR_SPRG3, sprgs[3]);
	mtspr(SPR_SRR0, srrs[0]);
	mtspr(SPR_SRR1, srrs[1]);
	mtmsr(saved_msr);
	if (fputd == curthread)
		enable_fpu(curthread);
	if (vectd == curthread)
		enable_vec(curthread);
	powerpc_sync();
}