/*-
 * Initial implementation:
 * Copyright (c) 2001 Robert Drehmel
 * All rights reserved.
 *
 * As long as the above copyright statement and this notice remain
 * unchanged, you can do whatever you want with this file.
 */
/*-
 * Copyright (c) 2008 - 2012 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

/*
 * FreeBSD/sparc64 kernel loader - machine dependent part
 *
 *  - implements copyin and readin functions that map kernel
 *    pages on demand.  The machine independent code does not
 *    know the size of the kernel early enough to pre-enter
 *    TTEs, and installing just one 4MB mapping seemed too
 *    limiting to me.
 */

#include <stand.h>
#include <sys/param.h>
#include <sys/exec.h>
#include <sys/linker.h>
#include <sys/queue.h>
#include <sys/types.h>
#ifdef LOADER_ZFS_SUPPORT
#include <sys/vtoc.h>
#include "../zfs/libzfs.h"
#endif

#include <vm/vm.h>
#include <machine/asi.h>
#include <machine/cmt.h>
#include <machine/cpufunc.h>
#include <machine/elf.h>
#include <machine/fireplane.h>
#include <machine/jbus.h>
#include <machine/lsu.h>
#include <machine/metadata.h>
#include <machine/tte.h>
#include <machine/tlb.h>
#include <machine/upa.h>
#include <machine/ver.h>
#include <machine/vmparam.h>

#include "bootstrap.h"
#include "libofw.h"
#include "dev_net.h"

extern char bootprog_info[];

enum {
	HEAPVA		= 0x800000,
	HEAPSZ		= 0x1000000,
	LOADSZ		= 0x1000000	/* for kernel and modules */
};

/* At least the Sun Fire V1280 requires page-sized allocations to be claimed. */
CTASSERT(HEAPSZ % PAGE_SIZE == 0);

static struct mmu_ops {
	void		(*tlb_init)(void);
	int		(*mmu_mapin)(vm_offset_t va, vm_size_t len);
} *mmu_ops;

typedef void kernel_entry_t(vm_offset_t mdp, u_long o1, u_long o2, u_long o3,
    void *openfirmware);

static inline u_long dtlb_get_data_sun4u(u_int, u_int);
static int dtlb_enter_sun4u(u_int, u_long data, vm_offset_t);
static vm_offset_t dtlb_va_to_pa_sun4u(vm_offset_t);
static inline u_long itlb_get_data_sun4u(u_int, u_int);
static int itlb_enter_sun4u(u_int, u_long data, vm_offset_t);
static vm_offset_t itlb_va_to_pa_sun4u(vm_offset_t);
static void itlb_relocate_locked0_sun4u(void);
extern vm_offset_t md_load(char *, vm_offset_t *, vm_offset_t *);
static int sparc64_autoload(void);
static ssize_t sparc64_readin(const int, vm_offset_t, const size_t);
static ssize_t sparc64_copyin(const void *, vm_offset_t, size_t);
static vm_offset_t claim_virt(vm_offset_t, size_t, int);
static vm_offset_t alloc_phys(size_t, int);
static int map_phys(int, size_t, vm_offset_t, vm_offset_t);
static void release_phys(vm_offset_t, u_int);
static int __elfN(exec)(struct preloaded_file *);
static int mmu_mapin_sun4u(vm_offset_t, vm_size_t);
static vm_offset_t init_heap(void);
static phandle_t find_bsp_sun4u(phandle_t, uint32_t);
const char *cpu_cpuid_prop_sun4u(void);
uint32_t cpu_get_mid_sun4u(void);
static void tlb_init_sun4u(void);

#ifdef LOADER_DEBUG
typedef u_int64_t tte_t;

static void pmap_print_tlb_sun4u(void);
static void pmap_print_tte_sun4u(tte_t, tte_t);
#endif

static struct mmu_ops mmu_ops_sun4u = { tlb_init_sun4u, mmu_mapin_sun4u };

/* sun4u */
struct tlb_entry *dtlb_store;
struct tlb_entry *itlb_store;
u_int dtlb_slot;
u_int itlb_slot;
static int cpu_impl;
static u_int dtlb_slot_max;
static u_int itlb_slot_max;
static u_int tlb_locked;

static vm_offset_t curkva = 0;
static vm_offset_t heapva;

static char bootpath[64];
static phandle_t root;

#ifdef LOADER_ZFS_SUPPORT
static struct zfs_devdesc zfs_currdev;
#endif

/*
 * Machine dependent structures that the machine independent
 * loader part uses.
 */
struct devsw *devsw[] = {
#ifdef LOADER_DISK_SUPPORT
	&ofwdisk,
#endif
#ifdef LOADER_NET_SUPPORT
	&netdev,
#endif
#ifdef LOADER_ZFS_SUPPORT
	&zfs_dev,
#endif
	NULL
};

struct arch_switch archsw;

static struct file_format sparc64_elf = {
	__elfN(loadfile),
	__elfN(exec)
};

struct file_format *file_formats[] = {
	&sparc64_elf,
	NULL
};

struct fs_ops *file_system[] = {
#ifdef LOADER_ZFS_SUPPORT
	&zfs_fsops,
#endif
#ifdef LOADER_UFS_SUPPORT
	&ufs_fsops,
#endif
#ifdef LOADER_CD9660_SUPPORT
	&cd9660_fsops,
#endif
#ifdef LOADER_ZIP_SUPPORT
	&zipfs_fsops,
#endif
#ifdef LOADER_GZIP_SUPPORT
	&gzipfs_fsops,
#endif
#ifdef LOADER_BZIP2_SUPPORT
	&bzipfs_fsops,
#endif
#ifdef LOADER_NFS_SUPPORT
	&nfs_fsops,
#endif
#ifdef LOADER_TFTP_SUPPORT
	&tftp_fsops,
#endif
	NULL
};

struct netif_driver *netif_drivers[] = {
#ifdef LOADER_NET_SUPPORT
	&ofwnet,
#endif
	NULL
};

extern struct console ofwconsole;
struct console *consoles[] = {
	&ofwconsole,
	NULL
};

#ifdef LOADER_DEBUG
static int
watch_phys_set_mask(vm_offset_t pa, u_long mask)
{
	u_long lsucr;

	stxa(AA_DMMU_PWPR, ASI_DMMU, pa & (((2UL << 38) - 1) << 3));
	lsucr = ldxa(0, ASI_LSU_CTL_REG);
	lsucr = ((lsucr | LSU_PW) & ~LSU_PM_MASK) |
	    (mask << LSU_PM_SHIFT);
	stxa(0, ASI_LSU_CTL_REG, lsucr);
	return (0);
}

static int
watch_phys_set(vm_offset_t pa, int sz)
{
	u_long off;

	off = (u_long)pa & 7;
	/* Test for misaligned watch points. */
	if (off + sz > 8)
		return (-1);
	return (watch_phys_set_mask(pa, ((1 << sz) - 1) << off));
}


static int
watch_virt_set_mask(vm_offset_t va, u_long mask)
{
	u_long lsucr;

	stxa(AA_DMMU_VWPR, ASI_DMMU, va & (((2UL << 41) - 1) << 3));
	lsucr = ldxa(0, ASI_LSU_CTL_REG);
	lsucr = ((lsucr | LSU_VW) & ~LSU_VM_MASK) |
	    (mask << LSU_VM_SHIFT);
	stxa(0, ASI_LSU_CTL_REG, lsucr);
	return (0);
}

static int
watch_virt_set(vm_offset_t va, int sz)
{
	u_long off;

	off = (u_long)va & 7;
	/* Test for misaligned watch points. */
	if (off + sz > 8)
		return (-1);
	return (watch_virt_set_mask(va, ((1 << sz) - 1) << off));
}
#endif

/*
 * archsw functions
 */
static int
sparc64_autoload(void)
{

	return (0);
}

static ssize_t
sparc64_readin(const int fd, vm_offset_t va, const size_t len)
{

	mmu_ops->mmu_mapin(va, len);
	return (read(fd, (void *)va, len));
}

static ssize_t
sparc64_copyin(const void *src, vm_offset_t dest, size_t len)
{

	mmu_ops->mmu_mapin(dest, len);
	memcpy((void *)dest, src, len);
	return (len);
}

/*
 * other MD functions
 */
static vm_offset_t
claim_virt(vm_offset_t virt, size_t size, int align)
{
	vm_offset_t mva;

	if (OF_call_method("claim", mmu, 3, 1, virt, size, align, &mva) == -1)
		return ((vm_offset_t)-1);
	return (mva);
}

static vm_offset_t
alloc_phys(size_t size, int align)
{
	cell_t phys_hi, phys_low;

	if (OF_call_method("claim", memory, 2, 2, size, align, &phys_low,
	    &phys_hi) == -1)
		return ((vm_offset_t)-1);
	return ((vm_offset_t)phys_hi << 32 | phys_low);
}

static int
map_phys(int mode, size_t size, vm_offset_t virt, vm_offset_t phys)
{

	return (OF_call_method("map", mmu, 5, 0, (uint32_t)phys,
	    (uint32_t)(phys >> 32), virt, size, mode));
}

static void
release_phys(vm_offset_t phys, u_int size)
{

	(void)OF_call_method("release", memory, 3, 0, (uint32_t)phys,
	    (uint32_t)(phys >> 32), size);
}

static int
__elfN(exec)(struct preloaded_file *fp)
{
	struct file_metadata *fmp;
	vm_offset_t mdp, dtbp;
	Elf_Addr entry;
	Elf_Ehdr *e;
	int error;

	if ((fmp = file_findmetadata(fp, MODINFOMD_ELFHDR)) == 0)
		return (EFTYPE);
	e = (Elf_Ehdr *)&fmp->md_data;

	if ((error = md_load(fp->f_args, &mdp, &dtbp)) != 0)
		return (error);

	printf("jumping to kernel entry at %#lx.\n", e->e_entry);
#ifdef LOADER_DEBUG
	pmap_print_tlb_sun4u();
#endif

	dev_cleanup();

	entry = e->e_entry;

	OF_release((void *)heapva, HEAPSZ);

	((kernel_entry_t *)entry)(mdp, 0, 0, 0, openfirmware);

	panic("%s: exec returned", __func__);
}

static inline u_long
dtlb_get_data_sun4u(u_int tlb, u_int slot)
{
	u_long data, pstate;

	slot = TLB_DAR_SLOT(tlb, slot);
	/*
	 * We read ASI_DTLB_DATA_ACCESS_REG twice back-to-back in order to
	 * work around errata of USIII and beyond.
	 */
	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	(void)ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
	data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
	wrpr(pstate, pstate, 0);
	return (data);
}

static inline u_long
itlb_get_data_sun4u(u_int tlb, u_int slot)
{
	u_long data, pstate;

	slot = TLB_DAR_SLOT(tlb, slot);
	/*
	 * We read ASI_ITLB_DATA_ACCESS_REG twice back-to-back in order to
	 * work around errata of USIII and beyond.
	 */
	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	(void)ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
	data = ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
	wrpr(pstate, pstate, 0);
	return (data);
}

static vm_offset_t
dtlb_va_to_pa_sun4u(vm_offset_t va)
{
	u_long pstate, reg;
	u_int i, tlb;

	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	for (i = 0; i < dtlb_slot_max; i++) {
		reg = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_DTLB_TAG_READ_REG);
		if (TLB_TAR_VA(reg) != va)
			continue;
		reg = dtlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		reg >>= TD_PA_SHIFT;
		if (cpu_impl == CPU_IMPL_SPARC64V ||
		    cpu_impl >= CPU_IMPL_ULTRASPARCIII)
			return (reg & TD_PA_CH_MASK);
		return (reg & TD_PA_SF_MASK);
	}
	wrpr(pstate, pstate, 0);
	return (-1);
}

static vm_offset_t
itlb_va_to_pa_sun4u(vm_offset_t va)
{
	u_long pstate, reg;
	int i;

	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	for (i = 0; i < itlb_slot_max; i++) {
		reg = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_ITLB_TAG_READ_REG);
		if (TLB_TAR_VA(reg) != va)
			continue;
		reg = itlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		reg >>= TD_PA_SHIFT;
		if (cpu_impl == CPU_IMPL_SPARC64V ||
		    cpu_impl >= CPU_IMPL_ULTRASPARCIII)
			return (reg & TD_PA_CH_MASK);
		return (reg & TD_PA_SF_MASK);
	}
	wrpr(pstate, pstate, 0);
	return (-1);
}

static int
dtlb_enter_sun4u(u_int index, u_long data, vm_offset_t virt)
{

	return (OF_call_method("SUNW,dtlb-load", mmu, 3, 0, index, data,
	    virt));
}

static int
itlb_enter_sun4u(u_int index, u_long data, vm_offset_t virt)
{

	if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp && index == 0 &&
	    (data & TD_L) != 0)
		panic("%s: won't enter locked TLB entry at index 0 on USIII+",
		    __func__);
	return (OF_call_method("SUNW,itlb-load", mmu, 3, 0, index, data,
	    virt));
}

static void
itlb_relocate_locked0_sun4u(void)
{
	u_long data, pstate, tag;
	int i;

	if (cpu_impl != CPU_IMPL_ULTRASPARCIIIp)
		return;

	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);

	data = itlb_get_data_sun4u(tlb_locked, 0);
	if ((data & (TD_V | TD_L)) != (TD_V | TD_L)) {
		wrpr(pstate, pstate, 0);
		return;
	}

	/* Flush the mapping of slot 0. */
	tag = ldxa(TLB_DAR_SLOT(tlb_locked, 0), ASI_ITLB_TAG_READ_REG);
	stxa(TLB_DEMAP_VA(TLB_TAR_VA(tag)) | TLB_DEMAP_PRIMARY |
	    TLB_DEMAP_PAGE, ASI_IMMU_DEMAP, 0);
	flush(0);	/* The USIII-family ignores the address. */

	/*
	 * Search a replacement slot != 0 and enter the data and tag
	 * that formerly were in slot 0.
	 */
	for (i = 1; i < itlb_slot_max; i++) {
		if ((itlb_get_data_sun4u(tlb_locked, i) & TD_V) != 0)
			continue;

		stxa(AA_IMMU_TAR, ASI_IMMU, tag);
		stxa(TLB_DAR_SLOT(tlb_locked, i), ASI_ITLB_DATA_ACCESS_REG,
		    data);
		flush(0);	/* The USIII-family ignores the address. */
		break;
	}
	wrpr(pstate, pstate, 0);
	if (i == itlb_slot_max)
		panic("%s: could not find a replacement slot", __func__);
}

/*
 * Enter locked 4MB dTLB/iTLB entries covering [va, va + len), allocating
 * physical memory on demand.
 */
static int
mmu_mapin_sun4u(vm_offset_t va, vm_size_t len)
{
	vm_offset_t pa, mva;
	u_long data;
	u_int index;

	if (va + len > curkva)
		curkva = va + len;

	pa = (vm_offset_t)-1;
	len += va & PAGE_MASK_4M;
	va &= ~PAGE_MASK_4M;
	while (len) {
		if (dtlb_va_to_pa_sun4u(va) == (vm_offset_t)-1 ||
		    itlb_va_to_pa_sun4u(va) == (vm_offset_t)-1) {
			/* Allocate a physical page, claim the virtual area. */
			if (pa == (vm_offset_t)-1) {
				pa = alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
				if (pa == (vm_offset_t)-1)
					panic("%s: out of memory", __func__);
				mva = claim_virt(va, PAGE_SIZE_4M, 0);
				if (mva != va)
					panic("%s: can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    __func__, va, mva);
				/*
				 * The mappings may have changed, be paranoid.
				 */
				continue;
			}
			/*
			 * Actually, we can only allocate two pages less at
			 * most (depending on the kernel TSB size).
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("%s: out of dtlb_slots", __func__);
			if (itlb_slot >= itlb_slot_max)
				panic("%s: out of itlb_slots", __func__);
			data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP |
			    TD_CV | TD_P | TD_W;
			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			index = dtlb_slot_max - dtlb_slot - 1;
			if (dtlb_enter_sun4u(index, data, va) < 0)
				panic("%s: can't enter dTLB slot %d data "
				    "%#lx va %#lx", __func__, index, data,
				    va);
			dtlb_slot++;
			itlb_store[itlb_slot].te_pa = pa;
			itlb_store[itlb_slot].te_va = va;
			index = itlb_slot_max - itlb_slot - 1;
			if (itlb_enter_sun4u(index, data, va) < 0)
				panic("%s: can't enter iTLB slot %d data "
				    "%#lx va %#lx", __func__, index, data,
				    va);
			itlb_slot++;
			pa = (vm_offset_t)-1;
		}
		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}
	if (pa != (vm_offset_t)-1)
		release_phys(pa, PAGE_SIZE_4M);
	return (0);
}

static vm_offset_t
init_heap(void)
{

	/* There is no need for contiguous physical heap memory. */
	heapva = (vm_offset_t)OF_claim((void *)HEAPVA, HEAPSZ, 32);
	return (heapva);
}

static phandle_t
find_bsp_sun4u(phandle_t node, uint32_t bspid)
{
	char type[sizeof("cpu")];
	phandle_t child;
	uint32_t cpuid;

	for (; node > 0; node = OF_peer(node)) {
		child = OF_child(node);
		if (child > 0) {
			child = find_bsp_sun4u(child, bspid);
			if (child > 0)
				return (child);
		} else {
			if (OF_getprop(node, "device_type", type,
			    sizeof(type)) <= 0)
				continue;
			if (strcmp(type, "cpu") != 0)
				continue;
			if (OF_getprop(node, cpu_cpuid_prop_sun4u(), &cpuid,
			    sizeof(cpuid)) <= 0)
				continue;
			if (cpuid == bspid)
				return (node);
		}
	}
	return (0);
}

const char *
cpu_cpuid_prop_sun4u(void)
{

	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return ("upa-portid");
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return ("portid");
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return ("cpuid");
	default:
		return ("");
	}
}

uint32_t
cpu_get_mid_sun4u(void)
{

	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return (UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
		return (FIREPLANE_CR_GET_AID(ldxa(AA_FIREPLANE_CONFIG,
		    ASI_FIREPLANE_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return (JBUS_CR_GET_JID(ldxa(0, ASI_JBUS_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return (INTR_ID_GET_ID(ldxa(AA_INTR_ID, ASI_INTR_ID)));
	default:
		return (0);
	}
}

static void
tlb_init_sun4u(void)
{
	phandle_t bsp;

	cpu_impl = VER_IMPL(rdpr(ver));
	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		tlb_locked = TLB_DAR_T32;
		break;
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		tlb_locked = TLB_DAR_T16;
		break;
	case CPU_IMPL_SPARC64V:
		tlb_locked = TLB_DAR_FTLB;
		break;
	}
	bsp = find_bsp_sun4u(OF_child(root), cpu_get_mid_sun4u());
	if (bsp == 0)
		panic("%s: no node for bootcpu?!?!", __func__);

	if (OF_getprop(bsp, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    OF_getprop(bsp, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("%s: can't get TLB slot max.", __func__);

	if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp) {
#ifdef LOADER_DEBUG
		printf("pre fixup:\n");
		pmap_print_tlb_sun4u();
#endif

		/*
		 * Relocate the locked entry in it16 slot 0 (if present)
		 * as part of working around Cheetah+ erratum 34.
		 */
		itlb_relocate_locked0_sun4u();

#ifdef LOADER_DEBUG
		printf("post fixup:\n");
		pmap_print_tlb_sun4u();
#endif
	}

	dtlb_store = malloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = malloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL)
		panic("%s: can't allocate TLB store", __func__);
}

#ifdef LOADER_ZFS_SUPPORT
static void
sparc64_zfs_probe(void)
{
	struct vtoc8 vtoc;
	char alias[64], devname[sizeof(alias) + sizeof(":x") - 1];
	char type[sizeof("device_type")];
	char *bdev, *dev, *odev;
	uint64_t guid;
	int fd, len, part;
	phandle_t aliases, options;

	/* Get the GUID of the ZFS pool on the boot device. */
	guid = 0;
	zfs_probe_dev(bootpath, &guid);

	/*
	 * Get the GUIDs of the ZFS pools on any additional disks listed in
	 * the boot-device environment variable.
	 */
	if ((aliases = OF_finddevice("/aliases")) == -1)
		goto out;
	options = OF_finddevice("/options");
	len = OF_getproplen(options, "boot-device");
	if (len <= 0)
		goto out;
	bdev = odev = malloc(len + 1);
	if (bdev == NULL)
		goto out;
	if (OF_getprop(options, "boot-device", bdev, len) <= 0)
		goto out;
	bdev[len] = '\0';
	while ((dev = strsep(&bdev, " ")) != NULL) {
		if (*dev == '\0')
			continue;
		strcpy(alias, dev);
		(void)OF_getprop(aliases, dev, alias, sizeof(alias));
		/*
		 * Don't probe the boot disk twice.  Note that bootpath
		 * includes the partition specifier.
		 */
		if (strncmp(alias, bootpath, strlen(alias)) == 0)
			continue;
		if (OF_getprop(OF_finddevice(alias), "device_type", type,
		    sizeof(type)) == -1)
			continue;
		if (strcmp(type, "block") != 0)
			continue;

		/* Find freebsd-zfs slices in the VTOC. */
		fd = open(alias, O_RDONLY);
		if (fd == -1)
			continue;
		lseek(fd, 0, SEEK_SET);
		if (read(fd, &vtoc, sizeof(vtoc)) != sizeof(vtoc)) {
			close(fd);
			continue;
		}
		close(fd);

		for (part = 0; part < 8; part++) {
			if (part == 2 || vtoc.part[part].tag !=
			    VTOC_TAG_FREEBSD_ZFS)
				continue;
			(void)sprintf(devname, "%s:%c", alias, part + 'a');
			if (zfs_probe_dev(devname, NULL) == ENXIO)
				break;
		}
	}
	free(odev);

out:
	if (guid != 0) {
		zfs_currdev.pool_guid = guid;
		zfs_currdev.root_guid = 0;
		zfs_currdev.d_dev = &zfs_dev;
		zfs_currdev.d_type = zfs_currdev.d_dev->dv_type;
	}
}
#endif /* LOADER_ZFS_SUPPORT */

int
main(int (*openfirm)(void *))
{
	char compatible[32];
	struct devsw **dp;

	/*
	 * Tell the Open Firmware functions where to find the OFW gate.
	 */
	OF_init(openfirm);

	archsw.arch_getdev = ofw_getdev;
	archsw.arch_copyin = sparc64_copyin;
	archsw.arch_copyout = ofw_copyout;
	archsw.arch_readin = sparc64_readin;
	archsw.arch_autoload = sparc64_autoload;
#ifdef LOADER_ZFS_SUPPORT
	archsw.arch_zfs_probe = sparc64_zfs_probe;
#endif

	if (init_heap() == (vm_offset_t)-1)
		OF_exit();
	setheap((void *)heapva, (void *)(heapva + HEAPSZ));

	/*
	 * Probe for a console.
	 */
	cons_probe();

	if ((root = OF_peer(0)) == -1)
		panic("%s: can't get root phandle", __func__);
	OF_getprop(root, "compatible", compatible, sizeof(compatible));
	mmu_ops = &mmu_ops_sun4u;

	mmu_ops->tlb_init();

	/*
	 * Set up the current device.
	 */
	OF_getprop(chosen, "bootpath", bootpath, sizeof(bootpath));

	/*
	 * Initialize devices.
	 */
	for (dp = devsw; *dp != 0; dp++)
		if ((*dp)->dv_init != 0)
			(*dp)->dv_init();

#ifdef LOADER_ZFS_SUPPORT
	if (zfs_currdev.pool_guid != 0) {
		(void)strncpy(bootpath, zfs_fmtdev(&zfs_currdev),
		    sizeof(bootpath) - 1);
		bootpath[sizeof(bootpath) - 1] = '\0';
	} else
#endif

	/*
	 * Sun compatible bootable CD-ROMs have a disk label placed before
	 * the ISO 9660 data, with the actual file system being in the first
	 * partition, while the other partitions contain pseudo disk labels
	 * with embedded boot blocks for different architectures, which may
	 * be followed by UFS file systems.
	 * The firmware will set the boot path to the partition it boots from
	 * ('f' in the sun4u/sun4v case), but we want the kernel to be loaded
	 * from the ISO 9660 file system ('a'), so the boot path needs to be
	 * altered.
	 */
	if (bootpath[strlen(bootpath) - 2] == ':' &&
	    bootpath[strlen(bootpath) - 1] == 'f')
		bootpath[strlen(bootpath) - 1] = 'a';

	env_setenv("currdev", EV_VOLATILE, bootpath,
	    ofw_setcurrdev, env_nounset);
	env_setenv("loaddev", EV_VOLATILE, bootpath,
	    env_noset, env_nounset);

	printf("\n%s", bootprog_info);
	printf("bootpath=\"%s\"\n", bootpath);

	/* Give control to the machine independent loader code. */
	interact(NULL);
	return (1);
}

COMMAND_SET(heap, "heap", "show heap usage", command_heap);

static int
command_heap(int argc, char *argv[])
{

	mallocstats();
	printf("heap base at %p, top at %p, upper limit at %p\n", heapva,
	    sbrk(0), heapva + HEAPSZ);
	return (CMD_OK);
}

COMMAND_SET(reboot, "reboot", "reboot the system", command_reboot);

static int
command_reboot(int argc, char *argv[])
{
	int i;

	for (i = 0; devsw[i] != NULL; ++i)
		if (devsw[i]->dv_cleanup != NULL)
			(devsw[i]->dv_cleanup)();

	printf("Rebooting...\n");
	OF_exit();
}

/* provide this for panic, as it's not in the startup code */
void
exit(int code)
{

	OF_exit();
}

#ifdef LOADER_DEBUG
static const char *const page_sizes[] = {
	" 8k", " 64k", "512k", " 4m"
};

static void
pmap_print_tte_sun4u(tte_t tag, tte_t tte)
{

	printf("%s %s ",
	    page_sizes[(tte >> TD_SIZE_SHIFT) & TD_SIZE_MASK],
	    tag & TD_G ? "G" : " ");
	printf(tte & TD_W ? "W " : " ");
	printf(tte & TD_P ? "\e[33mP\e[0m " : " ");
	printf(tte & TD_E ? "E " : " ");
	printf(tte & TD_CV ? "CV " : " ");
	printf(tte & TD_CP ? "CP " : " ");
	printf(tte & TD_L ? "\e[32mL\e[0m " : " ");
	printf(tte & TD_IE ? "IE " : " ");
	printf(tte & TD_NFO ? "NFO " : " ");
"NFO " : " "); 956 printf("pa=0x%lx va=0x%lx ctx=%ld\n", 957 TD_PA(tte), TLB_TAR_VA(tag), TLB_TAR_CTX(tag)); 958 } 959 960 static void 961 pmap_print_tlb_sun4u(void) 962 { 963 tte_t tag, tte; 964 u_long pstate; 965 int i; 966 967 pstate = rdpr(pstate); 968 for (i = 0; i < itlb_slot_max; i++) { 969 wrpr(pstate, pstate & ~PSTATE_IE, 0); 970 tte = itlb_get_data_sun4u(tlb_locked, i); 971 wrpr(pstate, pstate, 0); 972 if (!(tte & TD_V)) 973 continue; 974 tag = ldxa(TLB_DAR_SLOT(tlb_locked, i), 975 ASI_ITLB_TAG_READ_REG); 976 printf("iTLB-%2u: ", i); 977 pmap_print_tte_sun4u(tag, tte); 978 } 979 for (i = 0; i < dtlb_slot_max; i++) { 980 wrpr(pstate, pstate & ~PSTATE_IE, 0); 981 tte = dtlb_get_data_sun4u(tlb_locked, i); 982 wrpr(pstate, pstate, 0); 983 if (!(tte & TD_V)) 984 continue; 985 tag = ldxa(TLB_DAR_SLOT(tlb_locked, i), 986 ASI_DTLB_TAG_READ_REG); 987 printf("dTLB-%2u: ", i); 988 pmap_print_tte_sun4u(tag, tte); 989 } 990 } 991 #endif 992