1 /*- 2 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 3 * Copyright (C) 1995, 1996 TooLs GmbH. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by TooLs GmbH. 17 * 4. The name of TooLs GmbH may not be used to endorse or promote products 18 * derived from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 */ 31 /*- 32 * Copyright (C) 2001 Benno Rice 33 * All rights reserved. 34 * 35 * Redistribution and use in source and binary forms, with or without 36 * modification, are permitted provided that the following conditions 37 * are met: 38 * 1. 
Redistributions of source code must retain the above copyright 39 * notice, this list of conditions and the following disclaimer. 40 * 2. Redistributions in binary form must reproduce the above copyright 41 * notice, this list of conditions and the following disclaimer in the 42 * documentation and/or other materials provided with the distribution. 43 * 44 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 47 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 49 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 50 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 51 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 52 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 53 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
54 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ 55 */ 56 57 #include <sys/cdefs.h> 58 __FBSDID("$FreeBSD$"); 59 60 #include "opt_ddb.h" 61 #include "opt_kstack_pages.h" 62 #include "opt_platform.h" 63 64 #include <sys/param.h> 65 #include <sys/proc.h> 66 #include <sys/systm.h> 67 #include <sys/bio.h> 68 #include <sys/buf.h> 69 #include <sys/bus.h> 70 #include <sys/cons.h> 71 #include <sys/cpu.h> 72 #include <sys/eventhandler.h> 73 #include <sys/exec.h> 74 #include <sys/imgact.h> 75 #include <sys/kdb.h> 76 #include <sys/kernel.h> 77 #include <sys/ktr.h> 78 #include <sys/linker.h> 79 #include <sys/lock.h> 80 #include <sys/malloc.h> 81 #include <sys/mbuf.h> 82 #include <sys/msgbuf.h> 83 #include <sys/mutex.h> 84 #include <sys/ptrace.h> 85 #include <sys/reboot.h> 86 #include <sys/reg.h> 87 #include <sys/rwlock.h> 88 #include <sys/signalvar.h> 89 #include <sys/syscallsubr.h> 90 #include <sys/sysctl.h> 91 #include <sys/sysent.h> 92 #include <sys/sysproto.h> 93 #include <sys/ucontext.h> 94 #include <sys/uio.h> 95 #include <sys/vmmeter.h> 96 #include <sys/vnode.h> 97 98 #include <net/netisr.h> 99 100 #include <vm/vm.h> 101 #include <vm/vm_extern.h> 102 #include <vm/vm_kern.h> 103 #include <vm/vm_page.h> 104 #include <vm/vm_phys.h> 105 #include <vm/vm_map.h> 106 #include <vm/vm_object.h> 107 #include <vm/vm_pager.h> 108 109 #include <machine/altivec.h> 110 #ifndef __powerpc64__ 111 #include <machine/bat.h> 112 #endif 113 #include <machine/cpu.h> 114 #include <machine/elf.h> 115 #include <machine/fpu.h> 116 #include <machine/hid.h> 117 #include <machine/ifunc.h> 118 #include <machine/kdb.h> 119 #include <machine/md_var.h> 120 #include <machine/metadata.h> 121 #include <machine/mmuvar.h> 122 #include <machine/pcb.h> 123 #include <machine/sigframe.h> 124 #include <machine/spr.h> 125 #include <machine/trap.h> 126 #include <machine/vmparam.h> 127 #include <machine/ofw_machdep.h> 128 129 #include <ddb/ddb.h> 130 131 #include <dev/ofw/openfirm.h> 132 #include 
<dev/ofw/ofw_subr.h>

int cold = 1;
/* Cache line size; the 64-bit default (128) matches POWER4 and later. */
#ifdef __powerpc64__
int cacheline_size = 128;
#else
int cacheline_size = 32;
#endif
/*
 * hw_direct_map: whether a direct (1:1) physical map is available.
 * On powerpc64 this starts as -1 ("unknown") and is settled by the MMU
 * bring-up code; 32-bit assumes it is present.
 */
#ifdef __powerpc64__
int hw_direct_map = -1;
#else
int hw_direct_map = 1;
#endif

#ifdef BOOKE
/* Physical load address of the kernel, set by Book-E locore. */
extern vm_paddr_t kernload;
#endif

extern void *ap_pcpu;

/* Per-CPU data for all CPUs; page-aligned so each slot maps cleanly. */
struct pcpu __pcpu[MAXCPU] __aligned(PAGE_SIZE);
/* Scratch environment used when no loader-provided kenv exists. */
static char init_kenv[2048];

/* Trap frame backing thread0 until it takes a real trap. */
static struct trapframe frame0;

char machine[] = "powerpc";
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
	   CTLFLAG_RD, &cacheline_size, 0, "");

uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *,
    uint32_t);

static void fake_preload_metadata(void);

long Maxmem = 0;
long realmem = 0;

/* Default MSR values set in the AIM/Book-E early startup code */
register_t psl_kernset;
register_t psl_userset;
register_t psl_userstatic;
#ifdef __powerpc64__
register_t psl_userset32;
#endif

struct kva_md_info kmi;

/*
 * Late (SI_SUB_CPU) startup: start the decrementer clock, report memory
 * sizes, initialize the kernel VA submaps, and set up the buffer cache.
 * The "dummy" argument is the unused SYSINIT payload.
 */
static void
cpu_startup(void *dummy)
{

	/*
	 * Initialise the decrementer-based clock.
	 */
	decr_init();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	cpu_setup(PCPU_GET(cpuid));

#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)physmem),
	    ptoa((uintmax_t)physmem) / 1048576);
	realmem = physmem;

	if (bootverbose)
		printf("available KVA = %zu (%zu MB)\n",
		    virtual_end - virtual_avail,
		    (virtual_end - virtual_avail) / 1048576);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		/* phys_avail[] holds (start, end) pairs, zero-terminated. */
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size1 =
			    phys_avail[indx + 1] - phys_avail[indx];

#ifdef __powerpc64__
			printf("0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
#else
			printf("0x%09jx - 0x%09jx, %ju bytes (%ju pages)\n",
#endif
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size1, (uintmax_t)size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
}

extern vm_offset_t __startkernel, __endkernel;
extern unsigned char __bss_start[];
extern unsigned char __sbss_start[];
extern unsigned char __sbss_end[];
extern unsigned char _end[];

void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
    void *mdp, uint32_t mdp_cookie);
void aim_cpu_init(vm_offset_t toc);
void booke_cpu_init(void);

#ifdef DDB
static void load_external_symtab(void);
#endif

/*
 * Machine-dependent early boot entry point, called from locore with
 * translation possibly still off.  Wipes BSS (AIM), relocates and parses
 * loader metadata (or fakes it for a self-loaded kernel), brings up the
 * console, platform module, BSP per-CPU data, KDB, and the MMU.
 *
 * Returns the initial stack pointer for thread0, 16-byte aligned and
 * leaving room for a call frame below the PCB.
 */
uintptr_t
powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
    uint32_t mdp_cookie)
{
	struct pcpu *pc;
	struct cpuref bsp;
	vm_offset_t startkernel, endkernel;
	char *env;
	void *kmdp = NULL;
	bool ofw_bootargs = false;
#ifdef DDB
	bool symbols_provided = false;
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
	vm_offset_t ksym_sz;
#endif

	/* First guess at start/end kernel positions */
	startkernel = __startkernel;
	endkernel = __endkernel;

	/*
	 * If the metadata pointer cookie is not set to the magic value,
	 * the number in mdp should be treated as nonsense.
	 */
	if (mdp_cookie != 0xfb5d104d)
		mdp = NULL;

#if !defined(BOOKE)
	/*
	 * On BOOKE the BSS is already cleared and some variables
	 * initialized. Do not wipe them out.
	 */
	bzero(__sbss_start, __sbss_end - __sbss_start);
	bzero(__bss_start, _end - __bss_start);
#endif

	cpu_feature_setup();

#ifdef AIM
	aim_early_init(fdt, toc, ofentry, mdp, mdp_cookie);
#endif

	/*
	 * At this point, we are executing in our correct memory space.
	 * Book-E started there, and AIM has done an rfi and restarted
	 * execution from _start.
	 *
	 * We may still be in real mode, however. If we are running out of
	 * the direct map on 64 bit, this is possible to do.
	 */

	/*
	 * Parse metadata if present and fetch parameters. Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		/*
		 * Starting up from loader.
		 *
		 * Full metadata has been provided, but we need to figure
		 * out the correct address to relocate it to.
		 */
		char *envp = NULL;
		uintptr_t md_offset = 0;
		vm_paddr_t kernelstartphys, kernelendphys;

#ifdef AIM
		/* Running from the DMAP region: relocate into the DMAP. */
		if ((uintptr_t)&powerpc_init > DMAP_BASE_ADDRESS)
			md_offset = DMAP_BASE_ADDRESS;
#else /* BOOKE */
		md_offset = VM_MIN_KERNEL_ADDRESS - kernload;
#endif

		preload_metadata = mdp;
		if (md_offset > 0) {
			/* Translate phys offset into DMAP offset. */
			preload_metadata += md_offset;
			preload_bootstrap_relocate(md_offset);
		}
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			if (envp != NULL)
				envp += md_offset;
			init_static_kenv(envp, 0);
			/* Prefer a DTB passed in a register over metadata. */
			if (fdt == 0) {
				fdt = MD_FETCH(kmdp, MODINFOMD_DTBP, uintptr_t);
				if (fdt != 0)
					fdt += md_offset;
			}
			kernelstartphys = MD_FETCH(kmdp, MODINFO_ADDR,
			    vm_offset_t);
			/* kernelstartphys is already relocated. */
			kernelendphys = MD_FETCH(kmdp, MODINFOMD_KERNEND,
			    vm_offset_t);
			if (kernelendphys != 0)
				kernelendphys += md_offset;
			endkernel = ulmax(endkernel, kernelendphys);
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
			/*
			 * NOTE(review): ksym_sz is read here but never used
			 * afterwards -- confirm whether the dereference is
			 * still needed (it faults if SSYM is absent/zero).
			 */
			ksym_sz = *(Elf_Size*)ksym_start;

			db_fetch_ksymtab(ksym_start, ksym_end, md_offset);
			/* Symbols provided by loader. */
			symbols_provided = true;
#endif
		}
	} else {
		/*
		 * Self-loading kernel, we have to fake up metadata.
		 *
		 * Since we are creating the metadata from the final
		 * memory space, we don't need to call
		 * preload_boostrap_relocate().
		 */
		fake_preload_metadata();
		kmdp = preload_search_by_type("elf kernel");
		init_static_kenv(init_kenv, sizeof(init_kenv));
		ofw_bootargs = true;
	}

	/* Store boot environment state */
	OF_initial_setup((void *)fdt, NULL, (int (*)(void *))ofentry);

	/*
	 * Init params/tunables that can be overridden by the loader
	 */
	init_param1();

	/*
	 * Start initializing proc0 and thread0.
	 */
	proc_linkup0(&proc0, &thread0);
	thread0.td_frame = &frame0;
	/* Load the curthread pointer register (r13 on 64-bit, r2 on 32). */
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(&thread0));
#else
	__asm __volatile("mr 2,%0" :: "r"(&thread0));
#endif

	/*
	 * Init mutexes, which we use heavily in PMAP
	 */
	mutex_init();

	/*
	 * Install the OF client interface
	 */
	OF_bootstrap();

#ifdef DDB
	/* No loader symbols: try to pull a symtab out of the initrd image. */
	if (!symbols_provided && hw_direct_map)
		load_external_symtab();
#endif

	if (ofw_bootargs)
		ofw_parse_bootargs();

#ifdef AIM
	/*
	 * Early I/O map needs to be initialized before console, in order to
	 * map frame buffers properly, and after boot args have been parsed,
	 * to handle tunables properly.
	 */
	pmap_early_io_map_init();
#endif

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

#ifdef AIM
	aim_cpu_init(toc);
#else /* BOOKE */
	booke_cpu_init();

	/* Make sure the kernel icache is valid before we go too much further */
	__syncicache((caddr_t)startkernel, endkernel - startkernel);
#endif

	/*
	 * Choose a platform module so we can get the physical memory map.
	 */

	platform_probe_and_attach();

	/*
	 * Set up per-cpu data for the BSP now that the platform can tell
	 * us which that is.
	 */
	if (platform_smp_get_bsp(&bsp) != 0)
		bsp.cr_cpuid = 0;
	pc = &__pcpu[bsp.cr_cpuid];
	/* SPRG0 conventionally holds the per-CPU data pointer. */
	__asm __volatile("mtsprg 0, %0" :: "r"(pc));
	pcpu_init(pc, bsp.cr_cpuid, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	thread0.td_oncpu = bsp.cr_cpuid;
	pc->pc_cpuid = bsp.cr_cpuid;
	pc->pc_hwref = bsp.cr_hwref;

	/*
	 * Init KDB
	 */
	kdb_init();

	/*
	 * Bring up MMU
	 */
	pmap_mmu_init();
	link_elf_ireloc(kmdp);
	pmap_bootstrap(startkernel, endkernel);
	/* Switch to the kernel MSR, keeping external interrupts masked. */
	mtmsr(psl_kernset & ~PSL_EE);

	/*
	 * Initialize params/tunables that are derived from memsize
	 */
	init_param2(physmem);

	/*
	 * Grab booted kernel's name
	 */
	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Finish setting up thread0.
	 */
	/* PCB lives at the top of the kstack, 16-byte aligned downwards. */
	thread0.td_pcb = (struct pcb *)
	    ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~15UL);
	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
	pc->pc_curpcb = thread0.td_pcb;

	/* Initialise the message buffer. */
	msgbufinit(msgbufp, msgbufsize);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

	/* Initial stack pointer: below the PCB, with room for a callframe. */
	return (((uintptr_t)thread0.td_pcb -
	    (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
}

#ifdef DDB
/*
 * On powernv and some booke systems, we might not have symbols loaded via
 * loader. However, if the user passed the kernel in as the initrd as well,
 * we can manually load it via reinterpreting the initrd copy of the kernel.
 *
 * In the BOOKE case, we don't actually have a DMAP yet, so we have to use
 * temporary maps to inspect the memory, but write DMAP addresses to the
 * configuration variables.
523 */ 524 static void 525 load_external_symtab(void) { 526 phandle_t chosen; 527 vm_paddr_t start, end; 528 pcell_t cell[2]; 529 ssize_t size; 530 u_char *kernelimg; /* Temporary map */ 531 u_char *kernelimg_final; /* Final location */ 532 533 int i; 534 535 Elf_Ehdr *ehdr; 536 Elf_Phdr *phdr; 537 Elf_Shdr *shdr; 538 539 vm_offset_t ksym_start, ksym_sz, kstr_start, kstr_sz, 540 ksym_start_final, kstr_start_final; 541 542 if (!hw_direct_map) 543 return; 544 545 chosen = OF_finddevice("/chosen"); 546 if (chosen <= 0) 547 return; 548 549 if (!OF_hasprop(chosen, "linux,initrd-start") || 550 !OF_hasprop(chosen, "linux,initrd-end")) 551 return; 552 553 size = OF_getencprop(chosen, "linux,initrd-start", cell, sizeof(cell)); 554 if (size == 4) 555 start = cell[0]; 556 else if (size == 8) 557 start = (uint64_t)cell[0] << 32 | cell[1]; 558 else 559 return; 560 561 size = OF_getencprop(chosen, "linux,initrd-end", cell, sizeof(cell)); 562 if (size == 4) 563 end = cell[0]; 564 else if (size == 8) 565 end = (uint64_t)cell[0] << 32 | cell[1]; 566 else 567 return; 568 569 if (!(end - start > 0)) 570 return; 571 572 kernelimg_final = (u_char *) PHYS_TO_DMAP(start); 573 #ifdef AIM 574 kernelimg = kernelimg_final; 575 #else /* BOOKE */ 576 kernelimg = (u_char *)pmap_early_io_map(start, PAGE_SIZE); 577 #endif 578 ehdr = (Elf_Ehdr *)kernelimg; 579 580 if (!IS_ELF(*ehdr)) { 581 #ifdef BOOKE 582 pmap_early_io_unmap(start, PAGE_SIZE); 583 #endif 584 return; 585 } 586 587 #ifdef BOOKE 588 pmap_early_io_unmap(start, PAGE_SIZE); 589 kernelimg = (u_char *)pmap_early_io_map(start, (end - start)); 590 #endif 591 592 phdr = (Elf_Phdr *)(kernelimg + ehdr->e_phoff); 593 shdr = (Elf_Shdr *)(kernelimg + ehdr->e_shoff); 594 595 ksym_start = 0; 596 ksym_sz = 0; 597 ksym_start_final = 0; 598 kstr_start = 0; 599 kstr_sz = 0; 600 kstr_start_final = 0; 601 for (i = 0; i < ehdr->e_shnum; i++) { 602 if (shdr[i].sh_type == SHT_SYMTAB) { 603 ksym_start = (vm_offset_t)(kernelimg + 604 shdr[i].sh_offset); 605 
ksym_start_final = (vm_offset_t) 606 (kernelimg_final + shdr[i].sh_offset); 607 ksym_sz = (vm_offset_t)(shdr[i].sh_size); 608 kstr_start = (vm_offset_t)(kernelimg + 609 shdr[shdr[i].sh_link].sh_offset); 610 kstr_start_final = (vm_offset_t) 611 (kernelimg_final + 612 shdr[shdr[i].sh_link].sh_offset); 613 614 kstr_sz = (vm_offset_t) 615 (shdr[shdr[i].sh_link].sh_size); 616 } 617 } 618 619 if (ksym_start != 0 && kstr_start != 0 && ksym_sz != 0 && 620 kstr_sz != 0 && ksym_start < kstr_start) { 621 /* 622 * We can't use db_fetch_ksymtab() here, because we need to 623 * feed in DMAP addresses that are not mapped yet on booke. 624 * 625 * Write the variables directly, where db_init() will pick 626 * them up later, after the DMAP is up. 627 */ 628 ksymtab = ksym_start_final; 629 ksymtab_size = ksym_sz; 630 kstrtab = kstr_start_final; 631 ksymtab_relbase = (__startkernel - KERNBASE); 632 } 633 634 #ifdef BOOKE 635 pmap_early_io_unmap(start, (end - start)); 636 #endif 637 638 }; 639 #endif 640 641 /* 642 * When not being loaded from loader, we need to create our own metadata 643 * so we can interact with the kernel linker. 644 */ 645 static void 646 fake_preload_metadata(void) { 647 /* We depend on dword alignment here. */ 648 static uint32_t fake_preload[36] __aligned(8); 649 int i = 0; 650 651 fake_preload[i++] = MODINFO_NAME; 652 fake_preload[i++] = strlen("kernel") + 1; 653 strcpy((char*)&fake_preload[i], "kernel"); 654 /* ['k' 'e' 'r' 'n'] ['e' 'l' '\0' ..] */ 655 i += 2; 656 657 fake_preload[i++] = MODINFO_TYPE; 658 fake_preload[i++] = strlen("elf kernel") + 1; 659 strcpy((char*)&fake_preload[i], "elf kernel"); 660 /* ['e' 'l' 'f' ' '] ['k' 'e' 'r' 'n'] ['e' 'l' '\0' ..] 
*/ 661 i += 3; 662 663 #ifdef __powerpc64__ 664 /* Padding -- Fields start on u_long boundaries */ 665 fake_preload[i++] = 0; 666 #endif 667 668 fake_preload[i++] = MODINFO_ADDR; 669 fake_preload[i++] = sizeof(vm_offset_t); 670 *(vm_offset_t *)&fake_preload[i] = 671 (vm_offset_t)(__startkernel); 672 i += (sizeof(vm_offset_t) / 4); 673 674 fake_preload[i++] = MODINFO_SIZE; 675 fake_preload[i++] = sizeof(vm_offset_t); 676 *(vm_offset_t *)&fake_preload[i] = 677 (vm_offset_t)(__endkernel) - (vm_offset_t)(__startkernel); 678 i += (sizeof(vm_offset_t) / 4); 679 680 /* 681 * MODINFOMD_SSYM and MODINFOMD_ESYM cannot be provided here, 682 * as the memory comes from outside the loaded ELF sections. 683 * 684 * If the symbols are being provided by other means (MFS), the 685 * tables will be loaded into the debugger directly. 686 */ 687 688 /* Null field at end to mark end of data. */ 689 fake_preload[i++] = 0; 690 fake_preload[i] = 0; 691 preload_metadata = (void*)fake_preload; 692 } 693 694 /* 695 * Flush the D-cache for non-DMA I/O so that the I-cache can 696 * be made coherent later. 697 */ 698 void 699 cpu_flush_dcache(void *ptr, size_t len) 700 { 701 register_t addr, off; 702 703 /* 704 * Align the address to a cacheline and adjust the length 705 * accordingly. Then round the length to a multiple of the 706 * cacheline for easy looping. 
707 */ 708 addr = (uintptr_t)ptr; 709 off = addr & (cacheline_size - 1); 710 addr -= off; 711 len = roundup2(len + off, cacheline_size); 712 713 while (len > 0) { 714 __asm __volatile ("dcbf 0,%0" :: "r"(addr)); 715 __asm __volatile ("sync"); 716 addr += cacheline_size; 717 len -= cacheline_size; 718 } 719 } 720 721 int 722 ptrace_set_pc(struct thread *td, unsigned long addr) 723 { 724 struct trapframe *tf; 725 726 tf = td->td_frame; 727 tf->srr0 = (register_t)addr; 728 729 return (0); 730 } 731 732 void 733 spinlock_enter(void) 734 { 735 struct thread *td; 736 register_t msr; 737 738 td = curthread; 739 if (td->td_md.md_spinlock_count == 0) { 740 nop_prio_mhigh(); 741 msr = intr_disable(); 742 td->td_md.md_spinlock_count = 1; 743 td->td_md.md_saved_msr = msr; 744 critical_enter(); 745 } else 746 td->td_md.md_spinlock_count++; 747 } 748 749 void 750 spinlock_exit(void) 751 { 752 struct thread *td; 753 register_t msr; 754 755 td = curthread; 756 msr = td->td_md.md_saved_msr; 757 td->td_md.md_spinlock_count--; 758 if (td->td_md.md_spinlock_count == 0) { 759 critical_exit(); 760 intr_restore(msr); 761 nop_prio_medium(); 762 } 763 } 764 765 /* 766 * Simple ddb(4) command/hack to view any SPR on the running CPU. 767 * Uses a trivial asm function to perform the mfspr, and rewrites the mfspr 768 * instruction each time. 769 * XXX: Since it uses code modification, it won't work if the kernel code pages 770 * are marked RO. 771 */ 772 extern register_t get_spr(int); 773 774 #ifdef DDB 775 DB_SHOW_COMMAND(spr, db_show_spr) 776 { 777 register_t spr; 778 volatile uint32_t *p; 779 int sprno, saved_sprno; 780 781 if (!have_addr) 782 return; 783 784 saved_sprno = sprno = (intptr_t) addr; 785 sprno = ((sprno & 0x3e0) >> 5) | ((sprno & 0x1f) << 5); 786 p = (uint32_t *)(void *)&get_spr; 787 #ifdef __powerpc64__ 788 #if defined(_CALL_ELF) && _CALL_ELF == 2 789 /* Account for ELFv2 function prologue. 
*/ 790 p += 2; 791 #else 792 p = *(volatile uint32_t * volatile *)p; 793 #endif 794 #endif 795 *p = (*p & ~0x001ff800) | (sprno << 11); 796 __syncicache(__DEVOLATILE(uint32_t *, p), cacheline_size); 797 spr = get_spr(sprno); 798 799 db_printf("SPR %d(%x): %lx\n", saved_sprno, saved_sprno, 800 (unsigned long)spr); 801 } 802 803 DB_SHOW_COMMAND(frame, db_show_frame) 804 { 805 struct trapframe *tf; 806 long reg; 807 int i; 808 809 tf = have_addr ? (struct trapframe *)addr : curthread->td_frame; 810 811 /* 812 * Everything casts through long to simplify the printing. 813 * 'long' is native register size anyway. 814 */ 815 db_printf("trap frame %p\n", tf); 816 for (i = 0; i < nitems(tf->fixreg); i++) { 817 reg = tf->fixreg[i]; 818 db_printf(" r%d:\t%#lx (%ld)\n", i, reg, reg); 819 } 820 reg = tf->lr; 821 db_printf(" lr:\t%#lx\n", reg); 822 reg = tf->cr; 823 db_printf(" cr:\t%#lx\n", reg); 824 reg = tf->xer; 825 db_printf(" xer:\t%#lx\n", reg); 826 reg = tf->ctr; 827 db_printf(" ctr:\t%#lx (%ld)\n", reg, reg); 828 reg = tf->srr0; 829 db_printf(" srr0:\t%#lx\n", reg); 830 reg = tf->srr1; 831 db_printf(" srr1:\t%#lx\n", reg); 832 reg = tf->exc; 833 db_printf(" exc:\t%#lx\n", reg); 834 reg = tf->dar; 835 db_printf(" dar:\t%#lx\n", reg); 836 #ifdef AIM 837 reg = tf->cpu.aim.dsisr; 838 db_printf(" dsisr:\t%#lx\n", reg); 839 #else 840 reg = tf->cpu.booke.esr; 841 db_printf(" esr:\t%#lx\n", reg); 842 reg = tf->cpu.booke.dbcr0; 843 db_printf(" dbcr0:\t%#lx\n", reg); 844 #endif 845 } 846 #endif 847 848 /* __stack_chk_fail_local() is called in secure-plt (32-bit). */ 849 #if !defined(__powerpc64__) 850 extern void __stack_chk_fail(void); 851 void __stack_chk_fail_local(void); 852 853 void 854 __stack_chk_fail_local(void) 855 { 856 857 __stack_chk_fail(); 858 } 859 #endif 860