/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_msgbuf.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/bat.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/powerpc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/trap.h>
#include <machine/vmparam.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

int cold = 1;

struct pcpu __pcpu[MAXCPU];
struct trapframe frame0;

vm_offset_t kstack0;
vm_offset_t kstack0_phys;

char machine[] = "powerpc";
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static char model[128];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, model, 0, "");

static int cacheline_size = CACHELINESIZE;
SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
	CTLFLAG_RD, &cacheline_size, 0, "");

static void	cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

void		powerpc_init(u_int, u_int, u_int, void *);

int		save_ofw_mapping(void);
int		restore_ofw_mapping(void);

void		install_extint(void (*)(void));

int		setfault(faultbuf);		/* defined in locore.S */

static int	grab_mcontext(struct thread *, mcontext_t *, int);

void		asm_panic(char *);

long		Maxmem = 0;
long		realmem = 0;

struct pmap	ofw_pmap;
extern int	ofmsr;

struct bat	battable[16];

struct kva_md_info kmi;

void setPQL2(int *const size, int *const ways);

void
setPQL2(int *const size, int *const ways)
{
	return;
}

static void
powerpc_ofw_shutdown(void *junk, int howto)
{
	if (howto & RB_HALT) {
		OF_halt();
	}
	OF_reboot();
}

static void
cpu_startup(void *dummy)
{

	/*
	 * Initialise the decrementer-based clock.
	 */
	decr_init();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	cpu_setup(PCPU_GET(cpuid));

	/* startrtclock(); */
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %ld (%ld MB)\n", ptoa(physmem),
	    ptoa(physmem) / 1048576);
	realmem = physmem;

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			int size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08x - 0x%08x, %d bytes (%d pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	EVENTHANDLER_REGISTER(shutdown_final, powerpc_ofw_shutdown, 0,
	    SHUTDOWN_PRI_LAST);

#ifdef SMP
	/*
	 * OK, enough kmem_alloc/malloc state should be up, lets get on with it!
	 */
	mp_start();			/* fire up the secondaries */
	mp_announce();
#endif  /* SMP */
}

extern char kernel_text[], _end[];

extern void	*trapcode, *trapsize;
extern void	*alitrap, *alisize;
extern void	*dsitrap, *dsisize;
extern void	*decrint, *decrsize;
extern void	*extint, *extsize;
extern void	*dblow, *dbsize;
extern void	*vectrap, *vectrapsize;

void
powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
{
	struct pcpu	*pc;
	vm_offset_t	end, off;
	void		*kmdp;
	char		*env;

	end = 0;
	kmdp = NULL;

	/*
	 * Parse metadata if present and fetch parameters. Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
		}
	}

	/*
	 * Init params/tunables that can be overridden by the loader
	 */
	init_param1();

	/*
	 * Start initializing proc0 and thread0.
	 */
	proc_linkup(&proc0, &ksegrp0, &thread0);
	thread0.td_frame = &frame0;

	/*
	 * Set up per-cpu data.
	 */
	pc = &__pcpu[0];
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	pc->pc_curpcb = thread0.td_pcb;
	pc->pc_cpuid = 0;

	__asm __volatile("mtsprg 0, %0" :: "r"(pc));

	mutex_init();

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

	/*
	 * Complain if there is no metadata.
	 */
	if (mdp == NULL || kmdp == NULL) {
		printf("powerpc_init: no loader metadata.\n");
	}

	kdb_init();

	kobj_machdep_init();

	/*
	 * XXX: Initialize the interrupt tables.
	 * Disable translation in case the vector area
	 * hasn't been mapped (G5)
	 */
	mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));
	isync();
	bcopy(&trapcode, (void *)EXC_RST,  (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_MCHK, (size_t)&trapsize);
	bcopy(&dsitrap,  (void *)EXC_DSI,  (size_t)&dsisize);
	bcopy(&trapcode, (void *)EXC_ISI,  (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_EXI,  (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_ALI,  (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_PGM,  (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_FPU,  (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_DECR, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_SC,   (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_TRC,  (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_FPA,  (size_t)&trapsize);
	bcopy(&vectrap,  (void *)EXC_VEC,  (size_t)&vectrapsize);
	bcopy(&trapcode, (void *)EXC_VECAST, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_THRM, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_BPT,  (size_t)&trapsize);
#ifdef KDB
	bcopy(&dblow, (void *)EXC_RST,  (size_t)&dbsize);
	bcopy(&dblow, (void *)EXC_MCHK, (size_t)&dbsize);
	bcopy(&dblow, (void *)EXC_PGM,  (size_t)&dbsize);
	bcopy(&dblow, (void *)EXC_TRC,  (size_t)&dbsize);
	bcopy(&dblow, (void *)EXC_BPT,  (size_t)&dbsize);
#endif
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Make sure translation has been enabled
	 */
	mtmsr(mfmsr() | PSL_IR|PSL_DR|PSL_ME|PSL_RI);
	isync();

	/*
	 * Initialise virtual memory.
	 */
	pmap_mmu_install(MMU_TYPE_OEA, 0);		/* XXX temporary */
	pmap_bootstrap(startkernel, endkernel);

	/*
	 * Initialize params/tunables that are derived from memsize
	 */
	init_param2(physmem);

	/*
	 * Grab booted kernel's name
	 */
	env = getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Finish setting up thread0.
	 */
	thread0.td_kstack = kstack0;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

	/*
	 * Map and initialise the message buffer.
	 */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, msgbuf_phys + off);
	msgbufinit(msgbufp, MSGBUF_SIZE);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter("Boot flags requested debugger");
#endif
}

void
bzero(void *buf, size_t len)
{
	caddr_t	p;

	p = buf;

	while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
		*p++ = 0;
		len--;
	}

	while (len >= sizeof(u_long) * 8) {
		*(u_long*) p = 0;
		*((u_long*) p + 1) = 0;
		*((u_long*) p + 2) = 0;
		*((u_long*) p + 3) = 0;
		len -= sizeof(u_long) * 8;
		*((u_long*) p + 4) = 0;
		*((u_long*) p + 5) = 0;
		*((u_long*) p + 6) = 0;
		*((u_long*) p + 7) = 0;
		p += sizeof(u_long) * 8;
	}

	while (len >= sizeof(u_long)) {
		*(u_long*) p = 0;
		len -= sizeof(u_long);
		p += sizeof(u_long);
	}

	while (len) {
		*p++ = 0;
		len--;
	}
}

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct trapframe *tf;
	struct sigframe *sfp;
	struct sigacts *psp;
	struct sigframe sf;
	struct thread *td;
	struct proc *p;
	int oonstack, rndfsize;
	int sig;
	int code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	oonstack = sigonstack(tf->fixreg[1]);

	rndfsize = ((sizeof(sf) + 15) / 16) * 16;

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/*
	 * Save user context
	 */
	memset(&sf, 0, sizeof(sf));
	grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;

	/*
	 * Allocate and validate space for the signal handler context.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)((caddr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - rndfsize);
	} else {
		sfp = (struct sigframe *)(tf->fixreg[1] - rndfsize);
	}

	/*
	 * Translate the signal if appropriate (Linux emu ?)
	 */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/*
	 * Save the floating-point state, if necessary, then copy it.
	 */
	/* XXX */

	/*
	 * Set up the registers to return to sigcode.
	 *
	 *   r1/sp - sigframe ptr
	 *   lr    - sig function, dispatched to by blrl in trampoline
	 *   r3    - sig number
	 *   r4    - SIGINFO ? &siginfo : exception code
	 *   r5    - user context
	 *   srr0  - trampoline function addr
	 */
	tf->lr = (register_t)catcher;
	tf->fixreg[1] = (register_t)sfp;
	tf->fixreg[FIRSTARG] = sig;
	tf->fixreg[FIRSTARG+2] = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/*
		 * Signal handler installed with SA_SIGINFO.
		 */
		tf->fixreg[FIRSTARG+1] = (register_t)&sfp->sf_si;

		/*
		 * Fill siginfo structure.
		 */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_addr = (void *) ((tf->exc == EXC_DSI) ?
		    tf->dar : tf->srr0);
	} else {
		/* Old FreeBSD-style arguments. */
		tf->fixreg[FIRSTARG+1] = code;
		tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ?
		    tf->dar : tf->srr0;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	tf->srr0 = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	/*
	 * copy the frame out to userland.
	 */
	if (copyout((caddr_t)&sf, (caddr_t)sfp, sizeof(sf)) != 0) {
		/*
		 * Process has trashed its stack. Kill it.
		 */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td,
	    tf->srr0, tf->fixreg[1]);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

int
sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	struct proc *p;
	ucontext_t uc;
	int error;

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	p = td->td_proc;
	PROC_LOCK(p);
	td->td_sigmask = uc.uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);

	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
	    td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);

	return (EJUSTRETURN);
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	return sigreturn(td, (struct sigreturn_args *)uap);
}
#endif

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_lr = tf->srr0;
	pcb->pcb_sp = tf->fixreg[1];
}

/*
 * get_mcontext/sendsig helper routine that doesn't touch the
 * proc lock
 */
static int
grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct pcb *pcb;

	pcb = td->td_pcb;

	memset(mcp, 0, sizeof(mcontext_t));

	mcp->mc_vers = _MC_VERSION;
	mcp->mc_flags = 0;
	memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe));
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_gpr[3] = 0;
		mcp->mc_gpr[4] = 0;
	}

	/*
	 * This assumes that floating-point context is *not* lazy,
	 * so if the thread has used FP there would have been a
	 * FP-unavailable exception that would have set things up
	 * correctly.
	 */
	if (pcb->pcb_flags & PCB_FPU) {
		KASSERT(td == curthread,
		    ("get_mcontext: fp save not curthread"));
		critical_enter();
		save_fpu(td);
		critical_exit();
		mcp->mc_flags |= _MC_FP_VALID;
		memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
		memcpy(mcp->mc_fpreg, pcb->pcb_fpu.fpr, 32*sizeof(double));
	}

	/* XXX Altivec context ? */

	mcp->mc_len = sizeof(*mcp);

	return (0);
}

int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	int error;

	error = grab_mcontext(td, mcp, flags);
	if (error == 0) {
		PROC_LOCK(curthread->td_proc);
		mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
		PROC_UNLOCK(curthread->td_proc);
	}

	return (error);
}

int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct pcb *pcb;
	struct trapframe *tf;

	pcb = td->td_pcb;
	tf = td->td_frame;

	if (mcp->mc_vers != _MC_VERSION ||
	    mcp->mc_len != sizeof(*mcp))
		return (EINVAL);

	/*
	 * Don't let the user set privileged MSR bits
	 */
	if ((mcp->mc_srr1 & PSL_USERSTATIC) != (tf->srr1 & PSL_USERSTATIC)) {
		return (EINVAL);
	}

	memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));

	if (mcp->mc_flags & _MC_FP_VALID) {
		if ((pcb->pcb_flags & PCB_FPU) != PCB_FPU) {
			critical_enter();
			enable_fpu(td);
			critical_exit();
		}
		memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double));
		memcpy(pcb->pcb_fpu.fpr, mcp->mc_fpreg, 32*sizeof(double));
	}

	/* XXX Altivec context? */

	return (0);
}

void
cpu_boot(int howto)
{
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	return (ENXIO);
}

/*
 * Shutdown the CPU as much as possible.
 */
void
cpu_halt(void)
{

	OF_exit();
}

void
cpu_idle(void)
{
	/* TODO: Insert code to halt (until next interrupt) */

#ifdef INVARIANTS
	if ((mfmsr() & PSL_EE) != PSL_EE) {
		struct thread *td = curthread;
		printf("td msr %x\n", td->td_md.md_saved_msr);
		panic("ints disabled in idleproc!");
	}
#endif
}

/*
 * Set up registers on exec.
 */
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe	*tf;
	struct ps_strings	arginfo;

	tf = trapframe(td);
	bzero(tf, sizeof *tf);
	tf->fixreg[1] = -roundup(-stack + 8, 16);

	/*
	 * XXX Machine-independent code has already copied arguments and
	 * XXX environment to userland.  Get them back here.
	 */
	(void)copyin((char *)PS_STRINGS, &arginfo, sizeof(arginfo));

	/*
	 * Set up arguments for _start():
	 *	_start(argc, argv, envp, obj, cleanup, ps_strings);
	 *
	 * Notes:
	 *	- obj and cleanup are the auxiliary and termination
	 *	  vectors.  They are fixed up by ld.elf_so.
	 *	- ps_strings is a NetBSD extension, and will be
	 *	  ignored by executables which are strictly
	 *	  compliant with the SVR4 ABI.
	 *
	 * XXX We have to set both regs and retval here due to different
	 * XXX calling convention in trap.c and init_main.c.
	 */
	/*
	 * XXX PG: these get overwritten in the syscall return code.
	 * execve() should return EJUSTRETURN, like it does on NetBSD.
	 * Emulate by setting the syscall return value cells. The
	 * registers still have to be set for init's fork trampoline.
	 */
	td->td_retval[0] = arginfo.ps_nargvstr;
	td->td_retval[1] = (register_t)arginfo.ps_argvstr;
	tf->fixreg[3] = arginfo.ps_nargvstr;
	tf->fixreg[4] = (register_t)arginfo.ps_argvstr;
	tf->fixreg[5] = (register_t)arginfo.ps_envstr;
	tf->fixreg[6] = 0;			/* auxiliary vector */
	tf->fixreg[7] = 0;			/* termination vector */
	tf->fixreg[8] = (register_t)PS_STRINGS;	/* NetBSD extension */

	tf->srr0 = entry;
	tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
	td->td_pcb->pcb_flags = 0;
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	tf = td->td_frame;
	memcpy(regs, tf, sizeof(struct reg));

	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	/* No debug registers on PowerPC */
	return (ENOSYS);
}

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct pcb *pcb;

	pcb = td->td_pcb;

	if ((pcb->pcb_flags & PCB_FPU) == 0)
		memset(fpregs, 0, sizeof(struct fpreg));
	else
		memcpy(fpregs, &pcb->pcb_fpu, sizeof(struct fpreg));

	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	tf = td->td_frame;
	memcpy(tf, regs, sizeof(struct reg));

	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	/* No debug registers on PowerPC */
	return (ENOSYS);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_flags & PCB_FPU) == 0)
		enable_fpu(td);
	memcpy(&pcb->pcb_fpu, fpregs, sizeof(struct fpreg));

	return (0);
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr0 = (register_t)addr;

	return (0);
}

int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 |= PSL_SE;

	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 &= ~PSL_SE;

	return (0);
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{

}

void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_msr = intr_disable();
	td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(td->td_md.md_saved_msr);
}

/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	struct thread	*td;
	faultbuf	env, *oldfault;
	int		rv;

	td = PCPU_GET(curthread);
	oldfault = td->td_pcb->pcb_onfault;
	if ((rv = setfault(env)) != 0) {
		td->td_pcb->pcb_onfault = oldfault;
		return rv;
	}

	memcpy(dst, src, len);

	td->td_pcb->pcb_onfault = oldfault;
	return (0);
}

void
asm_panic(char *pstr)
{
	panic(pstr);
}

int db_trap_glue(struct trapframe *);		/* Called from trap_subr.S */

int
db_trap_glue(struct trapframe *frame)
{
	if (!(frame->srr1 & PSL_PR)
	    && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
		|| (frame->exc == EXC_PGM
		    && (frame->srr1 & 0x20000))
		|| frame->exc == EXC_BPT
		|| frame->exc == EXC_DSI)) {
		int type = frame->exc;
		if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
			type = T_BREAKPOINT;
		}
		return (kdb_trap(type, 0, frame));
	}

	return (0);
}