1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause
3 *
4 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
5 * Copyright (C) 1995, 1996 TooLs GmbH.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by TooLs GmbH.
19 * 4. The name of TooLs GmbH may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33 /*-
34 * Copyright (C) 2001 Benno Rice
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 *
46 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
47 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
48 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
49 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
50 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
51 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
52 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
53 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
54 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
55 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
57 */
58
59 #include <sys/cdefs.h>
60 #include "opt_fpu_emu.h"
61
62 #include <sys/param.h>
63 #include <sys/proc.h>
64 #include <sys/systm.h>
65 #include <sys/bio.h>
66 #include <sys/buf.h>
67 #include <sys/bus.h>
68 #include <sys/cons.h>
69 #include <sys/cpu.h>
70 #include <sys/exec.h>
71 #include <sys/imgact.h>
72 #include <sys/kernel.h>
73 #include <sys/ktr.h>
74 #include <sys/lock.h>
75 #include <sys/malloc.h>
76 #include <sys/mutex.h>
77 #include <sys/reg.h>
78 #include <sys/signalvar.h>
79 #include <sys/syscallsubr.h>
80 #include <sys/syscall.h>
81 #include <sys/sysent.h>
82 #include <sys/sysproto.h>
83 #include <sys/ucontext.h>
84 #include <sys/uio.h>
85
86 #include <machine/altivec.h>
87 #include <machine/cpu.h>
88 #include <machine/elf.h>
89 #include <machine/fpu.h>
90 #include <machine/pcb.h>
91 #include <machine/sigframe.h>
92 #include <machine/trap.h>
93 #include <machine/vmparam.h>
94
95 #include <vm/vm.h>
96 #include <vm/vm_param.h>
97 #include <vm/pmap.h>
98 #include <vm/vm_map.h>
99
100 #ifdef FPU_EMU
101 #include <powerpc/fpu/fpu_extern.h>
102 #endif
103
104 #ifdef COMPAT_FREEBSD32
105 #include <compat/freebsd32/freebsd32_signal.h>
106 #include <compat/freebsd32/freebsd32_util.h>
107 #include <compat/freebsd32/freebsd32_proto.h>
108
109 typedef struct __ucontext32 {
110 sigset_t uc_sigmask;
111 mcontext32_t uc_mcontext;
112 uint32_t uc_link;
113 struct sigaltstack32 uc_stack;
114 uint32_t uc_flags;
115 uint32_t __spare__[4];
116 } ucontext32_t;
117
118 struct sigframe32 {
119 ucontext32_t sf_uc;
120 struct __siginfo32 sf_si;
121 };
122
123 static int grab_mcontext32(struct thread *td, mcontext32_t *, int flags);
124 #endif
125
126 static int grab_mcontext(struct thread *, mcontext_t *, int);
127
128 static void cleanup_power_extras(struct thread *);
129
130 #ifdef __powerpc64__
131 extern struct sysentvec elf64_freebsd_sysvec_v2;
132 #endif
133
134 #ifdef __powerpc64__
135 _Static_assert(sizeof(mcontext_t) == 1392, "mcontext_t size incorrect");
136 _Static_assert(sizeof(ucontext_t) == 1472, "ucontext_t size incorrect");
137 _Static_assert(sizeof(siginfo_t) == 80, "siginfo_t size incorrect");
138 #ifdef COMPAT_FREEBSD32
139 _Static_assert(sizeof(mcontext32_t) == 1224, "mcontext32_t size incorrect");
140 _Static_assert(sizeof(ucontext32_t) == 1280, "ucontext32_t size incorrect");
141 _Static_assert(sizeof(struct __siginfo32) == 64, "struct __siginfo32 size incorrect");
142 #endif /* COMPAT_FREEBSD32 */
143 #else /* powerpc */
144 _Static_assert(sizeof(mcontext_t) == 1224, "mcontext_t size incorrect");
145 _Static_assert(sizeof(ucontext_t) == 1280, "ucontext_t size incorrect");
146 _Static_assert(sizeof(siginfo_t) == 64, "siginfo_t size incorrect");
147 #endif
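/*
 * These context structures are visible to userland (signal frames,
 * getcontext(2)), so their sizes are pinned here; growing any of them is an
 * ABI change and would also mean revisiting _MC_VERSION, which
 * set_mcontext() below uses to reject mismatched mc_vers/mc_len.
 */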
148
149 void
150 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
151 {
152 struct trapframe *tf;
153 struct sigacts *psp;
154 struct sigframe sf;
155 struct thread *td;
156 struct proc *p;
157 #ifdef COMPAT_FREEBSD32
158 struct __siginfo32 siginfo32;
159 struct sigframe32 sf32;
160 #endif
161 size_t sfpsize;
162 caddr_t sfp, usfp;
163 register_t sp;
164 int oonstack, rndfsize;
165 int sig;
166 int code;
167
168 td = curthread;
169 p = td->td_proc;
170 PROC_LOCK_ASSERT(p, MA_OWNED);
171
172 psp = p->p_sigacts;
173 mtx_assert(&psp->ps_mtx, MA_OWNED);
174 tf = td->td_frame;
175
176 /*
177 * Fill siginfo structure.
178 */
179 ksi->ksi_info.si_signo = ksi->ksi_signo;
180 ksi->ksi_info.si_addr =
181 (void *)((tf->exc == EXC_DSI || tf->exc == EXC_DSE) ?
182 tf->dar : tf->srr0);
183
184 #ifdef COMPAT_FREEBSD32
185 if (SV_PROC_FLAG(p, SV_ILP32)) {
186 siginfo_to_siginfo32(&ksi->ksi_info, &siginfo32);
187 sig = siginfo32.si_signo;
188 code = siginfo32.si_code;
189 sfp = (caddr_t)&sf32;
190 sfpsize = sizeof(sf32);
191 rndfsize = roundup(sizeof(sf32), 16);
192 sp = (uint32_t)tf->fixreg[1];
193 oonstack = sigonstack(sp);
194
195 /*
196 * Save user context
197 */
198
199 memset(&sf32, 0, sizeof(sf32));
200 grab_mcontext32(td, &sf32.sf_uc.uc_mcontext, 0);
201
202 sf32.sf_uc.uc_sigmask = *mask;
203 sf32.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp;
204 sf32.sf_uc.uc_stack.ss_size = (uint32_t)td->td_sigstk.ss_size;
205 sf32.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
206 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
207
208 sf32.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
209 } else {
210 #endif
211 sig = ksi->ksi_signo;
212 code = ksi->ksi_code;
213 sfp = (caddr_t)&sf;
214 sfpsize = sizeof(sf);
215 #ifdef __powerpc64__
216 /*
217 * 64-bit PPC defines a 512 byte red zone below
218 * the existing stack (ELF ABI v2 §2.2.2.4)
219 */
220 rndfsize = 512 + roundup(sizeof(sf), 48);
221 #else
222 rndfsize = roundup(sizeof(sf), 16);
223 #endif
224 sp = tf->fixreg[1];
225 oonstack = sigonstack(sp);
226
227 /*
228 * Save user context
229 */
230
231 memset(&sf, 0, sizeof(sf));
232 grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
233
234 sf.sf_uc.uc_sigmask = *mask;
235 sf.sf_uc.uc_stack = td->td_sigstk;
236 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
237 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
238
239 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
240 #ifdef COMPAT_FREEBSD32
241 }
242 #endif
243
244 CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
245 catcher, sig);
246
247 /*
248 * Allocate and validate space for the signal handler context.
249 */
250 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
251 SIGISMEMBER(psp->ps_sigonstack, sig)) {
252 usfp = (void *)(((uintptr_t)td->td_sigstk.ss_sp +
253 td->td_sigstk.ss_size - rndfsize) & ~0xFul);
254 } else {
255 usfp = (void *)((sp - rndfsize) & ~0xFul);
256 }
257
258 /*
259 * Set Floating Point facility to "Ignore Exceptions Mode" so signal
260 * handler can run.
261 */
262 if (td->td_pcb->pcb_flags & PCB_FPU)
263 tf->srr1 = tf->srr1 & ~(PSL_FE0 | PSL_FE1);
264
265 /*
266 * Set up the registers to return to sigcode.
267 *
268 * r1/sp - sigframe ptr
269 * lr - sig function, dispatched to by blrl in trampoline
270 * r3 - sig number
271 * r4 - SIGINFO ? &siginfo : exception code
272 * r5 - user context
273 * srr0 - trampoline function addr
274 */
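/*
 * A rough sketch of how the userland side consumes this (the actual sigcode
 * is the trampoline mapped at PROC_SIGCODE(p) below; register roles are per
 * the comment above, the exact trampoline instructions may differ):
 *
 *	trampoline:	blrl through lr to catcher(r3 = sig, r4, r5)
 *	on return:	point r3 at the saved ucontext and issue sigreturn(2)
 */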
275 tf->lr = (register_t)catcher;
276 tf->fixreg[1] = (register_t)usfp;
277 tf->fixreg[FIRSTARG] = sig;
278 #ifdef COMPAT_FREEBSD32
279 tf->fixreg[FIRSTARG+2] = (register_t)usfp +
280 ((SV_PROC_FLAG(p, SV_ILP32)) ?
281 offsetof(struct sigframe32, sf_uc) :
282 offsetof(struct sigframe, sf_uc));
283 #else
284 tf->fixreg[FIRSTARG+2] = (register_t)usfp +
285 offsetof(struct sigframe, sf_uc);
286 #endif
287 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
288 /*
289 * Signal handler installed with SA_SIGINFO.
290 */
291 #ifdef COMPAT_FREEBSD32
292 if (SV_PROC_FLAG(p, SV_ILP32)) {
293 sf32.sf_si = siginfo32;
294 tf->fixreg[FIRSTARG+1] = (register_t)usfp +
295 offsetof(struct sigframe32, sf_si);
297 } else {
298 #endif
299 tf->fixreg[FIRSTARG+1] = (register_t)usfp +
300 offsetof(struct sigframe, sf_si);
301 sf.sf_si = ksi->ksi_info;
302 #ifdef COMPAT_FREEBSD32
303 }
304 #endif
305 } else {
306 /* Old FreeBSD-style arguments. */
307 tf->fixreg[FIRSTARG+1] = code;
308 tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ?
309 tf->dar : tf->srr0;
310 }
311 mtx_unlock(&psp->ps_mtx);
312 PROC_UNLOCK(p);
313
314 tf->srr0 = (register_t)PROC_SIGCODE(p);
315
316 /*
317 * copy the frame out to userland.
318 */
319 if (copyout(sfp, usfp, sfpsize) != 0) {
320 /*
321 * Process has trashed its stack. Kill it.
322 */
323 CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
324 PROC_LOCK(p);
325 sigexit(td, SIGILL);
326 }
327
328 CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td,
329 tf->srr0, tf->fixreg[1]);
330
331 PROC_LOCK(p);
332 mtx_lock(&psp->ps_mtx);
333 }
334
335 int
336 sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
337 {
338 ucontext_t uc;
339 int error;
340
341 CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
342
343 if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
344 CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
345 return (EFAULT);
346 }
347
348 error = set_mcontext(td, &uc.uc_mcontext);
349 if (error != 0)
350 return (error);
351
352 kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
353
354 CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
355 td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
356
357 return (EJUSTRETURN);
358 }
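/*
 * sigreturn(2) is normally reached from the signal trampoline once the
 * handler returns, with sigcntxp pointing at the ucontext_t that sendsig()
 * copied out.  An equivalent direct call would look roughly like this
 * (illustrative only):
 *
 *	ucontext_t *ucp = ...address of the frame built by sendsig()...;
 *	sigreturn(ucp);		does not return on success
 */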
359
360 #ifdef COMPAT_FREEBSD4
361 int
362 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
363 {
364
365 return sys_sigreturn(td, (struct sigreturn_args *)uap);
366 }
367 #endif
368
369 /*
370 * Construct a PCB from a trapframe. This is called from kdb_trap() where
371 * we want to start a backtrace from the function that caused us to enter
372 * the debugger. We have the context in the trapframe, but base the trace
373 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
374 * enough for a backtrace.
375 */
376 void
377 makectx(struct trapframe *tf, struct pcb *pcb)
378 {
379
380 pcb->pcb_lr = tf->srr0;
381 pcb->pcb_sp = tf->fixreg[1];
382 }
383
384 /*
385 * get_mcontext/sendsig helper routine that doesn't touch the
386 * proc lock
387 */
388 static int
389 grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
390 {
391 struct pcb *pcb;
392 int i;
393
394 pcb = td->td_pcb;
395
396 memset(mcp, 0, sizeof(mcontext_t));
397
398 mcp->mc_vers = _MC_VERSION;
399 mcp->mc_flags = 0;
400 memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe));
401 if (flags & GET_MC_CLEAR_RET) {
402 mcp->mc_gpr[3] = 0;
403 mcp->mc_gpr[4] = 0;
404 }
405
406 /*
407 * This assumes that floating-point context is *not* lazy,
408 * so if the thread has used FP there would have been a
409 * FP-unavailable exception that would have set things up
410 * correctly.
411 */
412 if (pcb->pcb_flags & PCB_FPREGS) {
413 if (pcb->pcb_flags & PCB_FPU) {
414 KASSERT(td == curthread,
415 ("get_mcontext: fp save not curthread"));
416 critical_enter();
417 save_fpu(td);
418 critical_exit();
419 }
420 mcp->mc_flags |= _MC_FP_VALID;
421 memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
422 for (i = 0; i < 32; i++)
423 memcpy(&mcp->mc_fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
424 sizeof(double));
425 }
426
427 if (pcb->pcb_flags & PCB_VSX) {
428 mcp->mc_flags |= _MC_VS_VALID;
429 for (i = 0; i < 32; i++)
430 memcpy(&mcp->mc_vsxfpreg[i],
431 &pcb->pcb_fpu.fpr[i].vsr[2], sizeof(double));
432 }
433
434 /*
435 * Repeat for Altivec context
436 */
437
438 if (pcb->pcb_flags & PCB_VECREGS) {
439 if (pcb->pcb_flags & PCB_VEC) {
440 KASSERT(td == curthread,
441 ("get_mcontext: altivec save not curthread"));
442 critical_enter();
443 save_vec(td);
444 critical_exit();
445 }
446 mcp->mc_flags |= _MC_AV_VALID;
447 mcp->mc_vscr = pcb->pcb_vec.vscr;
448 mcp->mc_vrsave = pcb->pcb_vec.vrsave;
449 memcpy(mcp->mc_avec, pcb->pcb_vec.vr, sizeof(mcp->mc_avec));
450 }
451
452 mcp->mc_len = sizeof(*mcp);
453
454 return (0);
455 }
456
457 int
458 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
459 {
460 int error;
461
462 error = grab_mcontext(td, mcp, flags);
463 if (error == 0) {
464 PROC_LOCK(curthread->td_proc);
465 mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
466 PROC_UNLOCK(curthread->td_proc);
467 }
468
469 return (error);
470 }
471
472 int
473 set_mcontext(struct thread *td, mcontext_t *mcp)
474 {
475 struct pcb *pcb;
476 struct trapframe *tf;
477 register_t tls;
478 register_t msr;
479 int i;
480
481 pcb = td->td_pcb;
482 tf = td->td_frame;
483
484 if (mcp->mc_vers != _MC_VERSION || mcp->mc_len != sizeof(*mcp))
485 return (EINVAL);
486
487 /*
488 * Don't let the user change privileged MSR bits.
489 *
490 * psl_userstatic is used here to mask off any bits that can
491 * legitimately vary between user contexts (Floating point
492 * exception control and any facilities that we are using the
493 * "enable on first use" pattern with.)
494 *
495 * All other bits are required to match psl_userset(32).
496 *
497 * Remember to update the platform cpu_init code when implementing
498 * support for a new conditional facility!
499 */
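/*
 * For example (hypothetical forged context): an mcontext whose MSR clears
 * PSL_PR, or flips a facility bit that is not handled lazily, differs from
 * tf->srr1 in a bit covered by psl_userstatic and is rejected with EINVAL
 * instead of being copied into the trapframe.
 */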
500 if ((mcp->mc_srr1 & psl_userstatic) != (tf->srr1 & psl_userstatic)) {
501 return (EINVAL);
502 }
503
504 /* Copy trapframe, preserving TLS pointer across context change */
505 if (SV_PROC_FLAG(td->td_proc, SV_LP64))
506 tls = tf->fixreg[13];
507 else
508 tls = tf->fixreg[2];
509 memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));
510 if (SV_PROC_FLAG(td->td_proc, SV_LP64))
511 tf->fixreg[13] = tls;
512 else
513 tf->fixreg[2] = tls;
514
515 /*
516 * Force the FPU back off to ensure the new context will not bypass
517 * the enable_fpu() setup code accidentally.
518 *
519 * This prevents an issue where a process that uses floating point
520 * inside a signal handler could end up in a state where the MSR
521 * did not match pcb_flags.
522 *
523 * Additionally, ensure VSX is disabled as well, as it is illegal
524 * to leave it turned on when FP or VEC are off.
525 */
526 tf->srr1 &= ~(PSL_FP | PSL_VSX | PSL_VEC);
527 pcb->pcb_flags &= ~(PCB_FPU | PCB_VSX | PCB_VEC);
528
529 /*
530 * Ensure the FPU is also disabled in hardware.
531 *
532 * Without this, it's possible for the register reload to fail if we
533 * don't switch to a FPU disabled context before resuming the original
534 * thread. Specifically, if the FPU/VSX unavailable exception is never
535 * hit, then whatever data is still in the FP/VSX registers when
536 * sigreturn is called will be used by the resumed thread, instead of the
537 * previously saved data from the mcontext.
538 */
539 critical_enter();
540 msr = mfmsr() & ~(PSL_FP | PSL_VSX | PSL_VEC);
541 isync();
542 mtmsr(msr);
543 critical_exit();
544
545 if (mcp->mc_flags & _MC_FP_VALID) {
546 /* enable_fpu() will happen lazily on a fault */
547 pcb->pcb_flags |= PCB_FPREGS;
548 memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double));
549 bzero(pcb->pcb_fpu.fpr, sizeof(pcb->pcb_fpu.fpr));
550 for (i = 0; i < 32; i++) {
551 memcpy(&pcb->pcb_fpu.fpr[i].fpr, &mcp->mc_fpreg[i],
552 sizeof(double));
553 }
554 if (mcp->mc_flags & _MC_VS_VALID) {
555 for (i = 0; i < 32; i++) {
556 memcpy(&pcb->pcb_fpu.fpr[i].vsr[2],
557 &mcp->mc_vsxfpreg[i], sizeof(double));
558 }
559 }
560 }
561
562 if (mcp->mc_flags & _MC_AV_VALID) {
563 /* enable_vec() will happen lazily on a fault */
564 pcb->pcb_flags |= PCB_VECREGS;
565 pcb->pcb_vec.vscr = mcp->mc_vscr;
566 pcb->pcb_vec.vrsave = mcp->mc_vrsave;
567 memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec));
568 }
569
570 return (0);
571 }
572
573 /*
574 * Clean up extra POWER state. Some per-process registers and states are not
575 * managed by the MSR, so must be cleaned up explicitly on thread exit.
576 *
577 * Currently this includes:
578 * DSCR -- Data stream control register (PowerISA 2.06+)
579 * FSCR -- Facility Status and Control Register (PowerISA 2.07+)
580 */
581 static void
582 cleanup_power_extras(struct thread *td)
583 {
584 uint32_t pcb_flags;
585
586 if (td != curthread)
587 return;
588
589 pcb_flags = td->td_pcb->pcb_flags;
590 /* Clean up registers not managed by MSR. */
591 if (pcb_flags & PCB_CFSCR)
592 mtspr(SPR_FSCR, 0);
593 if (pcb_flags & PCB_CDSCR)
594 mtspr(SPR_DSCRP, 0);
595
596 if (pcb_flags & PCB_FPU)
597 cleanup_fpscr();
598 }
599
600 /*
601 * Ensure the PCB has been updated in preparation for copying a thread.
602 *
603 * This is needed because normally this only happens during switching tasks,
604 * but when we are cloning a thread, we need the updated state before doing
605 * the actual copy, so the new thread inherits the current state instead of
606 * the state at the last task switch.
607 *
608 * Keep this in sync with the assembly code in cpu_switch()!
609 */
610 void
611 cpu_update_pcb(struct thread *td)
612 {
613 uint32_t pcb_flags;
614 struct pcb *pcb;
615
616 KASSERT(td == curthread,
617 ("cpu_update_pcb: td is not curthread"));
618
619 pcb = td->td_pcb;
620
621 pcb_flags = pcb->pcb_flags;
622
623 #if defined(__powerpc64__)
624 /* Are *any* FSCR flags in use? */
625 if (pcb_flags & PCB_CFSCR) {
626 pcb->pcb_fscr = mfspr(SPR_FSCR);
627
628 if (pcb->pcb_fscr & FSCR_EBB) {
629 pcb->pcb_ebb.ebbhr = mfspr(SPR_EBBHR);
630 pcb->pcb_ebb.ebbrr = mfspr(SPR_EBBRR);
631 pcb->pcb_ebb.bescr = mfspr(SPR_BESCR);
632 }
633 if (pcb->pcb_fscr & FSCR_LM) {
634 pcb->pcb_lm.lmrr = mfspr(SPR_LMRR);
635 pcb->pcb_lm.lmser = mfspr(SPR_LMSER);
636 }
637 if (pcb->pcb_fscr & FSCR_TAR)
638 pcb->pcb_tar = mfspr(SPR_TAR);
639 }
640
641 /*
642 * This is outside of the PCB_CFSCR check because it can be set
643 * independently when running on POWER7/POWER8.
644 */
645 if (pcb_flags & PCB_CDSCR)
646 pcb->pcb_dscr = mfspr(SPR_DSCRP);
647 #endif
648
649 #if defined(__SPE__)
650 /*
651 * On E500v2, single-precision scalar instructions and access to
652 * SPEFSCR may be used without PSL_VEC turned on, as long as they
653 * limit themselves to the low word of the registers.
654 *
655 * As such, we need to unconditionally save SPEFSCR, even though
656 * it is also updated in save_vec_nodrop().
657 */
658 pcb->pcb_vec.vscr = mfspr(SPR_SPEFSCR);
659 #endif
660
661 if (pcb_flags & PCB_FPU)
662 save_fpu_nodrop(td);
663
664 if (pcb_flags & PCB_VEC)
665 save_vec_nodrop(td);
666 }
667
668 /*
669 * Set up registers on exec.
670 */
671 void
672 exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
673 {
674 struct trapframe *tf;
675 register_t argc;
676
677 tf = trapframe(td);
678 bzero(tf, sizeof *tf);
679 #ifdef __powerpc64__
680 tf->fixreg[1] = -roundup(-stack + 48, 16);
681 #else
682 tf->fixreg[1] = -roundup(-stack + 8, 16);
683 #endif
684
685 /*
686 * Set up arguments for _start():
687 * _start(argc, argv, envp, obj, cleanup, ps_strings);
688 *
689 * Notes:
690 * - obj and cleanup are the auxiliary and termination
691 * vectors. They are fixed up by ld.elf_so.
692 * - ps_strings is a NetBSD extension, and will be
693 * ignored by executables which are strictly
694 * compliant with the SVR4 ABI.
695 */
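/*
 * The initial user stack built by the image activator looks roughly like
 * this, which is why r4 and r5 below are simple offsets from `stack':
 *
 *	stack + 0				argc
 *	stack + sizeof(register_t)		argv[0] ... argv[argc-1], NULL
 *	stack + (2 + argc)*sizeof(register_t)	envp[0] ..., NULL
 */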
696
697 /* Collect argc from the user stack */
698 argc = fuword((void *)stack);
699
700 tf->fixreg[3] = argc;
701 tf->fixreg[4] = stack + sizeof(register_t);
702 tf->fixreg[5] = stack + (2 + argc)*sizeof(register_t);
703 tf->fixreg[6] = 0; /* auxiliary vector */
704 tf->fixreg[7] = 0; /* termination vector */
705 tf->fixreg[8] = (register_t)imgp->ps_strings; /* NetBSD extension */
706
707 tf->srr0 = imgp->entry_addr;
708 #ifdef __powerpc64__
709 tf->fixreg[12] = imgp->entry_addr;
710 #endif
711 tf->srr1 = psl_userset | PSL_FE_DFLT;
712 cleanup_power_extras(td);
713 td->td_pcb->pcb_flags = 0;
714 }
715
716 #ifdef COMPAT_FREEBSD32
717 void
718 ppc32_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
719 {
720 struct trapframe *tf;
721 uint32_t argc;
722
723 tf = trapframe(td);
724 bzero(tf, sizeof *tf);
725 tf->fixreg[1] = -roundup(-stack + 8, 16);
726
727 argc = fuword32((void *)stack);
728
729 tf->fixreg[3] = argc;
730 tf->fixreg[4] = stack + sizeof(uint32_t);
731 tf->fixreg[5] = stack + (2 + argc)*sizeof(uint32_t);
732 tf->fixreg[6] = 0; /* auxiliary vector */
733 tf->fixreg[7] = 0; /* termination vector */
734 tf->fixreg[8] = (register_t)imgp->ps_strings; /* NetBSD extension */
735
736 tf->srr0 = imgp->entry_addr;
737 tf->srr1 = psl_userset32 | PSL_FE_DFLT;
738 cleanup_power_extras(td);
739 td->td_pcb->pcb_flags = 0;
740 }
741 #endif
742
743 int
744 fill_regs(struct thread *td, struct reg *regs)
745 {
746 struct trapframe *tf;
747
748 tf = td->td_frame;
749 memcpy(regs, tf, sizeof(struct reg));
750
751 return (0);
752 }
753
754 int
755 fill_dbregs(struct thread *td, struct dbreg *dbregs)
756 {
757 /* No debug registers on PowerPC */
758 return (ENOSYS);
759 }
760
761 int
762 fill_fpregs(struct thread *td, struct fpreg *fpregs)
763 {
764 struct pcb *pcb;
765 int i;
766
767 pcb = td->td_pcb;
768
769 if ((pcb->pcb_flags & PCB_FPREGS) == 0)
770 memset(fpregs, 0, sizeof(struct fpreg));
771 else {
772 memcpy(&fpregs->fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
773 for (i = 0; i < 32; i++)
774 memcpy(&fpregs->fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
775 sizeof(double));
776 }
777
778 return (0);
779 }
780
781 int
782 set_regs(struct thread *td, struct reg *regs)
783 {
784 struct trapframe *tf;
785
786 tf = td->td_frame;
787 memcpy(tf, regs, sizeof(struct reg));
788
789 return (0);
790 }
791
792 int
793 set_dbregs(struct thread *td, struct dbreg *dbregs)
794 {
795 /* No debug registers on PowerPC */
796 return (ENOSYS);
797 }
798
799 int
800 set_fpregs(struct thread *td, struct fpreg *fpregs)
801 {
802 struct pcb *pcb;
803 int i;
804
805 pcb = td->td_pcb;
806 pcb->pcb_flags |= PCB_FPREGS;
807 memcpy(&pcb->pcb_fpu.fpscr, &fpregs->fpscr, sizeof(double));
808 for (i = 0; i < 32; i++) {
809 memcpy(&pcb->pcb_fpu.fpr[i].fpr, &fpregs->fpreg[i],
810 sizeof(double));
811 }
812
813 return (0);
814 }
815
816 #ifdef COMPAT_FREEBSD32
817 int
818 set_regs32(struct thread *td, struct reg32 *regs)
819 {
820 struct trapframe *tf;
821 int i;
822
823 tf = td->td_frame;
824 for (i = 0; i < 32; i++)
825 tf->fixreg[i] = regs->fixreg[i];
826 tf->lr = regs->lr;
827 tf->cr = regs->cr;
828 tf->xer = regs->xer;
829 tf->ctr = regs->ctr;
830 tf->srr0 = regs->pc;
831
832 return (0);
833 }
834
835 int
836 fill_regs32(struct thread *td, struct reg32 *regs)
837 {
838 struct trapframe *tf;
839 int i;
840
841 tf = td->td_frame;
842 for (i = 0; i < 32; i++)
843 regs->fixreg[i] = tf->fixreg[i];
844 regs->lr = tf->lr;
845 regs->cr = tf->cr;
846 regs->xer = tf->xer;
847 regs->ctr = tf->ctr;
848 regs->pc = tf->srr0;
849
850 return (0);
851 }
852
853 static int
854 grab_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
855 {
856 mcontext_t mcp64;
857 int i, error;
858
859 error = grab_mcontext(td, &mcp64, flags);
860 if (error != 0)
861 return (error);
862
863 mcp->mc_vers = mcp64.mc_vers;
864 mcp->mc_flags = mcp64.mc_flags;
865 mcp->mc_onstack = mcp64.mc_onstack;
866 mcp->mc_len = mcp64.mc_len;
867 memcpy(mcp->mc_avec,mcp64.mc_avec,sizeof(mcp64.mc_avec));
868 memcpy(mcp->mc_av,mcp64.mc_av,sizeof(mcp64.mc_av));
869 for (i = 0; i < 42; i++)
870 mcp->mc_frame[i] = mcp64.mc_frame[i];
871 memcpy(mcp->mc_fpreg,mcp64.mc_fpreg,sizeof(mcp64.mc_fpreg));
872 memcpy(mcp->mc_vsxfpreg,mcp64.mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg));
873
874 return (0);
875 }
876
877 static int
878 get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
879 {
880 int error;
881
882 error = grab_mcontext32(td, mcp, flags);
883 if (error == 0) {
884 PROC_LOCK(curthread->td_proc);
885 mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
886 PROC_UNLOCK(curthread->td_proc);
887 }
888
889 return (error);
890 }
891
892 static int
893 set_mcontext32(struct thread *td, mcontext32_t *mcp)
894 {
895 mcontext_t mcp64;
896 int i, error;
897
898 mcp64.mc_vers = mcp->mc_vers;
899 mcp64.mc_flags = mcp->mc_flags;
900 mcp64.mc_onstack = mcp->mc_onstack;
901 mcp64.mc_len = mcp->mc_len;
902 memcpy(mcp64.mc_avec,mcp->mc_avec,sizeof(mcp64.mc_avec));
903 memcpy(mcp64.mc_av,mcp->mc_av,sizeof(mcp64.mc_av));
904 for (i = 0; i < 42; i++)
905 mcp64.mc_frame[i] = mcp->mc_frame[i];
906 mcp64.mc_srr1 |= (td->td_frame->srr1 & 0xFFFFFFFF00000000ULL);
907 memcpy(mcp64.mc_fpreg,mcp->mc_fpreg,sizeof(mcp64.mc_fpreg));
908 memcpy(mcp64.mc_vsxfpreg,mcp->mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg));
909
910 error = set_mcontext(td, &mcp64);
911
912 return (error);
913 }
914 #endif
915
916 #ifdef COMPAT_FREEBSD32
917 int
918 freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap)
919 {
920 ucontext32_t uc;
921 int error;
922
923 CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
924
925 if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
926 CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
927 return (EFAULT);
928 }
929
930 error = set_mcontext32(td, &uc.uc_mcontext);
931 if (error != 0)
932 return (error);
933
934 /*
935 * Save FPU state if needed. The user may have changed it in the
936 * signal handler.
937 */
938 if (uc.uc_mcontext.mc_srr1 & PSL_FP)
939 save_fpu(td);
940
941 kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
942
943 CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
944 td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
945
946 return (EJUSTRETURN);
947 }
948
949 /*
950 * The first two fields of a ucontext_t are the signal mask and the machine
951 * context. The next field is uc_link; we want to avoid destroying the link
952 * when copying out contexts.
953 */
954 #define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link)
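/*
 * Given the ucontext32_t layout above (uc_sigmask, uc_mcontext, then
 * uc_link), copying UC32_COPY_SIZE bytes transfers exactly the signal mask
 * and machine context and stops short of uc_link.
 */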
955
956 int
957 freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap)
958 {
959 ucontext32_t uc;
960 int ret;
961
962 if (uap->ucp == NULL)
963 ret = EINVAL;
964 else {
965 bzero(&uc, sizeof(uc));
966 get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
967 PROC_LOCK(td->td_proc);
968 uc.uc_sigmask = td->td_sigmask;
969 PROC_UNLOCK(td->td_proc);
970 ret = copyout(&uc, uap->ucp, UC32_COPY_SIZE);
971 }
972 return (ret);
973 }
974
975 int
976 freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap)
977 {
978 ucontext32_t uc;
979 int ret;
980
981 if (uap->ucp == NULL)
982 ret = EINVAL;
983 else {
984 ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
985 if (ret == 0) {
986 ret = set_mcontext32(td, &uc.uc_mcontext);
987 if (ret == 0) {
988 kern_sigprocmask(td, SIG_SETMASK,
989 &uc.uc_sigmask, NULL, 0);
990 }
991 }
992 }
993 return (ret == 0 ? EJUSTRETURN : ret);
994 }
995
996 int
997 freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap)
998 {
999 ucontext32_t uc;
1000 int ret;
1001
1002 if (uap->oucp == NULL || uap->ucp == NULL)
1003 ret = EINVAL;
1004 else {
1005 bzero(&uc, sizeof(uc));
1006 get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
1007 PROC_LOCK(td->td_proc);
1008 uc.uc_sigmask = td->td_sigmask;
1009 PROC_UNLOCK(td->td_proc);
1010 ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE);
1011 if (ret == 0) {
1012 ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
1013 if (ret == 0) {
1014 ret = set_mcontext32(td, &uc.uc_mcontext);
1015 if (ret == 0) {
1016 kern_sigprocmask(td, SIG_SETMASK,
1017 &uc.uc_sigmask, NULL, 0);
1018 }
1019 }
1020 }
1021 }
1022 return (ret == 0 ? EJUSTRETURN : ret);
1023 }
1024
1025 #endif
1026
1027 void
1028 cpu_set_syscall_retval(struct thread *td, int error)
1029 {
1030 struct proc *p;
1031 struct trapframe *tf;
1032 int fixup;
1033
1034 if (error == EJUSTRETURN)
1035 return;
1036
1037 p = td->td_proc;
1038 tf = td->td_frame;
1039
1040 if (tf->fixreg[0] == SYS___syscall &&
1041 (SV_PROC_FLAG(p, SV_ILP32))) {
1042 int code = tf->fixreg[FIRSTARG + 1];
1043 fixup = (
1044 #if defined(COMPAT_FREEBSD6) && defined(SYS_freebsd6_lseek)
1045 code != SYS_freebsd6_lseek &&
1046 #endif
1047 code != SYS_lseek) ? 1 : 0;
1048 } else
1049 fixup = 0;
1050
1051 switch (error) {
1052 case 0:
1053 if (fixup) {
1054 /*
1055 * 64-bit return, 32-bit syscall: return the 32-bit result as a 64-bit quantity in the r3:r4 pair.
1056 */
1057 tf->fixreg[FIRSTARG] = 0;
1058 tf->fixreg[FIRSTARG + 1] = td->td_retval[0];
1059 } else {
1060 tf->fixreg[FIRSTARG] = td->td_retval[0];
1061 tf->fixreg[FIRSTARG + 1] = td->td_retval[1];
1062 }
1063 tf->cr &= ~0x10000000; /* Unset summary overflow */
1064 break;
1065 case ERESTART:
1066 /*
1067 * Set user's pc back to redo the system call.
1068 */
1069 tf->srr0 -= 4;
1070 break;
1071 default:
1072 tf->fixreg[FIRSTARG] = error;
1073 tf->cr |= 0x10000000; /* Set summary overflow */
1074 break;
1075 }
1076 }
1077
1078 /*
1079 * Threading functions
1080 */
1081 void
1082 cpu_thread_exit(struct thread *td)
1083 {
1084 cleanup_power_extras(td);
1085 }
1086
1087 void
1088 cpu_thread_clean(struct thread *td)
1089 {
1090 }
1091
1092 void
1093 cpu_thread_alloc(struct thread *td)
1094 {
1095 struct pcb *pcb;
1096
1097 pcb = (struct pcb *)((td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
1098 sizeof(struct pcb)) & ~0x2fUL);
1099 td->td_pcb = pcb;
1100 td->td_frame = (struct trapframe *)pcb - 1;
1101 }
1102
1103 void
1104 cpu_thread_free(struct thread *td)
1105 {
1106 }
1107
1108 int
1109 cpu_set_user_tls(struct thread *td, void *tls_base, int thr_flags __unused)
1110 {
1111
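/*
 * By convention the PowerPC thread pointer sits 0x7000 bytes past the end
 * of the two-pointer TCB, hence the 0x7010 (64-bit) and 0x7008 (32-bit)
 * biases applied to tls_base below.
 */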
1112 if (SV_PROC_FLAG(td->td_proc, SV_LP64))
1113 td->td_frame->fixreg[13] = (register_t)tls_base + 0x7010;
1114 else
1115 td->td_frame->fixreg[2] = (register_t)tls_base + 0x7008;
1116 return (0);
1117 }
1118
1119 void
1120 cpu_copy_thread(struct thread *td, struct thread *td0)
1121 {
1122 struct pcb *pcb2;
1123 struct trapframe *tf;
1124 struct callframe *cf;
1125
1126 /* Ensure td0 pcb is up to date. */
1127 if (td0 == curthread)
1128 cpu_update_pcb(td0);
1129
1130 pcb2 = td->td_pcb;
1131
1132 /* Copy the upcall pcb */
1133 bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
1134
1135 /* Create a stack for the new thread */
1136 tf = td->td_frame;
1137 bcopy(td0->td_frame, tf, sizeof(struct trapframe));
1138 tf->fixreg[FIRSTARG] = 0;
1139 tf->fixreg[FIRSTARG + 1] = 0;
1140 tf->cr &= ~0x10000000;
1141
1142 /* Set registers for trampoline to user mode. */
1143 cf = (struct callframe *)tf - 1;
1144 memset(cf, 0, sizeof(struct callframe));
1145 cf->cf_func = (register_t)fork_return;
1146 cf->cf_arg0 = (register_t)td;
1147 cf->cf_arg1 = (register_t)tf;
1148
1149 pcb2->pcb_sp = (register_t)cf;
1150 #if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
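/*
 * ELFv1: fork_trampoline is reached through a function descriptor, so pull
 * the entry point and TOC pointer out of the descriptor instead of using
 * the symbol's address directly.
 */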
1151 pcb2->pcb_lr = ((register_t *)fork_trampoline)[0];
1152 pcb2->pcb_toc = ((register_t *)fork_trampoline)[1];
1153 #else
1154 pcb2->pcb_lr = (register_t)fork_trampoline;
1155 pcb2->pcb_context[0] = pcb2->pcb_lr;
1156 #endif
1157 pcb2->pcb_cpu.aim.usr_vsid = 0;
1158 #ifdef __SPE__
1159 pcb2->pcb_vec.vscr = SPEFSCR_DFLT;
1160 #endif
1161
1162 /* Setup to release spin count in fork_exit(). */
1163 td->td_md.md_spinlock_count = 1;
1164 td->td_md.md_saved_msr = psl_kernset;
1165 }
1166
1167 int
1168 cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
1169 stack_t *stack)
1170 {
1171 struct trapframe *tf;
1172 uintptr_t sp;
1173 #ifdef __powerpc64__
1174 int error;
1175 #endif
1176
1177 tf = td->td_frame;
1178 /* align stack and alloc space for frame ptr and saved LR */
1179 #ifdef __powerpc64__
1180 sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 48) &
1181 ~0x1f;
1182 #else
1183 sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 8) &
1184 ~0x1f;
1185 #endif
1186 bzero(tf, sizeof(struct trapframe));
1187
1188 tf->fixreg[1] = (register_t)sp;
1189 tf->fixreg[3] = (register_t)arg;
1190 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
1191 tf->srr0 = (register_t)entry;
1192 #ifdef __powerpc64__
1193 tf->srr1 = psl_userset32 | PSL_FE_DFLT;
1194 #else
1195 tf->srr1 = psl_userset | PSL_FE_DFLT;
1196 #endif
1197 } else {
1198 #ifdef __powerpc64__
1199 if (td->td_proc->p_sysent == &elf64_freebsd_sysvec_v2) {
1200 tf->srr0 = (register_t)entry;
1201 /* ELFv2 ABI requires that the global entry point be in r12. */
1202 tf->fixreg[12] = (register_t)entry;
1203 } else {
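/*
 * ELFv1: `entry' is a user-space function descriptor (entry point,
 * TOC, environment); copy it in so that srr0, r2 and r11 can be
 * primed as the ABI expects.
 */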
1204 register_t entry_desc[3];
1205 error = copyin((void *)entry, entry_desc,
1206 sizeof(entry_desc));
1207 if (error != 0)
1208 return (error);
1209 tf->srr0 = entry_desc[0];
1210 tf->fixreg[2] = entry_desc[1];
1211 tf->fixreg[11] = entry_desc[2];
1212 }
1213 tf->srr1 = psl_userset | PSL_FE_DFLT;
1214 #endif
1215 }
1216
1217 td->td_pcb->pcb_flags = 0;
1218 #ifdef __SPE__
1219 td->td_pcb->pcb_vec.vscr = SPEFSCR_DFLT;
1220 #endif
1221
1222 td->td_retval[0] = (register_t)entry;
1223 td->td_retval[1] = 0;
1224 return (0);
1225 }
1226
1227 static int
1228 emulate_mfspr(int spr, int reg, struct trapframe *frame){
1229 struct thread *td;
1230
1231 td = curthread;
1232
1233 if (spr == SPR_DSCR || spr == SPR_DSCRP) {
1234 if (!(cpu_features2 & PPC_FEATURE2_DSCR))
1235 return (SIGILL);
1236 /* If DSCR was never set, get the default DSCR. */
1237 if ((td->td_pcb->pcb_flags & PCB_CDSCR) == 0)
1238 td->td_pcb->pcb_dscr = mfspr(SPR_DSCRP);
1239
1240 frame->fixreg[reg] = td->td_pcb->pcb_dscr;
1241 frame->srr0 += 4;
1242 return (0);
1243 } else
1244 return (SIGILL);
1245 }
1246
1247 static int
1248 emulate_mtspr(int spr, int reg, struct trapframe *frame){
1249 struct thread *td;
1250
1251 td = curthread;
1252
1253 if (spr == SPR_DSCR || spr == SPR_DSCRP) {
1254 if (!(cpu_features2 & PPC_FEATURE2_DSCR))
1255 return (SIGILL);
1256 td->td_pcb->pcb_flags |= PCB_CDSCR;
1257 td->td_pcb->pcb_dscr = frame->fixreg[reg];
1258 mtspr(SPR_DSCRP, frame->fixreg[reg]);
1259 frame->srr0 += 4;
1260 return (0);
1261 } else
1262 return (SIGILL);
1263 }
1264
1265 #define XFX 0xFC0007FF
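/*
 * XFX keeps only the opcode fields of an XFX-form instruction, so the
 * comparisons below match mfspr/mtspr regardless of which register and SPR
 * they name; the operand fields are then extracted by hand.  Only mfpvr,
 * the DSCR SPRs and the sync family are handled here; anything else falls
 * through to the FPU emulator (if built) or SIGILL.
 */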
1266 int
1267 ppc_instr_emulate(struct trapframe *frame, struct thread *td)
1268 {
1269 struct pcb *pcb;
1270 uint32_t instr;
1271 int reg, sig;
1272 int rs, spr;
1273
1274 instr = fuword32((void *)frame->srr0);
1275 sig = SIGILL;
1276
1277 if ((instr & 0xfc1fffff) == 0x7c1f42a6) { /* mfpvr */
1278 reg = (instr & ~0xfc1fffff) >> 21;
1279 frame->fixreg[reg] = mfpvr();
1280 frame->srr0 += 4;
1281 return (0);
1282 } else if ((instr & XFX) == 0x7c0002a6) { /* mfspr */
1283 rs = (instr & 0x3e00000) >> 21;
1284 spr = (instr & 0x1ff800) >> 16;
1285 return emulate_mfspr(spr, rs, frame);
1286 } else if ((instr & XFX) == 0x7c0003a6) { /* mtspr */
1287 rs = (instr & 0x3e00000) >> 21;
1288 spr = (instr & 0x1ff800) >> 16;
1289 return emulate_mtspr(spr, rs, frame);
1290 } else if ((instr & 0xfc000ffe) == 0x7c0004ac) { /* various sync */
1291 powerpc_sync(); /* Do a heavy-weight sync */
1292 frame->srr0 += 4;
1293 return (0);
1294 }
1295
1296 pcb = td->td_pcb;
1297 #ifdef FPU_EMU
1298 if (!(pcb->pcb_flags & PCB_FPREGS)) {
1299 bzero(&pcb->pcb_fpu, sizeof(pcb->pcb_fpu));
1300 pcb->pcb_flags |= PCB_FPREGS;
1301 } else if (pcb->pcb_flags & PCB_FPU)
1302 save_fpu(td);
1303 sig = fpu_emulate(frame, &pcb->pcb_fpu);
1304 if ((sig == 0 || sig == SIGFPE) && pcb->pcb_flags & PCB_FPU)
1305 enable_fpu(td);
1306 #endif
1307 if (sig == SIGILL) {
1308 if (pcb->pcb_lastill != frame->srr0) {
1309 /* Allow a second chance, in case of cache sync issues. */
1310 sig = 0;
1311 pmap_sync_icache(PCPU_GET(curpmap), frame->srr0, 4);
1312 pcb->pcb_lastill = frame->srr0;
1313 }
1314 }
1315
1316 return (sig);
1317 }
1318