1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause
3 *
4 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
5 * Copyright (C) 1995, 1996 TooLs GmbH.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by TooLs GmbH.
19 * 4. The name of TooLs GmbH may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33 /*-
34 * Copyright (C) 2001 Benno Rice
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 *
46 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
47 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
48 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
49 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
50 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
51 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
52 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
53 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
54 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
55 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
57 */
58
59 #include <sys/cdefs.h>
60 #include "opt_fpu_emu.h"
61
62 #include <sys/param.h>
63 #include <sys/proc.h>
64 #include <sys/systm.h>
65 #include <sys/bio.h>
66 #include <sys/buf.h>
67 #include <sys/bus.h>
68 #include <sys/cons.h>
69 #include <sys/cpu.h>
70 #include <sys/exec.h>
71 #include <sys/imgact.h>
72 #include <sys/kernel.h>
73 #include <sys/ktr.h>
74 #include <sys/lock.h>
75 #include <sys/malloc.h>
76 #include <sys/mutex.h>
77 #include <sys/reg.h>
78 #include <sys/signalvar.h>
79 #include <sys/syscallsubr.h>
80 #include <sys/syscall.h>
81 #include <sys/sysent.h>
82 #include <sys/sysproto.h>
83 #include <sys/ucontext.h>
84 #include <sys/uio.h>
85
86 #include <machine/altivec.h>
87 #include <machine/cpu.h>
88 #include <machine/elf.h>
89 #include <machine/fpu.h>
90 #include <machine/pcb.h>
91 #include <machine/sigframe.h>
92 #include <machine/trap.h>
93 #include <machine/vmparam.h>
94
95 #include <vm/vm.h>
96 #include <vm/vm_param.h>
97 #include <vm/pmap.h>
98 #include <vm/vm_map.h>
99
100 #ifdef FPU_EMU
101 #include <powerpc/fpu/fpu_extern.h>
102 #endif
103
104 #ifdef COMPAT_FREEBSD32
105 #include <compat/freebsd32/freebsd32_signal.h>
106 #include <compat/freebsd32/freebsd32_util.h>
107 #include <compat/freebsd32/freebsd32_proto.h>
108
109 typedef struct __ucontext32 {
110 sigset_t uc_sigmask;
111 mcontext32_t uc_mcontext;
112 uint32_t uc_link;
113 struct sigaltstack32 uc_stack;
114 uint32_t uc_flags;
115 uint32_t __spare__[4];
116 } ucontext32_t;
117
118 struct sigframe32 {
119 ucontext32_t sf_uc;
120 struct __siginfo32 sf_si;
121 };
122
123 static int grab_mcontext32(struct thread *td, mcontext32_t *, int flags);
124 #endif
125
126 static int grab_mcontext(struct thread *, mcontext_t *, int);
127
128 static void cleanup_power_extras(struct thread *);
129
130 #ifdef __powerpc64__
131 extern struct sysentvec elf64_freebsd_sysvec_v2;
132 #endif
133
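/*
 * Pin the user-visible layout of the signal context structures: if the
 * size of mcontext_t, ucontext_t or siginfo_t changes, the ABI break is
 * caught at compile time by the assertions below.
 */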
134 #ifdef __powerpc64__
135 _Static_assert(sizeof(mcontext_t) == 1392, "mcontext_t size incorrect");
136 _Static_assert(sizeof(ucontext_t) == 1472, "ucontext_t size incorrect");
137 _Static_assert(sizeof(siginfo_t) == 80, "siginfo_t size incorrect");
138 #ifdef COMPAT_FREEBSD32
139 _Static_assert(sizeof(mcontext32_t) == 1224, "mcontext32_t size incorrect");
140 _Static_assert(sizeof(ucontext32_t) == 1280, "ucontext32_t size incorrect");
141 _Static_assert(sizeof(struct __siginfo32) == 64, "struct __siginfo32 size incorrect");
142 #endif /* COMPAT_FREEBSD32 */
143 #else /* powerpc */
144 _Static_assert(sizeof(mcontext_t) == 1224, "mcontext_t size incorrect");
145 _Static_assert(sizeof(ucontext_t) == 1280, "ucontext_t size incorrect");
146 _Static_assert(sizeof(siginfo_t) == 64, "siginfo_t size incorrect");
147 #endif
148
149 void
150 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
151 {
152 struct trapframe *tf;
153 struct sigacts *psp;
154 struct sigframe sf;
155 struct thread *td;
156 struct proc *p;
157 #ifdef COMPAT_FREEBSD32
158 struct __siginfo32 siginfo32;
159 struct sigframe32 sf32;
160 #endif
161 size_t sfpsize;
162 caddr_t sfp, usfp;
163 register_t sp;
164 int oonstack, rndfsize;
165 int sig;
166 int code;
167
168 td = curthread;
169 p = td->td_proc;
170 PROC_LOCK_ASSERT(p, MA_OWNED);
171
172 psp = p->p_sigacts;
173 mtx_assert(&psp->ps_mtx, MA_OWNED);
174 tf = td->td_frame;
175
176 /*
177 * Fill in siginfo: si_addr is the fault address (dar) for DSI/DSE, else the faulting instruction (srr0).
178 */
179 ksi->ksi_info.si_signo = ksi->ksi_signo;
180 ksi->ksi_info.si_addr =
181 (void *)((tf->exc == EXC_DSI || tf->exc == EXC_DSE) ?
182 tf->dar : tf->srr0);
183
184 #ifdef COMPAT_FREEBSD32
185 if (SV_PROC_FLAG(p, SV_ILP32)) {
186 siginfo_to_siginfo32(&ksi->ksi_info, &siginfo32);
187 sig = siginfo32.si_signo;
188 code = siginfo32.si_code;
189 sfp = (caddr_t)&sf32;
190 sfpsize = sizeof(sf32);
191 rndfsize = roundup(sizeof(sf32), 16);
192 sp = (uint32_t)tf->fixreg[1];
193 oonstack = sigonstack(sp);
194
195 /*
196 * Save user context
197 */
198
199 memset(&sf32, 0, sizeof(sf32));
200 grab_mcontext32(td, &sf32.sf_uc.uc_mcontext, 0);
201
202 sf32.sf_uc.uc_sigmask = *mask;
203 sf32.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp;
204 sf32.sf_uc.uc_stack.ss_size = (uint32_t)td->td_sigstk.ss_size;
205 sf32.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
206 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
207
208 sf32.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
209 } else {
210 #endif
211 sig = ksi->ksi_signo;
212 code = ksi->ksi_code;
213 sfp = (caddr_t)&sf;
214 sfpsize = sizeof(sf);
215 #ifdef __powerpc64__
216 /*
217 * The 64-bit ELF ABIs reserve a 288-byte scratch (red) zone
218 * below the stack pointer; keep the signal frame clear of it.
219 */
220 rndfsize = 288 + roundup(sizeof(sf), 48);
221 #else
222 rndfsize = roundup(sizeof(sf), 16);
223 #endif
224 sp = tf->fixreg[1];
225 oonstack = sigonstack(sp);
226
227 /*
228 * Save user context
229 */
230
231 memset(&sf, 0, sizeof(sf));
232 grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
233
234 sf.sf_uc.uc_sigmask = *mask;
235 sf.sf_uc.uc_stack = td->td_sigstk;
236 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
237 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
238
239 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
240 #ifdef COMPAT_FREEBSD32
241 }
242 #endif
243
244 CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
245 catcher, sig);
246
247 /*
248 * Allocate and validate space for the signal handler context.
249 */
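/*
 * The frame address is rounded down to a 16-byte boundary (the ~0xFul
 * masks below), as the PowerPC ABIs require for the stack pointer.
 */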
250 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
251 SIGISMEMBER(psp->ps_sigonstack, sig)) {
252 usfp = (void *)(((uintptr_t)td->td_sigstk.ss_sp +
253 td->td_sigstk.ss_size - rndfsize) & ~0xFul);
254 } else {
255 usfp = (void *)((sp - rndfsize) & ~0xFul);
256 }
257
258 /*
259 * Set Floating Point facility to "Ignore Exceptions Mode" so signal
260 * handler can run.
261 */
262 if (td->td_pcb->pcb_flags & PCB_FPU)
263 tf->srr1 = tf->srr1 & ~(PSL_FE0 | PSL_FE1);
264
265 /*
266 * Set up the registers to return to sigcode.
267 *
268 * r1/sp - sigframe ptr
269 * lr - sig function, dispatched to by blrl in trampoline
270 * r3 - sig number
271 * r4 - SIGINFO ? &siginfo : exception code
272 * r5 - user context
273 * srr0 - trampoline function addr
274 */
275 tf->lr = (register_t)catcher;
276 tf->fixreg[1] = (register_t)usfp;
277 tf->fixreg[FIRSTARG] = sig;
278 #ifdef COMPAT_FREEBSD32
279 tf->fixreg[FIRSTARG+2] = (register_t)usfp +
280 ((SV_PROC_FLAG(p, SV_ILP32)) ?
281 offsetof(struct sigframe32, sf_uc) :
282 offsetof(struct sigframe, sf_uc));
283 #else
284 tf->fixreg[FIRSTARG+2] = (register_t)usfp +
285 offsetof(struct sigframe, sf_uc);
286 #endif
287 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
288 /*
289 * Signal handler installed with SA_SIGINFO.
290 */
291 #ifdef COMPAT_FREEBSD32
292 if (SV_PROC_FLAG(p, SV_ILP32)) {
293 sf32.sf_si = siginfo32;
294 tf->fixreg[FIRSTARG+1] = (register_t)usfp +
295 offsetof(struct sigframe32, sf_si);
297 } else {
298 #endif
299 tf->fixreg[FIRSTARG+1] = (register_t)usfp +
300 offsetof(struct sigframe, sf_si);
301 sf.sf_si = ksi->ksi_info;
302 #ifdef COMPAT_FREEBSD32
303 }
304 #endif
305 } else {
306 /* Old FreeBSD-style arguments. */
307 tf->fixreg[FIRSTARG+1] = code;
308 tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ?
309 tf->dar : tf->srr0;
310 }
311 mtx_unlock(&psp->ps_mtx);
312 PROC_UNLOCK(p);
313
314 tf->srr0 = (register_t)PROC_SIGCODE(p);
315
316 /*
317 * copy the frame out to userland.
318 */
319 if (copyout(sfp, usfp, sfpsize) != 0) {
320 /*
321 * Process has trashed its stack. Kill it.
322 */
323 CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
324 PROC_LOCK(p);
325 sigexit(td, SIGILL);
326 }
327
328 CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td,
329 tf->srr0, tf->fixreg[1]);
330
331 PROC_LOCK(p);
332 mtx_lock(&psp->ps_mtx);
333 }
334
335 int
336 sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
337 {
338 ucontext_t uc;
339 int error;
340
341 CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
342
343 if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
344 CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
345 return (EFAULT);
346 }
347
348 error = set_mcontext(td, &uc.uc_mcontext);
349 if (error != 0)
350 return (error);
351
352 /*
353 * Save the FPU state if needed; the user may have changed it
354 * in the signal handler.
355 */
356 if (uc.uc_mcontext.mc_srr1 & PSL_FP)
357 save_fpu(td);
358
359 kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
360
361 CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
362 td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
363
364 return (EJUSTRETURN);
365 }
366
367 #ifdef COMPAT_FREEBSD4
368 int
369 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
370 {
371
372 return sys_sigreturn(td, (struct sigreturn_args *)uap);
373 }
374 #endif
375
376 /*
377 * Construct a PCB from a trapframe. This is called from kdb_trap() where
378 * we want to start a backtrace from the function that caused us to enter
379 * the debugger. We have the context in the trapframe, but base the trace
380 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
381 * enough for a backtrace.
382 */
383 void
384 makectx(struct trapframe *tf, struct pcb *pcb)
385 {
386
387 pcb->pcb_lr = tf->srr0;
388 pcb->pcb_sp = tf->fixreg[1];
389 }
390
391 /*
392 * get_mcontext/sendsig helper routine that doesn't touch the
393 * proc lock
394 */
395 static int
396 grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
397 {
398 struct pcb *pcb;
399 int i;
400
401 pcb = td->td_pcb;
402
403 memset(mcp, 0, sizeof(mcontext_t));
404
405 mcp->mc_vers = _MC_VERSION;
406 mcp->mc_flags = 0;
407 memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe));
408 if (flags & GET_MC_CLEAR_RET) {
409 mcp->mc_gpr[3] = 0;
410 mcp->mc_gpr[4] = 0;
411 }
412
413 /*
414 * This assumes that floating-point context is *not* lazy,
415 * so if the thread has used FP there would have been a
416 * FP-unavailable exception that would have set things up
417 * correctly.
418 */
419 if (pcb->pcb_flags & PCB_FPREGS) {
420 if (pcb->pcb_flags & PCB_FPU) {
421 KASSERT(td == curthread,
422 ("get_mcontext: fp save not curthread"));
423 critical_enter();
424 save_fpu(td);
425 critical_exit();
426 }
427 mcp->mc_flags |= _MC_FP_VALID;
428 memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
429 for (i = 0; i < 32; i++)
430 memcpy(&mcp->mc_fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
431 sizeof(double));
432 }
433
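/*
 * With VSX, fpr[] above carries only the high doubleword of each 128-bit
 * VSR; the low doublewords live at vsr[2] and are copied out separately.
 */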
434 if (pcb->pcb_flags & PCB_VSX) {
435 for (i = 0; i < 32; i++)
436 memcpy(&mcp->mc_vsxfpreg[i],
437 &pcb->pcb_fpu.fpr[i].vsr[2], sizeof(double));
438 }
439
440 /*
441 * Repeat for Altivec context
442 */
443
444 if (pcb->pcb_flags & PCB_VECREGS) {
445 if (pcb->pcb_flags & PCB_VEC) {
446 KASSERT(td == curthread,
447 ("get_mcontext: altivec save not curthread"));
448 critical_enter();
449 save_vec(td);
450 critical_exit();
451 }
452 mcp->mc_flags |= _MC_AV_VALID;
453 mcp->mc_vscr = pcb->pcb_vec.vscr;
454 mcp->mc_vrsave = pcb->pcb_vec.vrsave;
455 memcpy(mcp->mc_avec, pcb->pcb_vec.vr, sizeof(mcp->mc_avec));
456 }
457
458 mcp->mc_len = sizeof(*mcp);
459
460 return (0);
461 }
462
463 int
464 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
465 {
466 int error;
467
468 error = grab_mcontext(td, mcp, flags);
469 if (error == 0) {
470 PROC_LOCK(curthread->td_proc);
471 mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
472 PROC_UNLOCK(curthread->td_proc);
473 }
474
475 return (error);
476 }
477
478 int
479 set_mcontext(struct thread *td, mcontext_t *mcp)
480 {
481 struct pcb *pcb;
482 struct trapframe *tf;
483 register_t tls;
484 int i;
485
486 pcb = td->td_pcb;
487 tf = td->td_frame;
488
489 if (mcp->mc_vers != _MC_VERSION || mcp->mc_len != sizeof(*mcp))
490 return (EINVAL);
491
492 /*
493 * Don't let the user change privileged MSR bits.
494 *
495 * psl_userstatic is used here to mask off any bits that can
496 * legitimately vary between user contexts (Floating point
497 * exception control and any facilities that we are using the
498 * "enable on first use" pattern with.)
499 *
500 * All other bits are required to match psl_userset(32).
501 *
502 * Remember to update the platform cpu_init code when implementing
503 * support for a new conditional facility!
504 */
505 if ((mcp->mc_srr1 & psl_userstatic) != (tf->srr1 & psl_userstatic)) {
506 return (EINVAL);
507 }
508
509 /* Copy trapframe, preserving TLS pointer across context change */
510 if (SV_PROC_FLAG(td->td_proc, SV_LP64))
511 tls = tf->fixreg[13];
512 else
513 tls = tf->fixreg[2];
514 memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));
515 if (SV_PROC_FLAG(td->td_proc, SV_LP64))
516 tf->fixreg[13] = tls;
517 else
518 tf->fixreg[2] = tls;
519
520 /*
521 * Force the FPU back off to ensure the new context will not bypass
522 * the enable_fpu() setup code accidentally.
523 *
524 * This prevents an issue where a process that uses floating point
525 * inside a signal handler could end up in a state where the MSR
526 * did not match pcb_flags.
527 *
528 * Additionally, ensure VSX is disabled as well, as it is illegal
529 * to leave it turned on when FP or VEC are off.
530 */
531 tf->srr1 &= ~(PSL_FP | PSL_VSX | PSL_VEC);
532 pcb->pcb_flags &= ~(PCB_FPU | PCB_VSX | PCB_VEC);
533
534 if (mcp->mc_flags & _MC_FP_VALID) {
535 /* enable_fpu() will happen lazily on a fault */
536 pcb->pcb_flags |= PCB_FPREGS;
537 memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double));
538 bzero(pcb->pcb_fpu.fpr, sizeof(pcb->pcb_fpu.fpr));
539 for (i = 0; i < 32; i++) {
540 memcpy(&pcb->pcb_fpu.fpr[i].fpr, &mcp->mc_fpreg[i],
541 sizeof(double));
542 memcpy(&pcb->pcb_fpu.fpr[i].vsr[2],
543 &mcp->mc_vsxfpreg[i], sizeof(double));
544 }
545 }
546
547 if (mcp->mc_flags & _MC_AV_VALID) {
548 /* enable_vec() will happen lazily on a fault */
549 pcb->pcb_flags |= PCB_VECREGS;
550 pcb->pcb_vec.vscr = mcp->mc_vscr;
551 pcb->pcb_vec.vrsave = mcp->mc_vrsave;
552 memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec));
553 }
554
555 return (0);
556 }
557
558 /*
559 * Clean up extra POWER state. Some per-process registers and states are not
560 * managed by the MSR, so must be cleaned up explicitly on thread exit.
561 *
562 * Currently this includes:
563 * DSCR -- Data stream control register (PowerISA 2.06+)
564 * FSCR -- Facility Status and Control Register (PowerISA 2.07+)
565 */
566 static void
567 cleanup_power_extras(struct thread *td)
568 {
569 uint32_t pcb_flags;
570
571 if (td != curthread)
572 return;
573
574 pcb_flags = td->td_pcb->pcb_flags;
575 /* Clean up registers not managed by MSR. */
576 if (pcb_flags & PCB_CFSCR)
577 mtspr(SPR_FSCR, 0);
578 if (pcb_flags & PCB_CDSCR)
579 mtspr(SPR_DSCRP, 0);
580
581 if (pcb_flags & PCB_FPU)
582 cleanup_fpscr();
583 }
584
585 /*
586 * Ensure the PCB has been updated in preparation for copying a thread.
587 *
588 * This is needed because normally this only happens during switching tasks,
589 * but when we are cloning a thread, we need the updated state before doing
590 * the actual copy, so the new thread inherits the current state instead of
591 * the state at the last task switch.
592 *
593 * Keep this in sync with the assembly code in cpu_switch()!
594 */
595 void
596 cpu_save_thread_regs(struct thread *td)
597 {
598 uint32_t pcb_flags;
599 struct pcb *pcb;
600
601 KASSERT(td == curthread,
602 ("cpu_save_thread_regs: td is not curthread"));
603
604 pcb = td->td_pcb;
605
606 pcb_flags = pcb->pcb_flags;
607
608 #if defined(__powerpc64__)
609 /* Are *any* FSCR flags in use? */
610 if (pcb_flags & PCB_CFSCR) {
611 pcb->pcb_fscr = mfspr(SPR_FSCR);
612
613 if (pcb->pcb_fscr & FSCR_EBB) {
614 pcb->pcb_ebb.ebbhr = mfspr(SPR_EBBHR);
615 pcb->pcb_ebb.ebbrr = mfspr(SPR_EBBRR);
616 pcb->pcb_ebb.bescr = mfspr(SPR_BESCR);
617 }
618 if (pcb->pcb_fscr & FSCR_LM) {
619 pcb->pcb_lm.lmrr = mfspr(SPR_LMRR);
620 pcb->pcb_lm.lmser = mfspr(SPR_LMSER);
621 }
622 if (pcb->pcb_fscr & FSCR_TAR)
623 pcb->pcb_tar = mfspr(SPR_TAR);
624 }
625
626 /*
627 * This is outside of the PCB_CFSCR check because it can be set
628 * independently when running on POWER7/POWER8.
629 */
630 if (pcb_flags & PCB_CDSCR)
631 pcb->pcb_dscr = mfspr(SPR_DSCRP);
632 #endif
633
634 #if defined(__SPE__)
635 /*
636 * On E500v2, single-precision scalar instructions and access to
637 * SPEFSCR may be used without PSL_VEC turned on, as long as they
638 * limit themselves to the low word of the registers.
639 *
640 * As such, we need to unconditionally save SPEFSCR, even though
641 * it is also updated in save_vec_nodrop().
642 */
643 pcb->pcb_vec.vscr = mfspr(SPR_SPEFSCR);
644 #endif
645
646 if (pcb_flags & PCB_FPU)
647 save_fpu_nodrop(td);
648
649 if (pcb_flags & PCB_VEC)
650 save_vec_nodrop(td);
651 }
652
653 /*
654 * Set up registers on exec.
655 */
656 void
657 exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
658 {
659 struct trapframe *tf;
660 register_t argc;
661
662 tf = trapframe(td);
663 bzero(tf, sizeof *tf);
664 #ifdef __powerpc64__
665 tf->fixreg[1] = -roundup(-stack + 48, 16);
666 #else
667 tf->fixreg[1] = -roundup(-stack + 8, 16);
668 #endif
669
670 /*
671 * Set up arguments for _start():
672 * _start(argc, argv, envp, obj, cleanup, ps_strings);
673 *
674 * Notes:
675 * - obj and cleanup are the auxiliary and termination
676 * vectors. They are fixed up by ld.elf_so.
677 * - ps_strings is a NetBSD extension, and will be
678 * ignored by executables which are strictly
679 * compliant with the SVR4 ABI.
680 */
681
682 /* Collect argc from the user stack */
683 argc = fuword((void *)stack);
684
685 tf->fixreg[3] = argc;
686 tf->fixreg[4] = stack + sizeof(register_t);
687 tf->fixreg[5] = stack + (2 + argc)*sizeof(register_t);
688 tf->fixreg[6] = 0; /* auxiliary vector */
689 tf->fixreg[7] = 0; /* termination vector */
690 tf->fixreg[8] = (register_t)imgp->ps_strings; /* NetBSD extension */
691
692 tf->srr0 = imgp->entry_addr;
693 #ifdef __powerpc64__
694 tf->fixreg[12] = imgp->entry_addr;
695 #endif
696 tf->srr1 = psl_userset | PSL_FE_DFLT;
697 cleanup_power_extras(td);
698 td->td_pcb->pcb_flags = 0;
699 }
700
701 #ifdef COMPAT_FREEBSD32
702 void
703 ppc32_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
704 {
705 struct trapframe *tf;
706 uint32_t argc;
707
708 tf = trapframe(td);
709 bzero(tf, sizeof *tf);
710 tf->fixreg[1] = -roundup(-stack + 8, 16);
711
712 argc = fuword32((void *)stack);
713
714 tf->fixreg[3] = argc;
715 tf->fixreg[4] = stack + sizeof(uint32_t);
716 tf->fixreg[5] = stack + (2 + argc)*sizeof(uint32_t);
717 tf->fixreg[6] = 0; /* auxiliary vector */
718 tf->fixreg[7] = 0; /* termination vector */
719 tf->fixreg[8] = (register_t)imgp->ps_strings; /* NetBSD extension */
720
721 tf->srr0 = imgp->entry_addr;
722 tf->srr1 = psl_userset32 | PSL_FE_DFLT;
723 cleanup_power_extras(td);
724 td->td_pcb->pcb_flags = 0;
725 }
726 #endif
727
728 int
729 fill_regs(struct thread *td, struct reg *regs)
730 {
731 struct trapframe *tf;
732
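/*
 * The flat copy relies on struct reg mirroring the leading trapframe
 * fields (GPRs, lr, cr, xer, ctr, srr0/pc).
 */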
733 tf = td->td_frame;
734 memcpy(regs, tf, sizeof(struct reg));
735
736 return (0);
737 }
738
739 int
740 fill_dbregs(struct thread *td, struct dbreg *dbregs)
741 {
742 /* No debug registers on PowerPC */
743 return (ENOSYS);
744 }
745
746 int
747 fill_fpregs(struct thread *td, struct fpreg *fpregs)
748 {
749 struct pcb *pcb;
750 int i;
751
752 pcb = td->td_pcb;
753
754 if ((pcb->pcb_flags & PCB_FPREGS) == 0)
755 memset(fpregs, 0, sizeof(struct fpreg));
756 else {
757 memcpy(&fpregs->fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
758 for (i = 0; i < 32; i++)
759 memcpy(&fpregs->fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
760 sizeof(double));
761 }
762
763 return (0);
764 }
765
766 int
767 set_regs(struct thread *td, struct reg *regs)
768 {
769 struct trapframe *tf;
770
771 tf = td->td_frame;
772 memcpy(tf, regs, sizeof(struct reg));
773
774 return (0);
775 }
776
777 int
778 set_dbregs(struct thread *td, struct dbreg *dbregs)
779 {
780 /* No debug registers on PowerPC */
781 return (ENOSYS);
782 }
783
784 int
785 set_fpregs(struct thread *td, struct fpreg *fpregs)
786 {
787 struct pcb *pcb;
788 int i;
789
790 pcb = td->td_pcb;
791 pcb->pcb_flags |= PCB_FPREGS;
792 memcpy(&pcb->pcb_fpu.fpscr, &fpregs->fpscr, sizeof(double));
793 for (i = 0; i < 32; i++) {
794 memcpy(&pcb->pcb_fpu.fpr[i].fpr, &fpregs->fpreg[i],
795 sizeof(double));
796 }
797
798 return (0);
799 }
800
801 #ifdef COMPAT_FREEBSD32
802 int
803 set_regs32(struct thread *td, struct reg32 *regs)
804 {
805 struct trapframe *tf;
806 int i;
807
808 tf = td->td_frame;
809 for (i = 0; i < 32; i++)
810 tf->fixreg[i] = regs->fixreg[i];
811 tf->lr = regs->lr;
812 tf->cr = regs->cr;
813 tf->xer = regs->xer;
814 tf->ctr = regs->ctr;
815 tf->srr0 = regs->pc;
816
817 return (0);
818 }
819
820 int
821 fill_regs32(struct thread *td, struct reg32 *regs)
822 {
823 struct trapframe *tf;
824 int i;
825
826 tf = td->td_frame;
827 for (i = 0; i < 32; i++)
828 regs->fixreg[i] = tf->fixreg[i];
829 regs->lr = tf->lr;
830 regs->cr = tf->cr;
831 regs->xer = tf->xer;
832 regs->ctr = tf->ctr;
833 regs->pc = tf->srr0;
834
835 return (0);
836 }
837
838 static int
839 grab_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
840 {
841 mcontext_t mcp64;
842 int i, error;
843
844 error = grab_mcontext(td, &mcp64, flags);
845 if (error != 0)
846 return (error);
847
848 mcp->mc_vers = mcp64.mc_vers;
849 mcp->mc_flags = mcp64.mc_flags;
850 mcp->mc_onstack = mcp64.mc_onstack;
851 mcp->mc_len = mcp64.mc_len;
852 memcpy(mcp->mc_avec, mcp64.mc_avec, sizeof(mcp64.mc_avec));
853 memcpy(mcp->mc_av, mcp64.mc_av, sizeof(mcp64.mc_av));
854 for (i = 0; i < 42; i++)
855 mcp->mc_frame[i] = mcp64.mc_frame[i];
856 memcpy(mcp->mc_fpreg, mcp64.mc_fpreg, sizeof(mcp64.mc_fpreg));
857 memcpy(mcp->mc_vsxfpreg, mcp64.mc_vsxfpreg, sizeof(mcp64.mc_vsxfpreg));
858
859 return (0);
860 }
861
862 static int
863 get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
864 {
865 int error;
866
867 error = grab_mcontext32(td, mcp, flags);
868 if (error == 0) {
869 PROC_LOCK(curthread->td_proc);
870 mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
871 PROC_UNLOCK(curthread->td_proc);
872 }
873
874 return (error);
875 }
876
877 static int
878 set_mcontext32(struct thread *td, mcontext32_t *mcp)
879 {
880 mcontext_t mcp64;
881 int i, error;
882
883 mcp64.mc_vers = mcp->mc_vers;
884 mcp64.mc_flags = mcp->mc_flags;
885 mcp64.mc_onstack = mcp->mc_onstack;
886 mcp64.mc_len = mcp->mc_len;
887 memcpy(mcp64.mc_avec, mcp->mc_avec, sizeof(mcp64.mc_avec));
888 memcpy(mcp64.mc_av, mcp->mc_av, sizeof(mcp64.mc_av));
889 for (i = 0; i < 42; i++)
890 mcp64.mc_frame[i] = mcp->mc_frame[i];
891 mcp64.mc_srr1 |= (td->td_frame->srr1 & 0xFFFFFFFF00000000ULL);
892 memcpy(mcp64.mc_fpreg, mcp->mc_fpreg, sizeof(mcp64.mc_fpreg));
893 memcpy(mcp64.mc_vsxfpreg, mcp->mc_vsxfpreg, sizeof(mcp64.mc_vsxfpreg));
894
895 error = set_mcontext(td, &mcp64);
896
897 return (error);
898 }
899 #endif
900
901 #ifdef COMPAT_FREEBSD32
902 int
903 freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap)
904 {
905 ucontext32_t uc;
906 int error;
907
908 CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
909
910 if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
911 CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
912 return (EFAULT);
913 }
914
915 error = set_mcontext32(td, &uc.uc_mcontext);
916 if (error != 0)
917 return (error);
918
919 /*
920 * Save the FPU state if needed; the user may have changed it
921 * in the signal handler.
922 */
923 if (uc.uc_mcontext.mc_srr1 & PSL_FP)
924 save_fpu(td);
925
926 kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
927
928 CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
929 td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
930
931 return (EJUSTRETURN);
932 }
933
934 /*
935 * The first two fields of a ucontext_t are the signal mask and the machine
936 * context. The next field is uc_link; we want to avoid destroying the link
937 * when copying out contexts.
938 */
939 #define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link)
940
941 int
942 freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap)
943 {
944 ucontext32_t uc;
945 int ret;
946
947 if (uap->ucp == NULL)
948 ret = EINVAL;
949 else {
950 bzero(&uc, sizeof(uc));
951 get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
952 PROC_LOCK(td->td_proc);
953 uc.uc_sigmask = td->td_sigmask;
954 PROC_UNLOCK(td->td_proc);
955 ret = copyout(&uc, uap->ucp, UC32_COPY_SIZE);
956 }
957 return (ret);
958 }
959
960 int
961 freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap)
962 {
963 ucontext32_t uc;
964 int ret;
965
966 if (uap->ucp == NULL)
967 ret = EINVAL;
968 else {
969 ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
970 if (ret == 0) {
971 ret = set_mcontext32(td, &uc.uc_mcontext);
972 if (ret == 0) {
973 kern_sigprocmask(td, SIG_SETMASK,
974 &uc.uc_sigmask, NULL, 0);
975 }
976 }
977 }
978 return (ret == 0 ? EJUSTRETURN : ret);
979 }
980
981 int
982 freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap)
983 {
984 ucontext32_t uc;
985 int ret;
986
987 if (uap->oucp == NULL || uap->ucp == NULL)
988 ret = EINVAL;
989 else {
990 bzero(&uc, sizeof(uc));
991 get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
992 PROC_LOCK(td->td_proc);
993 uc.uc_sigmask = td->td_sigmask;
994 PROC_UNLOCK(td->td_proc);
995 ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE);
996 if (ret == 0) {
997 ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
998 if (ret == 0) {
999 ret = set_mcontext32(td, &uc.uc_mcontext);
1000 if (ret == 0) {
1001 kern_sigprocmask(td, SIG_SETMASK,
1002 &uc.uc_sigmask, NULL, 0);
1003 }
1004 }
1005 }
1006 }
1007 return (ret == 0 ? EJUSTRETURN : ret);
1008 }
1009
1010 #endif
1011
1012 void
1013 cpu_set_syscall_retval(struct thread *td, int error)
1014 {
1015 struct proc *p;
1016 struct trapframe *tf;
1017 int fixup;
1018
1019 if (error == EJUSTRETURN)
1020 return;
1021
1022 p = td->td_proc;
1023 tf = td->td_frame;
1024
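/*
 * A 32-bit process entering through the indirect __syscall() gate gets a
 * 64-bit (quad) return value, so a plain 32-bit result must be widened
 * across r3/r4 below; the lseek() family already returns 64 bits and
 * needs no fixup.
 */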
1025 if (tf->fixreg[0] == SYS___syscall &&
1026 (SV_PROC_FLAG(p, SV_ILP32))) {
1027 int code = tf->fixreg[FIRSTARG + 1];
1028 fixup = (
1029 #if defined(COMPAT_FREEBSD6) && defined(SYS_freebsd6_lseek)
1030 code != SYS_freebsd6_lseek &&
1031 #endif
1032 code != SYS_lseek) ? 1 : 0;
1033 } else
1034 fixup = 0;
1035
1036 switch (error) {
1037 case 0:
1038 if (fixup) {
1039 /*
1040 * 64-bit return, 32-bit syscall: r3 gets the (zero) high word, r4 the low word.
1041 */
1042 tf->fixreg[FIRSTARG] = 0;
1043 tf->fixreg[FIRSTARG + 1] = td->td_retval[0];
1044 } else {
1045 tf->fixreg[FIRSTARG] = td->td_retval[0];
1046 tf->fixreg[FIRSTARG + 1] = td->td_retval[1];
1047 }
1048 tf->cr &= ~0x10000000; /* Unset summary overflow */
1049 break;
1050 case ERESTART:
1051 /*
1052 * Set user's pc back to redo the system call.
1053 */
1054 tf->srr0 -= 4;
1055 break;
1056 default:
1057 tf->fixreg[FIRSTARG] = error;
1058 tf->cr |= 0x10000000; /* Set summary overflow */
1059 break;
1060 }
1061 }
1062
1063 /*
1064 * Threading functions
1065 */
1066 void
1067 cpu_thread_exit(struct thread *td)
1068 {
1069 cleanup_power_extras(td);
1070 }
1071
1072 void
1073 cpu_thread_clean(struct thread *td)
1074 {
1075 }
1076
1077 void
1078 cpu_thread_alloc(struct thread *td)
1079 {
1080 struct pcb *pcb;
1081
1082 pcb = (struct pcb *)((td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
1083 sizeof(struct pcb)) & ~0x2fUL);
1084 td->td_pcb = pcb;
1085 td->td_frame = (struct trapframe *)pcb - 1;
1086 }
1087
1088 void
1089 cpu_thread_free(struct thread *td)
1090 {
1091 }
1092
1093 int
1094 cpu_set_user_tls(struct thread *td, void *tls_base)
1095 {
1096
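/*
 * The powerpc TLS ABI biases the thread pointer: r13 (64-bit) or r2
 * (32-bit) points 0x7000 bytes past the two-pointer TCB, which is where
 * the 0x7010 and 0x7008 offsets below come from.
 */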
1097 if (SV_PROC_FLAG(td->td_proc, SV_LP64))
1098 td->td_frame->fixreg[13] = (register_t)tls_base + 0x7010;
1099 else
1100 td->td_frame->fixreg[2] = (register_t)tls_base + 0x7008;
1101 return (0);
1102 }
1103
1104 void
1105 cpu_copy_thread(struct thread *td, struct thread *td0)
1106 {
1107 struct pcb *pcb2;
1108 struct trapframe *tf;
1109 struct callframe *cf;
1110
1111 /* Ensure td0 pcb is up to date. */
1112 if (td0 == curthread)
1113 cpu_save_thread_regs(td0);
1114
1115 pcb2 = td->td_pcb;
1116
1117 /* Copy the upcall pcb */
1118 bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
1119
1120 /* Create a stack for the new thread */
1121 tf = td->td_frame;
1122 bcopy(td0->td_frame, tf, sizeof(struct trapframe));
1123 tf->fixreg[FIRSTARG] = 0;
1124 tf->fixreg[FIRSTARG + 1] = 0;
1125 tf->cr &= ~0x10000000;
1126
1127 /* Set registers for trampoline to user mode. */
1128 cf = (struct callframe *)tf - 1;
1129 memset(cf, 0, sizeof(struct callframe));
1130 cf->cf_func = (register_t)fork_return;
1131 cf->cf_arg0 = (register_t)td;
1132 cf->cf_arg1 = (register_t)tf;
1133
1134 pcb2->pcb_sp = (register_t)cf;
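/*
 * Under the 64-bit ELFv1 ABI, fork_trampoline is a function descriptor,
 * so pull out its entry address and TOC pointer; ELFv2 and 32-bit kernels
 * branch to the symbol directly.
 */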
1135 #if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
1136 pcb2->pcb_lr = ((register_t *)fork_trampoline)[0];
1137 pcb2->pcb_toc = ((register_t *)fork_trampoline)[1];
1138 #else
1139 pcb2->pcb_lr = (register_t)fork_trampoline;
1140 pcb2->pcb_context[0] = pcb2->pcb_lr;
1141 #endif
1142 pcb2->pcb_cpu.aim.usr_vsid = 0;
1143 #ifdef __SPE__
1144 pcb2->pcb_vec.vscr = SPEFSCR_DFLT;
1145 #endif
1146
1147 /* Setup to release spin count in fork_exit(). */
1148 td->td_md.md_spinlock_count = 1;
1149 td->td_md.md_saved_msr = psl_kernset;
1150 }
1151
1152 int
1153 cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
1154 stack_t *stack)
1155 {
1156 struct trapframe *tf;
1157 uintptr_t sp;
1158 #ifdef __powerpc64__
1159 int error;
1160 #endif
1161
1162 tf = td->td_frame;
1163 /* align stack and alloc space for frame ptr and saved LR */
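/*
 * 48 bytes covers the minimum 64-bit ELF frame header (ELFv1; ELFv2 needs
 * only 32), 8 bytes the 32-bit SVR4 minimum (back chain plus LR save word).
 */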
1164 #ifdef __powerpc64__
1165 sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 48) &
1166 ~0x1f;
1167 #else
1168 sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 8) &
1169 ~0x1f;
1170 #endif
1171 bzero(tf, sizeof(struct trapframe));
1172
1173 tf->fixreg[1] = (register_t)sp;
1174 tf->fixreg[3] = (register_t)arg;
1175 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
1176 tf->srr0 = (register_t)entry;
1177 #ifdef __powerpc64__
1178 tf->srr1 = psl_userset32 | PSL_FE_DFLT;
1179 #else
1180 tf->srr1 = psl_userset | PSL_FE_DFLT;
1181 #endif
1182 } else {
1183 #ifdef __powerpc64__
1184 if (td->td_proc->p_sysent == &elf64_freebsd_sysvec_v2) {
1185 tf->srr0 = (register_t)entry;
1186 /* ELFv2 ABI requires that the global entry point be in r12. */
1187 tf->fixreg[12] = (register_t)entry;
1188 } else {
1189 register_t entry_desc[3];
1190 error = copyin((void *)entry, entry_desc,
1191 sizeof(entry_desc));
1192 if (error != 0)
1193 return (error);
1194 tf->srr0 = entry_desc[0];
1195 tf->fixreg[2] = entry_desc[1];
1196 tf->fixreg[11] = entry_desc[2];
1197 }
1198 tf->srr1 = psl_userset | PSL_FE_DFLT;
1199 #endif
1200 }
1201
1202 td->td_pcb->pcb_flags = 0;
1203 #ifdef __SPE__
1204 td->td_pcb->pcb_vec.vscr = SPEFSCR_DFLT;
1205 #endif
1206
1207 td->td_retval[0] = (register_t)entry;
1208 td->td_retval[1] = 0;
1209 return (0);
1210 }
1211
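/*
 * User-mode mfspr/mtspr accesses to the DSCR can trap (for instance when
 * the privileged SPR number is used); emulate them against the PCB-cached
 * value so per-thread settings survive context switches (see
 * cpu_save_thread_regs()).
 */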
1212 static int
1213 emulate_mfspr(int spr, int reg, struct trapframe *frame){
1214 struct thread *td;
1215
1216 td = curthread;
1217
1218 if (spr == SPR_DSCR || spr == SPR_DSCRP) {
1219 if (!(cpu_features2 & PPC_FEATURE2_DSCR))
1220 return (SIGILL);
1221 /* If DSCR was never set, get the default DSCR. */
1222 if ((td->td_pcb->pcb_flags & PCB_CDSCR) == 0)
1223 td->td_pcb->pcb_dscr = mfspr(SPR_DSCRP);
1224
1225 frame->fixreg[reg] = td->td_pcb->pcb_dscr;
1226 frame->srr0 += 4;
1227 return (0);
1228 } else
1229 return (SIGILL);
1230 }
1231
1232 static int
1233 emulate_mtspr(int spr, int reg, struct trapframe *frame){
1234 struct thread *td;
1235
1236 td = curthread;
1237
1238 if (spr == SPR_DSCR || spr == SPR_DSCRP) {
1239 if (!(cpu_features2 & PPC_FEATURE2_DSCR))
1240 return (SIGILL);
1241 td->td_pcb->pcb_flags |= PCB_CDSCR;
1242 td->td_pcb->pcb_dscr = frame->fixreg[reg];
1243 mtspr(SPR_DSCRP, frame->fixreg[reg]);
1244 frame->srr0 += 4;
1245 return (0);
1246 } else
1247 return (SIGILL);
1248 }
1249
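/*
 * XFX masks the primary opcode, the extended opcode and the Rc bit of an
 * X/XFX-form instruction, so the mfspr/mtspr matches below ignore the GPR
 * and SPR operands encoded in the middle bits.  For example, "mfspr r3, 3"
 * encodes as 0x7c6302a6, and (0x7c6302a6 & XFX) == 0x7c0002a6.
 */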
1250 #define XFX 0xFC0007FF
1251 int
1252 ppc_instr_emulate(struct trapframe *frame, struct thread *td)
1253 {
1254 struct pcb *pcb;
1255 uint32_t instr;
1256 int reg, sig;
1257 int rs, spr;
1258
1259 instr = fuword32((void *)frame->srr0);
1260 sig = SIGILL;
1261
1262 if ((instr & 0xfc1fffff) == 0x7c1f42a6) { /* mfpvr */
1263 reg = (instr & ~0xfc1fffff) >> 21;
1264 frame->fixreg[reg] = mfpvr();
1265 frame->srr0 += 4;
1266 return (0);
1267 } else if ((instr & XFX) == 0x7c0002a6) { /* mfspr */
1268 rs = (instr & 0x3e00000) >> 21;
1269 spr = (instr & 0x1ff800) >> 16;
1270 return emulate_mfspr(spr, rs, frame);
1271 } else if ((instr & XFX) == 0x7c0003a6) { /* mtspr */
1272 rs = (instr & 0x3e00000) >> 21;
1273 spr = (instr & 0x1ff800) >> 16;
1274 return emulate_mtspr(spr, rs, frame);
1275 } else if ((instr & 0xfc000ffe) == 0x7c0004ac) { /* various sync */
1276 powerpc_sync(); /* Do a heavy-weight sync */
1277 frame->srr0 += 4;
1278 return (0);
1279 }
1280
1281 pcb = td->td_pcb;
1282 #ifdef FPU_EMU
1283 if (!(pcb->pcb_flags & PCB_FPREGS)) {
1284 bzero(&pcb->pcb_fpu, sizeof(pcb->pcb_fpu));
1285 pcb->pcb_flags |= PCB_FPREGS;
1286 } else if (pcb->pcb_flags & PCB_FPU)
1287 save_fpu(td);
1288 sig = fpu_emulate(frame, &pcb->pcb_fpu);
1289 if ((sig == 0 || sig == SIGFPE) && pcb->pcb_flags & PCB_FPU)
1290 enable_fpu(td);
1291 #endif
1292 if (sig == SIGILL) {
1293 if (pcb->pcb_lastill != frame->srr0) {
1294 /* Allow a second chance, in case of cache sync issues. */
1295 sig = 0;
1296 pmap_sync_icache(PCPU_GET(curpmap), frame->srr0, 4);
1297 pcb->pcb_lastill = frame->srr0;
1298 }
1299 }
1300
1301 return (sig);
1302 }
1303