/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/armreg.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/pcb.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

_Static_assert(sizeof(mcontext_t) == 880, "mcontext_t size incorrect");
_Static_assert(sizeof(ucontext_t) == 960, "ucontext_t size incorrect");
_Static_assert(sizeof(siginfo_t) == 80, "siginfo_t size incorrect");

static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static void set_fpcontext(struct thread *td, mcontext_t *mcp);

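/*
 * Copy the general purpose register state out of a thread's trap frame,
 * e.g. for ptrace(PT_GETREGS) or a core dump.
 */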
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	regs->sp = frame->tf_sp;
	regs->lr = frame->tf_lr;
	regs->elr = frame->tf_elr;
	regs->spsr = frame->tf_spsr;

	memcpy(regs->x, frame->tf_x, sizeof(regs->x));

#ifdef COMPAT_FREEBSD32
	/*
	 * We may be called here for a 32-bit process, if we're using a
	 * 64-bit debugger. If so, put the PC and SPSR where it expects them.
	 */
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		regs->x[15] = frame->tf_elr;
		regs->x[16] = frame->tf_spsr;
	}
#endif
	return (0);
}

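/*
 * Install new general purpose register state in a thread's trap frame,
 * e.g. from ptrace(PT_SETREGS). Only the user-settable SPSR bits are
 * accepted.
 */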
int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	frame->tf_sp = regs->sp;
	frame->tf_lr = regs->lr;

	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/*
		 * We may be called for a 32-bit process if we're using
		 * a 64-bit debugger. If so, get the PC and SPSR from where
		 * it put them.
		 */
		frame->tf_elr = regs->x[15];
		frame->tf_spsr &= ~PSR_SETTABLE_32;
		frame->tf_spsr |= regs->x[16] & PSR_SETTABLE_32;
		/*
		 * Don't allow userspace to ask to continue single stepping.
		 * The SPSR.SS field doesn't exist when EL1 is AArch32.
		 * As the SPSR.DIT field has moved into its place, don't
		 * allow userspace to set the SPSR.SS field.
		 */
	} else
#endif
	{
		frame->tf_elr = regs->elr;
		/*
		 * frame->tf_spsr and regs->spsr on FreeBSD 13 were 32-bit;
		 * from FreeBSD 14 they are 64-bit. As PSR_SETTABLE_64 clears
		 * the upper 32 bits, no compatibility handling is needed;
		 * however, if this is ever not the case we will need to add
		 * it, similar to how it is done in set_mcontext.
		 */
		frame->tf_spsr &= ~PSR_SETTABLE_64;
		frame->tf_spsr |= regs->spsr & PSR_SETTABLE_64;
		/* Enable single stepping if userspace asked for it */
		if ((frame->tf_spsr & PSR_SS) != 0) {
			td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;

			WRITE_SPECIALREG(mdscr_el1,
			    READ_SPECIALREG(mdscr_el1) | MDSCR_SS);
			isb();
		}
	}
	return (0);
}

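/*
 * Copy the thread's floating-point (VFP/SIMD) register state into the
 * fpreg structure, saving the live hardware state first if needed.
 */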
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			vfp_save_state(td, pcb);
	}

	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Called fill_fpregs while the kernel is using the VFP"));
	memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
	    sizeof(regs->fp_q));
	regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
	regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
#else
	memset(regs, 0, sizeof(*regs));
#endif
	return (0);
}

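/*
 * Replace the saved floating-point (VFP/SIMD) register state of a thread
 * with the contents of the fpreg structure.
 */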
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Called set_fpregs while the kernel is using the VFP"));
	memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
	pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
	pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
	return (0);
}

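/*
 * Report the hardware debug (breakpoint/watchpoint) register state of a
 * thread, along with the number of registers the CPU implements.
 */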
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	uint64_t dfr0;
	int i;
	uint8_t debug_ver, nbkpts, nwtpts;

	memset(regs, 0, sizeof(*regs));

	/*
	 * Read the Debug Feature Register 0 to get the info we need.
	 * It will be identical on FreeBSD and Linux, so there is no need
	 * to check what the target is.
	 */
	if (!get_user_reg(ID_AA64DFR0_EL1, &dfr0, true)) {
		debug_ver = ID_AA64DFR0_DebugVer_8;
		nbkpts = 0;
		nwtpts = 0;
	} else {
		debug_ver = ID_AA64DFR0_DebugVer_VAL(dfr0) >>
		    ID_AA64DFR0_DebugVer_SHIFT;
		nbkpts = ID_AA64DFR0_BRPs_VAL(dfr0) >> ID_AA64DFR0_BRPs_SHIFT;
		nwtpts = ID_AA64DFR0_WRPs_VAL(dfr0) >> ID_AA64DFR0_WRPs_SHIFT;
	}

	/*
	 * The BRPs field contains the number of breakpoints - 1. Armv8-A
	 * allows the hardware to provide 2-16 breakpoints so this won't
	 * overflow an 8 bit value. The same applies to the WRPs field.
	 */
	nbkpts++;
	nwtpts++;

	regs->db_debug_ver = debug_ver;
	regs->db_nbkpts = nbkpts;
	regs->db_nwtpts = nwtpts;

	monitor = &td->td_pcb->pcb_dbg_regs;
	if ((monitor->dbg_flags & DBGMON_ENABLED) != 0) {
		for (i = 0; i < nbkpts; i++) {
			regs->db_breakregs[i].dbr_addr = monitor->dbg_bvr[i];
			regs->db_breakregs[i].dbr_ctrl = monitor->dbg_bcr[i];
		}
		for (i = 0; i < nwtpts; i++) {
			regs->db_watchregs[i].dbw_addr = monitor->dbg_wvr[i];
			regs->db_watchregs[i].dbw_ctrl = monitor->dbg_wcr[i];
		}
	}

	return (0);
}

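/*
 * Validate and install user-supplied hardware breakpoints and watchpoints.
 * Only EL0-targeted, unlinked debug registers within the user address
 * space are accepted.
 */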
int
set_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	uint64_t addr;
	uint32_t ctrl;
	int i;

	monitor = &td->td_pcb->pcb_dbg_regs;
	monitor->dbg_enable_count = 0;

	for (i = 0; i < DBG_BRP_MAX; i++) {
		addr = regs->db_breakregs[i].dbr_addr;
		ctrl = regs->db_breakregs[i].dbr_ctrl;

		/*
		 * Don't let the user set a breakpoint on a kernel or
		 * non-canonical user address.
		 */
		if (addr >= VM_MAXUSER_ADDRESS)
			return (EINVAL);

		/*
		 * The lowest 2 bits are ignored, so record the effective
		 * address.
		 */
		addr = rounddown2(addr, 4);

		/*
		 * Some control fields are ignored, and other bits are
		 * reserved. Only unlinked, address-matching breakpoints are
		 * supported.
		 *
		 * XXX: fields that appear unvalidated, such as BAS, have
		 * constrained undefined behaviour. If the user mis-programs
		 * these, there is no risk to the system.
		 */
		ctrl &= DBGBCR_EN | DBGBCR_PMC | DBGBCR_BAS;
		if ((ctrl & DBGBCR_EN) != 0) {
			/* Only target EL0. */
			if ((ctrl & DBGBCR_PMC) != DBGBCR_PMC_EL0)
				return (EINVAL);

			monitor->dbg_enable_count++;
		}

		monitor->dbg_bvr[i] = addr;
		monitor->dbg_bcr[i] = ctrl;
	}

	for (i = 0; i < DBG_WRP_MAX; i++) {
		addr = regs->db_watchregs[i].dbw_addr;
		ctrl = regs->db_watchregs[i].dbw_ctrl;

		/*
		 * Don't let the user set a watchpoint on a kernel or
		 * non-canonical user address.
		 */
		if (addr >= VM_MAXUSER_ADDRESS)
			return (EINVAL);

		/*
		 * Some control fields are ignored, and other bits are
		 * reserved. Only unlinked watchpoints are supported.
		 */
		ctrl &= DBGWCR_EN | DBGWCR_PAC | DBGWCR_LSC | DBGWCR_BAS |
		    DBGWCR_MASK;

		if ((ctrl & DBGWCR_EN) != 0) {
			/* Only target EL0. */
			if ((ctrl & DBGWCR_PAC) != DBGWCR_PAC_EL0)
				return (EINVAL);

			/* Must set at least one of the load/store bits. */
			if ((ctrl & DBGWCR_LSC) == 0)
				return (EINVAL);

			/*
			 * When specifying the address range with BAS, the MASK
			 * field must be zero.
			 */
			if ((ctrl & DBGWCR_BAS) != DBGWCR_BAS &&
			    (ctrl & DBGWCR_MASK) != 0)
				return (EINVAL);

			monitor->dbg_enable_count++;
		}
		monitor->dbg_wvr[i] = addr;
		monitor->dbg_wcr[i] = ctrl;
	}

	if (monitor->dbg_enable_count > 0)
		monitor->dbg_flags |= DBGMON_ENABLED;

	return (0);
}

#ifdef COMPAT_FREEBSD32
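/*
 * Versions of the register access functions for 32-bit (COMPAT_FREEBSD32)
 * processes. The AArch32 core registers r0-r14 live in tf_x[0..14] of the
 * 64-bit trap frame.
 */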
int
fill_regs32(struct thread *td, struct reg32 *regs)
{
	int i;
	struct trapframe *tf;

	tf = td->td_frame;
	for (i = 0; i < 13; i++)
		regs->r[i] = tf->tf_x[i];
	/* For arm32, SP is r13 and LR is r14 */
	regs->r_sp = tf->tf_x[13];
	regs->r_lr = tf->tf_x[14];
	regs->r_pc = tf->tf_elr;
	regs->r_cpsr = tf->tf_spsr;

	return (0);
}

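/* The 32-bit counterpart of set_regs(); see fill_regs32() for the layout. */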
int
set_regs32(struct thread *td, struct reg32 *regs)
{
	int i;
	struct trapframe *tf;

	tf = td->td_frame;
	for (i = 0; i < 13; i++)
		tf->tf_x[i] = regs->r[i];
	/* For arm32, SP is r13 and LR is r14 */
	tf->tf_x[13] = regs->r_sp;
	tf->tf_x[14] = regs->r_lr;
	tf->tf_elr = regs->r_pc;
	tf->tf_spsr &= ~PSR_SETTABLE_32;
	tf->tf_spsr |= regs->r_cpsr & PSR_SETTABLE_32;

	return (0);
}

/* XXX fill/set dbregs/fpregs are stubbed on 32-bit arm. */
int
fill_fpregs32(struct thread *td, struct fpreg32 *regs)
{

	memset(regs, 0, sizeof(*regs));
	return (0);
}

int
set_fpregs32(struct thread *td, struct fpreg32 *regs)
{

	return (0);
}

int
fill_dbregs32(struct thread *td, struct dbreg32 *regs)
{

	memset(regs, 0, sizeof(*regs));
	return (0);
}

int
set_dbregs32(struct thread *td, struct dbreg32 *regs)
{

	return (0);
}
#endif

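/*
 * Set up the machine state for a newly exec'd image: reset the trap frame,
 * point SP, LR and ELR at the new image, clear TLS, FP and debug register
 * state, and generate fresh pointer authentication keys.
 */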
void
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *tf = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	memset(tf, 0, sizeof(struct trapframe));

	tf->tf_x[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_lr = imgp->entry_addr;
	tf->tf_elr = imgp->entry_addr;

	td->td_pcb->pcb_tpidr_el0 = 0;
	td->td_pcb->pcb_tpidrro_el0 = 0;
	WRITE_SPECIALREG(tpidrro_el0, 0);
	WRITE_SPECIALREG(tpidr_el0, 0);

#ifdef VFP
	vfp_reset_state(td, pcb);
#endif

	/*
	 * Clear debug register state. It is not applicable to the new process.
	 */
	bzero(&pcb->pcb_dbg_regs, sizeof(pcb->pcb_dbg_regs));

	/* Generate new pointer authentication keys */
	ptrauth_exec(td);
}

/* Sanity check these are the same size; they will be memcpy'd to and from */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);

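/*
 * Capture the current machine context (general purpose and FP registers)
 * of a thread, e.g. for getcontext(2) or when building a signal frame.
 */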
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_x[0] = 0;
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
	} else {
		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
	}

	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_lr = tf->tf_lr;
	mcp->mc_gpregs.gp_elr = tf->tf_elr;
	get_fpcontext(td, mcp);

	return (0);
}

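/*
 * Restore a machine context previously captured by get_mcontext() or built
 * by sendsig(), validating the PSR state and any additional register
 * contexts (such as SVE) linked from mc_ptr.
 */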
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
#define	PSR_13_MASK	0xfffffffful
	struct arm64_reg_context ctx;
	struct trapframe *tf = td->td_frame;
	struct pcb *pcb;
	uint64_t spsr;
	vm_offset_t addr;
	int error, seen_types;
	bool done;

	spsr = mcp->mc_gpregs.gp_spsr;
#ifdef COMPAT_FREEBSD13
	if (td->td_proc->p_osrel < P_OSREL_ARM64_SPSR) {
		/*
		 * Before FreeBSD 14 gp_spsr was 32-bit. The size of mc_gpregs
		 * was identical because of padding, so mask off the upper
		 * bits that may be invalid on earlier releases.
		 */
		spsr &= PSR_13_MASK;
	}
#endif

	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & PSR_AARCH32) != 0 ||
	    (spsr & PSR_DAIF) != (td->td_frame->tf_spsr & PSR_DAIF))
		return (EINVAL);

	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_lr = mcp->mc_gpregs.gp_lr;
	tf->tf_elr = mcp->mc_gpregs.gp_elr;
#ifdef COMPAT_FREEBSD13
	if (td->td_proc->p_osrel < P_OSREL_ARM64_SPSR) {
		/* Keep the upper 32 bits of spsr on older releases */
		tf->tf_spsr &= ~PSR_13_MASK;
		tf->tf_spsr |= spsr;
	} else
#endif
		tf->tf_spsr = spsr;
	if ((tf->tf_spsr & PSR_SS) != 0) {
		td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;

		WRITE_SPECIALREG(mdscr_el1,
		    READ_SPECIALREG(mdscr_el1) | MDSCR_SS);
		isb();
	}

	set_fpcontext(td, mcp);

	/* Read any register contexts we find */
	if (mcp->mc_ptr != 0) {
		addr = mcp->mc_ptr;
		pcb = td->td_pcb;

#define	CTX_TYPE_FLAG_SVE	(1 << 0)

		seen_types = 0;
		done = false;
		do {
			if (!__is_aligned(addr,
			    _Alignof(struct arm64_reg_context)))
				return (EINVAL);

			error = copyin((const void *)addr, &ctx, sizeof(ctx));
			if (error != 0)
				return (error);

			switch (ctx.ctx_id) {
#ifdef VFP
			case ARM64_CTX_SVE: {
				struct sve_context sve_ctx;
				size_t buf_size;

				if ((seen_types & CTX_TYPE_FLAG_SVE) != 0)
					return (EINVAL);
				seen_types |= CTX_TYPE_FLAG_SVE;

				if (pcb->pcb_svesaved == NULL)
					return (EINVAL);

				/* XXX: Check pcb_svesaved is valid */

				buf_size = sve_buf_size(td);
				/* Check the size is valid */
				if (ctx.ctx_size !=
				    (sizeof(sve_ctx) + buf_size))
					return (EINVAL);

				memset(pcb->pcb_svesaved, 0,
				    sve_max_buf_size());

				/* Copy the SVE registers from userspace */
				if (copyin((void *)(addr + sizeof(sve_ctx)),
				    pcb->pcb_svesaved, buf_size) != 0)
					return (EINVAL);

				pcb->pcb_fpflags |= PCB_FP_SVEVALID;
				break;
			}
#endif
			case ARM64_CTX_END:
				done = true;
				break;
			default:
				return (EINVAL);
			}

			addr += ctx.ctx_size;
		} while (!done);

#undef CTX_TYPE_FLAG_SVE
	}

	return (0);
#undef PSR_13_MASK
}

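/* Save the current thread's VFP state into the machine context. */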
static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	MPASS(td == curthread);

	curpcb = curthread->td_pcb;
	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		vfp_save_state(td, curpcb);
	}

	KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
	    ("Called get_fpcontext while the kernel is using the VFP"));
	KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
	    ("Non-userspace FPU flags set in get_fpcontext"));
	memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
	    sizeof(mcp->mc_fpregs.fp_q));
	mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
	mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
	mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
	mcp->mc_flags |= _MC_FP_VALID;
#endif
}

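/* Load the VFP state from the machine context, if it is marked valid. */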
static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	MPASS(td == curthread);
	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
		curpcb = curthread->td_pcb;

		/*
		 * Discard any VFP state for the current thread, we
		 * are about to override it.
		 */
		critical_enter();
		vfp_discard(td);
		critical_exit();

		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
		    ("Called set_fpcontext while the kernel is using the VFP"));
		memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
		    sizeof(mcp->mc_fpregs.fp_q));
		curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
		curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_STARTED;
	}
#endif
}

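/*
 * Return from a signal handler: restore the machine context and signal
 * mask that sendsig() saved on the user stack.
 */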
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	/* Stop an interrupt from causing the SVE state to be dropped */
	td->td_sa.code = -1;
	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/*
	 * Sync the VFP and SVE registers. To be backwards compatible we
	 * use the VFP registers to restore the lower bits of the SVE
	 * registers they alias.
	 */
	vfp_to_sve_sync(td);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

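/*
 * Write the terminating ARM64_CTX_END record for the register context
 * list onto the user stack.
 */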
static bool
sendsig_ctx_end(struct thread *td, vm_offset_t *addrp)
{
	struct arm64_reg_context end_ctx;
	vm_offset_t ctx_addr;

	*addrp -= sizeof(end_ctx);
	ctx_addr = *addrp;

	memset(&end_ctx, 0, sizeof(end_ctx));
	end_ctx.ctx_id = ARM64_CTX_END;
	end_ctx.ctx_size = sizeof(end_ctx);

	if (copyout(&end_ctx, (void *)ctx_addr, sizeof(end_ctx)) != 0)
		return (false);

	return (true);
}

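/*
 * Write the thread's SVE register state onto the user stack as an
 * ARM64_CTX_SVE record, if SVE has been used.
 */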
static bool
sendsig_ctx_sve(struct thread *td, vm_offset_t *addrp)
{
	struct sve_context ctx;
	struct pcb *pcb;
	size_t buf_size;
	vm_offset_t ctx_addr;

	pcb = td->td_pcb;
	/* Do nothing if SVE hasn't started */
	if (pcb->pcb_svesaved == NULL)
		return (true);

	MPASS(pcb->pcb_svesaved != NULL);

	buf_size = sve_buf_size(td);

	/* Address for the full context */
	*addrp -= sizeof(ctx) + buf_size;
	ctx_addr = *addrp;

	memset(&ctx, 0, sizeof(ctx));
	ctx.sve_ctx.ctx_id = ARM64_CTX_SVE;
	ctx.sve_ctx.ctx_size = sizeof(ctx) + buf_size;
	ctx.sve_vector_len = pcb->pcb_sve_len;
	ctx.sve_flags = 0;

	/* Copy out the header and data */
	if (copyout(&ctx, (void *)ctx_addr, sizeof(ctx)) != 0)
		return (false);
	if (copyout(pcb->pcb_svesaved, (void *)(ctx_addr + sizeof(ctx)),
	    buf_size) != 0)
		return (false);

	return (true);
}

typedef bool (*ctx_func)(struct thread *, vm_offset_t *);
static const ctx_func ctx_funcs[] = {
	sendsig_ctx_end,	/* Must be first to end the linked list */
	sendsig_ctx_sve,
	NULL,
};

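/*
 * Deliver a signal to the current thread: build a sigframe (including any
 * extra register contexts) on the user or alternate signal stack and
 * redirect the trap frame to the signal trampoline.
 */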
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	vm_offset_t addr;
	int onstack, sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		addr = ((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else {
		addr = td->td_frame->tf_sp;
	}

	/* Fill in the frame to copy out */
	bzero(&frame, sizeof(frame));
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
	    (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	for (int i = 0; ctx_funcs[i] != NULL; i++) {
		if (!ctx_funcs[i](td, &addr)) {
			/* Process has trashed its stack. Kill it. */
			CTR4(KTR_SIG,
			    "sendsig: frame sigexit td=%p fp=%#lx func[%d]=%p",
			    td, addr, i, ctx_funcs[i]);
			PROC_LOCK(p);
			sigexit(td, SIGILL);
			/* NOTREACHED */
		}
	}

	/* Point at the first context */
	frame.sf_uc.uc_mcontext.mc_ptr = addr;

	/* Make room, keeping the stack aligned */
	fp = (struct sigframe *)addr;
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	tf->tf_x[0] = sig;
	tf->tf_x[1] = (register_t)&fp->sf_si;
	tf->tf_x[2] = (register_t)&fp->sf_uc;
	tf->tf_x[8] = (register_t)catcher;
	tf->tf_sp = (register_t)fp;
	tf->tf_elr = (register_t)PROC_SIGCODE(p);

	/* Clear the single step flag while in the signal handler */
	if ((td->td_pcb->pcb_flags & PCB_SINGLE_STEP) != 0) {
		td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
		WRITE_SPECIALREG(mdscr_el1,
		    READ_SPECIALREG(mdscr_el1) & ~MDSCR_SS);
		isb();
	}

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}