xref: /freebsd/sys/arm64/arm64/exec_machdep.c (revision 4c6c27d3fb4ad15931aae2eaf8e624aed99a3fd9)
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/armreg.h>
#include <machine/elf.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/pcb.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

_Static_assert(sizeof(mcontext_t) == 880, "mcontext_t size incorrect");
_Static_assert(sizeof(ucontext_t) == 960, "ucontext_t size incorrect");
_Static_assert(sizeof(siginfo_t) == 80, "siginfo_t size incorrect");

static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static void set_fpcontext(struct thread *td, mcontext_t *mcp);

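/*
 * Copy a thread's general-purpose register state out of its trap frame,
 * e.g. for the ptrace(2) and core dump register accessors.
 */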
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	regs->sp = frame->tf_sp;
	regs->lr = frame->tf_lr;
	regs->elr = frame->tf_elr;
	regs->spsr = frame->tf_spsr;

	memcpy(regs->x, frame->tf_x, sizeof(regs->x));

#ifdef COMPAT_FREEBSD32
	/*
	 * We may be called here for a 32-bit process if we're using a
	 * 64-bit debugger. If so, put the PC and SPSR where it expects them.
	 */
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		regs->x[15] = frame->tf_elr;
		regs->x[16] = frame->tf_spsr;
	}
#endif
	return (0);
}

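/*
 * Install a new general-purpose register state into a thread's trap frame,
 * e.g. from ptrace(2). Only user-settable SPSR bits are honoured.
 */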
int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	frame->tf_sp = regs->sp;
	frame->tf_lr = regs->lr;

	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/*
		 * We may be called for a 32-bit process if we're using
		 * a 64-bit debugger. If so, get the PC and SPSR from where
		 * it put them.
		 */
		frame->tf_elr = regs->x[15];
		frame->tf_spsr &= ~PSR_SETTABLE_32;
		frame->tf_spsr |= regs->x[16] & PSR_SETTABLE_32;
		/*
		 * Don't allow userspace to ask to continue single stepping.
		 * The SPSR.SS field doesn't exist when EL1 is AArch32.
		 * As the SPSR.DIT field has moved into its place, don't
		 * allow userspace to set the SPSR.SS field.
		 */
	} else
#endif
	{
		frame->tf_elr = regs->elr;
		/*
		 * frame->tf_spsr and regs->spsr were 32-bit on FreeBSD 13;
		 * from 14 onwards they are 64-bit. As PSR_SETTABLE_64 clears
		 * the upper 32 bits, no compatibility handling is needed;
		 * however, if this is ever not the case we will need to add
		 * it, similar to how it is done in set_mcontext.
		 */
		frame->tf_spsr &= ~PSR_SETTABLE_64;
		frame->tf_spsr |= regs->spsr & PSR_SETTABLE_64;
		/* Enable single stepping if userspace asked for it */
		if ((frame->tf_spsr & PSR_SS) != 0) {
			td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;

			WRITE_SPECIALREG(mdscr_el1,
			    READ_SPECIALREG(mdscr_el1) | MDSCR_SS);
			isb();
		}
	}
	return (0);
}

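/*
 * Copy the thread's VFP/SIMD register state into an fpreg structure for the
 * debugger interfaces. Without VFP support the structure is zeroed.
 */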
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			vfp_save_state(td, pcb);
	}

	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Called fill_fpregs while the kernel is using the VFP"));
	memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
	    sizeof(regs->fp_q));
	regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
	regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
#else
	memset(regs, 0, sizeof(*regs));
#endif
	return (0);
}

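/*
 * Install a new VFP/SIMD register state for the thread from an fpreg
 * structure. A no-op when the kernel is built without VFP support.
 */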
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Called set_fpregs while the kernel is using the VFP"));
	memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
	pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
	pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
	return (0);
}

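/*
 * Report the thread's hardware debug (breakpoint/watchpoint) registers,
 * along with the debug architecture version and register counts taken
 * from ID_AA64DFR0_EL1.
 */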
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	uint64_t dfr0;
	int i;
	uint8_t debug_ver, nbkpts, nwtpts;

	memset(regs, 0, sizeof(*regs));

	/*
	 * Read the Debug Feature Register 0 to get the info we need.
	 * It will be identical on FreeBSD and Linux, so there is no need
	 * to check which ABI the target is using.
	 */
	if (!get_user_reg(ID_AA64DFR0_EL1, &dfr0, true)) {
		debug_ver = ID_AA64DFR0_DebugVer_8;
		nbkpts = 0;
		nwtpts = 0;
	} else {
		debug_ver = ID_AA64DFR0_DebugVer_VAL(dfr0) >>
		    ID_AA64DFR0_DebugVer_SHIFT;
		nbkpts = ID_AA64DFR0_BRPs_VAL(dfr0) >> ID_AA64DFR0_BRPs_SHIFT;
		nwtpts = ID_AA64DFR0_WRPs_VAL(dfr0) >> ID_AA64DFR0_WRPs_SHIFT;
	}

	/*
	 * The BRPs field contains the number of breakpoints - 1. Armv8-A
	 * allows the hardware to provide 2-16 breakpoints so this won't
	 * overflow an 8-bit value. The same applies to the WRPs field.
	 */
	nbkpts++;
	nwtpts++;

	regs->db_debug_ver = debug_ver;
	regs->db_nbkpts = nbkpts;
	regs->db_nwtpts = nwtpts;

	monitor = &td->td_pcb->pcb_dbg_regs;
	if ((monitor->dbg_flags & DBGMON_ENABLED) != 0) {
		for (i = 0; i < nbkpts; i++) {
			regs->db_breakregs[i].dbr_addr = monitor->dbg_bvr[i];
			regs->db_breakregs[i].dbr_ctrl = monitor->dbg_bcr[i];
		}
		for (i = 0; i < nwtpts; i++) {
			regs->db_watchregs[i].dbw_addr = monitor->dbg_wvr[i];
			regs->db_watchregs[i].dbw_ctrl = monitor->dbg_wcr[i];
		}
	}

	return (0);
}

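/*
 * Validate and install user-supplied hardware breakpoints and watchpoints.
 * Only EL0-targeted, unlinked debug registers on user addresses are accepted.
 */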
int
set_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	uint64_t addr;
	uint32_t ctrl;
	int i;

	monitor = &td->td_pcb->pcb_dbg_regs;
	monitor->dbg_enable_count = 0;

	for (i = 0; i < DBG_BRP_MAX; i++) {
		addr = regs->db_breakregs[i].dbr_addr;
		ctrl = regs->db_breakregs[i].dbr_ctrl;

		/*
		 * Don't let the user set a breakpoint on a kernel or
		 * non-canonical user address.
		 */
		if (addr >= VM_MAXUSER_ADDRESS)
			return (EINVAL);

		/*
		 * The lowest 2 bits are ignored, so record the effective
		 * address.
		 */
		addr = rounddown2(addr, 4);

		/*
		 * Some control fields are ignored, and other bits are
		 * reserved. Only unlinked, address-matching breakpoints
		 * are supported.
		 *
		 * XXX: fields that appear unvalidated, such as BAS, have
		 * constrained undefined behaviour. If the user mis-programs
		 * these, there is no risk to the system.
		 */
		ctrl &= DBGBCR_EN | DBGBCR_PMC | DBGBCR_BAS;
		if ((ctrl & DBGBCR_EN) != 0) {
			/* Only target EL0. */
			if ((ctrl & DBGBCR_PMC) != DBGBCR_PMC_EL0)
				return (EINVAL);

			monitor->dbg_enable_count++;
		}

		monitor->dbg_bvr[i] = addr;
		monitor->dbg_bcr[i] = ctrl;
	}

	for (i = 0; i < DBG_WRP_MAX; i++) {
		addr = regs->db_watchregs[i].dbw_addr;
		ctrl = regs->db_watchregs[i].dbw_ctrl;

		/*
		 * Don't let the user set a watchpoint on a kernel or
		 * non-canonical user address.
		 */
		if (addr >= VM_MAXUSER_ADDRESS)
			return (EINVAL);

		/*
		 * Some control fields are ignored, and other bits are
		 * reserved. Only unlinked watchpoints are supported.
		 */
		ctrl &= DBGWCR_EN | DBGWCR_PAC | DBGWCR_LSC | DBGWCR_BAS |
		    DBGWCR_MASK;

		if ((ctrl & DBGWCR_EN) != 0) {
			/* Only target EL0. */
			if ((ctrl & DBGWCR_PAC) != DBGWCR_PAC_EL0)
				return (EINVAL);

			/* Must set at least one of the load/store bits. */
			if ((ctrl & DBGWCR_LSC) == 0)
				return (EINVAL);

			/*
			 * When specifying the address range with BAS, the MASK
			 * field must be zero.
			 */
			if ((ctrl & DBGWCR_BAS) != DBGWCR_BAS &&
			    (ctrl & DBGWCR_MASK) != 0)
				return (EINVAL);

			monitor->dbg_enable_count++;
		}
		monitor->dbg_wvr[i] = addr;
		monitor->dbg_wcr[i] = ctrl;
	}

	if (monitor->dbg_enable_count > 0)
		monitor->dbg_flags |= DBGMON_ENABLED;

	return (0);
}

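/*
 * 32-bit (COMPAT_FREEBSD32) register accessors, used when operating on an
 * AArch32 process. The FP and debug register variants are stubs.
 */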
#ifdef COMPAT_FREEBSD32
int
fill_regs32(struct thread *td, struct reg32 *regs)
{
	int i;
	struct trapframe *tf;

	tf = td->td_frame;
	for (i = 0; i < 13; i++)
		regs->r[i] = tf->tf_x[i];
	/* For arm32, SP is r13 and LR is r14 */
	regs->r_sp = tf->tf_x[13];
	regs->r_lr = tf->tf_x[14];
	regs->r_pc = tf->tf_elr;
	regs->r_cpsr = tf->tf_spsr;

	return (0);
}

int
set_regs32(struct thread *td, struct reg32 *regs)
{
	int i;
	struct trapframe *tf;

	tf = td->td_frame;
	for (i = 0; i < 13; i++)
		tf->tf_x[i] = regs->r[i];
	/* For arm32, SP is r13 and LR is r14 */
	tf->tf_x[13] = regs->r_sp;
	tf->tf_x[14] = regs->r_lr;
	tf->tf_elr = regs->r_pc;
	tf->tf_spsr &= ~PSR_SETTABLE_32;
	tf->tf_spsr |= regs->r_cpsr & PSR_SETTABLE_32;

	return (0);
}

/* XXX fill/set dbregs/fpregs are stubbed on 32-bit arm. */
int
fill_fpregs32(struct thread *td, struct fpreg32 *regs)
{

	memset(regs, 0, sizeof(*regs));
	return (0);
}

int
set_fpregs32(struct thread *td, struct fpreg32 *regs)
{

	return (0);
}

int
fill_dbregs32(struct thread *td, struct dbreg32 *regs)
{

	memset(regs, 0, sizeof(*regs));
	return (0);
}

int
set_dbregs32(struct thread *td, struct dbreg32 *regs)
{

	return (0);
}
#endif

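/*
 * Set up the initial register state for a newly exec'd process: clear the
 * trap frame, point the stack and entry point at the new image, reset the
 * TLS registers, VFP and debug state, and configure TBI and pointer
 * authentication for the process.
 */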
void
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *tf = td->td_frame;
	struct pcb *pcb = td->td_pcb;
	uint64_t new_tcr, tcr;

	memset(tf, 0, sizeof(struct trapframe));

	tf->tf_x[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_lr = imgp->entry_addr;
	tf->tf_elr = imgp->entry_addr;

	td->td_pcb->pcb_tpidr_el0 = 0;
	td->td_pcb->pcb_tpidrro_el0 = 0;
	WRITE_SPECIALREG(tpidrro_el0, 0);
	WRITE_SPECIALREG(tpidr_el0, 0);

#ifdef VFP
	vfp_reset_state(td, pcb);
#endif

	/*
	 * Clear debug register state. It is not applicable to the new process.
	 */
	bzero(&pcb->pcb_dbg_regs, sizeof(pcb->pcb_dbg_regs));

	/* If the process is new enough, enable TBI */
	if (td->td_proc->p_osrel >= TBI_VERSION)
		new_tcr = TCR_TBI0;
	else
		new_tcr = 0;
	td->td_proc->p_md.md_tcr = new_tcr;

	/* TODO: should create a pmap function for this... */
	tcr = READ_SPECIALREG(tcr_el1);
	if ((tcr & MD_TCR_FIELDS) != new_tcr) {
		uint64_t asid;

		tcr &= ~MD_TCR_FIELDS;
		tcr |= new_tcr;
		WRITE_SPECIALREG(tcr_el1, tcr);
		isb();

		/*
		 * TCR_EL1.TBI0 is permitted to be cached in the TLB, so
		 * we need to perform a TLB invalidation.
		 */
		asid = READ_SPECIALREG(ttbr0_el1) & TTBR_ASID_MASK;
		__asm __volatile(
		    "tlbi aside1is, %0		\n"
		    "dsb ish			\n"
		    "isb			\n"
		    : : "r" (asid));
	}

	/* Generate new pointer authentication keys */
	ptrauth_exec(td);
}

/* Sanity check these are the same size; they will be memcpy'd to and from */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);

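/*
 * Save the machine context (general-purpose and FP registers) of the current
 * thread into an mcontext_t, optionally clearing the syscall return value.
 */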
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_x[0] = 0;
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
	} else {
		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
	}

	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_lr = tf->tf_lr;
	mcp->mc_gpregs.gp_elr = tf->tf_elr;
	get_fpcontext(td, mcp);

	return (0);
}

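/*
 * Restore a machine context previously saved by get_mcontext() or built by
 * sendsig(), validating the SPSR and walking any additional register context
 * records (currently SVE) linked from mc_ptr.
 */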
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
#define	PSR_13_MASK	0xfffffffful
	struct arm64_reg_context ctx;
	struct trapframe *tf = td->td_frame;
	struct pcb *pcb;
	uint64_t spsr;
	vm_offset_t addr;
	int error, seen_types;
	bool done;

	spsr = mcp->mc_gpregs.gp_spsr;
#ifdef COMPAT_FREEBSD13
	if (td->td_proc->p_osrel < P_OSREL_ARM64_SPSR) {
		/*
		 * Before FreeBSD 14 gp_spsr was 32-bit. The size of mc_gpregs
		 * was identical because of padding, so mask off the upper
		 * bits, which may be invalid on earlier releases.
		 */
		spsr &= PSR_13_MASK;
	}
#endif

	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & PSR_AARCH32) != 0 ||
	    (spsr & PSR_DAIF) != (td->td_frame->tf_spsr & PSR_DAIF))
		return (EINVAL);

	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_lr = mcp->mc_gpregs.gp_lr;
	tf->tf_elr = mcp->mc_gpregs.gp_elr;
#ifdef COMPAT_FREEBSD13
	if (td->td_proc->p_osrel < P_OSREL_ARM64_SPSR) {
		/* Keep the upper 32 bits of spsr on older releases */
		tf->tf_spsr &= ~PSR_13_MASK;
		tf->tf_spsr |= spsr;
	} else
#endif
		tf->tf_spsr = spsr;
	if ((tf->tf_spsr & PSR_SS) != 0) {
		td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;

		WRITE_SPECIALREG(mdscr_el1,
		    READ_SPECIALREG(mdscr_el1) | MDSCR_SS);
		isb();
	}

	set_fpcontext(td, mcp);

	/* Read any register contexts we find */
	if (mcp->mc_ptr != 0) {
		addr = mcp->mc_ptr;
		pcb = td->td_pcb;

#define	CTX_TYPE_FLAG_SVE	(1 << 0)

		seen_types = 0;
		done = false;
		do {
			if (!__is_aligned(addr,
			    _Alignof(struct arm64_reg_context)))
				return (EINVAL);

			error = copyin((const void *)addr, &ctx, sizeof(ctx));
			if (error != 0)
				return (error);

			switch (ctx.ctx_id) {
#ifdef VFP
			case ARM64_CTX_SVE: {
				struct sve_context sve_ctx;
				size_t buf_size;

				if ((seen_types & CTX_TYPE_FLAG_SVE) != 0)
					return (EINVAL);
				seen_types |= CTX_TYPE_FLAG_SVE;

				if (pcb->pcb_svesaved == NULL)
					return (EINVAL);

				/* XXX: Check pcb_svesaved is valid */

				buf_size = sve_buf_size(td);
				/* Check the size is valid */
				if (ctx.ctx_size !=
				    (sizeof(sve_ctx) + buf_size))
					return (EINVAL);

				memset(pcb->pcb_svesaved, 0,
				    sve_max_buf_size());

				/* Copy the SVE registers from userspace */
				if (copyin((void *)(addr + sizeof(sve_ctx)),
				    pcb->pcb_svesaved, buf_size) != 0)
					return (EINVAL);

				pcb->pcb_fpflags |= PCB_FP_SVEVALID;
				break;
			}
#endif
			case ARM64_CTX_END:
				done = true;
				break;
			default:
				return (EINVAL);
			}

			addr += ctx.ctx_size;
		} while (!done);

#undef CTX_TYPE_FLAG_SVE
	}

	return (0);
#undef PSR_13_MASK
}

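/* Save the current thread's VFP state into the mcontext. */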
static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	MPASS(td == curthread);

	curpcb = curthread->td_pcb;
	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		vfp_save_state(td, curpcb);
	}

	KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
	    ("Called get_fpcontext while the kernel is using the VFP"));
	KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
	    ("Non-userspace FPU flags set in get_fpcontext"));
	memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
	    sizeof(mcp->mc_fpregs.fp_q));
	mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
	mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
	mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
	mcp->mc_flags |= _MC_FP_VALID;
#endif
}

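/* Restore the current thread's VFP state from the mcontext, if it is valid. */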
static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	MPASS(td == curthread);
	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
		curpcb = curthread->td_pcb;

		/*
		 * Discard any VFP state for the current thread; we
		 * are about to override it.
		 */
		critical_enter();
		vfp_discard(td);
		critical_exit();

		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
		    ("Called set_fpcontext while the kernel is using the VFP"));
		memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
		    sizeof(mcp->mc_fpregs.fp_q));
		curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
		curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_STARTED;
	}
#endif
}

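/*
 * The sigreturn(2) system call: restore the machine context and signal mask
 * saved in the user-supplied ucontext when a signal handler returns.
 */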
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	/* Stop an interrupt from causing the SVE state to be dropped */
	td->td_sa.code = -1;
	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/*
	 * Sync the VFP and SVE registers. To be backwards compatible we
	 * use the VFP registers to restore the lower bits of the SVE
	 * registers they alias.
	 */
	vfp_to_sve_sync(td);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

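/*
 * Write the terminating ARM64_CTX_END record onto the user stack; it marks
 * the end of the register context list read back by set_mcontext().
 */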
static bool
sendsig_ctx_end(struct thread *td, vm_offset_t *addrp)
{
	struct arm64_reg_context end_ctx;
	vm_offset_t ctx_addr;

	*addrp -= sizeof(end_ctx);
	ctx_addr = *addrp;

	memset(&end_ctx, 0, sizeof(end_ctx));
	end_ctx.ctx_id = ARM64_CTX_END;
	end_ctx.ctx_size = sizeof(end_ctx);

	if (copyout(&end_ctx, (void *)ctx_addr, sizeof(end_ctx)) != 0)
		return (false);

	return (true);
}

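/*
 * Write an ARM64_CTX_SVE record and the saved SVE register data onto the
 * user stack, if the thread has used SVE.
 */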
static bool
sendsig_ctx_sve(struct thread *td, vm_offset_t *addrp)
{
	struct sve_context ctx;
	struct pcb *pcb;
	size_t buf_size;
	vm_offset_t ctx_addr;

	pcb = td->td_pcb;
	/* Do nothing if sve hasn't started */
	if (pcb->pcb_svesaved == NULL)
		return (true);

	MPASS(pcb->pcb_svesaved != NULL);

	buf_size = sve_buf_size(td);

	/* Address for the full context */
	*addrp -= sizeof(ctx) + buf_size;
	ctx_addr = *addrp;

	memset(&ctx, 0, sizeof(ctx));
	ctx.sve_ctx.ctx_id = ARM64_CTX_SVE;
	ctx.sve_ctx.ctx_size = sizeof(ctx) + buf_size;
	ctx.sve_vector_len = pcb->pcb_sve_len;
	ctx.sve_flags = 0;

	/* Copy out the header and data */
	if (copyout(&ctx, (void *)ctx_addr, sizeof(ctx)) != 0)
		return (false);
	if (copyout(pcb->pcb_svesaved, (void *)(ctx_addr + sizeof(ctx)),
	    buf_size) != 0)
		return (false);

	return (true);
}

typedef bool(*ctx_func)(struct thread *, vm_offset_t *);
static const ctx_func ctx_funcs[] = {
	sendsig_ctx_end,	/* Must be first to end the linked list */
	sendsig_ctx_sve,
	NULL,
};

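/*
 * Deliver a signal: build the signal frame (ucontext, siginfo and any extra
 * register contexts) on the user stack and redirect the thread to the
 * process's signal trampoline, which will call the handler.
 */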
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	vm_offset_t addr;
	int onstack, sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		addr = ((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else {
		addr = td->td_frame->tf_sp;
	}

	/* Fill in the frame to copy out */
	bzero(&frame, sizeof(frame));
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
	    (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	for (int i = 0; ctx_funcs[i] != NULL; i++) {
		if (!ctx_funcs[i](td, &addr)) {
			/* Process has trashed its stack. Kill it. */
			CTR4(KTR_SIG,
			    "sendsig: frame sigexit td=%p fp=%#lx func[%d]=%p",
			    td, addr, i, ctx_funcs[i]);
			PROC_LOCK(p);
			sigexit(td, SIGILL);
			/* NOTREACHED */
		}
	}

	/* Point at the first context */
	frame.sf_uc.uc_mcontext.mc_ptr = addr;

	/* Make room, keeping the stack aligned */
	fp = (struct sigframe *)addr;
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	tf->tf_x[0] = sig;
	tf->tf_x[1] = (register_t)&fp->sf_si;
	tf->tf_x[2] = (register_t)&fp->sf_uc;
	tf->tf_x[8] = (register_t)catcher;
	tf->tf_sp = (register_t)fp;
	tf->tf_elr = (register_t)PROC_SIGCODE(p);

	/* Clear the single step flag while in the signal handler */
	if ((td->td_pcb->pcb_flags & PCB_SINGLE_STEP) != 0) {
		td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
		WRITE_SPECIALREG(mdscr_el1,
		    READ_SPECIALREG(mdscr_el1) & ~MDSCR_SS);
		isb();
	}

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}