xref: /freebsd/sys/arm64/arm64/swtch.S (revision 4c6c27d3fb4ad15931aae2eaf8e624aed99a3fd9)
/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <sys/elf_common.h>

#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/proc.h>

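/*
 * Clear MDSCR_EL1.SS to disable hardware single-stepping, but only when
 * the thread's pcb flags have PCB_SINGLE_STEP set.
 */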
.macro clear_step_flag pcbflags, tmp
	tbz	\pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #MDSCR_SS
	msr	mdscr_el1, \tmp
	isb
999:
.endm

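/*
 * Set MDSCR_EL1.SS to re-enable hardware single-stepping, but only when
 * the thread's pcb flags have PCB_SINGLE_STEP set.
 */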
.macro set_step_flag pcbflags, tmp
	tbz	\pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #MDSCR_SS
	msr	mdscr_el1, \tmp
	isb
999:
.endm

/*
 * Lower 32 bits of CONTEXTIDR_EL1 are PID
 * Upper 32 bits are reserved for future use e.g. TID
 */
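/*
 * Only writes CONTEXTIDR_EL1 when arm64_pid_in_contextidr is set;
 * expects the new thread pointer in x1 and clobbers x9 and x10.
 */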
.macro pid_in_context_idr
	adrp	x9, arm64_pid_in_contextidr
	ldrb	w10, [x9, :lo12:arm64_pid_in_contextidr]
	cbz	w10, 998f
	ldr	x9, [x1, #TD_PROC]
	/* PID is always 0 or positive, do not sign extend */
	ldr	w10, [x9, #P_PID]
	msr	contextidr_el1, x10
998:
.endm

/*
 * void cpu_throw(struct thread *old, struct thread *new)
 */
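/*
 * Switch to the new thread without saving the old thread's context;
 * "old" may be NULL, in which case there is no single-step state to clear.
 */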
ENTRY(cpu_throw)
	/* If old == NULL, skip disabling stepping */
	cbz	x0, 1f

	/* If we were single stepping, disable it */
	ldr	x4, [x0, #TD_PCB]
	ldr	w5, [x4, #PCB_FLAGS]
	clear_step_flag w5, x6

1:
	/* debug/trace: set CONTEXTIDR_EL1 to current PID, if enabled */
	pid_in_context_idr

#ifdef VFP
	/* Backup the new thread pointer around a call to C code */
	mov	x19, x1
	bl	vfp_discard
	mov	x0, x19
#else
	mov	x0, x1
#endif

	/* This returns the thread pointer so no need to save it */
	bl	ptrauth_switch
#ifdef PERTHREAD_SSP
	mov	x19, x0
#endif
	/* This returns the thread pcb */
	bl	pmap_switch
	mov	x4, x0
#ifdef PERTHREAD_SSP
	/* Update the per-thread stack canary pointer. */
	add	x19, x19, #(TD_MD_CANARY)
	msr	sp_el0, x19
#endif

	/* If we are single stepping, enable it */
	ldr	w5, [x4, #PCB_FLAGS]
	set_step_flag w5, x6

	/* Restore the registers */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldr	x6, [x4, #PCB_TPIDRRO]
	msr	tpidrro_el0, x6
	ldp	x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8]
	ldp	x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8]
	ldp	x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8]
	ldp	x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8]
	ldp	x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8]
	ldp	x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8]

	ret
END(cpu_throw)

/*
 * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
 *
 * x0 = old
 * x1 = new
 * x2 = mtx
 * x3 to x7, x16 and x17 are caller saved
 */
ENTRY(cpu_switch)
	/*
	 * Save the old context.
	 */
	ldr	x4, [x0, #TD_PCB]

	/* Store the callee-saved registers */
	stp	x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8]
	stp	x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8]
	stp	x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8]
	stp	x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8]
	stp	x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8]
	stp	x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8]
	/* And the old stack pointer */
	mov	x5, sp
	mrs	x6, tpidrro_el0
	str	x6, [x4, #PCB_TPIDRRO]
	mrs	x6, tpidr_el0
	stp	x5, x6, [x4, #PCB_SP]

	/* If we were single stepping, disable it */
	ldr	w5, [x4, #PCB_FLAGS]
	clear_step_flag w5, x6

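	/* Preserve old, new and mtx in callee-saved registers across the C calls below */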
	mov	x19, x0
	mov	x20, x1
	mov	x21, x2

	/* debug/trace: set CONTEXTIDR_EL1 to current PID, if enabled */
	pid_in_context_idr

#ifdef VFP
	bl	vfp_save_state_switch
	mov	x0, x20
#else
	mov	x0, x1
#endif

	/* This returns the thread pointer so no need to save it */
	bl	ptrauth_switch
	/* This returns the thread pcb */
	bl	pmap_switch
	/* Move the new pcb out of the way */
	mov	x4, x0

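	/* Move old, new and mtx back into x0, x1 and x2 */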
	mov	x2, x21
	mov	x1, x20
	mov	x0, x19
#ifdef PERTHREAD_SSP
	/* Update the per-thread stack canary pointer. */
	add	x20, x20, #(TD_MD_CANARY)
	msr	sp_el0, x20
#endif

	/*
	 * Release the old thread.
	 */
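	/* The release ordering makes the context saved above visible before the handoff */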
	stlr	x2, [x0, #TD_LOCK]
#if defined(SCHED_ULE) && defined(SMP)
	/* Spin if TD_LOCK points to a blocked_lock */
	ldr	x2, =_C_LABEL(blocked_lock)
1:
	ldar	x3, [x1, #TD_LOCK]
	cmp	x3, x2
	b.eq	1b
#endif

	/* If we are single stepping, enable it */
	ldr	w5, [x4, #PCB_FLAGS]
	set_step_flag w5, x6

	/* Restore the registers */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldr	x6, [x4, #PCB_TPIDRRO]
	msr	tpidrro_el0, x6
	ldp	x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8]
	ldp	x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8]
	ldp	x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8]
	ldp	x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8]
	ldp	x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8]
	ldp	x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8]

	ret
END(cpu_switch)

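/*
 * Entry point of a newly created thread: call fork_exit() with the
 * callout and argument held in x19 and x20 and the trapframe at the
 * top of the kernel stack, then return to userspace through that
 * trapframe.
 */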
ENTRY(fork_trampoline)
	mov	x0, x19
	mov	x1, x20
	mov	x2, sp
	mov	fp, #0	/* Stack traceback stops here. */
	bl	_C_LABEL(fork_exit)

	/*
	 * Disable interrupts as we are setting userspace specific
	 * state that we won't handle correctly in an interrupt while
	 * in the kernel.
	 */
	msr	daifset, #(DAIF_D | DAIF_INTR)

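	/* curthread: x18 holds the per-CPU pcpu pointer while in the kernel */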
	ldr	x0, [x18, #PC_CURTHREAD]

	/* Set the per-process tcr_el1 fields */
	ldr	x10, [x0, #TD_PROC]
	ldr	x10, [x10, #P_MD_TCR]
	mrs	x11, tcr_el1
	and	x11, x11, #(~MD_TCR_FIELDS)
	orr	x11, x11, x10
	msr	tcr_el1, x11
	/* No isb as the eret below is the context-synchronising event */

	bl	ptrauth_enter_el0

	/* Restore sp, lr, elr, and spsr */
	ldp	x18, lr, [sp, #TF_SP]
	ldp	x10, x11, [sp, #TF_ELR]
	msr	sp_el0, x18
	msr	spsr_el1, x11
	msr	elr_el1, x10

	/* Restore the CPU registers */
	ldp	x0, x1, [sp, #TF_X + 0 * 8]
	ldp	x2, x3, [sp, #TF_X + 2 * 8]
	ldp	x4, x5, [sp, #TF_X + 4 * 8]
	ldp	x6, x7, [sp, #TF_X + 6 * 8]
	ldp	x8, x9, [sp, #TF_X + 8 * 8]
	ldp	x10, x11, [sp, #TF_X + 10 * 8]
	ldp	x12, x13, [sp, #TF_X + 12 * 8]
	ldp	x14, x15, [sp, #TF_X + 14 * 8]
	ldp	x16, x17, [sp, #TF_X + 16 * 8]
	ldp	x18, x19, [sp, #TF_X + 18 * 8]
	ldp	x20, x21, [sp, #TF_X + 20 * 8]
	ldp	x22, x23, [sp, #TF_X + 22 * 8]
	ldp	x24, x25, [sp, #TF_X + 24 * 8]
	ldp	x26, x27, [sp, #TF_X + 26 * 8]
	ldp	x28, x29, [sp, #TF_X + 28 * 8]

	/*
	 * No need to re-enable interrupts; the eret below sets the PSR
	 * to the desired value from the saved spsr anyway.
	 */
	ERET

END(fork_trampoline)

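/*
 * void savectx(struct pcb *pcb)
 *
 * Save the current execution context (callee-saved registers, stack
 * pointer, thread pointer registers and, with VFP, the FP/SIMD state)
 * into the given pcb.
 */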
ENTRY(savectx)
	/* Store the callee-saved registers */
	stp	x19, x20, [x0, #PCB_REGS + (PCB_X19 + 0) * 8]
	stp	x21, x22, [x0, #PCB_REGS + (PCB_X19 + 2) * 8]
	stp	x23, x24, [x0, #PCB_REGS + (PCB_X19 + 4) * 8]
	stp	x25, x26, [x0, #PCB_REGS + (PCB_X19 + 6) * 8]
	stp	x27, x28, [x0, #PCB_REGS + (PCB_X19 + 8) * 8]
	stp	x29, lr, [x0, #PCB_REGS + (PCB_X19 + 10) * 8]
	/* And the old stack pointer */
	mov	x5, sp
	mrs	x6, tpidrro_el0
	str	x6, [x0, #PCB_TPIDRRO]
	mrs	x6, tpidr_el0
	stp	x5, x6, [x0, #PCB_SP]

#ifdef VFP
	/* Store the VFP registers */
	b	vfp_save_state_savectx
#else
	ret
#endif
END(savectx)

GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)