xref: /freebsd/sys/arm/arm/vm_machdep.c (revision f66a407de25eaa4c58b4f6f02086d55141593b63)
1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1982, 1986 The Regents of the University of California.
5  * Copyright (c) 1989, 1990 William Jolitz
6  * Copyright (c) 1994 John Dyson
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * the Systems Programming Group of the University of Utah Computer
11  * Science Department, and William Jolitz.
12  *
 * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *	This product includes software developed by the University of
24  *	California, Berkeley and its contributors.
25  * 4. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
41  */
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/proc.h>
49 #include <sys/socketvar.h>
50 #include <sys/syscall.h>
51 #include <sys/sysctl.h>
52 #include <sys/sysent.h>
53 #include <sys/unistd.h>
54 
55 #include <machine/cpu.h>
56 #include <machine/frame.h>
57 #include <machine/pcb.h>
58 #include <machine/sysarch.h>
59 #include <sys/lock.h>
60 #include <sys/mutex.h>
61 
62 #include <vm/vm.h>
63 #include <vm/pmap.h>
64 #include <vm/vm_extern.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_page.h>
67 #include <vm/vm_map.h>
68 #include <vm/vm_param.h>
69 #include <vm/vm_pageout.h>
70 #include <vm/uma.h>
71 #include <vm/uma_int.h>
72 
73 #include <machine/md_var.h>
74 #include <machine/vfp.h>
75 
/*
 * struct switchframe and trapframe must both be a multiple of 8
 * for correct stack alignment.
 */
_Static_assert((sizeof(struct switchframe) % 8) == 0, "Bad alignment");
_Static_assert((sizeof(struct trapframe) % 8) == 0, "Bad alignment");

/*
 * Default FPSCR value given to new threads: default-NaN (DN) and
 * flush-to-zero (FZ) modes enabled, per the VFPSCR_* flag names.
 */
uint32_t initial_fpscr = VFPSCR_DN | VFPSCR_FZ;
84 
85 /*
86  * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
89  */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct mdproc *mdp2;

	/* Only a true fork (new process) needs machine-dependent setup. */
	if ((flags & RFPROC) == 0)
		return;

	/* Point the pcb to the top of the stack */
	pcb2 = (struct pcb *)
	    (td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef VFP
	/*
	 * Store the actual state of VFP into td1's pcb before it is
	 * cloned below, so the child starts from td1's live FP state.
	 */
	if (curthread == td1) {
		if ((td1->td_pcb->pcb_fpflags & PCB_FP_STARTED) != 0)
			vfp_save_state(td1, td1->td_pcb);
	}
#endif
	td2->td_pcb = pcb2;

	/* Clone td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Point to mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));

	/* Point the frame to the stack in front of pcb and copy td1's frame */
	td2->td_frame = (struct trapframe *)pcb2 - 1;
	*td2->td_frame = *td1->td_frame;

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * sf_r4/sf_r5 carry the fork_return() callout and its argument
	 * for fork_trampoline(), which sf_lr makes the resume point.
	 */
	pmap_set_pcb_pagedir(vmspace_pmap(p2->p_vmspace), pcb2);
	pcb2->pcb_regs.sf_r4 = (register_t)fork_return;
	pcb2->pcb_regs.sf_r5 = (register_t)td2;
	pcb2->pcb_regs.sf_lr = (register_t)fork_trampoline;
	pcb2->pcb_regs.sf_sp = STACKALIGN(td2->td_frame);
	pcb2->pcb_regs.sf_tpidrurw = (register_t)get_tls();

#ifdef VFP
	vfp_new_thread(td2, td1, true);
#endif

	/*
	 * In the child, fork() returns 0 with the carry (syscall error)
	 * bit clear.
	 */
	tf = td2->td_frame;
	tf->tf_spsr &= ~PSR_C;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = PSR_SVC32_MODE;
}
148 
149 void
cpu_set_syscall_retval(struct thread * td,int error)150 cpu_set_syscall_retval(struct thread *td, int error)
151 {
152 	struct trapframe *frame;
153 
154 	frame = td->td_frame;
155 	switch (error) {
156 	case 0:
157 		frame->tf_r0 = td->td_retval[0];
158 		frame->tf_r1 = td->td_retval[1];
159 		frame->tf_spsr &= ~PSR_C;   /* carry bit */
160 		break;
161 	case ERESTART:
162 		/*
163 		 * Reconstruct the pc to point at the swi.
164 		 */
165 		if ((frame->tf_spsr & PSR_T) != 0)
166 			frame->tf_pc -= THUMB_INSN_SIZE;
167 		else
168 			frame->tf_pc -= INSN_SIZE;
169 		break;
170 	case EJUSTRETURN:
171 		/* nothing to do */
172 		break;
173 	default:
174 		frame->tf_r0 = error;
175 		frame->tf_spsr |= PSR_C;    /* carry bit */
176 		break;
177 	}
178 }
179 
180 /*
181  * Initialize machine state, mostly pcb and trap frame for a new
182  * thread, about to return to userspace.  Put enough state in the new
183  * thread's PCB to get it to go back to the fork_return(), which
184  * finalizes the thread state and handles peculiarities of the first
185  * return to userspace for the new thread.
186  */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{

	/* Start from copies of td0's trap frame and pcb. */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));

	/*
	 * Arrange for the new thread to enter fork_trampoline() (via
	 * sf_lr), which calls fork_return(td) using sf_r4/sf_r5.
	 */
	td->td_pcb->pcb_regs.sf_r4 = (register_t)fork_return;
	td->td_pcb->pcb_regs.sf_r5 = (register_t)td;
	td->td_pcb->pcb_regs.sf_lr = (register_t)fork_trampoline;
	td->td_pcb->pcb_regs.sf_sp = STACKALIGN(td->td_frame);

	/* Return 0 with the carry (syscall error) bit clear. */
	td->td_frame->tf_spsr &= ~PSR_C;
	td->td_frame->tf_r0 = 0;

#ifdef VFP
	vfp_new_thread(td, td0, false);
#endif

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = PSR_SVC32_MODE;
}
210 
211 /*
 * Set the machine state for performing an upcall that starts
213  * the entry function with the given argument.
214  */
215 int
cpu_set_upcall(struct thread * td,void (* entry)(void *),void * arg,stack_t * stack)216 cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
217 	stack_t *stack)
218 {
219 	struct trapframe *tf = td->td_frame;
220 
221 	tf->tf_usr_sp = STACKALIGN((int)stack->ss_sp + stack->ss_size);
222 	tf->tf_pc = (int)entry;
223 	tf->tf_r0 = (int)arg;
224 	tf->tf_spsr = PSR_USR32_MODE;
225 	if ((register_t)entry & 1)
226 		tf->tf_spsr |= PSR_T;
227 	return (0);
228 }
229 
230 int
cpu_set_user_tls(struct thread * td,void * tls_base)231 cpu_set_user_tls(struct thread *td, void *tls_base)
232 {
233 
234 	td->td_pcb->pcb_regs.sf_tpidrurw = (register_t)tls_base;
235 	if (td == curthread)
236 		set_tls(tls_base);
237 	return (0);
238 }
239 
/*
 * Machine-dependent thread-exit hook; nothing to do on arm.
 */
void
cpu_thread_exit(struct thread *td)
{
}
244 
245 void
cpu_thread_alloc(struct thread * td)246 cpu_thread_alloc(struct thread *td)
247 {
248 	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
249 	    PAGE_SIZE) - 1;
250 	/*
251 	 * Ensure td_frame is aligned to an 8 byte boundary as it will be
252 	 * placed into the stack pointer which must be 8 byte aligned in
253 	 * the ARM EABI.
254 	 */
255 	td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb) - 1;
256 }
257 
/*
 * Machine-dependent thread-free hook; nothing to release on arm.
 */
void
cpu_thread_free(struct thread *td)
{
}
262 
/*
 * Machine-dependent thread-recycle hook; nothing to reset on arm.
 */
void
cpu_thread_clean(struct thread *td)
{
}
267 
268 /*
269  * Intercept the return address from a freshly forked process that has NOT
270  * been scheduled yet.
271  *
272  * This is needed to make kernel threads stay in kernel mode.
273  */
274 void
cpu_fork_kthread_handler(struct thread * td,void (* func)(void *),void * arg)275 cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
276 {
277 	td->td_pcb->pcb_regs.sf_r4 = (register_t)func;	/* function */
278 	td->td_pcb->pcb_regs.sf_r5 = (register_t)arg;	/* first arg */
279 }
280 
281 void
cpu_update_pcb(struct thread * td)282 cpu_update_pcb(struct thread *td)
283 {
284 	MPASS(td == curthread);
285 	td->td_pcb->pcb_regs.sf_tpidrurw = (register_t)get_tls();
286 }
287 
/*
 * Machine-dependent process-exit hook; nothing to do on arm.
 */
void
cpu_exit(struct thread *td)
{
}
292 
293 bool
cpu_exec_vmspace_reuse(struct proc * p __unused,vm_map_t map __unused)294 cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
295 {
296 
297 	return (true);
298 }
299 
300 int
cpu_procctl(struct thread * td __unused,int idtype __unused,id_t id __unused,int com __unused,void * data __unused)301 cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
302     int com __unused, void *data __unused)
303 {
304 
305 	return (EINVAL);
306 }
307 
/*
 * Machine-dependent membarrier() core-sync hook; empty on arm.
 */
void
cpu_sync_core(void)
{
}
312