/*-
 * Copyright (c) 2015-2018 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/signal.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/riscvreg.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/pcb.h>
#include <machine/frame.h>
#include <machine/sbi.h>

#if __riscv_xlen == 64
#define	TP_OFFSET	16	/* sizeof(struct tcb) */
#endif

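/*
 * Set up td_pcb and td_frame on the thread's kernel stack.  A rough sketch
 * of the resulting layout (the stack grows downward):
 *
 *	+-----------------------+ <- td_kstack + td_kstack_pages * PAGE_SIZE
 *	| struct pcb            | <- td_pcb
 *	+-----------------------+
 *	| (alignment padding)   |
 *	| struct kernframe      |
 *	+-----------------------+ <- td_frame + TF_SIZE (STACKALIGN'ed)
 *	| trap frame (TF_SIZE)  | <- td_frame
 *	+-----------------------+
 *	| remaining kstack ...  |
 */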
static void
cpu_set_pcb_frame(struct thread *td)
{
	td->td_pcb = (struct pcb *)((char *)td->td_kstack +
	    td->td_kstack_pages * PAGE_SIZE) - 1;

	/*
	 * td->td_frame + TF_SIZE will be the saved kernel stack pointer whilst
	 * in userspace, so keep it aligned so it's also aligned when we
	 * subtract TF_SIZE in the trap handler (and here for the initial stack
	 * pointer).  This also keeps the struct kernframe just afterwards
	 * aligned no matter what's in it or struct pcb.
	 *
	 * NB: TF_SIZE, not sizeof(struct trapframe), as we need the rounded
	 * value to match the trap handler.
	 */
	td->td_frame = (struct trapframe *)(STACKALIGN(
	    (char *)td->td_pcb - sizeof(struct kernframe)) - TF_SIZE);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;

	if ((flags & RFPROC) == 0)
		return;

	/* RISCVTODO: save the FPU state here */

	cpu_set_pcb_frame(td2);

	pcb2 = td2->td_pcb;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	tf = td2->td_frame;
	bcopy(td1->td_frame, tf, sizeof(*tf));

	/* Clear syscall error flag */
	tf->tf_t[0] = 0;

	/* Arguments for child */
	tf->tf_a[0] = 0;
	tf->tf_a[1] = 0;
	tf->tf_sstatus |= (SSTATUS_SPIE); /* Enable interrupts. */
	tf->tf_sstatus &= ~(SSTATUS_SPP); /* User mode. */

	/*
	 * Set up the switch frame so that the new thread resumes in
	 * fork_trampoline(), which hands fork_return() and td2 (saved in
	 * s0/s1 here) to fork_exit().
	 */
	td2->td_pcb->pcb_s[0] = (uintptr_t)fork_return;
	td2->td_pcb->pcb_s[1] = (uintptr_t)td2;
	td2->td_pcb->pcb_ra = (uintptr_t)fork_trampoline;
	td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame;

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_sstatus_ie = (SSTATUS_SIE);
}

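/*
 * Request a cold reboot through the SBI firmware; spin in case the SBI
 * implementation does not actually reset us.
 */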
void
cpu_reset(void)
{

	sbi_system_reset(SBI_SRST_TYPE_COLD_REBOOT, SBI_SRST_REASON_NONE);

	while (1);
}

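/*
 * Write a system call's result back into the trap frame: return values go
 * in a0/a1, and t0 carries the error flag (0 on success, 1 on error).
 * ERESTART rewinds sepc by the size of the ecall instruction (4 bytes) so
 * the system call is re-issued on return to userspace.
 */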
void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *frame;

	frame = td->td_frame;

	if (__predict_true(error == 0)) {
		frame->tf_a[0] = td->td_retval[0];
		frame->tf_a[1] = td->td_retval[1];
		frame->tf_t[0] = 0;		/* syscall succeeded */
		return;
	}

	switch (error) {
	case ERESTART:
		frame->tf_sepc -= 4;		/* prev instruction */
		break;
	case EJUSTRETURN:
		break;
	default:
		frame->tf_a[0] = error;
		frame->tf_t[0] = 1;		/* syscall error */
		break;
	}
}

/*
 * Initialize machine state, mostly the pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to fork_return(), which finalizes the thread
 * state and handles the peculiarities of the first return to userspace
 * for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));

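	/*
	 * As in cpu_fork(), have the new thread resume in fork_trampoline()
	 * with fork_return() and td as its callout and argument.
	 */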
	td->td_pcb->pcb_s[0] = (uintptr_t)fork_return;
	td->td_pcb->pcb_s[1] = (uintptr_t)td;
	td->td_pcb->pcb_ra = (uintptr_t)fork_trampoline;
	td->td_pcb->pcb_sp = (uintptr_t)td->td_frame;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_sstatus_ie = (SSTATUS_SIE);
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
int
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
	struct trapframe *tf;

	tf = td->td_frame;

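	/*
	 * Start the thread at entry(arg), with sp at the aligned top of
	 * the supplied stack.
	 */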
	tf->tf_sp = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size);
	tf->tf_sepc = (register_t)entry;
	tf->tf_a[0] = (register_t)arg;
	return (0);
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	if ((uintptr_t)tls_base >= VM_MAXUSER_ADDRESS)
		return (EINVAL);

	/*
	 * The user TLS is set by modifying the trapframe's tp value, which
	 * will be restored when returning to userspace.  The RISC-V TLS ABI
	 * expects tp to point just past the thread control block, so add
	 * TP_OFFSET (sizeof(struct tcb)) to the supplied base.
	 */
	td->td_frame->tf_tp = (register_t)tls_base + TP_OFFSET;

	return (0);
}

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	cpu_set_pcb_frame(td);
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{

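	/*
	 * Have fork_trampoline() run func(arg) in the kernel instead of
	 * returning to userspace.
	 */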
	td->td_pcb->pcb_s[0] = (uintptr_t)func;
	td->td_pcb->pcb_s[1] = (uintptr_t)arg;
	td->td_pcb->pcb_ra = (uintptr_t)fork_trampoline;
	td->td_pcb->pcb_sp = (uintptr_t)td->td_frame;
}

void
cpu_update_pcb(struct thread *td)
{
}

void
cpu_exit(struct thread *td)
{
}

bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{

	return (true);
}

int
cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
    int com __unused, void *data __unused)
{

	return (EINVAL);
}

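/*
 * fence.i synchronizes the instruction and data streams on the current
 * hart, making preceding stores visible to subsequent instruction fetches.
 */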
void
cpu_sync_core(void)
{
	fence_i();
}