/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015 Mihai Carabas <mihai.carabas@gmail.com>
 * Copyright (c) 2024 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by the University of Cambridge Computer
 * Laboratory (Department of Computer Science and Technology) under Innovate
 * UK project 105694, "Digital Security by Design (DSbD) Technology Platform
 * Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _VMM_H_
#define _VMM_H_

#include <sys/param.h>
#include <sys/cpuset.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include "pte.h"
#include "pmap.h"

struct vcpu;

enum vm_suspend_how {
	VM_SUSPEND_NONE,
	VM_SUSPEND_RESET,
	VM_SUSPEND_POWEROFF,
	VM_SUSPEND_HALT,
	VM_SUSPEND_LAST
};

/*
 * Identifiers for architecturally defined registers.
 */
enum vm_reg_name {
	VM_REG_GUEST_ZERO = 0,
	VM_REG_GUEST_RA,
	VM_REG_GUEST_SP,
	VM_REG_GUEST_GP,
	VM_REG_GUEST_TP,
	VM_REG_GUEST_T0,
	VM_REG_GUEST_T1,
	VM_REG_GUEST_T2,
	VM_REG_GUEST_S0,
	VM_REG_GUEST_S1,
	VM_REG_GUEST_A0,
	VM_REG_GUEST_A1,
	VM_REG_GUEST_A2,
	VM_REG_GUEST_A3,
	VM_REG_GUEST_A4,
	VM_REG_GUEST_A5,
	VM_REG_GUEST_A6,
	VM_REG_GUEST_A7,
	VM_REG_GUEST_S2,
	VM_REG_GUEST_S3,
	VM_REG_GUEST_S4,
	VM_REG_GUEST_S5,
	VM_REG_GUEST_S6,
	VM_REG_GUEST_S7,
	VM_REG_GUEST_S8,
	VM_REG_GUEST_S9,
	VM_REG_GUEST_S10,
	VM_REG_GUEST_S11,
	VM_REG_GUEST_T3,
	VM_REG_GUEST_T4,
	VM_REG_GUEST_T5,
	VM_REG_GUEST_T6,
	VM_REG_GUEST_SEPC,
	VM_REG_LAST
};

#define	VM_INTINFO_VECTOR(info)	((info) & 0xff)
#define	VM_INTINFO_DEL_ERRCODE	0x800
#define	VM_INTINFO_RSVD		0x7ffff000
#define	VM_INTINFO_VALID	0x80000000
#define	VM_INTINFO_TYPE		0x700
#define	VM_INTINFO_HWINTR	(0 << 8)
#define	VM_INTINFO_NMI		(2 << 8)
#define	VM_INTINFO_HWEXCEPTION	(3 << 8)
#define	VM_INTINFO_SWINTR	(4 << 8)
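
/*
 * A minimal sketch (not part of this header's KPI) of unpacking an
 * interrupt-info word with the macros above; "info" stands for a
 * hypothetical packed value.
 *
 *	if (info & VM_INTINFO_VALID) {
 *		int vector = VM_INTINFO_VECTOR(info);
 *		bool is_nmi = (info & VM_INTINFO_TYPE) == VM_INTINFO_NMI;
 *	}
 */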

#define	VM_MAX_NAMELEN	32
#define	VM_MAX_SUFFIXLEN 15

#ifdef _KERNEL

struct vm;
struct vm_exception;
struct vm_exit;
struct vm_run;
struct vm_object;
struct vm_guest_paging;
struct vm_aplic_descr;
struct pmap;

struct vm_eventinfo {
	void	*rptr;		/* rendezvous cookie */
	int	*sptr;		/* suspend cookie */
	int	*iptr;		/* reqidle cookie */
};

int vm_create(const char *name, struct vm **retvm);
struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
void vm_disable_vcpu_creation(struct vm *vm);
void vm_slock_vcpus(struct vm *vm);
void vm_unlock_vcpus(struct vm *vm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
const char *vm_name(struct vm *vm);

/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
void vm_slock_memsegs(struct vm *vm);
void vm_xlock_memsegs(struct vm *vm);
void vm_unlock_memsegs(struct vm *vm);
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot, int flags);
int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
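
/*
 * A minimal usage sketch of a map update under the exclusive memsegs
 * lock; "ident", "len" and "gpa" are illustrative only.
 *
 *	vm_xlock_memsegs(vm);
 *	error = vm_alloc_memseg(vm, ident, len, true);
 *	if (error == 0)
 *		error = vm_mmap_memseg(vm, gpa, ident, 0, len,
 *		    VM_PROT_ALL, 0);
 *	vm_unlock_memsegs(vm);
 */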

/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen. This acts like a read lock on the guest memory map since any
 * modification requires *all* vcpus to be frozen.
 */
int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void vm_gpa_release(void *cookie);
bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);
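
/*
 * A sketch of walking the guest memory map with vm_mmap_getnext();
 * variable names are illustrative.
 *
 *	vm_paddr_t gpa = 0;
 *	vm_ooffset_t segoff;
 *	size_t len;
 *	int segid, prot, flags;
 *
 *	while (vm_mmap_getnext(vm, &gpa, &segid, &segoff, &len,
 *	    &prot, &flags) == 0)
 *		gpa += len;
 */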

int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *is_fault);

uint16_t vm_get_maxcpus(struct vm *vm);
void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus);
int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus);
int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
int vm_run(struct vcpu *vcpu);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
void *vm_get_cookie(struct vm *vm);
int vcpu_vcpuid(struct vcpu *vcpu);
void *vcpu_get_cookie(struct vcpu *vcpu);
struct vm *vcpu_vm(struct vcpu *vcpu);
struct vcpu *vm_vcpu(struct vm *vm, int cpu);
int vm_get_capability(struct vcpu *vcpu, int type, int *val);
int vm_set_capability(struct vcpu *vcpu, int type, int val);
int vm_activate_cpu(struct vcpu *vcpu);
int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu);
int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu);
int vm_inject_exception(struct vcpu *vcpu, uint64_t scause);
int vm_attach_aplic(struct vm *vm, struct vm_aplic_descr *descr);
int vm_assert_irq(struct vm *vm, uint32_t irq);
int vm_deassert_irq(struct vm *vm, uint32_t irq);
int vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot,
    int func);
struct vm_exit *vm_exitinfo(struct vcpu *vcpu);
void vm_exit_suspended(struct vcpu *vcpu, uint64_t pc);
void vm_exit_debug(struct vcpu *vcpu, uint64_t pc);
void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t pc);
void vm_exit_astpending(struct vcpu *vcpu, uint64_t pc);
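
/*
 * A minimal sketch: stepping the guest past an emulated ecall (a 4-byte
 * instruction) by advancing sepc; error handling elided.
 *
 *	uint64_t sepc;
 *
 *	vm_get_register(vcpu, VM_REG_GUEST_SEPC, &sepc);
 *	vm_set_register(vcpu, VM_REG_GUEST_SEPC, sepc + 4);
 */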

cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_debug_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);

static __inline int
vcpu_rendezvous_pending(struct vm_eventinfo *info)
{

	return (*((uintptr_t *)(info->rptr)) != 0);
}

static __inline int
vcpu_suspended(struct vm_eventinfo *info)
{

	return (*info->sptr);
}
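
/*
 * A sketch of how a run loop might poll these cookies before entering
 * the guest; "evinfo" and "pc" are illustrative.
 *
 *	if (vcpu_suspended(evinfo)) {
 *		vm_exit_suspended(vcpu, pc);
 *		break;
 *	}
 *	if (vcpu_rendezvous_pending(evinfo)) {
 *		vm_exit_rendezvous(vcpu, pc);
 *		break;
 *	}
 */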

int vcpu_debugged(struct vcpu *vcpu);

enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle);
enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);

static int __inline
vcpu_is_running(struct vcpu *vcpu, int *hostcpu)
{
	return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_PROC_H_
static int __inline
vcpu_should_yield(struct vcpu *vcpu)
{
	struct thread *td;

	td = curthread;
	return (td->td_ast != 0 || td->td_owepreempt != 0);
}
#endif

void *vcpu_stats(struct vcpu *vcpu);
void vcpu_notify_event(struct vcpu *vcpu);

enum vm_reg_name vm_segment_name(int seg_encoding);

#endif /* _KERNEL */

#define	VM_DIR_READ	0
#define	VM_DIR_WRITE	1

#define	VM_GP_M_MASK		0x1f
#define	VM_GP_MMU_ENABLED	(1 << 5)

struct vm_guest_paging {
	int	flags;
	int	padding;
};
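
/*
 * A sketch of decoding the paging flags; treating the low bits as the
 * guest privilege mode is an assumption for illustration.
 *
 *	bool mmu_on = (paging->flags & VM_GP_MMU_ENABLED) != 0;
 *	int mode = paging->flags & VM_GP_M_MASK;
 */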

struct vie {
	uint8_t access_size:4, sign_extend:1, dir:1, unused:2;
	enum vm_reg_name reg;
};
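
/*
 * A sketch of a vie describing a 4-byte sign-extended MMIO load that
 * targets a0; the values are illustrative.
 *
 *	vie.access_size = 4;
 *	vie.sign_extend = 1;
 *	vie.dir = VM_DIR_READ;
 *	vie.reg = VM_REG_GUEST_A0;
 */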

struct vre {
	uint32_t inst_syndrome;
	uint8_t dir:1, unused:7;
	enum vm_reg_name reg;
};

/*
 * Identifiers for optional vmm capabilities
 */
enum vm_cap_type {
	VM_CAP_UNRESTRICTED_GUEST,
	VM_CAP_MAX
};

enum vm_exitcode {
	VM_EXITCODE_BOGUS,
	VM_EXITCODE_ECALL,
	VM_EXITCODE_HYP,
	VM_EXITCODE_PAGING,
	VM_EXITCODE_SUSPENDED,
	VM_EXITCODE_DEBUG,
	VM_EXITCODE_INST_EMUL,
	VM_EXITCODE_WFI,
	VM_EXITCODE_MAX
};

struct vm_exit {
	uint64_t scause;
	uint64_t sepc;
	uint64_t stval;
	uint64_t htval;
	uint64_t htinst;
	enum vm_exitcode exitcode;
	int inst_length;
	uint64_t pc;
	union {
		struct {
			uint64_t gpa;
		} paging;

		struct {
			uint64_t gpa;
			struct vm_guest_paging paging;
			struct vie vie;
		} inst_emul;

		struct {
			uint64_t args[8];
		} ecall;

		struct {
			enum vm_suspend_how how;
		} suspended;

		struct {
			uint64_t scause;
		} hyp;
	} u;
};
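
/*
 * A sketch of dispatching on the exit reason after vm_run() returns;
 * only a few cases are shown.
 *
 *	struct vm_exit *vme = vm_exitinfo(vcpu);
 *
 *	switch (vme->exitcode) {
 *	case VM_EXITCODE_ECALL:
 *		// SBI call: arguments are in vme->u.ecall.args[].
 *		break;
 *	case VM_EXITCODE_PAGING:
 *		// Fault at guest-physical address vme->u.paging.gpa.
 *		break;
 *	default:
 *		break;
 *	}
 */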

#endif /* _VMM_H_ */