/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/cpuset.h>

#include <dev/psci/psci.h>
#include <dev/psci/smccc.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <vmmapi.h>

#include "bhyve_machdep.h"
#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "gdb.h"
#include "mem.h"
#include "vmexit.h"

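/* Set of vCPUs that are currently powered on, as seen by the PSCI emulation. */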
cpuset_t running_cpumask;

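/*
 * VM_EXITCODE_INST_EMUL: a guest load/store faulted on a guest physical
 * address with no memory backing; emulate it against the MMIO ranges
 * registered in mem.c.
 */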
static int
vmexit_inst_emul(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun)
{
	struct vm_exit *vme;
	struct vie *vie;
	int err;

	vme = vmrun->vm_exit;
	vie = &vme->u.inst_emul.vie;

	err = emulate_mem(vcpu, vme->u.inst_emul.gpa, vie,
	    &vme->u.inst_emul.paging);
	if (err) {
		if (err == ESRCH) {
			EPRINTLN("Unhandled memory access to 0x%lx\n",
			    vme->u.inst_emul.gpa);
		}
		goto fail;
	}

	return (VMEXIT_CONTINUE);

fail:
	fprintf(stderr, "Failed to emulate instruction ");
	FPRINTLN(stderr, "at 0x%lx", vme->pc);
	return (VMEXIT_ABORT);
}

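/*
 * VM_EXITCODE_REG_EMUL: the guest accessed a system register that the kernel
 * does not handle; nothing emulates it here either, so report it and abort.
 */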
static int
vmexit_reg_emul(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun)
{
	struct vm_exit *vme;
	struct vre *vre;

	vme = vmrun->vm_exit;
	vre = &vme->u.reg_emul.vre;

	EPRINTLN("Unhandled register access: pc %#lx syndrome %#x reg %d\n",
	    vme->pc, vre->inst_syndrome, vre->reg);
	return (VMEXIT_ABORT);
}

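/*
 * VM_EXITCODE_SUSPENDED: the VM is being suspended.  Retire this vCPU and
 * exit with a status that encodes the suspend reason (reset, poweroff,
 * halt or destroy).
 */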
static int
vmexit_suspend(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun)
{
	struct vm_exit *vme;
	enum vm_suspend_how how;
	int vcpuid = vcpu_id(vcpu);

	vme = vmrun->vm_exit;
	how = vme->u.suspended.how;

	fbsdrun_deletecpu(vcpuid);

	switch (how) {
	case VM_SUSPEND_RESET:
		exit(0);
	case VM_SUSPEND_POWEROFF:
		if (get_config_bool_default("destroy_on_poweroff", false))
			vm_destroy(ctx);
		exit(1);
	case VM_SUSPEND_HALT:
		exit(2);
	case VM_SUSPEND_DESTROY:
		exit(4);
	default:
		fprintf(stderr, "vmexit_suspend: invalid reason %d\n", how);
		exit(100);
	}
	return (0);	/* NOTREACHED */
}

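/*
 * VM_EXITCODE_DEBUG: park this vCPU in the gdb stub until the debugger
 * resumes it.
 */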
static int
vmexit_debug(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun __unused)
{
	gdb_cpu_suspend(vcpu);
	/*
	 * XXX-MJ sleep for a short period to avoid chewing up the CPU in the
	 * window between activation of the vCPU thread and the STARTUP IPI.
	 */
	usleep(1000);
	return (VMEXIT_CONTINUE);
}

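/* VM_EXITCODE_BOGUS: spurious exit; nothing to do but resume the guest. */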
static int
vmexit_bogus(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun __unused)
{
	return (VMEXIT_CONTINUE);
}

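/*
 * PSCI AFFINITY_INFO: compare the target affinity against each vCPU's MPIDR,
 * masked down to the requested affinity level, and report ON if any matching
 * vCPU is currently running.
 */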
static uint64_t
smccc_affinity_info(uint64_t target_affinity, uint32_t lowest_affinity_level)
{
	uint64_t mask = 0;

	switch (lowest_affinity_level) {
	case 0:
		mask |= CPU_AFF0_MASK;
		/* FALLTHROUGH */
	case 1:
		mask |= CPU_AFF1_MASK;
		/* FALLTHROUGH */
	case 2:
		mask |= CPU_AFF2_MASK;
		/* FALLTHROUGH */
	case 3:
		mask |= CPU_AFF3_MASK;
		break;
	default:
		return (PSCI_RETVAL_INVALID_PARAMS);
	}

	for (int vcpu = 0; vcpu < guest_ncpus; vcpu++) {
		if ((cpu_to_mpidr[vcpu] & mask) == (target_affinity & mask) &&
		    CPU_ISSET(vcpu, &running_cpumask)) {
			/* Return ON if any CPUs are on */
			return (PSCI_AFFINITY_INFO_ON);
		}
	}

	/* No CPUs in the affinity mask are on, return OFF */
	return (PSCI_AFFINITY_INFO_OFF);
}

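/*
 * VM_EXITCODE_SMCCC: the guest made an SMCCC (typically PSCI) call.  Emulate
 * the PSCI 1.0 subset that bhyve supports and return the result in x0.
 */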
static int
vmexit_smccc(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun)
{
	struct vcpu *newvcpu;
	struct vm_exit *vme;
	uint64_t mpidr, smccc_rv;
	enum vm_suspend_how how;
	int error, newcpu;

	/* Return the Unknown Function Identifier by default */
	smccc_rv = SMCCC_RET_NOT_SUPPORTED;

	vme = vmrun->vm_exit;
	switch (vme->u.smccc_call.func_id) {
	case PSCI_FNID_VERSION:
		/* We implement PSCI 1.0 */
		smccc_rv = PSCI_VER(1, 0);
		break;
	case PSCI_FNID_CPU_SUSPEND:
		break;
	case PSCI_FNID_CPU_OFF:
		CPU_CLR_ATOMIC(vcpu_id(vcpu), &running_cpumask);
		vm_suspend_cpu(vcpu);
		break;
	case PSCI_FNID_CPU_ON:
		mpidr = vme->u.smccc_call.args[0];
		for (newcpu = 0; newcpu < guest_ncpus; newcpu++) {
			if (cpu_to_mpidr[newcpu] == mpidr)
				break;
		}

		if (newcpu == guest_ncpus) {
			smccc_rv = PSCI_RETVAL_INVALID_PARAMS;
			break;
		}

		if (CPU_TEST_SET_ATOMIC(newcpu, &running_cpumask)) {
			smccc_rv = PSCI_RETVAL_ALREADY_ON;
			break;
		}

		newvcpu = fbsdrun_vcpu(newcpu);
		assert(newvcpu != NULL);

		/* Set the context ID */
		error = vm_set_register(newvcpu, VM_REG_GUEST_X0,
		    vme->u.smccc_call.args[2]);
		assert(error == 0);

		/* Set the start program counter */
		error = vm_set_register(newvcpu, VM_REG_GUEST_PC,
		    vme->u.smccc_call.args[1]);
		assert(error == 0);

		vm_resume_cpu(newvcpu);

		smccc_rv = PSCI_RETVAL_SUCCESS;
		break;
	case PSCI_FNID_AFFINITY_INFO:
		smccc_rv = smccc_affinity_info(vme->u.smccc_call.args[0],
		    vme->u.smccc_call.args[1]);
		break;
	case PSCI_FNID_SYSTEM_OFF:
	case PSCI_FNID_SYSTEM_RESET:
		if (vme->u.smccc_call.func_id == PSCI_FNID_SYSTEM_OFF)
			how = VM_SUSPEND_POWEROFF;
		else
			how = VM_SUSPEND_RESET;
		error = vm_suspend(ctx, how);
		assert(error == 0 || errno == EALREADY);
		break;
	default:
		break;
	}

	error = vm_set_register(vcpu, VM_REG_GUEST_X0, smccc_rv);
	assert(error == 0);

	return (VMEXIT_CONTINUE);
}

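/*
 * VM_EXITCODE_HYP: an exception was taken to EL2 that the kernel could not
 * handle; reflect it back to the guest as an "unknown reason" exception.
 */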
static int
vmexit_hyp(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_run *vmrun)
{
	/* Raise an unknown reason exception */
	if (vm_inject_exception(vcpu,
	    (EXCP_UNKNOWN << ESR_ELx_EC_SHIFT) | ESR_ELx_IL,
	    vmrun->vm_exit->u.hyp.far_el2) != 0)
		return (VMEXIT_ABORT);

	return (VMEXIT_CONTINUE);
}

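/* VM_EXITCODE_BRK: a brk instruction trapped; hand it to the gdb stub. */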
static int
vmexit_brk(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_run *vmrun)
{
	gdb_cpu_breakpoint(vcpu, vmrun->vm_exit);
	return (VMEXIT_CONTINUE);
}

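/* VM_EXITCODE_SS: a single-step debug exception; hand it to the gdb stub. */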
static int
vmexit_ss(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_run *vmrun)
{
	gdb_cpu_debug(vcpu, vmrun->vm_exit);
	return (VMEXIT_CONTINUE);
}

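/* Dispatch table mapping VM exit codes to their userspace handlers. */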
const vmexit_handler_t vmexit_handlers[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_BOGUS] = vmexit_bogus,
	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
	[VM_EXITCODE_REG_EMUL] = vmexit_reg_emul,
	[VM_EXITCODE_SUSPENDED] = vmexit_suspend,
	[VM_EXITCODE_DEBUG] = vmexit_debug,
	[VM_EXITCODE_SMCCC] = vmexit_smccc,
	[VM_EXITCODE_HYP] = vmexit_hyp,
	[VM_EXITCODE_BRK] = vmexit_brk,
	[VM_EXITCODE_SS] = vmexit_ss,
};