xref: /freebsd/usr.sbin/bhyve/aarch64/vmexit.c (revision e51ed89897da9b940bb4399427aec398dae96fba)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/cpuset.h>

#include <dev/psci/psci.h>
#include <dev/psci/smccc.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <vmmapi.h>

#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "gdb.h"
#include "mem.h"
#include "vmexit.h"

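/*
 * Set of vCPUs that have been started via PSCI CPU_ON; consulted when
 * answering PSCI AFFINITY_INFO queries from the guest.
 */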
static cpuset_t running_cpumask;

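/*
 * Handle an instruction-emulation exit by emulating the faulting load or
 * store against the registered memory ranges.  ESRCH from emulate_mem()
 * means no device claims the guest physical address.
 */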
static int
vmexit_inst_emul(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun)
{
	struct vm_exit *vme;
	struct vie *vie;
	int err;

	vme = vmrun->vm_exit;
	vie = &vme->u.inst_emul.vie;

	err = emulate_mem(vcpu, vme->u.inst_emul.gpa, vie,
	    &vme->u.inst_emul.paging);
	if (err) {
		if (err == ESRCH) {
			EPRINTLN("Unhandled memory access to 0x%lx",
			    vme->u.inst_emul.gpa);
		}
		goto fail;
	}

	return (VMEXIT_CONTINUE);

fail:
	fprintf(stderr, "Failed to emulate instruction ");
	FPRINTLN(stderr, "at 0x%lx", vme->pc);
	return (VMEXIT_ABORT);
}

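/*
 * Handle a register-emulation exit.  No system-register accesses are
 * emulated in userspace, so report the unhandled access and abort.
 */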
static int
vmexit_reg_emul(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun)
{
	struct vm_exit *vme;
	struct vre *vre;

	vme = vmrun->vm_exit;
	vre = &vme->u.reg_emul.vre;

	EPRINTLN("Unhandled register access: pc %#lx syndrome %#x reg %d",
	    vme->pc, vre->inst_syndrome, vre->reg);
	return (VMEXIT_ABORT);
}

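/*
 * Handle a suspend exit: tear down this vCPU and terminate bhyve with an
 * exit status encoding the suspend reason (0 = reset, 1 = poweroff,
 * 2 = halt).
 */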
static int
vmexit_suspend(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun)
{
	struct vm_exit *vme;
	enum vm_suspend_how how;
	int vcpuid = vcpu_id(vcpu);

	vme = vmrun->vm_exit;
	how = vme->u.suspended.how;

	fbsdrun_deletecpu(vcpuid);

	switch (how) {
	case VM_SUSPEND_RESET:
		exit(0);
	case VM_SUSPEND_POWEROFF:
		if (get_config_bool_default("destroy_on_poweroff", false))
			vm_destroy(ctx);
		exit(1);
	case VM_SUSPEND_HALT:
		exit(2);
	default:
		fprintf(stderr, "vmexit_suspend: invalid reason %d\n", how);
		exit(100);
	}
	return (0);	/* NOTREACHED */
}

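/*
 * Handle a debug exit by handing the suspended vCPU to the gdb stub.
 */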
static int
vmexit_debug(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun __unused)
{
	gdb_cpu_suspend(vcpu);
	/*
	 * XXX-MJ sleep for a short period to avoid chewing up the CPU in the
	 * window between activation of the vCPU thread and the STARTUP IPI.
	 */
	usleep(1000);
	return (VMEXIT_CONTINUE);
}

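/* Nothing to do for a spurious exit; just resume the vCPU. */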
static int
vmexit_bogus(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun __unused)
{
	return (VMEXIT_CONTINUE);
}

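/*
 * Implement PSCI AFFINITY_INFO: build a mask covering the requested
 * affinity level and above, derive each vCPU's MPIDR-style affinity value
 * from its ID and report ON if any matching vCPU is running.
 */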
static uint64_t
smccc_affinity_info(uint64_t target_affinity, uint32_t lowest_affinity_level)
{
	uint64_t cpu_aff, mask = 0;

	switch (lowest_affinity_level) {
	case 0:
		mask |= CPU_AFF0_MASK;
		/* FALLTHROUGH */
	case 1:
		mask |= CPU_AFF1_MASK;
		/* FALLTHROUGH */
	case 2:
		mask |= CPU_AFF2_MASK;
		/* FALLTHROUGH */
	case 3:
		mask |= CPU_AFF3_MASK;
		break;
	default:
		return (PSCI_RETVAL_INVALID_PARAMS);
	}

	for (int vcpu = 0; vcpu < guest_ncpus; vcpu++) {
		/* TODO: We should get this from the kernel */
		cpu_aff = (vcpu & 0xf) << MPIDR_AFF0_SHIFT |
		    ((vcpu >> 4) & 0xff) << MPIDR_AFF1_SHIFT |
		    ((vcpu >> 12) & 0xff) << MPIDR_AFF2_SHIFT |
		    (uint64_t)((vcpu >> 20) & 0xff) << MPIDR_AFF3_SHIFT;

		if ((cpu_aff & mask) == (target_affinity & mask) &&
		    CPU_ISSET(vcpu, &running_cpumask)) {
			/* Return ON if any CPUs are on */
			return (PSCI_AFFINITY_INFO_ON);
		}
	}

	/* No CPUs in the affinity mask are on, return OFF */
	return (PSCI_AFFINITY_INFO_OFF);
}

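/*
 * Handle an SMCCC call from the guest.  bhyve implements PSCI 1.0 here:
 * CPU_ON starts a secondary vCPU, AFFINITY_INFO reports vCPU state and
 * SYSTEM_OFF/SYSTEM_RESET suspend the VM.  Unrecognized function IDs
 * return SMCCC_RET_NOT_SUPPORTED.  The result is written to the caller's
 * x0 register.
 */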
static int
vmexit_smccc(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun)
{
	struct vcpu *newvcpu;
	struct vm_exit *vme;
	uint64_t newcpu, smccc_rv;
	enum vm_suspend_how how;
	int error;

	/* Return the Unknown Function Identifier by default */
	smccc_rv = SMCCC_RET_NOT_SUPPORTED;

	vme = vmrun->vm_exit;
	switch (vme->u.smccc_call.func_id) {
	case PSCI_FNID_VERSION:
		/* We implement PSCI 1.0 */
		smccc_rv = PSCI_VER(1, 0);
		break;
	case PSCI_FNID_CPU_SUSPEND:
	case PSCI_FNID_CPU_OFF:
		break;
	case PSCI_FNID_CPU_ON:
		newcpu = vme->u.smccc_call.args[0];
		if (newcpu >= (uint64_t)guest_ncpus) {
			smccc_rv = PSCI_RETVAL_INVALID_PARAMS;
			break;
		}

		if (CPU_ISSET(newcpu, &running_cpumask)) {
			smccc_rv = PSCI_RETVAL_ALREADY_ON;
			break;
		}

		newvcpu = fbsdrun_vcpu(newcpu);
		assert(newvcpu != NULL);

		/* Set the context ID */
		error = vm_set_register(newvcpu, VM_REG_GUEST_X0,
		    vme->u.smccc_call.args[2]);
		assert(error == 0);

		/* Set the start program counter */
		error = vm_set_register(newvcpu, VM_REG_GUEST_PC,
		    vme->u.smccc_call.args[1]);
		assert(error == 0);

		vm_resume_cpu(newvcpu);
		CPU_SET_ATOMIC(newcpu, &running_cpumask);

		smccc_rv = PSCI_RETVAL_SUCCESS;
		break;
	case PSCI_FNID_AFFINITY_INFO:
		smccc_rv = smccc_affinity_info(vme->u.smccc_call.args[0],
		    vme->u.smccc_call.args[1]);
		break;
	case PSCI_FNID_SYSTEM_OFF:
	case PSCI_FNID_SYSTEM_RESET:
		if (vme->u.smccc_call.func_id == PSCI_FNID_SYSTEM_OFF)
			how = VM_SUSPEND_POWEROFF;
		else
			how = VM_SUSPEND_RESET;
		error = vm_suspend(ctx, how);
		assert(error == 0 || errno == EALREADY);
		break;
	default:
		break;
	}

	error = vm_set_register(vcpu, VM_REG_GUEST_X0, smccc_rv);
	assert(error == 0);

	return (VMEXIT_CONTINUE);
}

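/*
 * An unhandled trap to the hypervisor: reflect an "unknown reason"
 * exception back into the guest.
 */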
static int
vmexit_hyp(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_run *vmrun)
{
	/* Raise an unknown reason exception */
	if (vm_inject_exception(vcpu,
	    (EXCP_UNKNOWN << ESR_ELx_EC_SHIFT) | ESR_ELx_IL,
	    vmrun->vm_exit->u.hyp.far_el2) != 0)
		return (VMEXIT_ABORT);

	return (VMEXIT_CONTINUE);
}

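/* A brk instruction trapped; forward the breakpoint to the gdb stub. */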
static int
vmexit_brk(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_run *vmrun)
{
	gdb_cpu_breakpoint(vcpu, vmrun->vm_exit);
	return (VMEXIT_CONTINUE);
}

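/* A single-step debug exception; forward it to the gdb stub. */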
static int
vmexit_ss(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_run *vmrun)
{
	gdb_cpu_debug(vcpu, vmrun->vm_exit);
	return (VMEXIT_CONTINUE);
}

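/*
 * Dispatch table indexed by vm_exit exit code.  Handlers return
 * VMEXIT_CONTINUE to resume the vCPU or VMEXIT_ABORT on a fatal error.
 */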
const vmexit_handler_t vmexit_handlers[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_BOGUS] = vmexit_bogus,
	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
	[VM_EXITCODE_REG_EMUL] = vmexit_reg_emul,
	[VM_EXITCODE_SUSPENDED] = vmexit_suspend,
	[VM_EXITCODE_DEBUG] = vmexit_debug,
	[VM_EXITCODE_SMCCC] = vmexit_smccc,
	[VM_EXITCODE_HYP] = vmexit_hyp,
	[VM_EXITCODE_BRK] = vmexit_brk,
	[VM_EXITCODE_SS] = vmexit_ss,
};