/* xref: /freebsd/sys/amd64/vmm/vmm_dev_machdep.c (revision 0225c6d85d0d4250ed304b31e6ce855fc5d93f15) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/ioccom.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_snapshot.h>
#include <x86/apicreg.h>

#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_mem.h>
#include <dev/vmm/vmm_stat.h>

#include "vmm_lapic.h"
#include "vmm_mem.h"
#include "io/ppt.h"
#include "io/vatpic.h"
#include "io/vioapic.h"
#include "io/vhpet.h"
#include "io/vrtc.h"
#ifdef COMPAT_FREEBSD13
/*
 * Compatibility shims for the FreeBSD 13 versions of several vmm ioctls.
 * These preserve the old argument-structure layouts so that binaries
 * built against the 13.x headers keep working.  Each struct below mirrors
 * the 13.x layout of the corresponding current structure, and each
 * VM_*_13 macro re-derives the old ioctl number from that layout.
 */

/* 13.x layout of the VM_STATS argument (fixed-size stat buffer). */
struct vm_stats_13 {
	int		cpuid;				/* in */
	int		num_entries;			/* out */
	struct timeval	tv;
	uint64_t	statbuf[MAX_VM_STATS];
};

#define	VM_STATS_13	_IOWR('v', IOCNUM_VM_STATS, struct vm_stats_13)

/*
 * 13.x snapshot metadata: identical to the current struct vm_snapshot_meta
 * except for a leading 'ctx' pointer that is no longer used.  The ioctl
 * handler exploits this by treating &dev_data as a struct vm_snapshot_meta.
 */
struct vm_snapshot_meta_13 {
	void *ctx;			/* unused */
	void *dev_data;
	const char *dev_name;      /* identify userspace devices */
	enum snapshot_req dev_req; /* identify kernel structs */

	struct vm_snapshot_buffer buffer;

	enum vm_snapshot_op op;
};

#define VM_SNAPSHOT_REQ_13 \
	_IOWR('v', IOCNUM_SNAPSHOT_REQ, struct vm_snapshot_meta_13)

/*
 * 13.x IPI exit payload: destination mask was a fixed 256-bit set rather
 * than a cpuset_t, so at most 256 vCPUs can be reported to old binaries.
 */
struct vm_exit_ipi_13 {
	uint32_t	mode;
	uint8_t		vector;
	__BITSET_DEFINE(, 256) dmask;
};

/* 13.x exit record: exit info was embedded inline in the run structure. */
struct vm_exit_13 {
	uint32_t	exitcode;
	int32_t		inst_length;
	uint64_t	rip;
	uint64_t	u[120 / sizeof(uint64_t)];
};

/* 13.x VM_RUN argument: carries the exit record by value, not by pointer. */
struct vm_run_13 {
	int		cpuid;
	struct vm_exit_13 vm_exit;
};

#define	VM_RUN_13 \
	_IOWR('v', IOCNUM_RUN, struct vm_run_13)

#endif /* COMPAT_FREEBSD13 */
108 
/*
 * Machine-dependent ioctl descriptor table for the amd64 vmm device.
 * Each entry pairs an ioctl command with the locking/privilege flags the
 * generic vmm dev layer must satisfy before calling into
 * vmmdev_machdep_ioctl() below (e.g. LOCK_ONE_VCPU for per-vCPU state,
 * LOCK_ALL_VCPUS + XLOCK_MEMSEGS for operations that touch the whole VM's
 * memory map, PRIV_CHECK_DRIVER for PCI passthrough management).
 * NOTE(review): flag semantics are enforced in dev/vmm/vmm_dev.c, not
 * visible here — confirm against that file when changing entries.
 */
const struct vmmdev_ioctl vmmdev_machdep_ioctls[] = {
	VMMDEV_IOCTL(VM_RUN, VMMDEV_IOCTL_LOCK_ONE_VCPU),
#ifdef COMPAT_FREEBSD13
	VMMDEV_IOCTL(VM_RUN_13, VMMDEV_IOCTL_LOCK_ONE_VCPU),
#endif
	VMMDEV_IOCTL(VM_GET_SEGMENT_DESCRIPTOR, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_SEGMENT_DESCRIPTOR, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_INJECT_EXCEPTION, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_X2APIC_STATE, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GLA2GPA, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GLA2GPA_NOFAULT, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_INTINFO, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_INTINFO, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_RESTART_INSTRUCTION, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_KERNEMU_DEV, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_KERNEMU_DEV, VMMDEV_IOCTL_LOCK_ONE_VCPU),

	/* Binding a passthrough device reshapes guest memory: exclusive. */
	VMMDEV_IOCTL(VM_BIND_PPTDEV,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS |
	    VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
	VMMDEV_IOCTL(VM_UNBIND_PPTDEV,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS |
	    VMMDEV_IOCTL_PRIV_CHECK_DRIVER),

	VMMDEV_IOCTL(VM_MAP_PPTDEV_MMIO, VMMDEV_IOCTL_LOCK_ALL_VCPUS |
	    VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
	VMMDEV_IOCTL(VM_UNMAP_PPTDEV_MMIO, VMMDEV_IOCTL_LOCK_ALL_VCPUS |
	    VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
#ifdef BHYVE_SNAPSHOT
#ifdef COMPAT_FREEBSD13
	VMMDEV_IOCTL(VM_SNAPSHOT_REQ_13, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
#endif
	VMMDEV_IOCTL(VM_SNAPSHOT_REQ, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_RESTORE_TIME, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
#endif

#ifdef COMPAT_FREEBSD13
	VMMDEV_IOCTL(VM_STATS_13, VMMDEV_IOCTL_LOCK_ONE_VCPU),
#endif
	VMMDEV_IOCTL(VM_INJECT_NMI, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_LAPIC_IRQ, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_X2APIC_STATE, VMMDEV_IOCTL_LOCK_ONE_VCPU),

	VMMDEV_IOCTL(VM_LAPIC_LOCAL_IRQ, VMMDEV_IOCTL_MAYBE_ALLOC_VCPU),

	VMMDEV_IOCTL(VM_PPTDEV_MSI, VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
	VMMDEV_IOCTL(VM_PPTDEV_MSIX, VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
	VMMDEV_IOCTL(VM_PPTDEV_DISABLE_MSIX, VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
	VMMDEV_IOCTL(VM_LAPIC_MSI, 0),
	VMMDEV_IOCTL(VM_IOAPIC_ASSERT_IRQ, 0),
	VMMDEV_IOCTL(VM_IOAPIC_DEASSERT_IRQ, 0),
	VMMDEV_IOCTL(VM_IOAPIC_PULSE_IRQ, 0),
	VMMDEV_IOCTL(VM_IOAPIC_PINCOUNT, 0),
	VMMDEV_IOCTL(VM_ISA_ASSERT_IRQ, 0),
	VMMDEV_IOCTL(VM_ISA_DEASSERT_IRQ, 0),
	VMMDEV_IOCTL(VM_ISA_PULSE_IRQ, 0),
	VMMDEV_IOCTL(VM_ISA_SET_IRQ_TRIGGER, 0),
	VMMDEV_IOCTL(VM_GET_GPA_PMAP, 0),
	VMMDEV_IOCTL(VM_GET_HPET_CAPABILITIES, 0),
	VMMDEV_IOCTL(VM_RTC_READ, 0),
	VMMDEV_IOCTL(VM_RTC_WRITE, 0),
	VMMDEV_IOCTL(VM_RTC_GETTIME, 0),
	VMMDEV_IOCTL(VM_RTC_SETTIME, 0),
};
/* Number of entries above; consumed by the generic vmm dev dispatcher. */
const size_t vmmdev_machdep_ioctl_count = nitems(vmmdev_machdep_ioctls);
174 
/*
 * Machine-dependent (amd64) handler for vmm device ioctls.  The generic
 * vmm dev layer dispatches here after satisfying the locking/privilege
 * requirements listed in vmmdev_machdep_ioctls[].
 *
 * 'data' is a kernel copy of the ioctl argument; for _IOWR commands the
 * caller copies any modifications back out to userspace.  Returns 0 on
 * success or an errno value (ENOTTY for commands not handled here, so the
 * generic layer can report an unknown ioctl).
 */
int
vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	int error;

	error = 0;
	switch (cmd) {
	case VM_RUN: {
		struct vm_exit *vme;
		struct vm_run *vmrun;

		vmrun = (struct vm_run *)data;
		vme = vm_exitinfo(vcpu);

		error = vm_run(vcpu);
		if (error != 0)
			break;

		/* Copy the exit record out to the user-supplied buffer. */
		error = copyout(vme, vmrun->vm_exit, sizeof(*vme));
		if (error != 0)
			break;
		if (vme->exitcode == VM_EXITCODE_IPI) {
			/*
			 * Copy out as much of the IPI destination cpuset as
			 * fits in the user's buffer (it may be smaller or
			 * larger than the kernel's cpuset_t).
			 */
			error = copyout(vm_exitinfo_cpuset(vcpu),
			    vmrun->cpuset,
			    min(vmrun->cpusetsize, sizeof(cpuset_t)));
			if (error != 0)
				break;
			if (sizeof(cpuset_t) < vmrun->cpusetsize) {
				uint8_t *p;

				/*
				 * The user's set is wider than the kernel's:
				 * zero the tail byte-by-byte so userspace
				 * never sees stale data.  subyte() returns
				 * non-zero on a bad user address.
				 */
				p = (uint8_t *)vmrun->cpuset +
				    sizeof(cpuset_t);
				while (p < (uint8_t *)vmrun->cpuset +
				    vmrun->cpusetsize) {
					if (subyte(p++, 0) != 0) {
						error = EFAULT;
						break;
					}
				}
			}
		}
		break;
	}
#ifdef COMPAT_FREEBSD13
	case VM_RUN_13: {
		struct vm_exit *vme;
		struct vm_exit_13 *vme_13;
		struct vm_run_13 *vmrun_13;

		vmrun_13 = (struct vm_run_13 *)data;
		vme_13 = &vmrun_13->vm_exit;
		vme = vm_exitinfo(vcpu);

		error = vm_run(vcpu);
		if (error == 0) {
			/* Translate the current exit record to 13.x layout. */
			vme_13->exitcode = vme->exitcode;
			vme_13->inst_length = vme->inst_length;
			vme_13->rip = vme->rip;
			memcpy(vme_13->u, &vme->u, sizeof(vme_13->u));
			if (vme->exitcode == VM_EXITCODE_IPI) {
				struct vm_exit_ipi_13 *ipi;
				cpuset_t *dmask;
				int cpu;

				/*
				 * The 13.x IPI payload embeds a fixed 256-bit
				 * destination mask inside u[]; rebuild it from
				 * the kernel cpuset, silently dropping CPUs
				 * beyond what the old ABI can express.
				 */
				dmask = vm_exitinfo_cpuset(vcpu);
				ipi = (struct vm_exit_ipi_13 *)&vme_13->u[0];
				BIT_ZERO(256, &ipi->dmask);
				CPU_FOREACH_ISSET(cpu, dmask) {
					if (cpu >= 256)
						break;
					BIT_SET(256, cpu, &ipi->dmask);
				}
			}
		}
		break;
	}
	case VM_STATS_13: {
		struct vm_stats_13 *vmstats_13;

		vmstats_13 = (struct vm_stats_13 *)data;
		getmicrotime(&vmstats_13->tv);
		/* Fill the fixed-size 13.x stat buffer starting at index 0. */
		error = vmm_stat_copy(vcpu, 0, nitems(vmstats_13->statbuf),
		    &vmstats_13->num_entries, vmstats_13->statbuf);
		break;
	}
#endif
	case VM_PPTDEV_MSI: {
		struct vm_pptdev_msi *pptmsi;

		pptmsi = (struct vm_pptdev_msi *)data;
		error = ppt_setup_msi(vm, pptmsi->bus, pptmsi->slot,
		    pptmsi->func, pptmsi->addr, pptmsi->msg, pptmsi->numvec);
		break;
	}
	case VM_PPTDEV_MSIX: {
		struct vm_pptdev_msix *pptmsix;

		pptmsix = (struct vm_pptdev_msix *)data;
		error = ppt_setup_msix(vm, pptmsix->bus, pptmsix->slot,
		    pptmsix->func, pptmsix->idx, pptmsix->addr, pptmsix->msg,
		    pptmsix->vector_control);
		break;
	}
	case VM_PPTDEV_DISABLE_MSIX: {
		struct vm_pptdev *pptdev;

		pptdev = (struct vm_pptdev *)data;
		error = ppt_disable_msix(vm, pptdev->bus, pptdev->slot,
		    pptdev->func);
		break;
	}
	case VM_MAP_PPTDEV_MMIO: {
		struct vm_pptdev_mmio *pptmmio;

		pptmmio = (struct vm_pptdev_mmio *)data;
		error = ppt_map_mmio(vm, pptmmio->bus, pptmmio->slot,
		    pptmmio->func, pptmmio->gpa, pptmmio->len, pptmmio->hpa);
		break;
	}
	case VM_UNMAP_PPTDEV_MMIO: {
		struct vm_pptdev_mmio *pptmmio;

		pptmmio = (struct vm_pptdev_mmio *)data;
		error = ppt_unmap_mmio(vm, pptmmio->bus, pptmmio->slot,
		    pptmmio->func, pptmmio->gpa, pptmmio->len);
		break;
	}
	case VM_BIND_PPTDEV: {
		struct vm_pptdev *pptdev;

		pptdev = (struct vm_pptdev *)data;
		error = vm_assign_pptdev(vm, pptdev->bus, pptdev->slot,
		    pptdev->func);
		break;
	}
	case VM_UNBIND_PPTDEV: {
		struct vm_pptdev *pptdev;

		pptdev = (struct vm_pptdev *)data;
		error = vm_unassign_pptdev(vm, pptdev->bus, pptdev->slot,
		    pptdev->func);
		break;
	}
	case VM_INJECT_EXCEPTION: {
		struct vm_exception *vmexc;

		vmexc = (struct vm_exception *)data;
		error = vm_inject_exception(vcpu,
		    vmexc->vector, vmexc->error_code_valid, vmexc->error_code,
		    vmexc->restart_instruction);
		break;
	}
	case VM_INJECT_NMI:
		error = vm_inject_nmi(vcpu);
		break;
	case VM_LAPIC_IRQ: {
		struct vm_lapic_irq *vmirq;

		vmirq = (struct vm_lapic_irq *)data;
		error = lapic_intr_edge(vcpu, vmirq->vector);
		break;
	}
	case VM_LAPIC_LOCAL_IRQ: {
		struct vm_lapic_irq *vmirq;

		vmirq = (struct vm_lapic_irq *)data;
		error = lapic_set_local_intr(vm, vcpu, vmirq->vector);
		break;
	}
	case VM_LAPIC_MSI: {
		struct vm_lapic_msi *vmmsi;

		vmmsi = (struct vm_lapic_msi *)data;
		error = lapic_intr_msi(vm, vmmsi->addr, vmmsi->msg);
		break;
	}
	case VM_IOAPIC_ASSERT_IRQ: {
		struct vm_ioapic_irq *ioapic_irq;

		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_assert_irq(vm, ioapic_irq->irq);
		break;
	}
	case VM_IOAPIC_DEASSERT_IRQ: {
		struct vm_ioapic_irq *ioapic_irq;

		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_deassert_irq(vm, ioapic_irq->irq);
		break;
	}
	case VM_IOAPIC_PULSE_IRQ: {
		struct vm_ioapic_irq *ioapic_irq;

		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_pulse_irq(vm, ioapic_irq->irq);
		break;
	}
	case VM_IOAPIC_PINCOUNT:
		*(int *)data = vioapic_pincount(vm);
		break;
	case VM_SET_KERNEMU_DEV:
	case VM_GET_KERNEMU_DEV: {
		struct vm_readwrite_kernemu_device *kernemu;
		mem_region_write_t mwrite;
		mem_region_read_t mread;
		int size;
		bool arg;

		kernemu = (void *)data;

		/* access_width encodes log2 of the access size in bytes. */
		if (kernemu->access_width > 0)
			size = (1u << kernemu->access_width);
		else
			size = 1;

		/*
		 * Route the access to the kernel-emulated device whose MMIO
		 * window contains the guest physical address; reject
		 * addresses outside all known windows.
		 */
		if (kernemu->gpa >= DEFAULT_APIC_BASE &&
		    kernemu->gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
			mread = lapic_mmio_read;
			mwrite = lapic_mmio_write;
		} else if (kernemu->gpa >= VIOAPIC_BASE &&
		    kernemu->gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
			mread = vioapic_mmio_read;
			mwrite = vioapic_mmio_write;
		} else if (kernemu->gpa >= VHPET_BASE &&
		    kernemu->gpa < VHPET_BASE + VHPET_SIZE) {
			mread = vhpet_mmio_read;
			mwrite = vhpet_mmio_write;
		} else {
			error = EINVAL;
			break;
		}

		if (cmd == VM_SET_KERNEMU_DEV)
			error = mwrite(vcpu, kernemu->gpa,
			    kernemu->value, size, &arg);
		else
			error = mread(vcpu, kernemu->gpa,
			    &kernemu->value, size, &arg);
		break;
	}
	case VM_ISA_ASSERT_IRQ: {
		struct vm_isa_irq *isa_irq;

		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_assert_irq(vm, isa_irq->atpic_irq);
		/* -1 means the IRQ has no I/O APIC routing to mirror. */
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_assert_irq(vm, isa_irq->ioapic_irq);
		break;
	}
	case VM_ISA_DEASSERT_IRQ: {
		struct vm_isa_irq *isa_irq;

		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_deassert_irq(vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_deassert_irq(vm, isa_irq->ioapic_irq);
		break;
	}
	case VM_ISA_PULSE_IRQ: {
		struct vm_isa_irq *isa_irq;

		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_pulse_irq(vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_pulse_irq(vm, isa_irq->ioapic_irq);
		break;
	}
	case VM_ISA_SET_IRQ_TRIGGER: {
		struct vm_isa_irq_trigger *isa_irq_trigger;

		isa_irq_trigger = (struct vm_isa_irq_trigger *)data;
		error = vatpic_set_irq_trigger(vm,
		    isa_irq_trigger->atpic_irq, isa_irq_trigger->trigger);
		break;
	}
	case VM_SET_SEGMENT_DESCRIPTOR: {
		struct vm_seg_desc *vmsegdesc;

		vmsegdesc = (struct vm_seg_desc *)data;
		error = vm_set_seg_desc(vcpu, vmsegdesc->regnum,
		    &vmsegdesc->desc);
		break;
	}
	case VM_GET_SEGMENT_DESCRIPTOR: {
		struct vm_seg_desc *vmsegdesc;

		vmsegdesc = (struct vm_seg_desc *)data;
		error = vm_get_seg_desc(vcpu, vmsegdesc->regnum,
		    &vmsegdesc->desc);
		break;
	}
	case VM_SET_X2APIC_STATE: {
		struct vm_x2apic *x2apic;

		x2apic = (struct vm_x2apic *)data;
		error = vm_set_x2apic_state(vcpu, x2apic->state);
		break;
	}
	case VM_GET_X2APIC_STATE: {
		struct vm_x2apic *x2apic;

		x2apic = (struct vm_x2apic *)data;
		error = vm_get_x2apic_state(vcpu, &x2apic->state);
		break;
	}
	case VM_GET_GPA_PMAP: {
		struct vm_gpa_pte *gpapte;

		gpapte = (struct vm_gpa_pte *)data;
		/* Debug aid: dump the nested page-table entries for a GPA. */
		pmap_get_mapping(vmspace_pmap(vm_vmspace(vm)), gpapte->gpa,
		    gpapte->pte, &gpapte->ptenum);
		break;
	}
	case VM_GET_HPET_CAPABILITIES:
		error = vhpet_getcap((struct vm_hpet_cap *)data);
		break;
	case VM_GLA2GPA: {
		struct vm_gla2gpa *gg;

		/*
		 * Userspace passes PROT_* in gg->prot; these asserts pin the
		 * assumption that they alias the kernel VM_PROT_* values.
		 */
		CTASSERT(PROT_READ == VM_PROT_READ);
		CTASSERT(PROT_WRITE == VM_PROT_WRITE);
		CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
		gg = (struct vm_gla2gpa *)data;
		error = vm_gla2gpa(vcpu, &gg->paging, gg->gla,
		    gg->prot, &gg->gpa, &gg->fault);
		KASSERT(error == 0 || error == EFAULT,
		    ("%s: vm_gla2gpa unknown error %d", __func__, error));
		break;
	}
	case VM_GLA2GPA_NOFAULT: {
		struct vm_gla2gpa *gg;

		gg = (struct vm_gla2gpa *)data;
		error = vm_gla2gpa_nofault(vcpu, &gg->paging, gg->gla,
		    gg->prot, &gg->gpa, &gg->fault);
		KASSERT(error == 0 || error == EFAULT,
		    ("%s: vm_gla2gpa unknown error %d", __func__, error));
		break;
	}
	case VM_SET_INTINFO: {
		struct vm_intinfo *vmii;

		vmii = (struct vm_intinfo *)data;
		error = vm_exit_intinfo(vcpu, vmii->info1);
		break;
	}
	case VM_GET_INTINFO: {
		struct vm_intinfo *vmii;

		vmii = (struct vm_intinfo *)data;
		error = vm_get_intinfo(vcpu, &vmii->info1, &vmii->info2);
		break;
	}
	case VM_RTC_WRITE: {
		struct vm_rtc_data *rtcdata;

		rtcdata = (struct vm_rtc_data *)data;
		error = vrtc_nvram_write(vm, rtcdata->offset,
		    rtcdata->value);
		break;
	}
	case VM_RTC_READ: {
		struct vm_rtc_data *rtcdata;

		rtcdata = (struct vm_rtc_data *)data;
		error = vrtc_nvram_read(vm, rtcdata->offset,
		    &rtcdata->value);
		break;
	}
	case VM_RTC_SETTIME: {
		struct vm_rtc_time *rtctime;

		rtctime = (struct vm_rtc_time *)data;
		error = vrtc_set_time(vm, rtctime->secs);
		break;
	}
	case VM_RTC_GETTIME: {
		struct vm_rtc_time *rtctime;

		rtctime = (struct vm_rtc_time *)data;
		rtctime->secs = vrtc_get_time(vm);
		break;
	}
	case VM_RESTART_INSTRUCTION:
		error = vm_restart_instruction(vcpu);
		break;
#ifdef BHYVE_SNAPSHOT
	case VM_SNAPSHOT_REQ: {
		struct vm_snapshot_meta *snapshot_meta;

		snapshot_meta = (struct vm_snapshot_meta *)data;
		error = vm_snapshot_req(vm, snapshot_meta);
		break;
	}
#ifdef COMPAT_FREEBSD13
	case VM_SNAPSHOT_REQ_13: {
		struct vm_snapshot_meta *snapshot_meta;
		struct vm_snapshot_meta_13 *snapshot_13;

		/*
		 * The old structure just has an additional pointer at
		 * the start that is ignored.
		 */
		snapshot_13 = (struct vm_snapshot_meta_13 *)data;
		snapshot_meta =
		    (struct vm_snapshot_meta *)&snapshot_13->dev_data;
		error = vm_snapshot_req(vm, snapshot_meta);
		break;
	}
#endif
	case VM_RESTORE_TIME:
		error = vm_restore_time(vm);
		break;
#endif
	default:
		/* Not a machine-dependent ioctl; let the caller report it. */
		error = ENOTTY;
		break;
	}

	return (error);
}
597