/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/ioccom.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_snapshot.h>
#include <x86/apicreg.h>

#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_stat.h>

#include "vmm_lapic.h"
#include "vmm_mem.h"
#include "io/ppt.h"
#include "io/vatpic.h"
#include "io/vioapic.h"
#include "io/vhpet.h"
#include "io/vrtc.h"

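/*
 * Compatibility shims: when the kernel is built with COMPAT_FREEBSD13,
 * the structures below reproduce the FreeBSD 13 ioctl argument layouts
 * so that binaries built against the old vmm(4) ABI keep working.
 */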
#ifdef COMPAT_FREEBSD13
struct vm_stats_13 {
	int		cpuid;				/* in */
	int		num_entries;			/* out */
	struct timeval	tv;
	uint64_t	statbuf[MAX_VM_STATS];
};

#define	VM_STATS_13	_IOWR('v', IOCNUM_VM_STATS, struct vm_stats_13)

struct vm_snapshot_meta_13 {
	void *ctx;			/* unused */
	void *dev_data;
	const char *dev_name;      /* identify userspace devices */
	enum snapshot_req dev_req; /* identify kernel structs */

	struct vm_snapshot_buffer buffer;

	enum vm_snapshot_op op;
};

#define VM_SNAPSHOT_REQ_13 \
	_IOWR('v', IOCNUM_SNAPSHOT_REQ, struct vm_snapshot_meta_13)

struct vm_exit_ipi_13 {
	uint32_t	mode;
	uint8_t		vector;
	__BITSET_DEFINE(, 256) dmask;
};

struct vm_exit_13 {
	uint32_t	exitcode;
	int32_t		inst_length;
	uint64_t	rip;
	uint64_t	u[120 / sizeof(uint64_t)];
};

struct vm_run_13 {
	int		cpuid;
	struct vm_exit_13 vm_exit;
};

#define	VM_RUN_13 \
	_IOWR('v', IOCNUM_RUN, struct vm_run_13)

#endif /* COMPAT_FREEBSD13 */

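/*
 * Machine-dependent ioctls handled by this file, together with the locking
 * each one requires before the handler below runs (a single vCPU, all
 * vCPUs, and/or exclusive access to the memory segment list).
 */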
const struct vmmdev_ioctl vmmdev_machdep_ioctls[] = {
	VMMDEV_IOCTL(VM_RUN, VMMDEV_IOCTL_LOCK_ONE_VCPU),
#ifdef COMPAT_FREEBSD13
	VMMDEV_IOCTL(VM_RUN_13, VMMDEV_IOCTL_LOCK_ONE_VCPU),
#endif
	VMMDEV_IOCTL(VM_GET_SEGMENT_DESCRIPTOR, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_SEGMENT_DESCRIPTOR, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_INJECT_EXCEPTION, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_X2APIC_STATE, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GLA2GPA, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GLA2GPA_NOFAULT, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_INTINFO, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_INTINFO, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_RESTART_INSTRUCTION, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_KERNEMU_DEV, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_KERNEMU_DEV, VMMDEV_IOCTL_LOCK_ONE_VCPU),

	VMMDEV_IOCTL(VM_BIND_PPTDEV,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_UNBIND_PPTDEV,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),

	VMMDEV_IOCTL(VM_MAP_PPTDEV_MMIO, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_UNMAP_PPTDEV_MMIO, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
#ifdef BHYVE_SNAPSHOT
#ifdef COMPAT_FREEBSD13
	VMMDEV_IOCTL(VM_SNAPSHOT_REQ_13, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
#endif
	VMMDEV_IOCTL(VM_SNAPSHOT_REQ, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_RESTORE_TIME, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
#endif

#ifdef COMPAT_FREEBSD13
	VMMDEV_IOCTL(VM_STATS_13, VMMDEV_IOCTL_LOCK_ONE_VCPU),
#endif
	VMMDEV_IOCTL(VM_INJECT_NMI, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_LAPIC_IRQ, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_X2APIC_STATE, VMMDEV_IOCTL_LOCK_ONE_VCPU),

	VMMDEV_IOCTL(VM_LAPIC_LOCAL_IRQ, VMMDEV_IOCTL_MAYBE_ALLOC_VCPU),

	VMMDEV_IOCTL(VM_PPTDEV_MSI, 0),
	VMMDEV_IOCTL(VM_PPTDEV_MSIX, 0),
	VMMDEV_IOCTL(VM_PPTDEV_DISABLE_MSIX, 0),
	VMMDEV_IOCTL(VM_LAPIC_MSI, 0),
	VMMDEV_IOCTL(VM_IOAPIC_ASSERT_IRQ, 0),
	VMMDEV_IOCTL(VM_IOAPIC_DEASSERT_IRQ, 0),
	VMMDEV_IOCTL(VM_IOAPIC_PULSE_IRQ, 0),
	VMMDEV_IOCTL(VM_IOAPIC_PINCOUNT, 0),
	VMMDEV_IOCTL(VM_ISA_ASSERT_IRQ, 0),
	VMMDEV_IOCTL(VM_ISA_DEASSERT_IRQ, 0),
	VMMDEV_IOCTL(VM_ISA_PULSE_IRQ, 0),
	VMMDEV_IOCTL(VM_ISA_SET_IRQ_TRIGGER, 0),
	VMMDEV_IOCTL(VM_GET_GPA_PMAP, 0),
	VMMDEV_IOCTL(VM_GET_HPET_CAPABILITIES, 0),
	VMMDEV_IOCTL(VM_RTC_READ, 0),
	VMMDEV_IOCTL(VM_RTC_WRITE, 0),
	VMMDEV_IOCTL(VM_RTC_GETTIME, 0),
	VMMDEV_IOCTL(VM_RTC_SETTIME, 0),
};
const size_t vmmdev_machdep_ioctl_count = nitems(vmmdev_machdep_ioctls);

int
vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	struct vm_seg_desc *vmsegdesc;
	struct vm_run *vmrun;
#ifdef COMPAT_FREEBSD13
	struct vm_run_13 *vmrun_13;
#endif
	struct vm_exception *vmexc;
	struct vm_lapic_irq *vmirq;
	struct vm_lapic_msi *vmmsi;
	struct vm_ioapic_irq *ioapic_irq;
	struct vm_isa_irq *isa_irq;
	struct vm_isa_irq_trigger *isa_irq_trigger;
	struct vm_pptdev *pptdev;
	struct vm_pptdev_mmio *pptmmio;
	struct vm_pptdev_msi *pptmsi;
	struct vm_pptdev_msix *pptmsix;
	struct vm_x2apic *x2apic;
	struct vm_gpa_pte *gpapte;
	struct vm_gla2gpa *gg;
	struct vm_intinfo *vmii;
	struct vm_rtc_time *rtctime;
	struct vm_rtc_data *rtcdata;
	struct vm_readwrite_kernemu_device *kernemu;
#ifdef BHYVE_SNAPSHOT
	struct vm_snapshot_meta *snapshot_meta;
#ifdef COMPAT_FREEBSD13
	struct vm_snapshot_meta_13 *snapshot_13;
#endif
#endif
	int error;

	error = 0;
	switch (cmd) {
	case VM_RUN: {
		struct vm_exit *vme;

		vmrun = (struct vm_run *)data;
		vme = vm_exitinfo(vcpu);

		error = vm_run(vcpu);
		if (error != 0)
			break;

		error = copyout(vme, vmrun->vm_exit, sizeof(*vme));
		if (error != 0)
			break;
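		/*
		 * An IPI exit also carries the set of destination vCPUs;
		 * copy out as much of it as fits into the caller's buffer.
		 */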
		if (vme->exitcode == VM_EXITCODE_IPI) {
			error = copyout(vm_exitinfo_cpuset(vcpu),
			    vmrun->cpuset,
			    min(vmrun->cpusetsize, sizeof(cpuset_t)));
			if (error != 0)
				break;
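			/*
			 * If the caller's buffer is larger than the kernel
			 * cpuset, zero the remainder so no stale bytes are
			 * left in user memory.
			 */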
			if (sizeof(cpuset_t) < vmrun->cpusetsize) {
				uint8_t *p;

				p = (uint8_t *)vmrun->cpuset +
				    sizeof(cpuset_t);
				while (p < (uint8_t *)vmrun->cpuset +
				    vmrun->cpusetsize) {
					if (subyte(p++, 0) != 0) {
						error = EFAULT;
						break;
					}
				}
			}
		}
		break;
	}
#ifdef COMPAT_FREEBSD13
	case VM_RUN_13: {
		struct vm_exit *vme;
		struct vm_exit_13 *vme_13;

		vmrun_13 = (struct vm_run_13 *)data;
		vme_13 = &vmrun_13->vm_exit;
		vme = vm_exitinfo(vcpu);

		error = vm_run(vcpu);
		if (error == 0) {
			vme_13->exitcode = vme->exitcode;
			vme_13->inst_length = vme->inst_length;
			vme_13->rip = vme->rip;
			memcpy(vme_13->u, &vme->u, sizeof(vme_13->u));
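			/*
			 * For IPI exits, translate the kernel cpuset into the
			 * fixed 256-bit destination mask of the FreeBSD 13
			 * ABI; CPUs beyond 255 cannot be represented and are
			 * dropped.
			 */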
			if (vme->exitcode == VM_EXITCODE_IPI) {
				struct vm_exit_ipi_13 *ipi;
				cpuset_t *dmask;
				int cpu;

				dmask = vm_exitinfo_cpuset(vcpu);
				ipi = (struct vm_exit_ipi_13 *)&vme_13->u[0];
				BIT_ZERO(256, &ipi->dmask);
				CPU_FOREACH_ISSET(cpu, dmask) {
					if (cpu >= 256)
						break;
					BIT_SET(256, cpu, &ipi->dmask);
				}
			}
		}
		break;
	}
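	/*
	 * Legacy stats ioctl: returns a timestamp plus up to MAX_VM_STATS
	 * counters in a single fixed-size buffer.
	 */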
	case VM_STATS_13: {
		struct vm_stats_13 *vmstats_13;

		vmstats_13 = (struct vm_stats_13 *)data;
		getmicrotime(&vmstats_13->tv);
		error = vmm_stat_copy(vcpu, 0, nitems(vmstats_13->statbuf),
		    &vmstats_13->num_entries, vmstats_13->statbuf);
		break;
	}
#endif
	case VM_PPTDEV_MSI:
		pptmsi = (struct vm_pptdev_msi *)data;
		error = ppt_setup_msi(vm,
				      pptmsi->bus, pptmsi->slot, pptmsi->func,
				      pptmsi->addr, pptmsi->msg,
				      pptmsi->numvec);
		break;
	case VM_PPTDEV_MSIX:
		pptmsix = (struct vm_pptdev_msix *)data;
		error = ppt_setup_msix(vm,
				       pptmsix->bus, pptmsix->slot,
				       pptmsix->func, pptmsix->idx,
				       pptmsix->addr, pptmsix->msg,
				       pptmsix->vector_control);
		break;
	case VM_PPTDEV_DISABLE_MSIX:
		pptdev = (struct vm_pptdev *)data;
		error = ppt_disable_msix(vm, pptdev->bus, pptdev->slot,
					 pptdev->func);
		break;
	case VM_MAP_PPTDEV_MMIO:
		pptmmio = (struct vm_pptdev_mmio *)data;
		error = ppt_map_mmio(vm, pptmmio->bus, pptmmio->slot,
				     pptmmio->func, pptmmio->gpa, pptmmio->len,
				     pptmmio->hpa);
		break;
	case VM_UNMAP_PPTDEV_MMIO:
		pptmmio = (struct vm_pptdev_mmio *)data;
		error = ppt_unmap_mmio(vm, pptmmio->bus, pptmmio->slot,
				       pptmmio->func, pptmmio->gpa, pptmmio->len);
		break;
	case VM_BIND_PPTDEV:
		pptdev = (struct vm_pptdev *)data;
		error = vm_assign_pptdev(vm, pptdev->bus, pptdev->slot,
					 pptdev->func);
		break;
	case VM_UNBIND_PPTDEV:
		pptdev = (struct vm_pptdev *)data;
		error = vm_unassign_pptdev(vm, pptdev->bus, pptdev->slot,
					   pptdev->func);
		break;
	case VM_INJECT_EXCEPTION:
		vmexc = (struct vm_exception *)data;
		error = vm_inject_exception(vcpu,
		    vmexc->vector, vmexc->error_code_valid, vmexc->error_code,
		    vmexc->restart_instruction);
		break;
	case VM_INJECT_NMI:
		error = vm_inject_nmi(vcpu);
		break;
	case VM_LAPIC_IRQ:
		vmirq = (struct vm_lapic_irq *)data;
		error = lapic_intr_edge(vcpu, vmirq->vector);
		break;
	case VM_LAPIC_LOCAL_IRQ:
		vmirq = (struct vm_lapic_irq *)data;
		error = lapic_set_local_intr(vm, vcpu, vmirq->vector);
		break;
	case VM_LAPIC_MSI:
		vmmsi = (struct vm_lapic_msi *)data;
		error = lapic_intr_msi(vm, vmmsi->addr, vmmsi->msg);
		break;
	case VM_IOAPIC_ASSERT_IRQ:
		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_assert_irq(vm, ioapic_irq->irq);
		break;
	case VM_IOAPIC_DEASSERT_IRQ:
		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_deassert_irq(vm, ioapic_irq->irq);
		break;
	case VM_IOAPIC_PULSE_IRQ:
		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_pulse_irq(vm, ioapic_irq->irq);
		break;
	case VM_IOAPIC_PINCOUNT:
		*(int *)data = vioapic_pincount(vm);
		break;
	case VM_SET_KERNEMU_DEV:
	case VM_GET_KERNEMU_DEV: {
		mem_region_write_t mwrite;
		mem_region_read_t mread;
		int size;
		bool arg;

		kernemu = (void *)data;

		if (kernemu->access_width > 0)
			size = (1u << kernemu->access_width);
		else
			size = 1;

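		/*
		 * Only the MMIO ranges emulated in the kernel (local APIC,
		 * I/O APIC and HPET) can be accessed this way; any other
		 * address is rejected with EINVAL.
		 */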
		if (kernemu->gpa >= DEFAULT_APIC_BASE &&
		    kernemu->gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
			mread = lapic_mmio_read;
			mwrite = lapic_mmio_write;
		} else if (kernemu->gpa >= VIOAPIC_BASE &&
		    kernemu->gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
			mread = vioapic_mmio_read;
			mwrite = vioapic_mmio_write;
		} else if (kernemu->gpa >= VHPET_BASE &&
		    kernemu->gpa < VHPET_BASE + VHPET_SIZE) {
			mread = vhpet_mmio_read;
			mwrite = vhpet_mmio_write;
		} else {
			error = EINVAL;
			break;
		}

		if (cmd == VM_SET_KERNEMU_DEV)
			error = mwrite(vcpu, kernemu->gpa,
			    kernemu->value, size, &arg);
		else
			error = mread(vcpu, kernemu->gpa,
			    &kernemu->value, size, &arg);
		break;
		}
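	/*
	 * Legacy ISA interrupts are driven on the virtual ATPIC and, when an
	 * I/O APIC pin is supplied (ioapic_irq != -1), mirrored to the
	 * virtual I/O APIC as well.
	 */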
	case VM_ISA_ASSERT_IRQ:
		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_assert_irq(vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_assert_irq(vm, isa_irq->ioapic_irq);
		break;
	case VM_ISA_DEASSERT_IRQ:
		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_deassert_irq(vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_deassert_irq(vm, isa_irq->ioapic_irq);
		break;
	case VM_ISA_PULSE_IRQ:
		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_pulse_irq(vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_pulse_irq(vm, isa_irq->ioapic_irq);
		break;
	case VM_ISA_SET_IRQ_TRIGGER:
		isa_irq_trigger = (struct vm_isa_irq_trigger *)data;
		error = vatpic_set_irq_trigger(vm,
		    isa_irq_trigger->atpic_irq, isa_irq_trigger->trigger);
		break;
	case VM_SET_SEGMENT_DESCRIPTOR:
		vmsegdesc = (struct vm_seg_desc *)data;
		error = vm_set_seg_desc(vcpu,
					vmsegdesc->regnum,
					&vmsegdesc->desc);
		break;
	case VM_GET_SEGMENT_DESCRIPTOR:
		vmsegdesc = (struct vm_seg_desc *)data;
		error = vm_get_seg_desc(vcpu,
					vmsegdesc->regnum,
					&vmsegdesc->desc);
		break;
	case VM_SET_X2APIC_STATE:
		x2apic = (struct vm_x2apic *)data;
		error = vm_set_x2apic_state(vcpu, x2apic->state);
		break;
	case VM_GET_X2APIC_STATE:
		x2apic = (struct vm_x2apic *)data;
		error = vm_get_x2apic_state(vcpu, &x2apic->state);
		break;
	case VM_GET_GPA_PMAP:
		gpapte = (struct vm_gpa_pte *)data;
		pmap_get_mapping(vmspace_pmap(vm_get_vmspace(vm)),
				 gpapte->gpa, gpapte->pte, &gpapte->ptenum);
		error = 0;
		break;
	case VM_GET_HPET_CAPABILITIES:
		error = vhpet_getcap((struct vm_hpet_cap *)data);
		break;
	case VM_GLA2GPA: {
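		/*
		 * Userspace passes mmap(2)-style PROT_* flags; the asserts
		 * below guarantee they match the VM_PROT_* values consumed
		 * by vm_gla2gpa().
		 */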
		CTASSERT(PROT_READ == VM_PROT_READ);
		CTASSERT(PROT_WRITE == VM_PROT_WRITE);
		CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
		gg = (struct vm_gla2gpa *)data;
		error = vm_gla2gpa(vcpu, &gg->paging, gg->gla,
		    gg->prot, &gg->gpa, &gg->fault);
		KASSERT(error == 0 || error == EFAULT,
		    ("%s: vm_gla2gpa unknown error %d", __func__, error));
		break;
	}
	case VM_GLA2GPA_NOFAULT:
		gg = (struct vm_gla2gpa *)data;
		error = vm_gla2gpa_nofault(vcpu, &gg->paging, gg->gla,
		    gg->prot, &gg->gpa, &gg->fault);
		KASSERT(error == 0 || error == EFAULT,
		    ("%s: vm_gla2gpa unknown error %d", __func__, error));
		break;
	case VM_SET_INTINFO:
		vmii = (struct vm_intinfo *)data;
		error = vm_exit_intinfo(vcpu, vmii->info1);
		break;
	case VM_GET_INTINFO:
		vmii = (struct vm_intinfo *)data;
		error = vm_get_intinfo(vcpu, &vmii->info1, &vmii->info2);
		break;
	case VM_RTC_WRITE:
		rtcdata = (struct vm_rtc_data *)data;
		error = vrtc_nvram_write(vm, rtcdata->offset,
		    rtcdata->value);
		break;
	case VM_RTC_READ:
		rtcdata = (struct vm_rtc_data *)data;
		error = vrtc_nvram_read(vm, rtcdata->offset,
		    &rtcdata->value);
		break;
	case VM_RTC_SETTIME:
		rtctime = (struct vm_rtc_time *)data;
		error = vrtc_set_time(vm, rtctime->secs);
		break;
	case VM_RTC_GETTIME:
		error = 0;
		rtctime = (struct vm_rtc_time *)data;
		rtctime->secs = vrtc_get_time(vm);
		break;
	case VM_RESTART_INSTRUCTION:
		error = vm_restart_instruction(vcpu);
		break;
#ifdef BHYVE_SNAPSHOT
	case VM_SNAPSHOT_REQ:
		snapshot_meta = (struct vm_snapshot_meta *)data;
		error = vm_snapshot_req(vm, snapshot_meta);
		break;
#ifdef COMPAT_FREEBSD13
	case VM_SNAPSHOT_REQ_13:
		/*
		 * The old structure just has an additional pointer at
		 * the start that is ignored.
		 */
		snapshot_13 = (struct vm_snapshot_meta_13 *)data;
		snapshot_meta =
		    (struct vm_snapshot_meta *)&snapshot_13->dev_data;
		error = vm_snapshot_req(vm, snapshot_meta);
		break;
#endif
	case VM_RESTORE_TIME:
		error = vm_restore_time(vm);
		break;
#endif
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
527