1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2011 NetApp, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "opt_bhyve_snapshot.h"
30
31 #include <sys/param.h>
32 #include <sys/kernel.h>
33 #include <sys/conf.h>
34 #include <sys/libkern.h>
35 #include <sys/ioccom.h>
36 #include <sys/mman.h>
37 #include <sys/uio.h>
38 #include <sys/proc.h>
39
40 #include <vm/vm.h>
41 #include <vm/pmap.h>
42 #include <vm/vm_map.h>
43
44 #include <machine/vmparam.h>
45 #include <machine/vmm.h>
46 #include <machine/vmm_instruction_emul.h>
47 #include <machine/vmm_snapshot.h>
48 #include <x86/apicreg.h>
49
50 #include <dev/vmm/vmm_dev.h>
51 #include <dev/vmm/vmm_mem.h>
52 #include <dev/vmm/vmm_stat.h>
53
54 #include "vmm_lapic.h"
55 #include "vmm_mem.h"
56 #include "io/ppt.h"
57 #include "io/vatpic.h"
58 #include "io/vioapic.h"
59 #include "io/vhpet.h"
60 #include "io/vrtc.h"
61
#ifdef COMPAT_FREEBSD13
/*
 * Compatibility definitions for the FreeBSD 13 vmm ioctl ABI.  These
 * reproduce the old structure layouts so that binaries built against
 * 13.x headers continue to work.  The layouts are ABI-frozen: do not
 * change field types, order, or sizes.
 */
struct vm_stats_13 {
	int		cpuid;				/* in */
	int		num_entries;			/* out */
	struct timeval	tv;
	uint64_t	statbuf[MAX_VM_STATS];
};

#define	VM_STATS_13	_IOWR('v', IOCNUM_VM_STATS, struct vm_stats_13)

struct vm_snapshot_meta_13 {
	/* Old ABI carried a context pointer; the kernel never used it. */
	void		*ctx;			/* unused */
	void		*dev_data;
	const char	*dev_name;		/* identify userspace devices */
	enum snapshot_req dev_req;		/* identify kernel structs */

	struct vm_snapshot_buffer buffer;

	enum vm_snapshot_op op;
};

#define	VM_SNAPSHOT_REQ_13 \
	_IOWR('v', IOCNUM_SNAPSHOT_REQ, struct vm_snapshot_meta_13)

/*
 * 13.x exit-info layouts.  The IPI destination mask was a fixed 256-bit
 * set, and the exit union payload was a fixed 120 bytes.
 */
struct vm_exit_ipi_13 {
	uint32_t	mode;
	uint8_t		vector;
	__BITSET_DEFINE(, 256) dmask;
};

struct vm_exit_13 {
	uint32_t	exitcode;
	int32_t		inst_length;
	uint64_t	rip;
	uint64_t	u[120 / sizeof(uint64_t)];
};

struct vm_run_13 {
	int		cpuid;
	struct vm_exit_13 vm_exit;
};

#define	VM_RUN_13 \
	_IOWR('v', IOCNUM_RUN, struct vm_run_13)

#endif /* COMPAT_FREEBSD13 */
108
/*
 * Machine-dependent ioctl table consumed by the generic vmm device code.
 * Each entry pairs an ioctl command with the locking the dispatcher must
 * acquire before the handler runs:
 *
 *   LOCK_ONE_VCPU     - lock the target vcpu only
 *   LOCK_ALL_VCPUS    - quiesce every vcpu
 *   XLOCK_MEMSEGS     - exclusive hold on the memory segment list
 *   MAYBE_ALLOC_VCPU  - the vcpu may not exist yet and can be allocated
 *   0                 - no vcpu/memseg locking required
 */
const struct vmmdev_ioctl vmmdev_machdep_ioctls[] = {
	VMMDEV_IOCTL(VM_RUN, VMMDEV_IOCTL_LOCK_ONE_VCPU),
#ifdef COMPAT_FREEBSD13
	VMMDEV_IOCTL(VM_RUN_13, VMMDEV_IOCTL_LOCK_ONE_VCPU),
#endif
	VMMDEV_IOCTL(VM_GET_SEGMENT_DESCRIPTOR, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_SEGMENT_DESCRIPTOR, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_INJECT_EXCEPTION, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_X2APIC_STATE, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GLA2GPA, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GLA2GPA_NOFAULT, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_INTINFO, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_INTINFO, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_RESTART_INSTRUCTION, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_KERNEMU_DEV, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_KERNEMU_DEV, VMMDEV_IOCTL_LOCK_ONE_VCPU),

	/*
	 * PCI passthrough (re)binding changes guest memory mappings, so it
	 * needs the memseg lock in addition to quiescing all vcpus.
	 */
	VMMDEV_IOCTL(VM_BIND_PPTDEV,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_UNBIND_PPTDEV,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),

	VMMDEV_IOCTL(VM_MAP_PPTDEV_MMIO, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_UNMAP_PPTDEV_MMIO, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
#ifdef BHYVE_SNAPSHOT
#ifdef COMPAT_FREEBSD13
	VMMDEV_IOCTL(VM_SNAPSHOT_REQ_13, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
#endif
	VMMDEV_IOCTL(VM_SNAPSHOT_REQ, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_RESTORE_TIME, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
#endif

#ifdef COMPAT_FREEBSD13
	VMMDEV_IOCTL(VM_STATS_13, VMMDEV_IOCTL_LOCK_ONE_VCPU),
#endif
	VMMDEV_IOCTL(VM_INJECT_NMI, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_LAPIC_IRQ, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_X2APIC_STATE, VMMDEV_IOCTL_LOCK_ONE_VCPU),

	VMMDEV_IOCTL(VM_LAPIC_LOCAL_IRQ, VMMDEV_IOCTL_MAYBE_ALLOC_VCPU),

	VMMDEV_IOCTL(VM_PPTDEV_MSI, 0),
	VMMDEV_IOCTL(VM_PPTDEV_MSIX, 0),
	VMMDEV_IOCTL(VM_PPTDEV_DISABLE_MSIX, 0),
	VMMDEV_IOCTL(VM_LAPIC_MSI, 0),
	VMMDEV_IOCTL(VM_IOAPIC_ASSERT_IRQ, 0),
	VMMDEV_IOCTL(VM_IOAPIC_DEASSERT_IRQ, 0),
	VMMDEV_IOCTL(VM_IOAPIC_PULSE_IRQ, 0),
	VMMDEV_IOCTL(VM_IOAPIC_PINCOUNT, 0),
	VMMDEV_IOCTL(VM_ISA_ASSERT_IRQ, 0),
	VMMDEV_IOCTL(VM_ISA_DEASSERT_IRQ, 0),
	VMMDEV_IOCTL(VM_ISA_PULSE_IRQ, 0),
	VMMDEV_IOCTL(VM_ISA_SET_IRQ_TRIGGER, 0),
	VMMDEV_IOCTL(VM_GET_GPA_PMAP, 0),
	VMMDEV_IOCTL(VM_GET_HPET_CAPABILITIES, 0),
	VMMDEV_IOCTL(VM_RTC_READ, 0),
	VMMDEV_IOCTL(VM_RTC_WRITE, 0),
	VMMDEV_IOCTL(VM_RTC_GETTIME, 0),
	VMMDEV_IOCTL(VM_RTC_SETTIME, 0),
};
const size_t vmmdev_machdep_ioctl_count = nitems(vmmdev_machdep_ioctls);
170
/*
 * Handle an amd64-specific vmm ioctl on behalf of the generic vmm device
 * code.  The dispatcher has already copied the ioctl argument into 'data'
 * and acquired whatever locks the matching vmmdev_machdep_ioctls[] entry
 * requested, so handlers here may assume the vcpu/memseg locking is in
 * place.  Returns 0 on success, an errno on failure, or ENOTTY if the
 * command is not recognized (letting the caller fall back or fail).
 */
int
vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	struct vm_seg_desc *vmsegdesc;
	struct vm_run *vmrun;
#ifdef COMPAT_FREEBSD13
	struct vm_run_13 *vmrun_13;
#endif
	struct vm_exception *vmexc;
	struct vm_lapic_irq *vmirq;
	struct vm_lapic_msi *vmmsi;
	struct vm_ioapic_irq *ioapic_irq;
	struct vm_isa_irq *isa_irq;
	struct vm_isa_irq_trigger *isa_irq_trigger;
	struct vm_pptdev *pptdev;
	struct vm_pptdev_mmio *pptmmio;
	struct vm_pptdev_msi *pptmsi;
	struct vm_pptdev_msix *pptmsix;
	struct vm_x2apic *x2apic;
	struct vm_gpa_pte *gpapte;
	struct vm_gla2gpa *gg;
	struct vm_intinfo *vmii;
	struct vm_rtc_time *rtctime;
	struct vm_rtc_data *rtcdata;
	struct vm_readwrite_kernemu_device *kernemu;
#ifdef BHYVE_SNAPSHOT
	struct vm_snapshot_meta *snapshot_meta;
#ifdef COMPAT_FREEBSD13
	struct vm_snapshot_meta_13 *snapshot_13;
#endif
#endif
	int error;

	error = 0;
	switch (cmd) {
	case VM_RUN: {
		struct vm_exit *vme;

		vmrun = (struct vm_run *)data;
		vme = vm_exitinfo(vcpu);

		/* Enter the guest; returns when the vcpu exits to userspace. */
		error = vm_run(vcpu);
		if (error != 0)
			break;

		/* Copy the exit description out to the user's buffer. */
		error = copyout(vme, vmrun->vm_exit, sizeof(*vme));
		if (error != 0)
			break;
		if (vme->exitcode == VM_EXITCODE_IPI) {
			/*
			 * An IPI exit also carries a destination cpuset.
			 * Copy out at most the kernel cpuset_t...
			 */
			error = copyout(vm_exitinfo_cpuset(vcpu),
			    vmrun->cpuset,
			    min(vmrun->cpusetsize, sizeof(cpuset_t)));
			if (error != 0)
				break;
			if (sizeof(cpuset_t) < vmrun->cpusetsize) {
				/*
				 * ...and zero any tail of a user buffer
				 * larger than the kernel's cpuset_t, one
				 * byte at a time via subyte().
				 */
				uint8_t *p;

				p = (uint8_t *)vmrun->cpuset +
				    sizeof(cpuset_t);
				while (p < (uint8_t *)vmrun->cpuset +
				    vmrun->cpusetsize) {
					if (subyte(p++, 0) != 0) {
						error = EFAULT;
						break;
					}
				}
			}
		}
		break;
	}
#ifdef COMPAT_FREEBSD13
	case VM_RUN_13: {
		struct vm_exit *vme;
		struct vm_exit_13 *vme_13;

		/*
		 * 13.x ABI: the exit info is embedded in the ioctl argument
		 * itself rather than copied out to a separate user pointer,
		 * so translate the current vm_exit into the old layout.
		 */
		vmrun_13 = (struct vm_run_13 *)data;
		vme_13 = &vmrun_13->vm_exit;
		vme = vm_exitinfo(vcpu);

		error = vm_run(vcpu);
		if (error == 0) {
			vme_13->exitcode = vme->exitcode;
			vme_13->inst_length = vme->inst_length;
			vme_13->rip = vme->rip;
			memcpy(vme_13->u, &vme->u, sizeof(vme_13->u));
			if (vme->exitcode == VM_EXITCODE_IPI) {
				struct vm_exit_ipi_13 *ipi;
				cpuset_t *dmask;
				int cpu;

				/*
				 * Convert the kernel cpuset destination
				 * mask into the old fixed 256-bit set;
				 * CPUs >= 256 cannot be represented and
				 * are dropped.
				 */
				dmask = vm_exitinfo_cpuset(vcpu);
				ipi = (struct vm_exit_ipi_13 *)&vme_13->u[0];
				BIT_ZERO(256, &ipi->dmask);
				CPU_FOREACH_ISSET(cpu, dmask) {
					if (cpu >= 256)
						break;
					BIT_SET(256, cpu, &ipi->dmask);
				}
			}
		}
		break;
	}
	case VM_STATS_13: {
		struct vm_stats_13 *vmstats_13;

		/* Old ABI returned all stats at once with a timestamp. */
		vmstats_13 = (struct vm_stats_13 *)data;
		getmicrotime(&vmstats_13->tv);
		error = vmm_stat_copy(vcpu, 0, nitems(vmstats_13->statbuf),
		    &vmstats_13->num_entries, vmstats_13->statbuf);
		break;
	}
#endif
	case VM_PPTDEV_MSI:
		pptmsi = (struct vm_pptdev_msi *)data;
		error = ppt_setup_msi(vm,
		    pptmsi->bus, pptmsi->slot, pptmsi->func,
		    pptmsi->addr, pptmsi->msg,
		    pptmsi->numvec);
		break;
	case VM_PPTDEV_MSIX:
		pptmsix = (struct vm_pptdev_msix *)data;
		error = ppt_setup_msix(vm,
		    pptmsix->bus, pptmsix->slot,
		    pptmsix->func, pptmsix->idx,
		    pptmsix->addr, pptmsix->msg,
		    pptmsix->vector_control);
		break;
	case VM_PPTDEV_DISABLE_MSIX:
		pptdev = (struct vm_pptdev *)data;
		error = ppt_disable_msix(vm, pptdev->bus, pptdev->slot,
		    pptdev->func);
		break;
	case VM_MAP_PPTDEV_MMIO:
		pptmmio = (struct vm_pptdev_mmio *)data;
		error = ppt_map_mmio(vm, pptmmio->bus, pptmmio->slot,
		    pptmmio->func, pptmmio->gpa, pptmmio->len,
		    pptmmio->hpa);
		break;
	case VM_UNMAP_PPTDEV_MMIO:
		pptmmio = (struct vm_pptdev_mmio *)data;
		error = ppt_unmap_mmio(vm, pptmmio->bus, pptmmio->slot,
		    pptmmio->func, pptmmio->gpa, pptmmio->len);
		break;
	case VM_BIND_PPTDEV:
		pptdev = (struct vm_pptdev *)data;
		error = vm_assign_pptdev(vm, pptdev->bus, pptdev->slot,
		    pptdev->func);
		break;
	case VM_UNBIND_PPTDEV:
		pptdev = (struct vm_pptdev *)data;
		error = vm_unassign_pptdev(vm, pptdev->bus, pptdev->slot,
		    pptdev->func);
		break;
	case VM_INJECT_EXCEPTION:
		vmexc = (struct vm_exception *)data;
		error = vm_inject_exception(vcpu,
		    vmexc->vector, vmexc->error_code_valid, vmexc->error_code,
		    vmexc->restart_instruction);
		break;
	case VM_INJECT_NMI:
		error = vm_inject_nmi(vcpu);
		break;
	case VM_LAPIC_IRQ:
		vmirq = (struct vm_lapic_irq *)data;
		error = lapic_intr_edge(vcpu, vmirq->vector);
		break;
	case VM_LAPIC_LOCAL_IRQ:
		vmirq = (struct vm_lapic_irq *)data;
		error = lapic_set_local_intr(vm, vcpu, vmirq->vector);
		break;
	case VM_LAPIC_MSI:
		vmmsi = (struct vm_lapic_msi *)data;
		error = lapic_intr_msi(vm, vmmsi->addr, vmmsi->msg);
		break;
	case VM_IOAPIC_ASSERT_IRQ:
		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_assert_irq(vm, ioapic_irq->irq);
		break;
	case VM_IOAPIC_DEASSERT_IRQ:
		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_deassert_irq(vm, ioapic_irq->irq);
		break;
	case VM_IOAPIC_PULSE_IRQ:
		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_pulse_irq(vm, ioapic_irq->irq);
		break;
	case VM_IOAPIC_PINCOUNT:
		*(int *)data = vioapic_pincount(vm);
		break;
	case VM_SET_KERNEMU_DEV:
	case VM_GET_KERNEMU_DEV: {
		/*
		 * Read or write a kernel-emulated device register on behalf
		 * of userspace.  The target device is selected purely by
		 * the guest physical address of the access.
		 */
		mem_region_write_t mwrite;
		mem_region_read_t mread;
		int size;
		bool arg;		/* dummy arg for the mmio callbacks */

		kernemu = (void *)data;

		/* access_width encodes log2 of the access size in bytes. */
		if (kernemu->access_width > 0)
			size = (1u << kernemu->access_width);
		else
			size = 1;

		if (kernemu->gpa >= DEFAULT_APIC_BASE &&
		    kernemu->gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
			mread = lapic_mmio_read;
			mwrite = lapic_mmio_write;
		} else if (kernemu->gpa >= VIOAPIC_BASE &&
		    kernemu->gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
			mread = vioapic_mmio_read;
			mwrite = vioapic_mmio_write;
		} else if (kernemu->gpa >= VHPET_BASE &&
		    kernemu->gpa < VHPET_BASE + VHPET_SIZE) {
			mread = vhpet_mmio_read;
			mwrite = vhpet_mmio_write;
		} else {
			/* GPA does not fall in any emulated device range. */
			error = EINVAL;
			break;
		}

		if (cmd == VM_SET_KERNEMU_DEV)
			error = mwrite(vcpu, kernemu->gpa,
			    kernemu->value, size, &arg);
		else
			error = mread(vcpu, kernemu->gpa,
			    &kernemu->value, size, &arg);
		break;
	}
	/*
	 * ISA IRQs are wired to both the virtual ATPIC and (optionally,
	 * when ioapic_irq != -1) the virtual I/O APIC.
	 */
	case VM_ISA_ASSERT_IRQ:
		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_assert_irq(vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_assert_irq(vm, isa_irq->ioapic_irq);
		break;
	case VM_ISA_DEASSERT_IRQ:
		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_deassert_irq(vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_deassert_irq(vm, isa_irq->ioapic_irq);
		break;
	case VM_ISA_PULSE_IRQ:
		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_pulse_irq(vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_pulse_irq(vm, isa_irq->ioapic_irq);
		break;
	case VM_ISA_SET_IRQ_TRIGGER:
		isa_irq_trigger = (struct vm_isa_irq_trigger *)data;
		error = vatpic_set_irq_trigger(vm,
		    isa_irq_trigger->atpic_irq, isa_irq_trigger->trigger);
		break;
	case VM_SET_SEGMENT_DESCRIPTOR:
		vmsegdesc = (struct vm_seg_desc *)data;
		error = vm_set_seg_desc(vcpu,
		    vmsegdesc->regnum,
		    &vmsegdesc->desc);
		break;
	case VM_GET_SEGMENT_DESCRIPTOR:
		vmsegdesc = (struct vm_seg_desc *)data;
		error = vm_get_seg_desc(vcpu,
		    vmsegdesc->regnum,
		    &vmsegdesc->desc);
		break;
	case VM_SET_X2APIC_STATE:
		x2apic = (struct vm_x2apic *)data;
		error = vm_set_x2apic_state(vcpu, x2apic->state);
		break;
	case VM_GET_X2APIC_STATE:
		x2apic = (struct vm_x2apic *)data;
		error = vm_get_x2apic_state(vcpu, &x2apic->state);
		break;
	case VM_GET_GPA_PMAP:
		/* Fetch the page-table entries mapping a guest PA. */
		gpapte = (struct vm_gpa_pte *)data;
		pmap_get_mapping(vmspace_pmap(vm_vmspace(vm)),
		    gpapte->gpa, gpapte->pte, &gpapte->ptenum);
		error = 0;
		break;
	case VM_GET_HPET_CAPABILITIES:
		error = vhpet_getcap((struct vm_hpet_cap *)data);
		break;
	case VM_GLA2GPA: {
		/*
		 * Translate a guest linear address to a guest physical
		 * address.  The ioctl reuses the mmap PROT_* bits, which
		 * must match the kernel VM_PROT_* values.
		 */
		CTASSERT(PROT_READ == VM_PROT_READ);
		CTASSERT(PROT_WRITE == VM_PROT_WRITE);
		CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
		gg = (struct vm_gla2gpa *)data;
		error = vm_gla2gpa(vcpu, &gg->paging, gg->gla,
		    gg->prot, &gg->gpa, &gg->fault);
		KASSERT(error == 0 || error == EFAULT,
		    ("%s: vm_gla2gpa unknown error %d", __func__, error));
		break;
	}
	case VM_GLA2GPA_NOFAULT:
		/* Same translation, but never injects a fault in the guest. */
		gg = (struct vm_gla2gpa *)data;
		error = vm_gla2gpa_nofault(vcpu, &gg->paging, gg->gla,
		    gg->prot, &gg->gpa, &gg->fault);
		KASSERT(error == 0 || error == EFAULT,
		    ("%s: vm_gla2gpa unknown error %d", __func__, error));
		break;
	case VM_SET_INTINFO:
		vmii = (struct vm_intinfo *)data;
		error = vm_exit_intinfo(vcpu, vmii->info1);
		break;
	case VM_GET_INTINFO:
		vmii = (struct vm_intinfo *)data;
		error = vm_get_intinfo(vcpu, &vmii->info1, &vmii->info2);
		break;
	case VM_RTC_WRITE:
		rtcdata = (struct vm_rtc_data *)data;
		error = vrtc_nvram_write(vm, rtcdata->offset,
		    rtcdata->value);
		break;
	case VM_RTC_READ:
		rtcdata = (struct vm_rtc_data *)data;
		error = vrtc_nvram_read(vm, rtcdata->offset,
		    &rtcdata->value);
		break;
	case VM_RTC_SETTIME:
		rtctime = (struct vm_rtc_time *)data;
		error = vrtc_set_time(vm, rtctime->secs);
		break;
	case VM_RTC_GETTIME:
		error = 0;
		rtctime = (struct vm_rtc_time *)data;
		rtctime->secs = vrtc_get_time(vm);
		break;
	case VM_RESTART_INSTRUCTION:
		error = vm_restart_instruction(vcpu);
		break;
#ifdef BHYVE_SNAPSHOT
	case VM_SNAPSHOT_REQ:
		snapshot_meta = (struct vm_snapshot_meta *)data;
		error = vm_snapshot_req(vm, snapshot_meta);
		break;
#ifdef COMPAT_FREEBSD13
	case VM_SNAPSHOT_REQ_13:
		/*
		 * The old structure just has an additional pointer at
		 * the start that is ignored.
		 */
		snapshot_13 = (struct vm_snapshot_meta_13 *)data;
		snapshot_meta =
		    (struct vm_snapshot_meta *)&snapshot_13->dev_data;
		error = vm_snapshot_req(vm, snapshot_meta);
		break;
#endif
	case VM_RESTORE_TIME:
		error = vm_restore_time(vm);
		break;
#endif
	default:
		/* Not a machine-dependent ioctl. */
		error = ENOTTY;
		break;
	}

	return (error);
}
528