xref: /freebsd/usr.sbin/bhyve/snapshot.c (revision c9fdd4f3cc18c03683de85318ba8d318f96b58c4)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2016 Flavius Anton
5  * Copyright (c) 2016 Mihai Tiganus
6  * Copyright (c) 2016-2019 Mihai Carabas
7  * Copyright (c) 2017-2019 Darius Mihai
8  * Copyright (c) 2017-2019 Elena Mihailescu
9  * Copyright (c) 2018-2019 Sergiu Weisz
10  * All rights reserved.
11  * The bhyve-snapshot feature was developed under sponsorships
12  * from Matthew Grooms.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/types.h>
40 #ifndef WITHOUT_CAPSICUM
41 #include <sys/capsicum.h>
42 #endif
43 #include <sys/mman.h>
44 #include <sys/socket.h>
45 #include <sys/stat.h>
46 #include <sys/time.h>
47 #include <sys/un.h>
48 
49 #include <machine/atomic.h>
50 #include <machine/segments.h>
51 
52 #ifndef WITHOUT_CAPSICUM
53 #include <capsicum_helpers.h>
54 #endif
55 #include <stdio.h>
56 #include <stdlib.h>
57 #include <string.h>
58 #include <err.h>
59 #include <errno.h>
60 #include <fcntl.h>
61 #include <libgen.h>
62 #include <signal.h>
63 #include <unistd.h>
64 #include <assert.h>
66 #include <pthread.h>
67 #include <pthread_np.h>
68 #include <sysexits.h>
69 #include <stdbool.h>
70 #include <sys/ioctl.h>
71 
72 #include <machine/vmm.h>
73 #ifndef WITHOUT_CAPSICUM
74 #include <machine/vmm_dev.h>
75 #endif
76 #include <machine/vmm_snapshot.h>
77 #include <vmmapi.h>
78 
79 #include "bhyverun.h"
80 #include "acpi.h"
81 #include "atkbdc.h"
82 #include "debug.h"
83 #include "inout.h"
84 #include "ipc.h"
85 #include "fwctl.h"
86 #include "ioapic.h"
87 #include "mem.h"
88 #include "mevent.h"
89 #include "migration.h"
90 #include "mptbl.h"
91 #include "pci_emul.h"
92 #include "pci_irq.h"
93 #include "pci_lpc.h"
94 #include "smbiostbl.h"
95 #include "snapshot.h"
96 #include "xmsr.h"
97 #include "spinup_ap.h"
98 #include "rtc.h"
99 
100 #include <libxo/xo.h>
101 #include <ucl.h>
102 
103 struct spinner_info {
104 	const size_t *crtval;
105 	const size_t maxval;
106 	const size_t total;
107 };
108 
109 extern int guest_ncpus;
110 
111 static struct winsize winsize;
112 static sig_t old_winch_handler;
113 
114 #define	KB		(1024UL)
115 #define	MB		(1024UL * KB)
116 #define	GB		(1024UL * MB)
117 
118 #define	SNAPSHOT_CHUNK	(4 * MB)
119 #define	PROG_BUF_SZ	(8192)
120 
121 #define	SNAPSHOT_BUFFER_SIZE (20 * MB)
122 
123 #define	JSON_KERNEL_ARR_KEY		"kern_structs"
124 #define	JSON_DEV_ARR_KEY		"devices"
125 #define	JSON_BASIC_METADATA_KEY 	"basic metadata"
126 #define	JSON_SNAPSHOT_REQ_KEY		"device"
127 #define	JSON_SIZE_KEY			"size"
128 #define	JSON_FILE_OFFSET_KEY		"file_offset"
129 
130 #define	JSON_NCPUS_KEY			"ncpus"
131 #define	JSON_VMNAME_KEY 		"vmname"
132 #define	JSON_MEMSIZE_KEY		"memsize"
133 #define	JSON_MEMFLAGS_KEY		"memflags"
134 
135 #define min(a,b)		\
136 ({				\
137  __typeof__ (a) _a = (a);	\
138  __typeof__ (b) _b = (b); 	\
139  _a < _b ? _a : _b;       	\
140  })
141 
142 static const struct vm_snapshot_kern_info snapshot_kern_structs[] = {
143 	{ "vhpet",	STRUCT_VHPET	},
144 	{ "vm",		STRUCT_VM	},
145 	{ "vioapic",	STRUCT_VIOAPIC	},
146 	{ "vlapic",	STRUCT_VLAPIC	},
147 	{ "vmcx",	STRUCT_VMCX	},
148 	{ "vatpit",	STRUCT_VATPIT	},
149 	{ "vatpic",	STRUCT_VATPIC	},
150 	{ "vpmtmr",	STRUCT_VPMTMR	},
151 	{ "vrtc",	STRUCT_VRTC	},
152 };
153 
154 static cpuset_t vcpus_active, vcpus_suspended;
155 static pthread_mutex_t vcpu_lock;
156 static pthread_cond_t vcpus_idle, vcpus_can_run;
157 static bool checkpoint_active;
158 
159 /*
160  * TODO: Harden this function and all of its callers since 'base_str' is a user
161  * provided string.
162  */
163 static char *
164 strcat_extension(const char *base_str, const char *ext)
165 {
166 	char *res;
167 	size_t base_len, ext_len;
168 
169 	base_len = strnlen(base_str, NAME_MAX);
170 	ext_len = strnlen(ext, NAME_MAX);
171 
172 	if (base_len + ext_len > NAME_MAX) {
173 		fprintf(stderr, "Filename exceeds maximum length.\n");
174 		return (NULL);
175 	}
176 
177 	res = malloc(base_len + ext_len + 1);
178 	if (res == NULL) {
179 		perror("Failed to allocate memory.");
180 		return (NULL);
181 	}
182 
183 	memcpy(res, base_str, base_len);
184 	memcpy(res + base_len, ext, ext_len);
185 	res[base_len + ext_len] = 0;
186 
187 	return (res);
188 }
189 
190 void
191 destroy_restore_state(struct restore_state *rstate)
192 {
193 	if (rstate == NULL) {
194 		fprintf(stderr, "Attempting to destroy NULL restore struct.\n");
195 		return;
196 	}
197 
198 	if (rstate->kdata_map != MAP_FAILED)
199 		munmap(rstate->kdata_map, rstate->kdata_len);
200 
201 	if (rstate->kdata_fd > 0)
202 		close(rstate->kdata_fd);
203 	if (rstate->vmmem_fd > 0)
204 		close(rstate->vmmem_fd);
205 
206 	if (rstate->meta_root_obj != NULL)
207 		ucl_object_unref(rstate->meta_root_obj);
208 	if (rstate->meta_parser != NULL)
209 		ucl_parser_free(rstate->meta_parser);
210 }
211 
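/*
 * Open the file holding the snapshotted guest memory and record its size.
 * The contents are read back into the guest address space during restore.
 */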
212 static int
213 load_vmmem_file(const char *filename, struct restore_state *rstate)
214 {
215 	struct stat sb;
216 	int err;
217 
218 	rstate->vmmem_fd = open(filename, O_RDONLY);
219 	if (rstate->vmmem_fd < 0) {
220 		perror("Failed to open restore file");
221 		return (-1);
222 	}
223 
224 	err = fstat(rstate->vmmem_fd, &sb);
225 	if (err < 0) {
226 		perror("Failed to stat restore file");
227 		goto err_load_vmmem;
228 	}
229 
230 	if (sb.st_size == 0) {
231 		fprintf(stderr, "Restore file is empty.\n");
232 		goto err_load_vmmem;
233 	}
234 
235 	rstate->vmmem_len = sb.st_size;
236 
237 	return (0);
238 
239 err_load_vmmem:
240 	if (rstate->vmmem_fd > 0)
241 		close(rstate->vmmem_fd);
242 	return (-1);
243 }
244 
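/*
 * Open the ".kern" file containing the saved kernel structure data and map
 * it read-only so individual structures can later be located by offset.
 */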
245 static int
246 load_kdata_file(const char *filename, struct restore_state *rstate)
247 {
248 	struct stat sb;
249 	int err;
250 
251 	rstate->kdata_fd = open(filename, O_RDONLY);
252 	if (rstate->kdata_fd < 0) {
253 		perror("Failed to open kernel data file");
254 		return (-1);
255 	}
256 
257 	err = fstat(rstate->kdata_fd, &sb);
258 	if (err < 0) {
259 		perror("Failed to stat kernel data file");
260 		goto err_load_kdata;
261 	}
262 
263 	if (sb.st_size == 0) {
264 		fprintf(stderr, "Kernel data file is empty.\n");
265 		goto err_load_kdata;
266 	}
267 
268 	rstate->kdata_len = sb.st_size;
269 	rstate->kdata_map = mmap(NULL, rstate->kdata_len, PROT_READ,
270 				 MAP_SHARED, rstate->kdata_fd, 0);
271 	if (rstate->kdata_map == MAP_FAILED) {
272 		perror("Failed to map restore file");
273 		goto err_load_kdata;
274 	}
275 
276 	return (0);
277 
278 err_load_kdata:
279 	if (rstate->kdata_fd > 0)
280 		close(rstate->kdata_fd);
281 	return (-1);
282 }
283 
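/*
 * Parse the ".meta" file with libucl.  The resulting object tree describes
 * where each saved structure and device lives in the kernel data file.
 */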
284 static int
285 load_metadata_file(const char *filename, struct restore_state *rstate)
286 {
287 	ucl_object_t *obj;
288 	struct ucl_parser *parser;
289 	int err;
290 
291 	parser = ucl_parser_new(UCL_PARSER_DEFAULT);
292 	if (parser == NULL) {
293 		fprintf(stderr, "Failed to initialize UCL parser.\n");
294 		err = -1;
295 		goto err_load_metadata;
296 	}
297 
298 	err = ucl_parser_add_file(parser, filename);
299 	if (err == 0) {
300 		fprintf(stderr, "Failed to parse metadata file: '%s'\n",
301 			filename);
302 		err = -1;
303 		goto err_load_metadata;
304 	}
305 
306 	obj = ucl_parser_get_object(parser);
307 	if (obj == NULL) {
308 		fprintf(stderr, "Failed to parse object.\n");
309 		err = -1;
310 		goto err_load_metadata;
311 	}
312 
313 	rstate->meta_parser = parser;
314 	rstate->meta_root_obj = (ucl_object_t *)obj;
315 
316 	return (0);
317 
318 err_load_metadata:
319 	if (parser != NULL)
320 		ucl_parser_free(parser);
321 	return (err);
322 }
323 
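/*
 * Load the three components of a snapshot: the guest memory file named by
 * 'filename' plus its ".kern" and ".meta" companions.
 */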
324 int
325 load_restore_file(const char *filename, struct restore_state *rstate)
326 {
327 	int err = 0;
328 	char *kdata_filename = NULL, *meta_filename = NULL;
329 
330 	assert(filename != NULL);
331 	assert(rstate != NULL);
332 
333 	memset(rstate, 0, sizeof(*rstate));
334 	rstate->kdata_map = MAP_FAILED;
335 
336 	err = load_vmmem_file(filename, rstate);
337 	if (err != 0) {
338 		fprintf(stderr, "Failed to load guest RAM file.\n");
339 		goto err_restore;
340 	}
341 
342 	kdata_filename = strcat_extension(filename, ".kern");
343 	if (kdata_filename == NULL) {
344 		fprintf(stderr, "Failed to construct kernel data filename.\n");
345 		goto err_restore;
346 	}
347 
348 	err = load_kdata_file(kdata_filename, rstate);
349 	if (err != 0) {
350 		fprintf(stderr, "Failed to load guest kernel data file.\n");
351 		goto err_restore;
352 	}
353 
354 	meta_filename = strcat_extension(filename, ".meta");
355 	if (meta_filename == NULL) {
356 		fprintf(stderr, "Failed to construct kernel metadata filename.\n");
357 		goto err_restore;
358 	}
359 
360 	err = load_metadata_file(meta_filename, rstate);
361 	if (err != 0) {
362 		fprintf(stderr, "Failed to load guest metadata file.\n");
363 		goto err_restore;
364 	}
365 
366 	return (0);
367 
368 err_restore:
369 	destroy_restore_state(rstate);
370 	if (kdata_filename != NULL)
371 		free(kdata_filename);
372 	if (meta_filename != NULL)
373 		free(meta_filename);
374 	return (-1);
375 }
376 
377 #define JSON_GET_INT_OR_RETURN(key, obj, result_ptr, ret)			\
378 do {										\
379 	const ucl_object_t *obj__;						\
380 	obj__ = ucl_object_lookup(obj, key);					\
381 	if (obj__ == NULL) {							\
382 		fprintf(stderr, "Missing key: '%s'", key);			\
383 		return (ret);							\
384 	}									\
385 	if (!ucl_object_toint_safe(obj__, result_ptr)) {			\
386 		fprintf(stderr, "Cannot convert '%s' value to int.", key);	\
387 		return (ret);							\
388 	}									\
389 } while(0)
390 
391 #define JSON_GET_STRING_OR_RETURN(key, obj, result_ptr, ret)			\
392 do {										\
393 	const ucl_object_t *obj__;						\
394 	obj__ = ucl_object_lookup(obj, key);					\
395 	if (obj__ == NULL) {							\
396 		fprintf(stderr, "Missing key: '%s'", key);			\
397 		return (ret);							\
398 	}									\
399 	if (!ucl_object_tostring_safe(obj__, result_ptr)) {			\
400 		fprintf(stderr, "Cannot convert '%s' value to string.", key);	\
401 		return (ret);							\
402 	}									\
403 } while(0)
404 
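/*
 * If 'obj' describes the device or structure named 'dev_name', return a
 * pointer to its data inside the mapped kernel data file and report the
 * data size through 'data_size'; otherwise return NULL.
 */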
405 static void *
406 lookup_check_dev(const char *dev_name, struct restore_state *rstate,
407 		 const ucl_object_t *obj, size_t *data_size)
408 {
409 	const char *snapshot_req;
410 	int64_t size, file_offset;
411 
412 	snapshot_req = NULL;
413 	JSON_GET_STRING_OR_RETURN(JSON_SNAPSHOT_REQ_KEY, obj,
414 				  &snapshot_req, NULL);
415 	assert(snapshot_req != NULL);
416 	if (!strcmp(snapshot_req, dev_name)) {
417 		JSON_GET_INT_OR_RETURN(JSON_SIZE_KEY, obj,
418 				       &size, NULL);
419 		assert(size >= 0);
420 
421 		JSON_GET_INT_OR_RETURN(JSON_FILE_OFFSET_KEY, obj,
422 				       &file_offset, NULL);
423 		assert(file_offset >= 0);
424 		assert((uint64_t)file_offset + size <= rstate->kdata_len);
425 
426 		*data_size = (size_t)size;
427 		return ((uint8_t *)rstate->kdata_map + file_offset);
428 	}
429 
430 	return (NULL);
431 }
432 
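/*
 * Search the metadata array named by 'key' ("devices" or "kern_structs")
 * for an entry matching 'dev_name' and return its saved data.
 */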
433 static void *
434 lookup_dev(const char *dev_name, const char *key, struct restore_state *rstate,
435     size_t *data_size)
436 {
437 	const ucl_object_t *devs = NULL, *obj = NULL;
438 	ucl_object_iter_t it = NULL;
439 	void *ret;
440 
441 	devs = ucl_object_lookup(rstate->meta_root_obj, key);
442 	if (devs == NULL) {
443 		fprintf(stderr, "Failed to find '%s' object.\n",
444 			key);
445 		return (NULL);
446 	}
447 
448 	if (ucl_object_type(devs) != UCL_ARRAY) {
449 		fprintf(stderr, "Object '%s' is not an array.\n",
450 			key);
451 		return (NULL);
452 	}
453 
454 	while ((obj = ucl_object_iterate(devs, &it, true)) != NULL) {
455 		ret = lookup_check_dev(dev_name, rstate, obj, data_size);
456 		if (ret != NULL)
457 			return (ret);
458 	}
459 
460 	return (NULL);
461 }
462 
463 static const ucl_object_t *
464 lookup_basic_metadata_object(struct restore_state *rstate)
465 {
466 	const ucl_object_t *basic_meta_obj = NULL;
467 
468 	basic_meta_obj = ucl_object_lookup(rstate->meta_root_obj,
469 					   JSON_BASIC_METADATA_KEY);
470 	if (basic_meta_obj == NULL) {
471 		fprintf(stderr, "Failed to find '%s' object.\n",
472 			JSON_BASIC_METADATA_KEY);
473 		return (NULL);
474 	}
475 
476 	if (ucl_object_type(basic_meta_obj) != UCL_OBJECT) {
477 		fprintf(stderr, "Object '%s' is not a JSON object.\n",
478 			JSON_BASIC_METADATA_KEY);
479 		return (NULL);
480 	}
481 
482 	return (basic_meta_obj);
483 }
484 
485 const char *
486 lookup_vmname(struct restore_state *rstate)
487 {
488 	const char *vmname;
489 	const ucl_object_t *obj;
490 
491 	obj = lookup_basic_metadata_object(rstate);
492 	if (obj == NULL)
493 		return (NULL);
494 
495 	JSON_GET_STRING_OR_RETURN(JSON_VMNAME_KEY, obj, &vmname, NULL);
496 	return (vmname);
497 }
498 
499 int
500 lookup_memflags(struct restore_state *rstate)
501 {
502 	int64_t memflags;
503 	const ucl_object_t *obj;
504 
505 	obj = lookup_basic_metadata_object(rstate);
506 	if (obj == NULL)
507 		return (0);
508 
509 	JSON_GET_INT_OR_RETURN(JSON_MEMFLAGS_KEY, obj, &memflags, 0);
510 
511 	return ((int)memflags);
512 }
513 
514 size_t
515 lookup_memsize(struct restore_state *rstate)
516 {
517 	int64_t memsize;
518 	const ucl_object_t *obj;
519 
520 	obj = lookup_basic_metadata_object(rstate);
521 	if (obj == NULL)
522 		return (0);
523 
524 	JSON_GET_INT_OR_RETURN(JSON_MEMSIZE_KEY, obj, &memsize, 0);
525 	if (memsize < 0)
526 		memsize = 0;
527 
528 	return ((size_t)memsize);
529 }
530 
531 
532 int
533 lookup_guest_ncpus(struct restore_state *rstate)
534 {
535 	int64_t ncpus;
536 	const ucl_object_t *obj;
537 
538 	obj = lookup_basic_metadata_object(rstate);
539 	if (obj == NULL)
540 		return (0);
541 
542 	JSON_GET_INT_OR_RETURN(JSON_NCPUS_KEY, obj, &ncpus, 0);
543 	return ((int)ncpus);
544 }
545 
546 static void
547 winch_handler(int signal __unused)
548 {
549 #ifdef TIOCGWINSZ
550 	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
551 #endif /* TIOCGWINSZ */
552 }
553 
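/*
 * Render a single-line progress bar, scaled to the terminal width, showing
 * how much of the guest memory transfer has completed.
 */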
554 static int
555 print_progress(size_t crtval, const size_t maxval)
556 {
557 	size_t rc;
558 	double crtval_gb, maxval_gb;
559 	size_t i, win_width, prog_start, prog_done, prog_end;
560 	int mval_len;
561 
562 	static char prog_buf[PROG_BUF_SZ];
563 	static const size_t len = sizeof(prog_buf);
564 
565 	static size_t div;
566 	static const char *div_str;
567 
568 	static char wip_bar[] = { '/', '-', '\\', '|' };
569 	static int wip_idx = 0;
570 
571 	if (maxval == 0) {
572 		printf("[0B / 0B]\r\n");
573 		return (0);
574 	}
575 
576 	if (crtval > maxval)
577 		crtval = maxval;
578 
579 	if (maxval > 10 * GB) {
580 		div = GB;
581 		div_str = "GiB";
582 	} else if (maxval > 10 * MB) {
583 		div = MB;
584 		div_str = "MiB";
585 	} else {
586 		div = KB;
587 		div_str = "KiB";
588 	}
589 
590 	crtval_gb = (double) crtval / div;
591 	maxval_gb = (double) maxval / div;
592 
593 	rc = snprintf(prog_buf, len, "%.03lf", maxval_gb);
594 	if (rc >= len) {
595 		fprintf(stderr, "Maxval too big\n");
596 		return (-1);
597 	}
598 	mval_len = rc;
599 
600 	rc = snprintf(prog_buf, len, "\r[%*.03lf%s / %.03lf%s] |",
601 		mval_len, crtval_gb, div_str, maxval_gb, div_str);
602 
603 	if (rc >= len) {
604 		fprintf(stderr, "Buffer too small to print progress\n");
605 		return (-1);
606 	}
607 
608 	win_width = min(winsize.ws_col, len);
609 	prog_start = rc;
610 
611 	if (prog_start < (win_width - 2)) {
612 		prog_end = win_width - prog_start - 2;
613 		prog_done = prog_end * (crtval_gb / maxval_gb);
614 
615 		for (i = prog_start; i < prog_start + prog_done; i++)
616 			prog_buf[i] = '#';
617 
618 		if (crtval != maxval) {
619 			prog_buf[i] = wip_bar[wip_idx];
620 			wip_idx = (wip_idx + 1) % sizeof(wip_bar);
621 			i++;
622 		} else {
623 			prog_buf[i++] = '#';
624 		}
625 
626 		for (; i < win_width - 2; i++)
627 			prog_buf[i] = '_';
628 
629 		prog_buf[win_width - 2] = '|';
630 	}
631 
632 	prog_buf[win_width - 1] = '\0';
633 	write(STDOUT_FILENO, prog_buf, win_width);
634 
635 	return (0);
636 }
637 
638 static void *
639 snapshot_spinner_cb(void *arg)
640 {
641 	int rc;
642 	size_t crtval, maxval, total;
643 	struct spinner_info *si;
644 	struct timespec ts;
645 
646 	si = arg;
647 	if (si == NULL)
648 		pthread_exit(NULL);
649 
650 	ts.tv_sec = 0;
651 	ts.tv_nsec = 50 * 1000 * 1000; /* 50 ms sleep time */
652 
653 	do {
654 		crtval = *si->crtval;
655 		maxval = si->maxval;
656 		total = si->total;
657 
658 		rc = print_progress(crtval, total);
659 		if (rc < 0) {
660 			fprintf(stderr, "Failed to print progress\n");
661 			break;
662 		}
663 
664 		nanosleep(&ts, NULL);
665 	} while (crtval < maxval);
666 
667 	pthread_exit(NULL);
668 	return (NULL);
669 }
670 
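/*
 * Write (or read, depending on 'op_wr') one contiguous part of guest memory
 * at file offset 'foff', reporting progress through a spinner thread when
 * running on a terminal.
 */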
671 static int
672 vm_snapshot_mem_part(const int snapfd, const size_t foff, void *src,
673 		     const size_t len, const size_t totalmem, const bool op_wr)
674 {
675 	int rc;
676 	size_t part_done, todo, rem;
677 	ssize_t done;
678 	bool show_progress;
679 	pthread_t spinner_th;
680 	struct spinner_info *si;
681 
682 	if (lseek(snapfd, foff, SEEK_SET) < 0) {
683 		perror("Failed to change file offset");
684 		return (-1);
685 	}
686 
687 	show_progress = false;
688 	if (isatty(STDIN_FILENO) && (winsize.ws_col != 0))
689 		show_progress = true;
690 
691 	part_done = foff;
692 	rem = len;
693 
694 	if (show_progress) {
695 		si = &(struct spinner_info) {
696 			.crtval = &part_done,
697 			.maxval = foff + len,
698 			.total = totalmem
699 		};
700 
701 		rc = pthread_create(&spinner_th, 0, snapshot_spinner_cb, si);
702 		if (rc) {
703 			perror("Unable to create spinner thread");
704 			show_progress = false;
705 		}
706 	}
707 
708 	while (rem > 0) {
709 		if (show_progress)
710 			todo = min(SNAPSHOT_CHUNK, rem);
711 		else
712 			todo = rem;
713 
714 		if (op_wr)
715 			done = write(snapfd, src, todo);
716 		else
717 			done = read(snapfd, src, todo);
718 		if (done < 0) {
719 			perror(op_wr ? "Failed to write to file" :
			    "Failed to read from file");
720 			return (-1);
721 		}
722 
723 		src = (uint8_t *)src + done;
724 		part_done += done;
725 		rem -= done;
726 	}
727 
728 	if (show_progress) {
729 		rc = pthread_join(spinner_th, NULL);
730 		if (rc)
731 			perror("Unable to end spinner thread");
732 	}
733 
734 	return (0);
735 }
736 
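/*
 * Save or restore all of guest memory: lowmem first, then highmem, which is
 * mapped above 4 GiB.  Returns the number of bytes handled, or 0 on error.
 */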
737 static size_t
738 vm_snapshot_mem(struct vmctx *ctx, int snapfd, size_t memsz, const bool op_wr)
739 {
740 	int ret;
741 	size_t lowmem, highmem, totalmem;
742 	char *baseaddr;
743 
744 	ret = vm_get_guestmem_from_ctx(ctx, &baseaddr, &lowmem, &highmem);
745 	if (ret) {
746 		fprintf(stderr, "%s: unable to retrieve guest memory size\r\n",
747 			__func__);
748 		return (0);
749 	}
750 	totalmem = lowmem + highmem;
751 
752 	if ((op_wr == false) && (totalmem != memsz)) {
753 		fprintf(stderr, "%s: mem size mismatch: %ld vs %ld\r\n",
754 			__func__, totalmem, memsz);
755 		return (0);
756 	}
757 
758 	winsize.ws_col = 80;
759 #ifdef TIOCGWINSZ
760 	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
761 #endif /* TIOCGWINSZ */
762 	old_winch_handler = signal(SIGWINCH, winch_handler);
763 
764 	ret = vm_snapshot_mem_part(snapfd, 0, baseaddr, lowmem,
765 		totalmem, op_wr);
766 	if (ret) {
767 		fprintf(stderr, "%s: Could not %s lowmem\r\n",
768 			__func__, op_wr ? "write" : "read");
769 		totalmem = 0;
770 		goto done;
771 	}
772 
773 	if (highmem == 0)
774 		goto done;
775 
776 	ret = vm_snapshot_mem_part(snapfd, lowmem, baseaddr + 4*GB,
777 		highmem, totalmem, op_wr);
778 	if (ret) {
779 		fprintf(stderr, "%s: Could not %s highmem\r\n",
780 		        __func__, op_wr ? "write" : "read");
781 		totalmem = 0;
782 		goto done;
783 	}
784 
785 done:
786 	printf("\r\n");
787 	signal(SIGWINCH, old_winch_handler);
788 
789 	return (totalmem);
790 }
791 
792 int
793 restore_vm_mem(struct vmctx *ctx, struct restore_state *rstate)
794 {
795 	size_t restored;
796 
797 	restored = vm_snapshot_mem(ctx, rstate->vmmem_fd, rstate->vmmem_len,
798 				   false);
799 
800 	if (restored != rstate->vmmem_len)
801 		return (-1);
802 
803 	return (0);
804 }
805 
806 int
807 vm_restore_kern_structs(struct vmctx *ctx, struct restore_state *rstate)
808 {
809 	for (unsigned i = 0; i < nitems(snapshot_kern_structs); i++) {
810 		const struct vm_snapshot_kern_info *info;
811 		struct vm_snapshot_meta *meta;
812 		void *data;
813 		size_t size;
814 
815 		info = &snapshot_kern_structs[i];
816 		data = lookup_dev(info->struct_name, JSON_KERNEL_ARR_KEY, rstate, &size);
817 		if (data == NULL)
818 			errx(EX_DATAERR, "Cannot find kern struct %s",
819 			    info->struct_name);
820 
821 		if (size == 0)
822 			errx(EX_DATAERR, "data with zero size for %s",
823 			    info->struct_name);
824 
825 		meta = &(struct vm_snapshot_meta) {
826 			.dev_name = info->struct_name,
827 			.dev_req  = info->req,
828 
829 			.buffer.buf_start = data,
830 			.buffer.buf_size = size,
831 
832 			.buffer.buf = data,
833 			.buffer.buf_rem = size,
834 
835 			.op = VM_SNAPSHOT_RESTORE,
836 		};
837 
838 		if (vm_snapshot_req(ctx, meta))
839 			err(EX_DATAERR, "Failed to restore %s",
840 			    info->struct_name);
841 	}
842 	return (0);
843 }
844 
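/*
 * Locate the saved state of a single device in the snapshot metadata and
 * hand it to the device's snapshot callback to be restored.
 */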
845 static int
846 vm_restore_device(struct restore_state *rstate, vm_snapshot_dev_cb func,
847     const char *name, void *data)
848 {
849 	void *dev_ptr;
850 	size_t dev_size;
851 	int ret;
852 	struct vm_snapshot_meta *meta;
853 
854 	dev_ptr = lookup_dev(name, JSON_DEV_ARR_KEY, rstate, &dev_size);
855 
856 	if (dev_ptr == NULL) {
857 		EPRINTLN("Failed to lookup dev: %s", name);
858 		return (EINVAL);
859 	}
860 
861 	if (dev_size == 0) {
862 		EPRINTLN("Restore device size is 0: %s", name);
863 		return (EINVAL);
864 	}
865 
866 	meta = &(struct vm_snapshot_meta) {
867 		.dev_name = name,
868 		.dev_data = data,
869 
870 		.buffer.buf_start = dev_ptr,
871 		.buffer.buf_size = dev_size,
872 
873 		.buffer.buf = dev_ptr,
874 		.buffer.buf_rem = dev_size,
875 
876 		.op = VM_SNAPSHOT_RESTORE,
877 	};
878 
879 	ret = func(meta);
880 	if (ret != 0) {
881 		EPRINTLN("Failed to restore dev: %s %d", name, ret);
882 		return (ret);
883 	}
884 
885 	return (0);
886 }
887 
888 int
889 vm_restore_devices(struct restore_state *rstate)
890 {
891 	int ret;
892 	struct pci_devinst *pdi = NULL;
893 
894 	while ((pdi = pci_next(pdi)) != NULL) {
895 		ret = vm_restore_device(rstate, pci_snapshot, pdi->pi_name, pdi);
896 		if (ret)
897 			return (ret);
898 	}
899 
900 	return (vm_restore_device(rstate, atkbdc_snapshot, "atkbdc", NULL));
901 }
902 
903 int
904 vm_pause_devices(void)
905 {
906 	int ret;
907 	struct pci_devinst *pdi = NULL;
908 
909 	while ((pdi = pci_next(pdi)) != NULL) {
910 		ret = pci_pause(pdi);
911 		if (ret) {
912 			EPRINTLN("Cannot pause dev %s: %d", pdi->pi_name, ret);
913 			return (ret);
914 		}
915 	}
916 
917 	return (0);
918 }
919 
920 int
921 vm_resume_devices(void)
922 {
923 	int ret;
924 	struct pci_devinst *pdi = NULL;
925 
926 	while ((pdi = pci_next(pdi)) != NULL) {
927 		ret = pci_resume(pdi);
928 		if (ret) {
929 			EPRINTLN("Cannot resume '%s': %d", pdi->pi_name, ret);
930 			return (ret);
931 		}
932 	}
933 
934 	return (0);
935 }
936 
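/*
 * Snapshot one in-kernel structure via vm_snapshot_req(), append the raw
 * data to the kernel data file and record its size and file offset in the
 * metadata.
 */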
937 static int
938 vm_save_kern_struct(struct vmctx *ctx, int data_fd, xo_handle_t *xop,
939     const char *array_key, struct vm_snapshot_meta *meta, off_t *offset)
940 {
941 	int ret;
942 	size_t data_size;
943 	ssize_t write_cnt;
944 
945 	ret = vm_snapshot_req(ctx, meta);
946 	if (ret != 0) {
947 		fprintf(stderr, "%s: Failed to snapshot struct %s\r\n",
948 			__func__, meta->dev_name);
949 		ret = -1;
950 		goto done;
951 	}
952 
953 	data_size = vm_get_snapshot_size(meta);
954 
955 	/* XXX-MJ no handling for short writes. */
956 	write_cnt = write(data_fd, meta->buffer.buf_start, data_size);
957 	if (write_cnt < 0 || (size_t)write_cnt != data_size) {
958 		perror("Failed to write all snapshotted data.");
959 		ret = -1;
960 		goto done;
961 	}
962 
963 	/* Write metadata. */
964 	xo_open_instance_h(xop, array_key);
965 	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n",
966 	    meta->dev_name);
967 	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
968 	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
969 	xo_close_instance_h(xop, array_key);
970 
971 	*offset += data_size;
972 
973 done:
974 	return (ret);
975 }
976 
977 static int
978 vm_save_kern_structs(struct vmctx *ctx, int data_fd, xo_handle_t *xop)
979 {
980 	int ret, error;
981 	size_t buf_size, i, offset;
982 	char *buffer;
983 	struct vm_snapshot_meta *meta;
984 
985 	error = 0;
986 	offset = 0;
987 	buf_size = SNAPSHOT_BUFFER_SIZE;
988 
989 	buffer = malloc(SNAPSHOT_BUFFER_SIZE * sizeof(char));
990 	if (buffer == NULL) {
991 		error = ENOMEM;
992 		perror("Failed to allocate memory for snapshot buffer");
993 		goto err_vm_snapshot_kern_data;
994 	}
995 
996 	meta = &(struct vm_snapshot_meta) {
997 		.buffer.buf_start = buffer,
998 		.buffer.buf_size = buf_size,
999 
1000 		.op = VM_SNAPSHOT_SAVE,
1001 	};
1002 
1003 	xo_open_list_h(xop, JSON_KERNEL_ARR_KEY);
1004 	for (i = 0; i < nitems(snapshot_kern_structs); i++) {
1005 		meta->dev_name = snapshot_kern_structs[i].struct_name;
1006 		meta->dev_req  = snapshot_kern_structs[i].req;
1007 
1008 		memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
1009 		meta->buffer.buf = meta->buffer.buf_start;
1010 		meta->buffer.buf_rem = meta->buffer.buf_size;
1011 
1012 		ret = vm_save_kern_struct(ctx, data_fd, xop,
1013 		    JSON_KERNEL_ARR_KEY, meta, &offset);
1014 		if (ret != 0) {
1015 			error = -1;
1016 			goto err_vm_snapshot_kern_data;
1017 		}
1018 	}
1019 	xo_close_list_h(xop, JSON_KERNEL_ARR_KEY);
1020 
1021 err_vm_snapshot_kern_data:
1022 	if (buffer != NULL)
1023 		free(buffer);
1024 	return (error);
1025 }
1026 
1027 static int
1028 vm_snapshot_basic_metadata(struct vmctx *ctx, xo_handle_t *xop, size_t memsz)
1029 {
1030 
1031 	xo_open_container_h(xop, JSON_BASIC_METADATA_KEY);
1032 	xo_emit_h(xop, "{:" JSON_NCPUS_KEY "/%d}\n", guest_ncpus);
1033 	xo_emit_h(xop, "{:" JSON_VMNAME_KEY "/%s}\n", vm_get_name(ctx));
1034 	xo_emit_h(xop, "{:" JSON_MEMSIZE_KEY "/%lu}\n", memsz);
1035 	xo_emit_h(xop, "{:" JSON_MEMFLAGS_KEY "/%d}\n", vm_get_memflags(ctx));
1036 	xo_close_container_h(xop, JSON_BASIC_METADATA_KEY);
1037 
1038 	return (0);
1039 }
1040 
1041 static int
1042 vm_snapshot_dev_write_data(int data_fd, xo_handle_t *xop, const char *array_key,
1043 			   struct vm_snapshot_meta *meta, off_t *offset)
1044 {
1045 	ssize_t ret;
1046 	size_t data_size;
1047 
1048 	data_size = vm_get_snapshot_size(meta);
1049 
1050 	/* XXX-MJ no handling for short writes. */
1051 	ret = write(data_fd, meta->buffer.buf_start, data_size);
1052 	if (ret < 0 || (size_t)ret != data_size) {
1053 		perror("Failed to write all snapshotted data.");
1054 		return (-1);
1055 	}
1056 
1057 	/* Write metadata. */
1058 	xo_open_instance_h(xop, array_key);
1059 	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n", meta->dev_name);
1060 	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
1061 	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
1062 	xo_close_instance_h(xop, array_key);
1063 
1064 	*offset += data_size;
1065 
1066 	return (0);
1067 }
1068 
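/*
 * Snapshot a single emulated device: reset the shared buffer, invoke the
 * device's snapshot callback and append the resulting data and metadata to
 * the output files.
 */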
1069 static int
1070 vm_snapshot_device(vm_snapshot_dev_cb func, const char *dev_name,
1071     void *devdata, int data_fd, xo_handle_t *xop,
1072     struct vm_snapshot_meta *meta, off_t *offset)
1073 {
1074 	int ret;
1075 
1076 	memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
1077 	meta->buffer.buf = meta->buffer.buf_start;
1078 	meta->buffer.buf_rem = meta->buffer.buf_size;
1079 	meta->dev_name = dev_name;
1080 	meta->dev_data = devdata;
1081 
1082 	ret = func(meta);
1083 	if (ret != 0) {
1084 		EPRINTLN("Failed to snapshot %s; ret=%d", dev_name, ret);
1085 		return (ret);
1086 	}
1087 
1088 	ret = vm_snapshot_dev_write_data(data_fd, xop, JSON_DEV_ARR_KEY, meta,
1089 					 offset);
1090 	if (ret != 0)
1091 		return (ret);
1092 
1093 	return (0);
1094 }
1095 
1096 static int
1097 vm_snapshot_devices(int data_fd, xo_handle_t *xop)
1098 {
1099 	int ret;
1100 	off_t offset;
1101 	void *buffer;
1102 	size_t buf_size;
1103 	struct vm_snapshot_meta *meta;
1104 	struct pci_devinst *pdi;
1105 
1106 	buf_size = SNAPSHOT_BUFFER_SIZE;
1107 
1108 	offset = lseek(data_fd, 0, SEEK_CUR);
1109 	if (offset < 0) {
1110 		perror("Failed to get data file current offset.");
1111 		return (-1);
1112 	}
1113 
1114 	buffer = malloc(buf_size);
1115 	if (buffer == NULL) {
1116 		perror("Failed to allocate memory for snapshot buffer");
1117 		ret = ENOMEM;
1118 		goto snapshot_err;
1119 	}
1120 
1121 	meta = &(struct vm_snapshot_meta) {
1122 		.buffer.buf_start = buffer,
1123 		.buffer.buf_size = buf_size,
1124 
1125 		.op = VM_SNAPSHOT_SAVE,
1126 	};
1127 
1128 	xo_open_list_h(xop, JSON_DEV_ARR_KEY);
1129 
1130 	/* Save PCI devices */
1131 	pdi = NULL;
1132 	while ((pdi = pci_next(pdi)) != NULL) {
1133 		ret = vm_snapshot_device(pci_snapshot, pdi->pi_name, pdi,
1134 		    data_fd, xop, meta, &offset);
1135 		if (ret != 0)
1136 			goto snapshot_err;
1137 	}
1138 
1139 	ret = vm_snapshot_device(atkbdc_snapshot, "atkbdc", NULL,
1140 	    data_fd, xop, meta, &offset);
1141 
1142 	xo_close_list_h(xop, JSON_DEV_ARR_KEY);
1143 
1144 snapshot_err:
1145 	if (buffer != NULL)
1146 		free(buffer);
1147 	return (ret);
1148 }
1149 
1150 void
1151 checkpoint_cpu_add(int vcpu)
1152 {
1153 
1154 	pthread_mutex_lock(&vcpu_lock);
1155 	CPU_SET(vcpu, &vcpus_active);
1156 
1157 	if (checkpoint_active) {
1158 		CPU_SET(vcpu, &vcpus_suspended);
1159 		while (checkpoint_active)
1160 			pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
1161 		CPU_CLR(vcpu, &vcpus_suspended);
1162 	}
1163 	pthread_mutex_unlock(&vcpu_lock);
1164 }
1165 
1166 /*
1167  * When a vCPU is suspended for any reason, it calls
1168  * checkpoint_cpu_suspend(), which records that the vCPU is idle.
1169  * Before returning from suspension, checkpoint_cpu_resume() is
1170  * called; it pauses the vCPU thread until the checkpoint is
1171  * complete.  The reason for the two-step process is that vCPUs
1172  * might already be stopped in the debug server when a checkpoint
1173  * is requested.  This approach allows those vCPUs to be accounted
1174  * for and handled as well.
1175  */
1176 void
1177 checkpoint_cpu_suspend(int vcpu)
1178 {
1179 
1180 	pthread_mutex_lock(&vcpu_lock);
1181 	CPU_SET(vcpu, &vcpus_suspended);
1182 	if (checkpoint_active && CPU_CMP(&vcpus_active, &vcpus_suspended) == 0)
1183 		pthread_cond_signal(&vcpus_idle);
1184 	pthread_mutex_unlock(&vcpu_lock);
1185 }
1186 
1187 void
1188 checkpoint_cpu_resume(int vcpu)
1189 {
1190 
1191 	pthread_mutex_lock(&vcpu_lock);
1192 	while (checkpoint_active)
1193 		pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
1194 	CPU_CLR(vcpu, &vcpus_suspended);
1195 	pthread_mutex_unlock(&vcpu_lock);
1196 }
1197 
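/*
 * Suspend all vCPUs and wait until every active vCPU has reported itself
 * suspended before the checkpoint proceeds.
 */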
1198 static void
1199 vm_vcpu_pause(struct vmctx *ctx)
1200 {
1201 
1202 	pthread_mutex_lock(&vcpu_lock);
1203 	checkpoint_active = true;
1204 	vm_suspend_all_cpus(ctx);
1205 	while (CPU_CMP(&vcpus_active, &vcpus_suspended) != 0)
1206 		pthread_cond_wait(&vcpus_idle, &vcpu_lock);
1207 	pthread_mutex_unlock(&vcpu_lock);
1208 }
1209 
1210 static void
1211 vm_vcpu_resume(struct vmctx *ctx)
1212 {
1213 
1214 	pthread_mutex_lock(&vcpu_lock);
1215 	checkpoint_active = false;
1216 	pthread_mutex_unlock(&vcpu_lock);
1217 	vm_resume_all_cpus(ctx);
1218 	pthread_cond_broadcast(&vcpus_can_run);
1219 }
1220 
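/*
 * Write a full checkpoint: guest memory goes to 'checkpoint_file', kernel
 * and device state to its ".kern" companion and the snapshot layout to
 * ".meta".  vCPUs and devices are paused for the duration; if 'stop_vm' is
 * set, the VM is destroyed instead of being resumed.
 */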
1221 static int
1222 vm_checkpoint(struct vmctx *ctx, int fddir, const char *checkpoint_file,
1223     bool stop_vm)
1224 {
1225 	int fd_checkpoint = 0, kdata_fd = 0, fd_meta;
1226 	int ret = 0;
1227 	int error = 0;
1228 	size_t memsz;
1229 	xo_handle_t *xop = NULL;
1230 	char *meta_filename = NULL;
1231 	char *kdata_filename = NULL;
1232 	FILE *meta_file = NULL;
1233 
1234 	kdata_filename = strcat_extension(checkpoint_file, ".kern");
1235 	if (kdata_filename == NULL) {
1236 		fprintf(stderr, "Failed to construct kernel data filename.\n");
1237 		return (-1);
1238 	}
1239 
1240 	kdata_fd = openat(fddir, kdata_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
1241 	if (kdata_fd < 0) {
1242 		perror("Failed to open kernel data snapshot file.");
1243 		error = -1;
1244 		goto done;
1245 	}
1246 
1247 	fd_checkpoint = openat(fddir, checkpoint_file, O_RDWR | O_CREAT | O_TRUNC, 0700);
1248 
1249 	if (fd_checkpoint < 0) {
1250 		perror("Failed to create checkpoint file");
1251 		error = -1;
1252 		goto done;
1253 	}
1254 
1255 	meta_filename = strcat_extension(checkpoint_file, ".meta");
1256 	if (meta_filename == NULL) {
1257 		fprintf(stderr, "Failed to construct vm metadata filename.\n");
		error = -1;
1258 		goto done;
1259 	}
1260 
1261 	fd_meta = openat(fddir, meta_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
1262 	if (fd_meta != -1)
1263 		meta_file = fdopen(fd_meta, "w");
1264 	if (meta_file == NULL) {
1265 		perror("Failed to open vm metadata snapshot file.");
1266 		close(fd_meta);
		error = -1;
1267 		goto done;
1268 	}
1269 
1270 	xop = xo_create_to_file(meta_file, XO_STYLE_JSON, XOF_PRETTY);
1271 	if (xop == NULL) {
1272 		perror("Failed to get libxo handle on metadata file.");
		error = -1;
1273 		goto done;
1274 	}
1275 
1276 	vm_vcpu_pause(ctx);
1277 
1278 	ret = vm_pause_devices();
1279 	if (ret != 0) {
1280 		fprintf(stderr, "Could not pause devices\r\n");
1281 		error = ret;
1282 		goto done;
1283 	}
1284 
1285 	memsz = vm_snapshot_mem(ctx, fd_checkpoint, 0, true);
1286 	if (memsz == 0) {
1287 		perror("Could not write guest memory to file");
1288 		error = -1;
1289 		goto done;
1290 	}
1291 
1292 	ret = vm_snapshot_basic_metadata(ctx, xop, memsz);
1293 	if (ret != 0) {
1294 		fprintf(stderr, "Failed to snapshot vm basic metadata.\n");
1295 		error = -1;
1296 		goto done;
1297 	}
1298 
1299 	ret = vm_save_kern_structs(ctx, kdata_fd, xop);
1300 	if (ret != 0) {
1301 		fprintf(stderr, "Failed to snapshot vm kernel data.\n");
1302 		error = -1;
1303 		goto done;
1304 	}
1305 
1306 	ret = vm_snapshot_devices(kdata_fd, xop);
1307 	if (ret != 0) {
1308 		fprintf(stderr, "Failed to snapshot device state.\n");
1309 		error = -1;
1310 		goto done;
1311 	}
1312 
1313 	xo_finish_h(xop);
1314 
1315 	if (stop_vm) {
1316 		vm_destroy(ctx);
1317 		exit(0);
1318 	}
1319 
1320 done:
1321 	ret = vm_resume_devices();
1322 	if (ret != 0)
1323 		fprintf(stderr, "Could not resume devices\r\n");
1324 	vm_vcpu_resume(ctx);
1325 	if (fd_checkpoint > 0)
1326 		close(fd_checkpoint);
1327 	if (meta_filename != NULL)
1328 		free(meta_filename);
1329 	if (kdata_filename != NULL)
1330 		free(kdata_filename);
1331 	if (xop != NULL)
1332 		xo_destroy(xop);
1333 	if (meta_file != NULL)
1334 		fclose(meta_file);
1335 	if (kdata_fd > 0)
1336 		close(kdata_fd);
1337 	return (error);
1338 }
1339 
1340 static int
1341 handle_message(struct vmctx *ctx, nvlist_t *nvl)
1342 {
1343 	const char *cmd;
1344 	struct ipc_command **ipc_cmd;
1345 
1346 	if (!nvlist_exists_string(nvl, "cmd"))
1347 		return (EINVAL);
1348 
1349 	cmd = nvlist_get_string(nvl, "cmd");
1350 	IPC_COMMAND_FOREACH(ipc_cmd, ipc_cmd_set) {
1351 		if (strcmp(cmd, (*ipc_cmd)->name) == 0)
1352 			return ((*ipc_cmd)->handler(ctx, nvl));
1353 	}
1354 
1355 	return (EOPNOTSUPP);
1356 }
1357 
1358 /*
1359  * Listen for commands from bhyvectl
1360  */
1361 void *
1362 checkpoint_thread(void *param)
1363 {
1364 	int fd;
1365 	struct checkpoint_thread_info *thread_info;
1366 	nvlist_t *nvl;
1367 
1368 	pthread_set_name_np(pthread_self(), "checkpoint thread");
1369 	thread_info = (struct checkpoint_thread_info *)param;
1370 
1371 	while ((fd = accept(thread_info->socket_fd, NULL, NULL)) != -1) {
1372 		nvl = nvlist_recv(fd, 0);
1373 		if (nvl != NULL)
1374 			handle_message(thread_info->ctx, nvl);
1375 		else
1376 			EPRINTLN("nvlist_recv() failed: %s", strerror(errno));
1377 
1378 		close(fd);
1379 		nvlist_destroy(nvl);
1380 	}
1381 
1382 	return (NULL);
1383 }
1384 
1385 static int
1386 vm_do_checkpoint(struct vmctx *ctx, const nvlist_t *nvl)
1387 {
1388 	int error;
1389 
1390 	if (!nvlist_exists_string(nvl, "filename") ||
1391 	    !nvlist_exists_bool(nvl, "suspend") ||
1392 	    !nvlist_exists_descriptor(nvl, "fddir"))
1393 		error = EINVAL;
1394 	else
1395 		error = vm_checkpoint(ctx,
1396 		    nvlist_get_descriptor(nvl, "fddir"),
1397 		    nvlist_get_string(nvl, "filename"),
1398 		    nvlist_get_bool(nvl, "suspend"));
1399 
1400 	return (error);
1401 }
1402 IPC_COMMAND(ipc_cmd_set, checkpoint, vm_do_checkpoint);
1403 
1404 static int
1405 vm_do_migrate(struct vmctx __unused *ctx, const nvlist_t *nvl)
1406 {
1407 	size_t len;
1408 	struct migrate_req req;
1409 
1410 	if (!nvlist_exists_string(nvl, "hostname") ||
1411 	    !nvlist_exists_number(nvl, "port"))
1412 		return (EINVAL);
1413 
1414 	memset(&req, 0, sizeof(struct migrate_req));
1415 	req.port = nvlist_get_number(nvl, "port");
1416 
1417 	len = strlen(nvlist_get_string(nvl, "hostname"));
1418 	if (len > MAXHOSTNAMELEN - 1) {
1419 		EPRINTLN("Hostname length %lu bigger than maximum allowed %d",
1420 			len, MAXHOSTNAMELEN - 1);
1421 		return (EINVAL);
1422 	}
1423 
1424 	strlcpy(req.host, nvlist_get_string(nvl, "hostname"), MAXHOSTNAMELEN);
1425 
1426 	printf("%s: IP address used for migration: %s;\n"
1427 		"Port used for migration: %d\n",
1428 		__func__,
1429 		req.host,
1430 		req.port);
1431 
1432 	// return (vm_send_migrate_req(ctx, req, nvlist_get_bool(nvl, "live")));
1433 	EPRINTLN("Migration operation not implemented yet");
1434 	return (EOPNOTSUPP);
1435 }
1436 IPC_COMMAND(ipc_cmd_set, migrate, vm_do_migrate);
1437 
1438 void
1439 init_snapshot(void)
1440 {
1441 	int err;
1442 
1443 	err = pthread_mutex_init(&vcpu_lock, NULL);
1444 	if (err != 0)
1445 		errc(1, err, "checkpoint mutex init");
1446 	err = pthread_cond_init(&vcpus_idle, NULL);
1447 	if (err != 0)
1448 		errc(1, err, "checkpoint cv init (vcpus_idle)");
1449 	err = pthread_cond_init(&vcpus_can_run, NULL);
1450 	if (err != 0)
1451 		errc(1, err, "checkpoint cv init (vcpus_can_run)");
1452 }
1453 
1454 /*
1455  * Create the listening socket for IPC with bhyvectl
1456  */
1457 int
1458 init_checkpoint_thread(struct vmctx *ctx)
1459 {
1460 	struct checkpoint_thread_info *checkpoint_info = NULL;
1461 	struct sockaddr_un addr;
1462 	int socket_fd;
1463 	pthread_t checkpoint_pthread;
1464 	int err;
1465 #ifndef WITHOUT_CAPSICUM
1466 	cap_rights_t rights;
1467 #endif
1468 
1469 	memset(&addr, 0, sizeof(addr));
1470 
1471 	socket_fd = socket(PF_UNIX, SOCK_STREAM, 0);
1472 	if (socket_fd < 0) {
1473 		EPRINTLN("Socket creation failed: %s", strerror(errno));
1474 		err = -1;
1475 		goto fail;
1476 	}
1477 
1478 	addr.sun_family = AF_UNIX;
1479 
1480 	snprintf(addr.sun_path, sizeof(addr.sun_path), "%s%s",
1481 		 BHYVE_RUN_DIR, vm_get_name(ctx));
1482 	addr.sun_len = SUN_LEN(&addr);
1483 	unlink(addr.sun_path);
1484 
1485 	if (bind(socket_fd, (struct sockaddr *)&addr, addr.sun_len) != 0) {
1486 		EPRINTLN("Failed to bind socket \"%s\": %s",
1487 		    addr.sun_path, strerror(errno));
1488 		err = -1;
1489 		goto fail;
1490 	}
1491 
1492 	if (listen(socket_fd, 10) < 0) {
1493 		EPRINTLN("ipc socket listen: %s", strerror(errno));
1494 		err = errno;
1495 		goto fail;
1496 	}
1497 
1498 #ifndef WITHOUT_CAPSICUM
1499 	cap_rights_init(&rights, CAP_ACCEPT, CAP_READ, CAP_RECV, CAP_WRITE,
1500 	    CAP_SEND, CAP_GETSOCKOPT);
1501 
1502 	if (caph_rights_limit(socket_fd, &rights) == -1)
1503 		errx(EX_OSERR, "Unable to apply rights for sandbox");
1504 #endif
1505 	checkpoint_info = calloc(1, sizeof(*checkpoint_info));
	if (checkpoint_info == NULL) {
		err = -1;
		goto fail;
	}
1506 	checkpoint_info->ctx = ctx;
1507 	checkpoint_info->socket_fd = socket_fd;
1508 
1509 	err = pthread_create(&checkpoint_pthread, NULL, checkpoint_thread,
1510 		checkpoint_info);
1511 	if (err != 0)
1512 		goto fail;
1513 
1514 	return (0);
1515 fail:
1516 	free(checkpoint_info);
1517 	if (socket_fd > 0)
1518 		close(socket_fd);
1519 	unlink(addr.sun_path);
1520 
1521 	return (err);
1522 }
1523 
1524 void
1525 vm_snapshot_buf_err(const char *bufname, const enum vm_snapshot_op op)
1526 {
1527 	const char *__op;
1528 
1529 	if (op == VM_SNAPSHOT_SAVE)
1530 		__op = "save";
1531 	else if (op == VM_SNAPSHOT_RESTORE)
1532 		__op = "restore";
1533 	else
1534 		__op = "unknown";
1535 
1536 	fprintf(stderr, "%s: snapshot-%s failed for %s\r\n",
1537 		__func__, __op, bufname);
1538 }
1539 
1540 int
1541 vm_snapshot_buf(void *data, size_t data_size, struct vm_snapshot_meta *meta)
1542 {
1543 	struct vm_snapshot_buffer *buffer;
1544 	int op;
1545 
1546 	buffer = &meta->buffer;
1547 	op = meta->op;
1548 
1549 	if (buffer->buf_rem < data_size) {
1550 		fprintf(stderr, "%s: buffer too small\r\n", __func__);
1551 		return (E2BIG);
1552 	}
1553 
1554 	if (op == VM_SNAPSHOT_SAVE)
1555 		memcpy(buffer->buf, data, data_size);
1556 	else if (op == VM_SNAPSHOT_RESTORE)
1557 		memcpy(data, buffer->buf, data_size);
1558 	else
1559 		return (EINVAL);
1560 
1561 	buffer->buf += data_size;
1562 	buffer->buf_rem -= data_size;
1563 
1564 	return (0);
1565 }
1566 
1567 size_t
1568 vm_get_snapshot_size(struct vm_snapshot_meta *meta)
1569 {
1570 	size_t length;
1571 	struct vm_snapshot_buffer *buffer;
1572 
1573 	buffer = &meta->buffer;
1574 
1575 	if (buffer->buf_size < buffer->buf_rem) {
1576 		fprintf(stderr, "%s: Invalid buffer: size = %zu, rem = %zu\r\n",
1577 			__func__, buffer->buf_size, buffer->buf_rem);
1578 		length = 0;
1579 	} else {
1580 		length = buffer->buf_size - buffer->buf_rem;
1581 	}
1582 
1583 	return (length);
1584 }
1585 
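/*
 * Convert a host pointer into guest memory to a guest-physical address when
 * saving, and back to a host pointer when restoring.  A NULL pointer is
 * tolerated only when 'restore_null' is set.
 */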
1586 int
1587 vm_snapshot_guest2host_addr(struct vmctx *ctx, void **addrp, size_t len,
1588     bool restore_null, struct vm_snapshot_meta *meta)
1589 {
1590 	int ret;
1591 	vm_paddr_t gaddr;
1592 
1593 	if (meta->op == VM_SNAPSHOT_SAVE) {
1594 		gaddr = paddr_host2guest(ctx, *addrp);
1595 		if (gaddr == (vm_paddr_t) -1) {
1596 			if (!restore_null ||
1597 			    (restore_null && (*addrp != NULL))) {
1598 				ret = EFAULT;
1599 				goto done;
1600 			}
1601 		}
1602 
1603 		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
1604 	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
1605 		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
1606 		if (gaddr == (vm_paddr_t) -1) {
1607 			if (!restore_null) {
1608 				ret = EFAULT;
1609 				goto done;
1610 			}
1611 		}
1612 
1613 		*addrp = paddr_guest2host(ctx, gaddr, len);
1614 	} else {
1615 		ret = EINVAL;
1616 	}
1617 
1618 done:
1619 	return (ret);
1620 }
1621 
1622 int
1623 vm_snapshot_buf_cmp(void *data, size_t data_size, struct vm_snapshot_meta *meta)
1624 {
1625 	struct vm_snapshot_buffer *buffer;
1626 	int op;
1627 	int ret;
1628 
1629 	buffer = &meta->buffer;
1630 	op = meta->op;
1631 
1632 	if (buffer->buf_rem < data_size) {
1633 		fprintf(stderr, "%s: buffer too small\r\n", __func__);
1634 		ret = E2BIG;
1635 		goto done;
1636 	}
1637 
1638 	if (op == VM_SNAPSHOT_SAVE) {
1639 		ret = 0;
1640 		memcpy(buffer->buf, data, data_size);
1641 	} else if (op == VM_SNAPSHOT_RESTORE) {
1642 		ret = memcmp(data, buffer->buf, data_size);
1643 	} else {
1644 		ret = EINVAL;
1645 		goto done;
1646 	}
1647 
1648 	buffer->buf += data_size;
1649 	buffer->buf_rem -= data_size;
1650 
1651 done:
1652 	return (ret);
1653 }
1654