xref: /freebsd/usr.sbin/bhyve/snapshot.c (revision 02e9120893770924227138ba49df1edb3896112a)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Flavius Anton
 * Copyright (c) 2016 Mihai Tiganus
 * Copyright (c) 2016-2019 Mihai Carabas
 * Copyright (c) 2017-2019 Darius Mihai
 * Copyright (c) 2017-2019 Elena Mihailescu
 * Copyright (c) 2018-2019 Sergiu Weisz
 * All rights reserved.
 * The bhyve-snapshot feature was developed under sponsorships
 * from Matthew Grooms.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/un.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <signal.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <stdbool.h>
#include <sys/ioctl.h>

#include <machine/vmm.h>
#ifndef WITHOUT_CAPSICUM
#include <machine/vmm_dev.h>
#endif
#include <machine/vmm_snapshot.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#ifdef __amd64__
#include "amd64/atkbdc.h"
#endif
#include "debug.h"
#include "ipc.h"
#include "mem.h"
#include "pci_emul.h"
#include "snapshot.h"

#include <libxo/xo.h>
#include <ucl.h>

struct spinner_info {
	const size_t *crtval;
	const size_t maxval;
	const size_t total;
};

extern int guest_ncpus;

static struct winsize winsize;
static sig_t old_winch_handler;

#define	KB		(1024UL)
#define	MB		(1024UL * KB)
#define	GB		(1024UL * MB)

#define	SNAPSHOT_CHUNK	(4 * MB)
#define	PROG_BUF_SZ	(8192)

#define	SNAPSHOT_BUFFER_SIZE (40 * MB)

#define	JSON_KERNEL_ARR_KEY		"kern_structs"
#define	JSON_DEV_ARR_KEY		"devices"
#define	JSON_BASIC_METADATA_KEY		"basic metadata"
#define	JSON_SNAPSHOT_REQ_KEY		"device"
#define	JSON_SIZE_KEY			"size"
#define	JSON_FILE_OFFSET_KEY		"file_offset"

#define	JSON_NCPUS_KEY			"ncpus"
#define	JSON_VMNAME_KEY			"vmname"
#define	JSON_MEMSIZE_KEY		"memsize"
#define	JSON_MEMFLAGS_KEY		"memflags"

#define min(a,b)		\
({				\
 __typeof__ (a) _a = (a);	\
 __typeof__ (b) _b = (b);	\
 _a < _b ? _a : _b;		\
 })

static const struct vm_snapshot_kern_info snapshot_kern_structs[] = {
	{ "vhpet",	STRUCT_VHPET	},
	{ "vm",		STRUCT_VM	},
	{ "vioapic",	STRUCT_VIOAPIC	},
	{ "vlapic",	STRUCT_VLAPIC	},
	{ "vmcx",	STRUCT_VMCX	},
	{ "vatpit",	STRUCT_VATPIT	},
	{ "vatpic",	STRUCT_VATPIC	},
	{ "vpmtmr",	STRUCT_VPMTMR	},
	{ "vrtc",	STRUCT_VRTC	},
};

static cpuset_t vcpus_active, vcpus_suspended;
static pthread_mutex_t vcpu_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t vcpus_idle = PTHREAD_COND_INITIALIZER;
static pthread_cond_t vcpus_can_run = PTHREAD_COND_INITIALIZER;
static bool checkpoint_active;

/*
 * TODO: Harden this function and all of its callers since 'base_str' is a
 * user-provided string.
 */
static char *
strcat_extension(const char *base_str, const char *ext)
{
	char *res;
	size_t base_len, ext_len;

	base_len = strnlen(base_str, NAME_MAX);
	ext_len = strnlen(ext, NAME_MAX);

	if (base_len + ext_len > NAME_MAX) {
		EPRINTLN("Filename exceeds maximum length.");
		return (NULL);
	}

	res = malloc(base_len + ext_len + 1);
	if (res == NULL) {
		EPRINTLN("Failed to allocate memory: %s", strerror(errno));
		return (NULL);
	}

	memcpy(res, base_str, base_len);
	memcpy(res + base_len, ext, ext_len);
	res[base_len + ext_len] = 0;

	return (res);
}

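/*
 * Tear down a restore_state: unmap the kernel data file, close the open file
 * descriptors and release the UCL metadata parser and root object.
 */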
void
destroy_restore_state(struct restore_state *rstate)
{
	if (rstate == NULL) {
		EPRINTLN("Attempting to destroy NULL restore struct.");
		return;
	}

	if (rstate->kdata_map != MAP_FAILED)
		munmap(rstate->kdata_map, rstate->kdata_len);

	if (rstate->kdata_fd > 0)
		close(rstate->kdata_fd);
	if (rstate->vmmem_fd > 0)
		close(rstate->vmmem_fd);

	if (rstate->meta_root_obj != NULL)
		ucl_object_unref(rstate->meta_root_obj);
	if (rstate->meta_parser != NULL)
		ucl_parser_free(rstate->meta_parser);
}

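/*
 * Open the guest memory snapshot file and record its size; the contents are
 * consumed later, when guest memory is actually restored.
 */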
static int
load_vmmem_file(const char *filename, struct restore_state *rstate)
{
	struct stat sb;
	int err;

	rstate->vmmem_fd = open(filename, O_RDONLY);
	if (rstate->vmmem_fd < 0) {
		perror("Failed to open restore file");
		return (-1);
	}

	err = fstat(rstate->vmmem_fd, &sb);
	if (err < 0) {
		perror("Failed to stat restore file");
		goto err_load_vmmem;
	}

	if (sb.st_size == 0) {
		fprintf(stderr, "Restore file is empty.\n");
		goto err_load_vmmem;
	}

	rstate->vmmem_len = sb.st_size;

	return (0);

err_load_vmmem:
	if (rstate->vmmem_fd > 0)
		close(rstate->vmmem_fd);
	return (-1);
}

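/*
 * Open the kernel data snapshot file and map it read-only so that saved
 * structures can later be located by offset.
 */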
static int
load_kdata_file(const char *filename, struct restore_state *rstate)
{
	struct stat sb;
	int err;

	rstate->kdata_fd = open(filename, O_RDONLY);
	if (rstate->kdata_fd < 0) {
		perror("Failed to open kernel data file");
		return (-1);
	}

	err = fstat(rstate->kdata_fd, &sb);
	if (err < 0) {
		perror("Failed to stat kernel data file");
		goto err_load_kdata;
	}

	if (sb.st_size == 0) {
		fprintf(stderr, "Kernel data file is empty.\n");
		goto err_load_kdata;
	}

	rstate->kdata_len = sb.st_size;
	rstate->kdata_map = mmap(NULL, rstate->kdata_len, PROT_READ,
				 MAP_SHARED, rstate->kdata_fd, 0);
	if (rstate->kdata_map == MAP_FAILED) {
		perror("Failed to map restore file");
		goto err_load_kdata;
	}

	return (0);

err_load_kdata:
	if (rstate->kdata_fd > 0)
		close(rstate->kdata_fd);
	return (-1);
}

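/*
 * Parse the UCL/JSON metadata file and keep the parser and its root object in
 * the restore state.
 */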
static int
load_metadata_file(const char *filename, struct restore_state *rstate)
{
	ucl_object_t *obj;
	struct ucl_parser *parser;
	int err;

	parser = ucl_parser_new(UCL_PARSER_DEFAULT);
	if (parser == NULL) {
		fprintf(stderr, "Failed to initialize UCL parser.\n");
		err = -1;
		goto err_load_metadata;
	}

	err = ucl_parser_add_file(parser, filename);
	if (err == 0) {
		fprintf(stderr, "Failed to parse metadata file: '%s'\n",
			filename);
		err = -1;
		goto err_load_metadata;
	}

	obj = ucl_parser_get_object(parser);
	if (obj == NULL) {
		fprintf(stderr, "Failed to parse object.\n");
		err = -1;
		goto err_load_metadata;
	}

	rstate->meta_parser = parser;
	rstate->meta_root_obj = (ucl_object_t *)obj;

	return (0);

err_load_metadata:
	if (parser != NULL)
		ucl_parser_free(parser);
	return (err);
}

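/*
 * Load the three snapshot components: guest memory from 'filename', kernel
 * data from 'filename.kern' and metadata from 'filename.meta'.
 */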
int
load_restore_file(const char *filename, struct restore_state *rstate)
{
	int err = 0;
	char *kdata_filename = NULL, *meta_filename = NULL;

	assert(filename != NULL);
	assert(rstate != NULL);

	memset(rstate, 0, sizeof(*rstate));
	rstate->kdata_map = MAP_FAILED;

	err = load_vmmem_file(filename, rstate);
	if (err != 0) {
		fprintf(stderr, "Failed to load guest RAM file.\n");
		goto err_restore;
	}

	kdata_filename = strcat_extension(filename, ".kern");
	if (kdata_filename == NULL) {
		fprintf(stderr, "Failed to construct kernel data filename.\n");
		goto err_restore;
	}

	err = load_kdata_file(kdata_filename, rstate);
	if (err != 0) {
		fprintf(stderr, "Failed to load guest kernel data file.\n");
		goto err_restore;
	}

	meta_filename = strcat_extension(filename, ".meta");
	if (meta_filename == NULL) {
		fprintf(stderr, "Failed to construct vm metadata filename.\n");
		goto err_restore;
	}

	err = load_metadata_file(meta_filename, rstate);
	if (err != 0) {
		fprintf(stderr, "Failed to load guest metadata file.\n");
		goto err_restore;
	}

	free(kdata_filename);
	free(meta_filename);

	return (0);

err_restore:
	destroy_restore_state(rstate);
	if (kdata_filename != NULL)
		free(kdata_filename);
	if (meta_filename != NULL)
		free(meta_filename);
	return (-1);
}

#define JSON_GET_INT_OR_RETURN(key, obj, result_ptr, ret)			\
do {										\
	const ucl_object_t *obj__;						\
	obj__ = ucl_object_lookup(obj, key);					\
	if (obj__ == NULL) {							\
		fprintf(stderr, "Missing key: '%s'", key);			\
		return (ret);							\
	}									\
	if (!ucl_object_toint_safe(obj__, result_ptr)) {			\
		fprintf(stderr, "Cannot convert '%s' value to int.", key);	\
		return (ret);							\
	}									\
} while(0)

#define JSON_GET_STRING_OR_RETURN(key, obj, result_ptr, ret)			\
do {										\
	const ucl_object_t *obj__;						\
	obj__ = ucl_object_lookup(obj, key);					\
	if (obj__ == NULL) {							\
		fprintf(stderr, "Missing key: '%s'", key);			\
		return (ret);							\
	}									\
	if (!ucl_object_tostring_safe(obj__, result_ptr)) {			\
		fprintf(stderr, "Cannot convert '%s' value to string.", key);	\
		return (ret);							\
	}									\
} while(0)

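/*
 * If the metadata entry 'obj' describes 'dev_name', return a pointer into the
 * mapped kernel data file where its saved state starts and store the state
 * size in 'data_size'; otherwise return NULL.
 */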
static void *
lookup_check_dev(const char *dev_name, struct restore_state *rstate,
		 const ucl_object_t *obj, size_t *data_size)
{
	const char *snapshot_req;
	int64_t size, file_offset;

	snapshot_req = NULL;
	JSON_GET_STRING_OR_RETURN(JSON_SNAPSHOT_REQ_KEY, obj,
				  &snapshot_req, NULL);
	assert(snapshot_req != NULL);
	if (!strcmp(snapshot_req, dev_name)) {
		JSON_GET_INT_OR_RETURN(JSON_SIZE_KEY, obj,
				       &size, NULL);
		assert(size >= 0);

		JSON_GET_INT_OR_RETURN(JSON_FILE_OFFSET_KEY, obj,
				       &file_offset, NULL);
		assert(file_offset >= 0);
		assert((uint64_t)file_offset + size <= rstate->kdata_len);

		*data_size = (size_t)size;
		return ((uint8_t *)rstate->kdata_map + file_offset);
	}

	return (NULL);
}

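/*
 * Search the metadata array named by 'key' (devices or kernel structures) for
 * an entry matching 'dev_name' and return its saved state, if any.
 */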
static void *
lookup_dev(const char *dev_name, const char *key, struct restore_state *rstate,
    size_t *data_size)
{
	const ucl_object_t *devs = NULL, *obj = NULL;
	ucl_object_iter_t it = NULL;
	void *ret;

	devs = ucl_object_lookup(rstate->meta_root_obj, key);
	if (devs == NULL) {
		fprintf(stderr, "Failed to find '%s' object.\n", key);
		return (NULL);
	}

	if (ucl_object_type(devs) != UCL_ARRAY) {
		fprintf(stderr, "Object '%s' is not an array.\n", key);
		return (NULL);
	}

	while ((obj = ucl_object_iterate(devs, &it, true)) != NULL) {
		ret = lookup_check_dev(dev_name, rstate, obj, data_size);
		if (ret != NULL)
			return (ret);
	}

	return (NULL);
}

static const ucl_object_t *
lookup_basic_metadata_object(struct restore_state *rstate)
{
	const ucl_object_t *basic_meta_obj = NULL;

	basic_meta_obj = ucl_object_lookup(rstate->meta_root_obj,
					   JSON_BASIC_METADATA_KEY);
	if (basic_meta_obj == NULL) {
		fprintf(stderr, "Failed to find '%s' object.\n",
			JSON_BASIC_METADATA_KEY);
		return (NULL);
	}

	if (ucl_object_type(basic_meta_obj) != UCL_OBJECT) {
		fprintf(stderr, "Object '%s' is not a JSON object.\n",
			JSON_BASIC_METADATA_KEY);
		return (NULL);
	}

	return (basic_meta_obj);
}

const char *
lookup_vmname(struct restore_state *rstate)
{
	const char *vmname;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (NULL);

	JSON_GET_STRING_OR_RETURN(JSON_VMNAME_KEY, obj, &vmname, NULL);
	return (vmname);
}

int
lookup_memflags(struct restore_state *rstate)
{
	int64_t memflags;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (0);

	JSON_GET_INT_OR_RETURN(JSON_MEMFLAGS_KEY, obj, &memflags, 0);

	return ((int)memflags);
}

size_t
lookup_memsize(struct restore_state *rstate)
{
	int64_t memsize;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (0);

	JSON_GET_INT_OR_RETURN(JSON_MEMSIZE_KEY, obj, &memsize, 0);
	if (memsize < 0)
		memsize = 0;

	return ((size_t)memsize);
}

int
lookup_guest_ncpus(struct restore_state *rstate)
{
	int64_t ncpus;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (0);

	JSON_GET_INT_OR_RETURN(JSON_NCPUS_KEY, obj, &ncpus, 0);
	return ((int)ncpus);
}

static void
winch_handler(int signal __unused)
{
#ifdef TIOCGWINSZ
	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
#endif /* TIOCGWINSZ */
}

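/*
 * Draw a single-line progress bar sized to the terminal width, scaling the
 * byte counts to KiB, MiB or GiB as appropriate.
 */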
static int
print_progress(size_t crtval, const size_t maxval)
{
	size_t rc;
	double crtval_gb, maxval_gb;
	size_t i, win_width, prog_start, prog_done, prog_end;
	int mval_len;

	static char prog_buf[PROG_BUF_SZ];
	static const size_t len = sizeof(prog_buf);

	static size_t div;
	static const char *div_str;

	static char wip_bar[] = { '/', '-', '\\', '|' };
	static int wip_idx = 0;

	if (maxval == 0) {
		printf("[0B / 0B]\r\n");
		return (0);
	}

	if (crtval > maxval)
		crtval = maxval;

	if (maxval > 10 * GB) {
		div = GB;
		div_str = "GiB";
	} else if (maxval > 10 * MB) {
		div = MB;
		div_str = "MiB";
	} else {
		div = KB;
		div_str = "KiB";
	}

	crtval_gb = (double) crtval / div;
	maxval_gb = (double) maxval / div;

	rc = snprintf(prog_buf, len, "%.03lf", maxval_gb);
	if (rc >= len) {
		fprintf(stderr, "Maxval too big\n");
		return (-1);
	}
	mval_len = rc;

	rc = snprintf(prog_buf, len, "\r[%*.03lf%s / %.03lf%s] |",
		mval_len, crtval_gb, div_str, maxval_gb, div_str);

	if (rc >= len) {
		fprintf(stderr, "Buffer too small to print progress\n");
		return (-1);
	}

	win_width = min(winsize.ws_col, len);
	prog_start = rc;

	if (prog_start < (win_width - 2)) {
		prog_end = win_width - prog_start - 2;
		prog_done = prog_end * (crtval_gb / maxval_gb);

		for (i = prog_start; i < prog_start + prog_done; i++)
			prog_buf[i] = '#';

		if (crtval != maxval) {
			prog_buf[i] = wip_bar[wip_idx];
			wip_idx = (wip_idx + 1) % sizeof(wip_bar);
			i++;
		} else {
			prog_buf[i++] = '#';
		}

		for (; i < win_width - 2; i++)
			prog_buf[i] = '_';

		prog_buf[win_width - 2] = '|';
	}

	prog_buf[win_width - 1] = '\0';
	write(STDOUT_FILENO, prog_buf, win_width);

	return (0);
}

static void *
snapshot_spinner_cb(void *arg)
{
	int rc;
	size_t crtval, maxval, total;
	struct spinner_info *si;
	struct timespec ts;

	si = arg;
	if (si == NULL)
		pthread_exit(NULL);

	ts.tv_sec = 0;
	ts.tv_nsec = 50 * 1000 * 1000; /* 50 ms sleep time */

	do {
		crtval = *si->crtval;
		maxval = si->maxval;
		total = si->total;

		rc = print_progress(crtval, total);
		if (rc < 0) {
			fprintf(stderr, "Failed to print progress\n");
			break;
		}

		nanosleep(&ts, NULL);
	} while (crtval < maxval);

	pthread_exit(NULL);
	return NULL;
}

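/*
 * Copy one contiguous range of guest memory to (op_wr) or from (!op_wr) the
 * snapshot file at offset 'foff', updating a progress spinner when running on
 * a terminal.
 */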
static int
vm_snapshot_mem_part(const int snapfd, const size_t foff, void *src,
		     const size_t len, const size_t totalmem, const bool op_wr)
{
	int rc;
	size_t part_done, todo, rem;
	ssize_t done;
	bool show_progress;
	pthread_t spinner_th;
	struct spinner_info *si;

	if (lseek(snapfd, foff, SEEK_SET) < 0) {
		perror("Failed to change file offset");
		return (-1);
	}

	show_progress = false;
	if (isatty(STDIN_FILENO) && (winsize.ws_col != 0))
		show_progress = true;

	part_done = foff;
	rem = len;

	if (show_progress) {
		si = &(struct spinner_info) {
			.crtval = &part_done,
			.maxval = foff + len,
			.total = totalmem
		};

		rc = pthread_create(&spinner_th, 0, snapshot_spinner_cb, si);
		if (rc) {
			perror("Unable to create spinner thread");
			show_progress = false;
		}
	}

	while (rem > 0) {
		if (show_progress)
			todo = min(SNAPSHOT_CHUNK, rem);
		else
			todo = rem;

		if (op_wr)
			done = write(snapfd, src, todo);
		else
			done = read(snapfd, src, todo);
		if (done < 0) {
			perror(op_wr ? "Failed to write to file" :
			    "Failed to read from file");
			return (-1);
		}

		src = (uint8_t *)src + done;
		part_done += done;
		rem -= done;
	}

	if (show_progress) {
		rc = pthread_join(spinner_th, NULL);
		if (rc)
			perror("Unable to end spinner thread");
	}

	return (0);
}

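/*
 * Save or restore the whole of guest memory: the low segment first, then the
 * segment mapped above 4 GiB, if any.  Returns the number of bytes handled,
 * or 0 on error.
 */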
static size_t
vm_snapshot_mem(struct vmctx *ctx, int snapfd, size_t memsz, const bool op_wr)
{
	int ret;
	size_t lowmem, highmem, totalmem;
	char *baseaddr;

	ret = vm_get_guestmem_from_ctx(ctx, &baseaddr, &lowmem, &highmem);
	if (ret) {
		fprintf(stderr, "%s: unable to retrieve guest memory size\r\n",
			__func__);
		return (0);
	}
	totalmem = lowmem + highmem;

	if ((op_wr == false) && (totalmem != memsz)) {
		fprintf(stderr, "%s: mem size mismatch: %zu vs %zu\r\n",
			__func__, totalmem, memsz);
		return (0);
	}

	winsize.ws_col = 80;
#ifdef TIOCGWINSZ
	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
#endif /* TIOCGWINSZ */
	old_winch_handler = signal(SIGWINCH, winch_handler);

	ret = vm_snapshot_mem_part(snapfd, 0, baseaddr, lowmem,
		totalmem, op_wr);
	if (ret) {
		fprintf(stderr, "%s: Could not %s lowmem\r\n",
			__func__, op_wr ? "write" : "read");
		totalmem = 0;
		goto done;
	}

	if (highmem == 0)
		goto done;

	ret = vm_snapshot_mem_part(snapfd, lowmem, baseaddr + 4*GB,
		highmem, totalmem, op_wr);
	if (ret) {
		fprintf(stderr, "%s: Could not %s highmem\r\n",
		        __func__, op_wr ? "write" : "read");
		totalmem = 0;
		goto done;
	}

done:
	printf("\r\n");
	signal(SIGWINCH, old_winch_handler);

	return (totalmem);
}

int
restore_vm_mem(struct vmctx *ctx, struct restore_state *rstate)
{
	size_t restored;

	restored = vm_snapshot_mem(ctx, rstate->vmmem_fd, rstate->vmmem_len,
				   false);

	if (restored != rstate->vmmem_len)
		return (-1);

	return (0);
}

int
vm_restore_kern_structs(struct vmctx *ctx, struct restore_state *rstate)
{
	for (unsigned i = 0; i < nitems(snapshot_kern_structs); i++) {
		const struct vm_snapshot_kern_info *info;
		struct vm_snapshot_meta *meta;
		void *data;
		size_t size;

		info = &snapshot_kern_structs[i];
		data = lookup_dev(info->struct_name, JSON_KERNEL_ARR_KEY, rstate, &size);
		if (data == NULL)
			errx(EX_DATAERR, "Cannot find kern struct %s",
			    info->struct_name);

		if (size == 0)
			errx(EX_DATAERR, "data with zero size for %s",
			    info->struct_name);

		meta = &(struct vm_snapshot_meta) {
			.dev_name = info->struct_name,
			.dev_req  = info->req,

			.buffer.buf_start = data,
			.buffer.buf_size = size,

			.buffer.buf = data,
			.buffer.buf_rem = size,

			.op = VM_SNAPSHOT_RESTORE,
		};

		if (vm_snapshot_req(ctx, meta))
			err(EX_DATAERR, "Failed to restore %s",
			    info->struct_name);
	}
	return (0);
}

static int
vm_restore_device(struct restore_state *rstate, vm_snapshot_dev_cb func,
    const char *name, void *data)
{
	void *dev_ptr;
	size_t dev_size;
	int ret;
	struct vm_snapshot_meta *meta;

	dev_ptr = lookup_dev(name, JSON_DEV_ARR_KEY, rstate, &dev_size);

	if (dev_ptr == NULL) {
		EPRINTLN("Failed to lookup dev: %s", name);
		return (EINVAL);
	}

	if (dev_size == 0) {
		EPRINTLN("Restore device size is 0: %s", name);
		return (EINVAL);
	}

	meta = &(struct vm_snapshot_meta) {
		.dev_name = name,
		.dev_data = data,

		.buffer.buf_start = dev_ptr,
		.buffer.buf_size = dev_size,

		.buffer.buf = dev_ptr,
		.buffer.buf_rem = dev_size,

		.op = VM_SNAPSHOT_RESTORE,
	};

	ret = func(meta);
	if (ret != 0) {
		EPRINTLN("Failed to restore dev: %s %d", name, ret);
		return (ret);
	}

	return (0);
}

int
vm_restore_devices(struct restore_state *rstate)
{
	int ret;
	struct pci_devinst *pdi = NULL;

	while ((pdi = pci_next(pdi)) != NULL) {
		ret = vm_restore_device(rstate, pci_snapshot, pdi->pi_name, pdi);
		if (ret)
			return (ret);
	}

#ifdef __amd64__
	ret = vm_restore_device(rstate, atkbdc_snapshot, "atkbdc", NULL);
#else
	ret = 0;
#endif
	return (ret);
}

int
vm_pause_devices(void)
{
	int ret;
	struct pci_devinst *pdi = NULL;

	while ((pdi = pci_next(pdi)) != NULL) {
		ret = pci_pause(pdi);
		if (ret) {
			EPRINTLN("Cannot pause dev %s: %d", pdi->pi_name, ret);
			return (ret);
		}
	}

	return (0);
}

int
vm_resume_devices(void)
{
	int ret;
	struct pci_devinst *pdi = NULL;

	while ((pdi = pci_next(pdi)) != NULL) {
		ret = pci_resume(pdi);
		if (ret) {
			EPRINTLN("Cannot resume '%s': %d", pdi->pi_name, ret);
			return (ret);
		}
	}

	return (0);
}

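/*
 * Snapshot a single kernel structure via vm_snapshot_req(), append its raw
 * state to 'data_fd' and emit a metadata record (name, size, file offset)
 * under 'array_key'.
 */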
static int
vm_save_kern_struct(struct vmctx *ctx, int data_fd, xo_handle_t *xop,
    const char *array_key, struct vm_snapshot_meta *meta, off_t *offset)
{
	int ret;
	size_t data_size;
	ssize_t write_cnt;

	ret = vm_snapshot_req(ctx, meta);
	if (ret != 0) {
		fprintf(stderr, "%s: Failed to snapshot struct %s\r\n",
			__func__, meta->dev_name);
		ret = -1;
		goto done;
	}

	data_size = vm_get_snapshot_size(meta);

	/* XXX-MJ no handling for short writes. */
	write_cnt = write(data_fd, meta->buffer.buf_start, data_size);
	if (write_cnt < 0 || (size_t)write_cnt != data_size) {
		perror("Failed to write all snapshotted data.");
		ret = -1;
		goto done;
	}

	/* Write metadata. */
	xo_open_instance_h(xop, array_key);
	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n",
	    meta->dev_name);
	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
	xo_close_instance_h(xop, array_key);

	*offset += data_size;

done:
	return (ret);
}

static int
vm_save_kern_structs(struct vmctx *ctx, int data_fd, xo_handle_t *xop)
{
	int ret, error;
	size_t buf_size, i, offset;
	char *buffer;
	struct vm_snapshot_meta *meta;

	error = 0;
	offset = 0;
	buf_size = SNAPSHOT_BUFFER_SIZE;

	buffer = malloc(SNAPSHOT_BUFFER_SIZE * sizeof(char));
	if (buffer == NULL) {
		error = ENOMEM;
		perror("Failed to allocate memory for snapshot buffer");
		goto err_vm_snapshot_kern_data;
	}

	meta = &(struct vm_snapshot_meta) {
		.buffer.buf_start = buffer,
		.buffer.buf_size = buf_size,

		.op = VM_SNAPSHOT_SAVE,
	};

	xo_open_list_h(xop, JSON_KERNEL_ARR_KEY);
	for (i = 0; i < nitems(snapshot_kern_structs); i++) {
		meta->dev_name = snapshot_kern_structs[i].struct_name;
		meta->dev_req  = snapshot_kern_structs[i].req;

		memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
		meta->buffer.buf = meta->buffer.buf_start;
		meta->buffer.buf_rem = meta->buffer.buf_size;

		ret = vm_save_kern_struct(ctx, data_fd, xop,
		    JSON_KERNEL_ARR_KEY, meta, &offset);
		if (ret != 0) {
			error = -1;
			goto err_vm_snapshot_kern_data;
		}
	}
	xo_close_list_h(xop, JSON_KERNEL_ARR_KEY);

err_vm_snapshot_kern_data:
	if (buffer != NULL)
		free(buffer);
	return (error);
}

static int
vm_snapshot_basic_metadata(struct vmctx *ctx, xo_handle_t *xop, size_t memsz)
{

	xo_open_container_h(xop, JSON_BASIC_METADATA_KEY);
	xo_emit_h(xop, "{:" JSON_NCPUS_KEY "/%d}\n", guest_ncpus);
	xo_emit_h(xop, "{:" JSON_VMNAME_KEY "/%s}\n", vm_get_name(ctx));
	xo_emit_h(xop, "{:" JSON_MEMSIZE_KEY "/%lu}\n", memsz);
	xo_emit_h(xop, "{:" JSON_MEMFLAGS_KEY "/%d}\n", vm_get_memflags(ctx));
	xo_close_container_h(xop, JSON_BASIC_METADATA_KEY);

	return (0);
}

static int
vm_snapshot_dev_write_data(int data_fd, xo_handle_t *xop, const char *array_key,
			   struct vm_snapshot_meta *meta, off_t *offset)
{
	ssize_t ret;
	size_t data_size;

	data_size = vm_get_snapshot_size(meta);

	/* XXX-MJ no handling for short writes. */
	ret = write(data_fd, meta->buffer.buf_start, data_size);
	if (ret < 0 || (size_t)ret != data_size) {
		perror("Failed to write all snapshotted data.");
		return (-1);
	}

	/* Write metadata. */
	xo_open_instance_h(xop, array_key);
	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n", meta->dev_name);
	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
	xo_close_instance_h(xop, array_key);

	*offset += data_size;

	return (0);
}

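/*
 * Snapshot one emulated device into the shared buffer, then flush the data
 * and its metadata record to the kernel data file.
 */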
static int
vm_snapshot_device(vm_snapshot_dev_cb func, const char *dev_name,
    void *devdata, int data_fd, xo_handle_t *xop,
    struct vm_snapshot_meta *meta, off_t *offset)
{
	int ret;

	memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
	meta->buffer.buf = meta->buffer.buf_start;
	meta->buffer.buf_rem = meta->buffer.buf_size;
	meta->dev_name = dev_name;
	meta->dev_data = devdata;

	ret = func(meta);
	if (ret != 0) {
		EPRINTLN("Failed to snapshot %s; ret=%d", dev_name, ret);
		return (ret);
	}

	ret = vm_snapshot_dev_write_data(data_fd, xop, JSON_DEV_ARR_KEY, meta,
					 offset);
	if (ret != 0)
		return (ret);

	return (0);
}

static int
vm_snapshot_devices(int data_fd, xo_handle_t *xop)
{
	int ret;
	off_t offset;
	void *buffer;
	size_t buf_size;
	struct vm_snapshot_meta *meta;
	struct pci_devinst *pdi;

	buf_size = SNAPSHOT_BUFFER_SIZE;

	offset = lseek(data_fd, 0, SEEK_CUR);
	if (offset < 0) {
		perror("Failed to get data file current offset.");
		return (-1);
	}

	buffer = malloc(buf_size);
	if (buffer == NULL) {
		perror("Failed to allocate memory for snapshot buffer");
		ret = ENOMEM;
		goto snapshot_err;
	}

	meta = &(struct vm_snapshot_meta) {
		.buffer.buf_start = buffer,
		.buffer.buf_size = buf_size,

		.op = VM_SNAPSHOT_SAVE,
	};

	xo_open_list_h(xop, JSON_DEV_ARR_KEY);

	/* Save PCI devices */
	pdi = NULL;
	while ((pdi = pci_next(pdi)) != NULL) {
		ret = vm_snapshot_device(pci_snapshot, pdi->pi_name, pdi,
		    data_fd, xop, meta, &offset);
		if (ret != 0)
			goto snapshot_err;
	}

#ifdef __amd64__
	ret = vm_snapshot_device(atkbdc_snapshot, "atkbdc", NULL,
	    data_fd, xop, meta, &offset);
#else
	ret = 0;
#endif

	xo_close_list_h(xop, JSON_DEV_ARR_KEY);

snapshot_err:
	if (buffer != NULL)
		free(buffer);
	return (ret);
}

void
checkpoint_cpu_add(int vcpu)
{

	pthread_mutex_lock(&vcpu_lock);
	CPU_SET(vcpu, &vcpus_active);

	if (checkpoint_active) {
		CPU_SET(vcpu, &vcpus_suspended);
		while (checkpoint_active)
			pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
		CPU_CLR(vcpu, &vcpus_suspended);
	}
	pthread_mutex_unlock(&vcpu_lock);
}

/*
 * When a vCPU is suspended for any reason, it calls
 * checkpoint_cpu_suspend().  This records that the vCPU is idle.
 * Before returning from suspension, checkpoint_cpu_resume() is
 * called.  In resume we pause the vCPU thread until the checkpoint
 * is complete.  The reason for the two-step process is that vCPUs
 * might already be stopped in the debug server when a checkpoint is
 * requested.  This approach allows us to account for and handle
 * those vCPUs.
 */
void
checkpoint_cpu_suspend(int vcpu)
{

	pthread_mutex_lock(&vcpu_lock);
	CPU_SET(vcpu, &vcpus_suspended);
	if (checkpoint_active && CPU_CMP(&vcpus_active, &vcpus_suspended) == 0)
		pthread_cond_signal(&vcpus_idle);
	pthread_mutex_unlock(&vcpu_lock);
}

void
checkpoint_cpu_resume(int vcpu)
{

	pthread_mutex_lock(&vcpu_lock);
	while (checkpoint_active)
		pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
	CPU_CLR(vcpu, &vcpus_suspended);
	pthread_mutex_unlock(&vcpu_lock);
}

static void
vm_vcpu_pause(struct vmctx *ctx)
{

	pthread_mutex_lock(&vcpu_lock);
	checkpoint_active = true;
	vm_suspend_all_cpus(ctx);
	while (CPU_CMP(&vcpus_active, &vcpus_suspended) != 0)
		pthread_cond_wait(&vcpus_idle, &vcpu_lock);
	pthread_mutex_unlock(&vcpu_lock);
}

static void
vm_vcpu_resume(struct vmctx *ctx)
{

	pthread_mutex_lock(&vcpu_lock);
	checkpoint_active = false;
	pthread_mutex_unlock(&vcpu_lock);
	vm_resume_all_cpus(ctx);
	pthread_cond_broadcast(&vcpus_can_run);
}

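/*
 * Take a checkpoint: pause vCPUs and devices, write guest memory to
 * 'checkpoint_file', kernel and device state to '<file>.kern' and JSON
 * metadata to '<file>.meta', then resume the VM, or destroy it when
 * 'stop_vm' is set (a suspend request).
 */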
static int
vm_checkpoint(struct vmctx *ctx, int fddir, const char *checkpoint_file,
    bool stop_vm)
{
	int fd_checkpoint = 0, kdata_fd = 0, fd_meta;
	int ret = 0;
	int error = 0;
	size_t memsz;
	xo_handle_t *xop = NULL;
	char *meta_filename = NULL;
	char *kdata_filename = NULL;
	FILE *meta_file = NULL;

	kdata_filename = strcat_extension(checkpoint_file, ".kern");
	if (kdata_filename == NULL) {
		fprintf(stderr, "Failed to construct kernel data filename.\n");
		return (-1);
	}

	kdata_fd = openat(fddir, kdata_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
	if (kdata_fd < 0) {
		perror("Failed to open kernel data snapshot file.");
		error = -1;
		goto done;
	}

	fd_checkpoint = openat(fddir, checkpoint_file, O_RDWR | O_CREAT | O_TRUNC, 0700);

	if (fd_checkpoint < 0) {
		perror("Failed to create checkpoint file");
		error = -1;
		goto done;
	}

	meta_filename = strcat_extension(checkpoint_file, ".meta");
	if (meta_filename == NULL) {
		fprintf(stderr, "Failed to construct vm metadata filename.\n");
		error = -1;
		goto done;
	}

	fd_meta = openat(fddir, meta_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
	if (fd_meta != -1)
		meta_file = fdopen(fd_meta, "w");
	if (meta_file == NULL) {
		perror("Failed to open vm metadata snapshot file.");
		close(fd_meta);
		error = -1;
		goto done;
	}

	xop = xo_create_to_file(meta_file, XO_STYLE_JSON, XOF_PRETTY);
	if (xop == NULL) {
		perror("Failed to get libxo handle on metadata file.");
		error = -1;
		goto done;
	}

	vm_vcpu_pause(ctx);

	ret = vm_pause_devices();
	if (ret != 0) {
		fprintf(stderr, "Could not pause devices\r\n");
		error = ret;
		goto done;
	}

	memsz = vm_snapshot_mem(ctx, fd_checkpoint, 0, true);
	if (memsz == 0) {
		perror("Could not write guest memory to file");
		error = -1;
		goto done;
	}

	ret = vm_snapshot_basic_metadata(ctx, xop, memsz);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot vm basic metadata.\n");
		error = -1;
		goto done;
	}

	ret = vm_save_kern_structs(ctx, kdata_fd, xop);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot vm kernel data.\n");
		error = -1;
		goto done;
	}

	ret = vm_snapshot_devices(kdata_fd, xop);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot device state.\n");
		error = -1;
		goto done;
	}

	xo_finish_h(xop);

	if (stop_vm) {
		vm_destroy(ctx);
		exit(0);
	}

done:
	ret = vm_resume_devices();
	if (ret != 0)
		fprintf(stderr, "Could not resume devices\r\n");
	vm_vcpu_resume(ctx);
	if (fd_checkpoint > 0)
		close(fd_checkpoint);
	if (meta_filename != NULL)
		free(meta_filename);
	if (kdata_filename != NULL)
		free(kdata_filename);
	if (xop != NULL)
		xo_destroy(xop);
	if (meta_file != NULL)
		fclose(meta_file);
	if (kdata_fd > 0)
		close(kdata_fd);
	return (error);
}

static int
handle_message(struct vmctx *ctx, nvlist_t *nvl)
{
	const char *cmd;
	struct ipc_command **ipc_cmd;

	if (!nvlist_exists_string(nvl, "cmd"))
		return (EINVAL);

	cmd = nvlist_get_string(nvl, "cmd");
	IPC_COMMAND_FOREACH(ipc_cmd, ipc_cmd_set) {
		if (strcmp(cmd, (*ipc_cmd)->name) == 0)
			return ((*ipc_cmd)->handler(ctx, nvl));
	}

	return (EOPNOTSUPP);
}

/*
 * Listen for commands from bhyvectl
 */
void *
checkpoint_thread(void *param)
{
	int fd;
	struct checkpoint_thread_info *thread_info;
	nvlist_t *nvl;

	pthread_set_name_np(pthread_self(), "checkpoint thread");
	thread_info = (struct checkpoint_thread_info *)param;

	while ((fd = accept(thread_info->socket_fd, NULL, NULL)) != -1) {
		nvl = nvlist_recv(fd, 0);
		if (nvl != NULL)
			handle_message(thread_info->ctx, nvl);
		else
			EPRINTLN("nvlist_recv() failed: %s", strerror(errno));

		close(fd);
		nvlist_destroy(nvl);
	}

	return (NULL);
}

static int
vm_do_checkpoint(struct vmctx *ctx, const nvlist_t *nvl)
{
	int error;

	if (!nvlist_exists_string(nvl, "filename") ||
	    !nvlist_exists_bool(nvl, "suspend") ||
	    !nvlist_exists_descriptor(nvl, "fddir"))
		error = EINVAL;
	else
		error = vm_checkpoint(ctx,
		    nvlist_get_descriptor(nvl, "fddir"),
		    nvlist_get_string(nvl, "filename"),
		    nvlist_get_bool(nvl, "suspend"));

	return (error);
}
IPC_COMMAND(ipc_cmd_set, checkpoint, vm_do_checkpoint);

/*
 * Create the listening socket for IPC with bhyvectl
 */
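/*
 * Illustrative sketch only (error handling omitted): a client such as
 * bhyvectl is expected to connect to the socket at BHYVE_RUN_DIR<vmname>
 * and send a single nvlist describing the request, e.g.:
 *
 *	nvlist_t *nvl = nvlist_create(0);
 *	nvlist_add_string(nvl, "cmd", "checkpoint");
 *	nvlist_add_string(nvl, "filename", "vm.ckp");
 *	nvlist_add_bool(nvl, "suspend", false);
 *	nvlist_add_descriptor(nvl, "fddir", dirfd);
 *	nvlist_send(sock, nvl);
 *
 * where 'sock' and 'dirfd' stand for the connected socket and an open
 * directory descriptor for the checkpoint destination.
 */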
int
init_checkpoint_thread(struct vmctx *ctx)
{
	struct checkpoint_thread_info *checkpoint_info = NULL;
	struct sockaddr_un addr;
	int socket_fd;
	pthread_t checkpoint_pthread;
	int err;
#ifndef WITHOUT_CAPSICUM
	cap_rights_t rights;
#endif

	memset(&addr, 0, sizeof(addr));

	socket_fd = socket(PF_UNIX, SOCK_STREAM, 0);
	if (socket_fd < 0) {
		EPRINTLN("Socket creation failed: %s", strerror(errno));
		err = -1;
		goto fail;
	}

	addr.sun_family = AF_UNIX;

	snprintf(addr.sun_path, sizeof(addr.sun_path), "%s%s",
		 BHYVE_RUN_DIR, vm_get_name(ctx));
	addr.sun_len = SUN_LEN(&addr);
	unlink(addr.sun_path);

	if (bind(socket_fd, (struct sockaddr *)&addr, addr.sun_len) != 0) {
		EPRINTLN("Failed to bind socket \"%s\": %s",
		    addr.sun_path, strerror(errno));
		err = -1;
		goto fail;
	}

	if (listen(socket_fd, 10) < 0) {
		EPRINTLN("ipc socket listen: %s", strerror(errno));
		err = errno;
		goto fail;
	}

#ifndef WITHOUT_CAPSICUM
	cap_rights_init(&rights, CAP_ACCEPT, CAP_READ, CAP_RECV, CAP_WRITE,
	    CAP_SEND, CAP_GETSOCKOPT);

	if (caph_rights_limit(socket_fd, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
#endif
	checkpoint_info = calloc(1, sizeof(*checkpoint_info));
	if (checkpoint_info == NULL) {
		EPRINTLN("Failed to allocate checkpoint info: %s",
		    strerror(errno));
		err = -1;
		goto fail;
	}
	checkpoint_info->ctx = ctx;
	checkpoint_info->socket_fd = socket_fd;

	err = pthread_create(&checkpoint_pthread, NULL, checkpoint_thread,
		checkpoint_info);
	if (err != 0)
		goto fail;

	return (0);
fail:
	free(checkpoint_info);
	if (socket_fd > 0)
		close(socket_fd);
	unlink(addr.sun_path);

	return (err);
}

void
vm_snapshot_buf_err(const char *bufname, const enum vm_snapshot_op op)
{
	const char *__op;

	if (op == VM_SNAPSHOT_SAVE)
		__op = "save";
	else if (op == VM_SNAPSHOT_RESTORE)
		__op = "restore";
	else
		__op = "unknown";

	fprintf(stderr, "%s: snapshot-%s failed for %s\r\n",
		__func__, __op, bufname);
}

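/*
 * Copy 'data_size' bytes between the caller's data and the snapshot buffer,
 * in the direction given by the operation, and advance the buffer cursor.
 */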
int
vm_snapshot_buf(void *data, size_t data_size, struct vm_snapshot_meta *meta)
{
	struct vm_snapshot_buffer *buffer;
	int op;

	buffer = &meta->buffer;
	op = meta->op;

	if (buffer->buf_rem < data_size) {
		fprintf(stderr, "%s: buffer too small\r\n", __func__);
		return (E2BIG);
	}

	if (op == VM_SNAPSHOT_SAVE)
		memcpy(buffer->buf, data, data_size);
	else if (op == VM_SNAPSHOT_RESTORE)
		memcpy(data, buffer->buf, data_size);
	else
		return (EINVAL);

	buffer->buf += data_size;
	buffer->buf_rem -= data_size;

	return (0);
}

size_t
vm_get_snapshot_size(struct vm_snapshot_meta *meta)
{
	size_t length;
	struct vm_snapshot_buffer *buffer;

	buffer = &meta->buffer;

	if (buffer->buf_size < buffer->buf_rem) {
		fprintf(stderr, "%s: Invalid buffer: size = %zu, rem = %zu\r\n",
			__func__, buffer->buf_size, buffer->buf_rem);
		length = 0;
	} else {
		length = buffer->buf_size - buffer->buf_rem;
	}

	return (length);
}

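/*
 * Snapshot a host pointer into guest memory: translate it to a guest physical
 * address on save and back to a host address on restore.  A NULL pointer is
 * only tolerated when 'restore_null' is set.
 */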
int
vm_snapshot_guest2host_addr(struct vmctx *ctx, void **addrp, size_t len,
    bool restore_null, struct vm_snapshot_meta *meta)
{
	int ret;
	vm_paddr_t gaddr;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		gaddr = paddr_host2guest(ctx, *addrp);
		if (gaddr == (vm_paddr_t) -1) {
			if (!restore_null || *addrp != NULL) {
				ret = EFAULT;
				goto done;
			}
		}

		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
		if (gaddr == (vm_paddr_t) -1) {
			if (!restore_null) {
				ret = EFAULT;
				goto done;
			}
		}

		*addrp = paddr_guest2host(ctx, gaddr, len);
	} else {
		ret = EINVAL;
	}

done:
	return (ret);
}

int
vm_snapshot_buf_cmp(void *data, size_t data_size, struct vm_snapshot_meta *meta)
{
	struct vm_snapshot_buffer *buffer;
	int op;
	int ret;

	buffer = &meta->buffer;
	op = meta->op;

	if (buffer->buf_rem < data_size) {
		fprintf(stderr, "%s: buffer too small\r\n", __func__);
		ret = E2BIG;
		goto done;
	}

	if (op == VM_SNAPSHOT_SAVE) {
		ret = 0;
		memcpy(buffer->buf, data, data_size);
	} else if (op == VM_SNAPSHOT_RESTORE) {
		ret = memcmp(data, buffer->buf, data_size);
	} else {
		ret = EINVAL;
		goto done;
	}

	buffer->buf += data_size;
	buffer->buf_rem -= data_size;

done:
	return (ret);
}
1598