xref: /freebsd/usr.sbin/bhyve/snapshot.c (revision f927afc1a6fe8257e28d79e8bb77305c2958724a)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2016 Flavius Anton
5  * Copyright (c) 2016 Mihai Tiganus
6  * Copyright (c) 2016-2019 Mihai Carabas
7  * Copyright (c) 2017-2019 Darius Mihai
8  * Copyright (c) 2017-2019 Elena Mihailescu
9  * Copyright (c) 2018-2019 Sergiu Weisz
10  * All rights reserved.
11  * The bhyve-snapshot feature was developed under sponsorships
12  * from Matthew Grooms.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 #include <sys/types.h>
38 #ifndef WITHOUT_CAPSICUM
39 #include <sys/capsicum.h>
40 #endif
41 #include <sys/mman.h>
42 #include <sys/socket.h>
43 #include <sys/stat.h>
44 #include <sys/time.h>
45 #include <sys/un.h>
46 
47 #include <machine/atomic.h>
48 
49 #ifndef WITHOUT_CAPSICUM
50 #include <capsicum_helpers.h>
51 #endif
52 #include <stdio.h>
53 #include <stdlib.h>
54 #include <string.h>
55 #include <err.h>
56 #include <errno.h>
57 #include <fcntl.h>
58 #include <libgen.h>
59 #include <signal.h>
60 #include <unistd.h>
61 #include <assert.h>
63 #include <pthread.h>
64 #include <pthread_np.h>
65 #include <sysexits.h>
66 #include <stdbool.h>
67 #include <sys/ioctl.h>
68 
69 #include <machine/vmm.h>
70 #ifndef WITHOUT_CAPSICUM
71 #include <machine/vmm_dev.h>
72 #endif
73 #include <machine/vmm_snapshot.h>
74 #include <vmmapi.h>
75 
76 #include "bhyverun.h"
77 #include "acpi.h"
78 #ifdef __amd64__
79 #include "amd64/atkbdc.h"
80 #endif
81 #include "debug.h"
82 #include "inout.h"
83 #include "ipc.h"
84 #include "ioapic.h"
85 #include "mem.h"
86 #include "mevent.h"
87 #include "mptbl.h"
88 #include "pci_emul.h"
89 #include "pci_irq.h"
90 #include "pci_lpc.h"
91 #include "smbiostbl.h"
92 #include "snapshot.h"
93 #include "xmsr.h"
94 #include "spinup_ap.h"
95 #include "rtc.h"
96 
97 #include <libxo/xo.h>
98 #include <ucl.h>
99 
100 struct spinner_info {
101 	const size_t *crtval;
102 	const size_t maxval;
103 	const size_t total;
104 };
105 
106 extern int guest_ncpus;
107 
108 static struct winsize winsize;
109 static sig_t old_winch_handler;
110 
111 #define	KB		(1024UL)
112 #define	MB		(1024UL * KB)
113 #define	GB		(1024UL * MB)
114 
115 #define	SNAPSHOT_CHUNK	(4 * MB)
116 #define	PROG_BUF_SZ	(8192)
117 
118 #define	SNAPSHOT_BUFFER_SIZE (20 * MB)
119 
120 #define	JSON_KERNEL_ARR_KEY		"kern_structs"
121 #define	JSON_DEV_ARR_KEY		"devices"
122 #define	JSON_BASIC_METADATA_KEY 	"basic metadata"
123 #define	JSON_SNAPSHOT_REQ_KEY		"device"
124 #define	JSON_SIZE_KEY			"size"
125 #define	JSON_FILE_OFFSET_KEY		"file_offset"
126 
127 #define	JSON_NCPUS_KEY			"ncpus"
128 #define	JSON_VMNAME_KEY 		"vmname"
129 #define	JSON_MEMSIZE_KEY		"memsize"
130 #define	JSON_MEMFLAGS_KEY		"memflags"
131 
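/*
 * A checkpoint is written as three files derived from the user-supplied
 * name: the guest memory image (<name>), the kernel and device state
 * blobs (<name>.kern) and a libxo-generated JSON metadata file
 * (<name>.meta) using the keys above.  Illustrative sketch of the
 * metadata layout (exact formatting is up to libxo):
 *
 *	{
 *	  "basic metadata": { "ncpus": ..., "vmname": ...,
 *	                      "memsize": ..., "memflags": ... },
 *	  "kern_structs": [ { "device": "vhpet", "size": ..., "file_offset": ... }, ... ],
 *	  "devices":      [ { "device": ..., "size": ..., "file_offset": ... }, ... ]
 *	}
 */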
132 #define min(a,b)		\
133 ({				\
134  __typeof__ (a) _a = (a);	\
135  __typeof__ (b) _b = (b); 	\
136  _a < _b ? _a : _b;       	\
137  })
138 
139 static const struct vm_snapshot_kern_info snapshot_kern_structs[] = {
140 	{ "vhpet",	STRUCT_VHPET	},
141 	{ "vm",		STRUCT_VM	},
142 	{ "vioapic",	STRUCT_VIOAPIC	},
143 	{ "vlapic",	STRUCT_VLAPIC	},
144 	{ "vmcx",	STRUCT_VMCX	},
145 	{ "vatpit",	STRUCT_VATPIT	},
146 	{ "vatpic",	STRUCT_VATPIC	},
147 	{ "vpmtmr",	STRUCT_VPMTMR	},
148 	{ "vrtc",	STRUCT_VRTC	},
149 };
150 
151 static cpuset_t vcpus_active, vcpus_suspended;
152 static pthread_mutex_t vcpu_lock;
153 static pthread_cond_t vcpus_idle, vcpus_can_run;
154 static bool checkpoint_active;
155 
156 /*
157  * TODO: Harden this function and all of its callers since 'base_str' is a user
158  * provided string.
159  */
160 static char *
161 strcat_extension(const char *base_str, const char *ext)
162 {
163 	char *res;
164 	size_t base_len, ext_len;
165 
166 	base_len = strnlen(base_str, NAME_MAX);
167 	ext_len = strnlen(ext, NAME_MAX);
168 
169 	if (base_len + ext_len > NAME_MAX) {
170 		fprintf(stderr, "Filename exceeds maximum length.\n");
171 		return (NULL);
172 	}
173 
174 	res = malloc(base_len + ext_len + 1);
175 	if (res == NULL) {
176 		perror("Failed to allocate memory.");
177 		return (NULL);
178 	}
179 
180 	memcpy(res, base_str, base_len);
181 	memcpy(res + base_len, ext, ext_len);
182 	res[base_len + ext_len] = 0;
183 
184 	return (res);
185 }
186 
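/*
 * Release everything load_restore_file() set up: unmap the kernel data
 * file, close the guest memory and kernel data descriptors and free the
 * UCL parser state.
 */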
187 void
188 destroy_restore_state(struct restore_state *rstate)
189 {
190 	if (rstate == NULL) {
191 		fprintf(stderr, "Attempting to destroy NULL restore struct.\n");
192 		return;
193 	}
194 
195 	if (rstate->kdata_map != MAP_FAILED)
196 		munmap(rstate->kdata_map, rstate->kdata_len);
197 
198 	if (rstate->kdata_fd > 0)
199 		close(rstate->kdata_fd);
200 	if (rstate->vmmem_fd > 0)
201 		close(rstate->vmmem_fd);
202 
203 	if (rstate->meta_root_obj != NULL)
204 		ucl_object_unref(rstate->meta_root_obj);
205 	if (rstate->meta_parser != NULL)
206 		ucl_parser_free(rstate->meta_parser);
207 }
208 
209 static int
210 load_vmmem_file(const char *filename, struct restore_state *rstate)
211 {
212 	struct stat sb;
213 	int err;
214 
215 	rstate->vmmem_fd = open(filename, O_RDONLY);
216 	if (rstate->vmmem_fd < 0) {
217 		perror("Failed to open restore file");
218 		return (-1);
219 	}
220 
221 	err = fstat(rstate->vmmem_fd, &sb);
222 	if (err < 0) {
223 		perror("Failed to stat restore file");
224 		goto err_load_vmmem;
225 	}
226 
227 	if (sb.st_size == 0) {
228 		fprintf(stderr, "Restore file is empty.\n");
229 		goto err_load_vmmem;
230 	}
231 
232 	rstate->vmmem_len = sb.st_size;
233 
234 	return (0);
235 
236 err_load_vmmem:
237 	if (rstate->vmmem_fd > 0)
238 		close(rstate->vmmem_fd);
239 	return (-1);
240 }
241 
242 static int
243 load_kdata_file(const char *filename, struct restore_state *rstate)
244 {
245 	struct stat sb;
246 	int err;
247 
248 	rstate->kdata_fd = open(filename, O_RDONLY);
249 	if (rstate->kdata_fd < 0) {
250 		perror("Failed to open kernel data file");
251 		return (-1);
252 	}
253 
254 	err = fstat(rstate->kdata_fd, &sb);
255 	if (err < 0) {
256 		perror("Failed to stat kernel data file");
257 		goto err_load_kdata;
258 	}
259 
260 	if (sb.st_size == 0) {
261 		fprintf(stderr, "Kernel data file is empty.\n");
262 		goto err_load_kdata;
263 	}
264 
265 	rstate->kdata_len = sb.st_size;
266 	rstate->kdata_map = mmap(NULL, rstate->kdata_len, PROT_READ,
267 				 MAP_SHARED, rstate->kdata_fd, 0);
268 	if (rstate->kdata_map == MAP_FAILED) {
269 		perror("Failed to map restore file");
270 		goto err_load_kdata;
271 	}
272 
273 	return (0);
274 
275 err_load_kdata:
276 	if (rstate->kdata_fd > 0)
277 		close(rstate->kdata_fd);
278 	return (-1);
279 }
280 
281 static int
282 load_metadata_file(const char *filename, struct restore_state *rstate)
283 {
284 	ucl_object_t *obj;
285 	struct ucl_parser *parser;
286 	int err;
287 
288 	parser = ucl_parser_new(UCL_PARSER_DEFAULT);
289 	if (parser == NULL) {
290 		fprintf(stderr, "Failed to initialize UCL parser.\n");
291 		err = -1;
292 		goto err_load_metadata;
293 	}
294 
295 	err = ucl_parser_add_file(parser, filename);
296 	if (err == 0) {
297 		fprintf(stderr, "Failed to parse metadata file: '%s'\n",
298 			filename);
299 		err = -1;
300 		goto err_load_metadata;
301 	}
302 
303 	obj = ucl_parser_get_object(parser);
304 	if (obj == NULL) {
305 		fprintf(stderr, "Failed to parse object.\n");
306 		err = -1;
307 		goto err_load_metadata;
308 	}
309 
310 	rstate->meta_parser = parser;
311 	rstate->meta_root_obj = (ucl_object_t *)obj;
312 
313 	return (0);
314 
315 err_load_metadata:
316 	if (parser != NULL)
317 		ucl_parser_free(parser);
318 	return (err);
319 }
320 
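/*
 * Open the triplet of files that make up a snapshot: the guest memory
 * image given by 'filename', plus the derived '<filename>.kern' and
 * '<filename>.meta' files.  On failure, all partially-acquired resources
 * are released.
 */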
321 int
322 load_restore_file(const char *filename, struct restore_state *rstate)
323 {
324 	int err = 0;
325 	char *kdata_filename = NULL, *meta_filename = NULL;
326 
327 	assert(filename != NULL);
328 	assert(rstate != NULL);
329 
330 	memset(rstate, 0, sizeof(*rstate));
331 	rstate->kdata_map = MAP_FAILED;
332 
333 	err = load_vmmem_file(filename, rstate);
334 	if (err != 0) {
335 		fprintf(stderr, "Failed to load guest RAM file.\n");
336 		goto err_restore;
337 	}
338 
339 	kdata_filename = strcat_extension(filename, ".kern");
340 	if (kdata_filename == NULL) {
341 		fprintf(stderr, "Failed to construct kernel data filename.\n");
342 		goto err_restore;
343 	}
344 
345 	err = load_kdata_file(kdata_filename, rstate);
346 	if (err != 0) {
347 		fprintf(stderr, "Failed to load guest kernel data file.\n");
348 		goto err_restore;
349 	}
350 
351 	meta_filename = strcat_extension(filename, ".meta");
352 	if (meta_filename == NULL) {
353 		fprintf(stderr, "Failed to construct kernel metadata filename.\n");
354 		goto err_restore;
355 	}
356 
357 	err = load_metadata_file(meta_filename, rstate);
358 	if (err != 0) {
359 		fprintf(stderr, "Failed to load guest metadata file.\n");
360 		goto err_restore;
361 	}
362 
363 	return (0);
364 
365 err_restore:
366 	destroy_restore_state(rstate);
367 	if (kdata_filename != NULL)
368 		free(kdata_filename);
369 	if (meta_filename != NULL)
370 		free(meta_filename);
371 	return (-1);
372 }
373 
374 #define JSON_GET_INT_OR_RETURN(key, obj, result_ptr, ret)			\
375 do {										\
376 	const ucl_object_t *obj__;						\
377 	obj__ = ucl_object_lookup(obj, key);					\
378 	if (obj__ == NULL) {							\
379 		fprintf(stderr, "Missing key: '%s'\n", key);			\
380 		return (ret);							\
381 	}									\
382 	if (!ucl_object_toint_safe(obj__, result_ptr)) {			\
383 		fprintf(stderr, "Cannot convert '%s' value to int.\n", key);	\
384 		return (ret);							\
385 	}									\
386 } while(0)
387 
388 #define JSON_GET_STRING_OR_RETURN(key, obj, result_ptr, ret)			\
389 do {										\
390 	const ucl_object_t *obj__;						\
391 	obj__ = ucl_object_lookup(obj, key);					\
392 	if (obj__ == NULL) {							\
393 		fprintf(stderr, "Missing key: '%s'\n", key);			\
394 		return (ret);							\
395 	}									\
396 	if (!ucl_object_tostring_safe(obj__, result_ptr)) {			\
397 		fprintf(stderr, "Cannot convert '%s' value to string.\n", key);	\
398 		return (ret);							\
399 	}									\
400 } while(0)
401 
402 static void *
403 lookup_check_dev(const char *dev_name, struct restore_state *rstate,
404 		 const ucl_object_t *obj, size_t *data_size)
405 {
406 	const char *snapshot_req;
407 	int64_t size, file_offset;
408 
409 	snapshot_req = NULL;
410 	JSON_GET_STRING_OR_RETURN(JSON_SNAPSHOT_REQ_KEY, obj,
411 				  &snapshot_req, NULL);
412 	assert(snapshot_req != NULL);
413 	if (!strcmp(snapshot_req, dev_name)) {
414 		JSON_GET_INT_OR_RETURN(JSON_SIZE_KEY, obj,
415 				       &size, NULL);
416 		assert(size >= 0);
417 
418 		JSON_GET_INT_OR_RETURN(JSON_FILE_OFFSET_KEY, obj,
419 				       &file_offset, NULL);
420 		assert(file_offset >= 0);
421 		assert((uint64_t)file_offset + size <= rstate->kdata_len);
422 
423 		*data_size = (size_t)size;
424 		return ((uint8_t *)rstate->kdata_map + file_offset);
425 	}
426 
427 	return (NULL);
428 }
429 
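/*
 * Walk the metadata array named by 'key' (kernel structs or devices)
 * looking for an entry that describes 'dev_name'.  On success, return a
 * pointer into the mapped kernel data file and store the blob size in
 * '*data_size'.
 */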
430 static void *
431 lookup_dev(const char *dev_name, const char *key, struct restore_state *rstate,
432     size_t *data_size)
433 {
434 	const ucl_object_t *devs = NULL, *obj = NULL;
435 	ucl_object_iter_t it = NULL;
436 	void *ret;
437 
438 	devs = ucl_object_lookup(rstate->meta_root_obj, key);
439 	if (devs == NULL) {
440 		fprintf(stderr, "Failed to find '%s' object.\n",
441 			key);
442 		return (NULL);
443 	}
444 
445 	if (ucl_object_type(devs) != UCL_ARRAY) {
446 		fprintf(stderr, "Object '%s' is not an array.\n",
447 			key);
448 		return (NULL);
449 	}
450 
451 	while ((obj = ucl_object_iterate(devs, &it, true)) != NULL) {
452 		ret = lookup_check_dev(dev_name, rstate, obj, data_size);
453 		if (ret != NULL)
454 			return (ret);
455 	}
456 
457 	return (NULL);
458 }
459 
460 static const ucl_object_t *
461 lookup_basic_metadata_object(struct restore_state *rstate)
462 {
463 	const ucl_object_t *basic_meta_obj = NULL;
464 
465 	basic_meta_obj = ucl_object_lookup(rstate->meta_root_obj,
466 					   JSON_BASIC_METADATA_KEY);
467 	if (basic_meta_obj == NULL) {
468 		fprintf(stderr, "Failed to find '%s' object.\n",
469 			JSON_BASIC_METADATA_KEY);
470 		return (NULL);
471 	}
472 
473 	if (ucl_object_type(basic_meta_obj) != UCL_OBJECT) {
474 		fprintf(stderr, "Object '%s' is not a JSON object.\n",
475 		JSON_BASIC_METADATA_KEY);
476 		return (NULL);
477 	}
478 
479 	return (basic_meta_obj);
480 }
481 
482 const char *
483 lookup_vmname(struct restore_state *rstate)
484 {
485 	const char *vmname;
486 	const ucl_object_t *obj;
487 
488 	obj = lookup_basic_metadata_object(rstate);
489 	if (obj == NULL)
490 		return (NULL);
491 
492 	JSON_GET_STRING_OR_RETURN(JSON_VMNAME_KEY, obj, &vmname, NULL);
493 	return (vmname);
494 }
495 
496 int
497 lookup_memflags(struct restore_state *rstate)
498 {
499 	int64_t memflags;
500 	const ucl_object_t *obj;
501 
502 	obj = lookup_basic_metadata_object(rstate);
503 	if (obj == NULL)
504 		return (0);
505 
506 	JSON_GET_INT_OR_RETURN(JSON_MEMFLAGS_KEY, obj, &memflags, 0);
507 
508 	return ((int)memflags);
509 }
510 
511 size_t
512 lookup_memsize(struct restore_state *rstate)
513 {
514 	int64_t memsize;
515 	const ucl_object_t *obj;
516 
517 	obj = lookup_basic_metadata_object(rstate);
518 	if (obj == NULL)
519 		return (0);
520 
521 	JSON_GET_INT_OR_RETURN(JSON_MEMSIZE_KEY, obj, &memsize, 0);
522 	if (memsize < 0)
523 		memsize = 0;
524 
525 	return ((size_t)memsize);
526 }
527 
528 
529 int
530 lookup_guest_ncpus(struct restore_state *rstate)
531 {
532 	int64_t ncpus;
533 	const ucl_object_t *obj;
534 
535 	obj = lookup_basic_metadata_object(rstate);
536 	if (obj == NULL)
537 		return (0);
538 
539 	JSON_GET_INT_OR_RETURN(JSON_NCPUS_KEY, obj, &ncpus, 0);
540 	return ((int)ncpus);
541 }
542 
543 static void
544 winch_handler(int signal __unused)
545 {
546 #ifdef TIOCGWINSZ
547 	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
548 #endif /* TIOCGWINSZ */
549 }
550 
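/*
 * Render a single-line progress bar scaled to the terminal width,
 * roughly of the form "[1.000GiB / 4.000GiB] |####/______|", with a
 * spinner character at the head of the bar while the copy is ongoing.
 */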
551 static int
552 print_progress(size_t crtval, const size_t maxval)
553 {
554 	size_t rc;
555 	double crtval_gb, maxval_gb;
556 	size_t i, win_width, prog_start, prog_done, prog_end;
557 	int mval_len;
558 
559 	static char prog_buf[PROG_BUF_SZ];
560 	static const size_t len = sizeof(prog_buf);
561 
562 	static size_t div;
563 	static const char *div_str;
564 
565 	static char wip_bar[] = { '/', '-', '\\', '|' };
566 	static int wip_idx = 0;
567 
568 	if (maxval == 0) {
569 		printf("[0B / 0B]\r\n");
570 		return (0);
571 	}
572 
573 	if (crtval > maxval)
574 		crtval = maxval;
575 
576 	if (maxval > 10 * GB) {
577 		div = GB;
578 		div_str = "GiB";
579 	} else if (maxval > 10 * MB) {
580 		div = MB;
581 		div_str = "MiB";
582 	} else {
583 		div = KB;
584 		div_str = "KiB";
585 	}
586 
587 	crtval_gb = (double) crtval / div;
588 	maxval_gb = (double) maxval / div;
589 
590 	rc = snprintf(prog_buf, len, "%.03lf", maxval_gb);
591 	if (rc >= len) {
592 		fprintf(stderr, "Maxval too big\n");
593 		return (-1);
594 	}
595 	mval_len = rc;
596 
597 	rc = snprintf(prog_buf, len, "\r[%*.03lf%s / %.03lf%s] |",
598 		mval_len, crtval_gb, div_str, maxval_gb, div_str);
599 
600 	if (rc >= len) {
601 		fprintf(stderr, "Buffer too small to print progress\n");
602 		return (-1);
603 	}
604 
605 	win_width = min(winsize.ws_col, len);
606 	prog_start = rc;
607 
608 	if (prog_start < (win_width - 2)) {
609 		prog_end = win_width - prog_start - 2;
610 		prog_done = prog_end * (crtval_gb / maxval_gb);
611 
612 		for (i = prog_start; i < prog_start + prog_done; i++)
613 			prog_buf[i] = '#';
614 
615 		if (crtval != maxval) {
616 			prog_buf[i] = wip_bar[wip_idx];
617 			wip_idx = (wip_idx + 1) % sizeof(wip_bar);
618 			i++;
619 		} else {
620 			prog_buf[i++] = '#';
621 		}
622 
623 		for (; i < win_width - 2; i++)
624 			prog_buf[i] = '_';
625 
626 		prog_buf[win_width - 2] = '|';
627 	}
628 
629 	prog_buf[win_width - 1] = '\0';
630 	write(STDOUT_FILENO, prog_buf, win_width);
631 
632 	return (0);
633 }
634 
635 static void *
636 snapshot_spinner_cb(void *arg)
637 {
638 	int rc;
639 	size_t crtval, maxval, total;
640 	struct spinner_info *si;
641 	struct timespec ts;
642 
643 	si = arg;
644 	if (si == NULL)
645 		pthread_exit(NULL);
646 
647 	ts.tv_sec = 0;
648 	ts.tv_nsec = 50 * 1000 * 1000; /* 50 ms sleep time */
649 
650 	do {
651 		crtval = *si->crtval;
652 		maxval = si->maxval;
653 		total = si->total;
654 
655 		rc = print_progress(crtval, total);
656 		if (rc < 0) {
657 			fprintf(stderr, "Failed to print progress\n");
658 			break;
659 		}
660 
661 		nanosleep(&ts, NULL);
662 	} while (crtval < maxval);
663 
664 	pthread_exit(NULL);
665 	return (NULL);
666 }
667 
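/*
 * Copy one contiguous range of guest memory ('src', 'len' bytes) to or
 * from the snapshot file at offset 'foff', depending on 'op_wr'.  When
 * run on a terminal, a helper thread displays progress against
 * 'totalmem'.
 */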
668 static int
669 vm_snapshot_mem_part(const int snapfd, const size_t foff, void *src,
670 		     const size_t len, const size_t totalmem, const bool op_wr)
671 {
672 	int rc;
673 	size_t part_done, todo, rem;
674 	ssize_t done;
675 	bool show_progress;
676 	pthread_t spinner_th;
677 	struct spinner_info *si;
678 
679 	if (lseek(snapfd, foff, SEEK_SET) < 0) {
680 		perror("Failed to change file offset");
681 		return (-1);
682 	}
683 
684 	show_progress = false;
685 	if (isatty(STDOUT_FILENO) && (winsize.ws_col != 0))
686 		show_progress = true;
687 
688 	part_done = foff;
689 	rem = len;
690 
691 	if (show_progress) {
692 		si = &(struct spinner_info) {
693 			.crtval = &part_done,
694 			.maxval = foff + len,
695 			.total = totalmem
696 		};
697 
698 		rc = pthread_create(&spinner_th, 0, snapshot_spinner_cb, si);
699 		if (rc) {
700 			perror("Unable to create spinner thread");
701 			show_progress = false;
702 		}
703 	}
704 
705 	while (rem > 0) {
706 		if (show_progress)
707 			todo = min(SNAPSHOT_CHUNK, rem);
708 		else
709 			todo = rem;
710 
711 		if (op_wr)
712 			done = write(snapfd, src, todo);
713 		else
714 			done = read(snapfd, src, todo);
715 		if (done < 0) {
716 			perror(op_wr ? "Failed to write to file" : "Failed to read from file");
717 			return (-1);
718 		}
719 
720 		src = (uint8_t *)src + done;
721 		part_done += done;
722 		rem -= done;
723 	}
724 
725 	if (show_progress) {
726 		rc = pthread_join(spinner_th, NULL);
727 		if (rc)
728 			perror("Unable to end spinner thread");
729 	}
730 
731 	return (0);
732 }
733 
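/*
 * Save or restore all guest memory.  Lowmem is stored at offset 0 of the
 * snapshot file, followed immediately by highmem (the segment that is
 * mapped above 4 GiB in the guest).  Returns the total number of bytes
 * handled, or 0 on error.
 */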
734 static size_t
735 vm_snapshot_mem(struct vmctx *ctx, int snapfd, size_t memsz, const bool op_wr)
736 {
737 	int ret;
738 	size_t lowmem, highmem, totalmem;
739 	char *baseaddr;
740 
741 	ret = vm_get_guestmem_from_ctx(ctx, &baseaddr, &lowmem, &highmem);
742 	if (ret) {
743 		fprintf(stderr, "%s: unable to retrieve guest memory size\r\n",
744 			__func__);
745 		return (0);
746 	}
747 	totalmem = lowmem + highmem;
748 
749 	if ((op_wr == false) && (totalmem != memsz)) {
750 		fprintf(stderr, "%s: mem size mismatch: %zu vs %zu\r\n",
751 			__func__, totalmem, memsz);
752 		return (0);
753 	}
754 
755 	winsize.ws_col = 80;
756 #ifdef TIOCGWINSZ
757 	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
758 #endif /* TIOCGWINSZ */
759 	old_winch_handler = signal(SIGWINCH, winch_handler);
760 
761 	ret = vm_snapshot_mem_part(snapfd, 0, baseaddr, lowmem,
762 		totalmem, op_wr);
763 	if (ret) {
764 		fprintf(stderr, "%s: Could not %s lowmem\r\n",
765 			__func__, op_wr ? "write" : "read");
766 		totalmem = 0;
767 		goto done;
768 	}
769 
770 	if (highmem == 0)
771 		goto done;
772 
773 	ret = vm_snapshot_mem_part(snapfd, lowmem, baseaddr + 4*GB,
774 		highmem, totalmem, op_wr);
775 	if (ret) {
776 		fprintf(stderr, "%s: Could not %s highmem\r\n",
777 		        __func__, op_wr ? "write" : "read");
778 		totalmem = 0;
779 		goto done;
780 	}
781 
782 done:
783 	printf("\r\n");
784 	signal(SIGWINCH, old_winch_handler);
785 
786 	return (totalmem);
787 }
788 
789 int
790 restore_vm_mem(struct vmctx *ctx, struct restore_state *rstate)
791 {
792 	size_t restored;
793 
794 	restored = vm_snapshot_mem(ctx, rstate->vmmem_fd, rstate->vmmem_len,
795 				   false);
796 
797 	if (restored != rstate->vmmem_len)
798 		return (-1);
799 
800 	return (0);
801 }
802 
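/*
 * Restore the kernel-side snapshot state (vhpet, vlapic, vrtc, ...) by
 * locating each struct's blob in the kernel data file and passing it to
 * vm_snapshot_req() with op VM_SNAPSHOT_RESTORE.
 */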
803 int
804 vm_restore_kern_structs(struct vmctx *ctx, struct restore_state *rstate)
805 {
806 	for (unsigned i = 0; i < nitems(snapshot_kern_structs); i++) {
807 		const struct vm_snapshot_kern_info *info;
808 		struct vm_snapshot_meta *meta;
809 		void *data;
810 		size_t size;
811 
812 		info = &snapshot_kern_structs[i];
813 		data = lookup_dev(info->struct_name, JSON_KERNEL_ARR_KEY, rstate, &size);
814 		if (data == NULL)
815 			errx(EX_DATAERR, "Cannot find kern struct %s",
816 			    info->struct_name);
817 
818 		if (size == 0)
819 			errx(EX_DATAERR, "data with zero size for %s",
820 			    info->struct_name);
821 
822 		meta = &(struct vm_snapshot_meta) {
823 			.dev_name = info->struct_name,
824 			.dev_req  = info->req,
825 
826 			.buffer.buf_start = data,
827 			.buffer.buf_size = size,
828 
829 			.buffer.buf = data,
830 			.buffer.buf_rem = size,
831 
832 			.op = VM_SNAPSHOT_RESTORE,
833 		};
834 
835 		if (vm_snapshot_req(ctx, meta))
836 			err(EX_DATAERR, "Failed to restore %s",
837 			    info->struct_name);
838 	}
839 	return (0);
840 }
841 
842 static int
843 vm_restore_device(struct restore_state *rstate, vm_snapshot_dev_cb func,
844     const char *name, void *data)
845 {
846 	void *dev_ptr;
847 	size_t dev_size;
848 	int ret;
849 	struct vm_snapshot_meta *meta;
850 
851 	dev_ptr = lookup_dev(name, JSON_DEV_ARR_KEY, rstate, &dev_size);
852 
853 	if (dev_ptr == NULL) {
854 		EPRINTLN("Failed to lookup dev: %s", name);
855 		return (EINVAL);
856 	}
857 
858 	if (dev_size == 0) {
859 		EPRINTLN("Restore device size is 0: %s", name);
860 		return (EINVAL);
861 	}
862 
863 	meta = &(struct vm_snapshot_meta) {
864 		.dev_name = name,
865 		.dev_data = data,
866 
867 		.buffer.buf_start = dev_ptr,
868 		.buffer.buf_size = dev_size,
869 
870 		.buffer.buf = dev_ptr,
871 		.buffer.buf_rem = dev_size,
872 
873 		.op = VM_SNAPSHOT_RESTORE,
874 	};
875 
876 	ret = func(meta);
877 	if (ret != 0) {
878 		EPRINTLN("Failed to restore dev: %s %d", name, ret);
879 		return (ret);
880 	}
881 
882 	return (0);
883 }
884 
885 int
886 vm_restore_devices(struct restore_state *rstate)
887 {
888 	int ret;
889 	struct pci_devinst *pdi = NULL;
890 
891 	while ((pdi = pci_next(pdi)) != NULL) {
892 		ret = vm_restore_device(rstate, pci_snapshot, pdi->pi_name, pdi);
893 		if (ret)
894 			return (ret);
895 	}
896 
897 #ifdef __amd64__
898 	ret = vm_restore_device(rstate, atkbdc_snapshot, "atkbdc", NULL);
899 #else
900 	ret = 0;
901 #endif
902 	return (ret);
903 }
904 
905 int
906 vm_pause_devices(void)
907 {
908 	int ret;
909 	struct pci_devinst *pdi = NULL;
910 
911 	while ((pdi = pci_next(pdi)) != NULL) {
912 		ret = pci_pause(pdi);
913 		if (ret) {
914 			EPRINTLN("Cannot pause dev %s: %d", pdi->pi_name, ret);
915 			return (ret);
916 		}
917 	}
918 
919 	return (0);
920 }
921 
922 int
923 vm_resume_devices(void)
924 {
925 	int ret;
926 	struct pci_devinst *pdi = NULL;
927 
928 	while ((pdi = pci_next(pdi)) != NULL) {
929 		ret = pci_resume(pdi);
930 		if (ret) {
931 			EPRINTLN("Cannot resume '%s': %d", pdi->pi_name, ret);
932 			return (ret);
933 		}
934 	}
935 
936 	return (0);
937 }
938 
939 static int
940 vm_save_kern_struct(struct vmctx *ctx, int data_fd, xo_handle_t *xop,
941     const char *array_key, struct vm_snapshot_meta *meta, off_t *offset)
942 {
943 	int ret;
944 	size_t data_size;
945 	ssize_t write_cnt;
946 
947 	ret = vm_snapshot_req(ctx, meta);
948 	if (ret != 0) {
949 		fprintf(stderr, "%s: Failed to snapshot struct %s\r\n",
950 			__func__, meta->dev_name);
951 		ret = -1;
952 		goto done;
953 	}
954 
955 	data_size = vm_get_snapshot_size(meta);
956 
957 	/* XXX-MJ no handling for short writes. */
958 	write_cnt = write(data_fd, meta->buffer.buf_start, data_size);
959 	if (write_cnt < 0 || (size_t)write_cnt != data_size) {
960 		perror("Failed to write all snapshotted data.");
961 		ret = -1;
962 		goto done;
963 	}
964 
965 	/* Write metadata. */
966 	xo_open_instance_h(xop, array_key);
967 	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n",
968 	    meta->dev_name);
969 	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
970 	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
971 	xo_close_instance_h(xop, array_key);
972 
973 	*offset += data_size;
974 
975 done:
976 	return (ret);
977 }
978 
979 static int
980 vm_save_kern_structs(struct vmctx *ctx, int data_fd, xo_handle_t *xop)
981 {
982 	int ret, error;
983 	size_t buf_size, i, offset;
984 	char *buffer;
985 	struct vm_snapshot_meta *meta;
986 
987 	error = 0;
988 	offset = 0;
989 	buf_size = SNAPSHOT_BUFFER_SIZE;
990 
991 	buffer = malloc(buf_size);
992 	if (buffer == NULL) {
993 		error = ENOMEM;
994 		perror("Failed to allocate memory for snapshot buffer");
995 		goto err_vm_snapshot_kern_data;
996 	}
997 
998 	meta = &(struct vm_snapshot_meta) {
999 		.buffer.buf_start = buffer,
1000 		.buffer.buf_size = buf_size,
1001 
1002 		.op = VM_SNAPSHOT_SAVE,
1003 	};
1004 
1005 	xo_open_list_h(xop, JSON_KERNEL_ARR_KEY);
1006 	for (i = 0; i < nitems(snapshot_kern_structs); i++) {
1007 		meta->dev_name = snapshot_kern_structs[i].struct_name;
1008 		meta->dev_req  = snapshot_kern_structs[i].req;
1009 
1010 		memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
1011 		meta->buffer.buf = meta->buffer.buf_start;
1012 		meta->buffer.buf_rem = meta->buffer.buf_size;
1013 
1014 		ret = vm_save_kern_struct(ctx, data_fd, xop,
1015 		    JSON_KERNEL_ARR_KEY, meta, &offset);
1016 		if (ret != 0) {
1017 			error = -1;
1018 			goto err_vm_snapshot_kern_data;
1019 		}
1020 	}
1021 	xo_close_list_h(xop, JSON_KERNEL_ARR_KEY);
1022 
1023 err_vm_snapshot_kern_data:
1024 	if (buffer != NULL)
1025 		free(buffer);
1026 	return (error);
1027 }
1028 
1029 static int
1030 vm_snapshot_basic_metadata(struct vmctx *ctx, xo_handle_t *xop, size_t memsz)
1031 {
1032 
1033 	xo_open_container_h(xop, JSON_BASIC_METADATA_KEY);
1034 	xo_emit_h(xop, "{:" JSON_NCPUS_KEY "/%d}\n", guest_ncpus);
1035 	xo_emit_h(xop, "{:" JSON_VMNAME_KEY "/%s}\n", vm_get_name(ctx));
1036 	xo_emit_h(xop, "{:" JSON_MEMSIZE_KEY "/%lu}\n", memsz);
1037 	xo_emit_h(xop, "{:" JSON_MEMFLAGS_KEY "/%d}\n", vm_get_memflags(ctx));
1038 	xo_close_container_h(xop, JSON_BASIC_METADATA_KEY);
1039 
1040 	return (0);
1041 }
1042 
1043 static int
1044 vm_snapshot_dev_write_data(int data_fd, xo_handle_t *xop, const char *array_key,
1045 			   struct vm_snapshot_meta *meta, off_t *offset)
1046 {
1047 	ssize_t ret;
1048 	size_t data_size;
1049 
1050 	data_size = vm_get_snapshot_size(meta);
1051 
1052 	/* XXX-MJ no handling for short writes. */
1053 	ret = write(data_fd, meta->buffer.buf_start, data_size);
1054 	if (ret < 0 || (size_t)ret != data_size) {
1055 		perror("Failed to write all snapshotted data.");
1056 		return (-1);
1057 	}
1058 
1059 	/* Write metadata. */
1060 	xo_open_instance_h(xop, array_key);
1061 	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n", meta->dev_name);
1062 	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
1063 	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
1064 	xo_close_instance_h(xop, array_key);
1065 
1066 	*offset += data_size;
1067 
1068 	return (0);
1069 }
1070 
1071 static int
1072 vm_snapshot_device(vm_snapshot_dev_cb func, const char *dev_name,
1073     void *devdata, int data_fd, xo_handle_t *xop,
1074     struct vm_snapshot_meta *meta, off_t *offset)
1075 {
1076 	int ret;
1077 
1078 	memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
1079 	meta->buffer.buf = meta->buffer.buf_start;
1080 	meta->buffer.buf_rem = meta->buffer.buf_size;
1081 	meta->dev_name = dev_name;
1082 	meta->dev_data = devdata;
1083 
1084 	ret = func(meta);
1085 	if (ret != 0) {
1086 		EPRINTLN("Failed to snapshot %s; ret=%d", dev_name, ret);
1087 		return (ret);
1088 	}
1089 
1090 	ret = vm_snapshot_dev_write_data(data_fd, xop, JSON_DEV_ARR_KEY, meta,
1091 					 offset);
1092 	if (ret != 0)
1093 		return (ret);
1094 
1095 	return (0);
1096 }
1097 
1098 static int
1099 vm_snapshot_devices(int data_fd, xo_handle_t *xop)
1100 {
1101 	int ret;
1102 	off_t offset;
1103 	void *buffer;
1104 	size_t buf_size;
1105 	struct vm_snapshot_meta *meta;
1106 	struct pci_devinst *pdi;
1107 
1108 	buf_size = SNAPSHOT_BUFFER_SIZE;
1109 
1110 	offset = lseek(data_fd, 0, SEEK_CUR);
1111 	if (offset < 0) {
1112 		perror("Failed to get data file current offset.");
1113 		return (-1);
1114 	}
1115 
1116 	buffer = malloc(buf_size);
1117 	if (buffer == NULL) {
1118 		perror("Failed to allocate memory for snapshot buffer");
1119 		ret = ENOMEM;
1120 		goto snapshot_err;
1121 	}
1122 
1123 	meta = &(struct vm_snapshot_meta) {
1124 		.buffer.buf_start = buffer,
1125 		.buffer.buf_size = buf_size,
1126 
1127 		.op = VM_SNAPSHOT_SAVE,
1128 	};
1129 
1130 	xo_open_list_h(xop, JSON_DEV_ARR_KEY);
1131 
1132 	/* Save PCI devices */
1133 	pdi = NULL;
1134 	while ((pdi = pci_next(pdi)) != NULL) {
1135 		ret = vm_snapshot_device(pci_snapshot, pdi->pi_name, pdi,
1136 		    data_fd, xop, meta, &offset);
1137 		if (ret != 0)
1138 			goto snapshot_err;
1139 	}
1140 
1141 #ifdef __amd64__
1142 	ret = vm_snapshot_device(atkbdc_snapshot, "atkbdc", NULL,
1143 	    data_fd, xop, meta, &offset);
1144 #else
1145 	ret = 0;
1146 #endif
1147 
1148 	xo_close_list_h(xop, JSON_DEV_ARR_KEY);
1149 
1150 snapshot_err:
1151 	if (buffer != NULL)
1152 		free(buffer);
1153 	return (ret);
1154 }
1155 
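/*
 * Called when a vCPU thread becomes active: record it in 'vcpus_active'
 * and, if a checkpoint is currently in progress, park the thread until
 * the checkpoint completes.
 */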
1156 void
1157 checkpoint_cpu_add(int vcpu)
1158 {
1159 
1160 	pthread_mutex_lock(&vcpu_lock);
1161 	CPU_SET(vcpu, &vcpus_active);
1162 
1163 	if (checkpoint_active) {
1164 		CPU_SET(vcpu, &vcpus_suspended);
1165 		while (checkpoint_active)
1166 			pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
1167 		CPU_CLR(vcpu, &vcpus_suspended);
1168 	}
1169 	pthread_mutex_unlock(&vcpu_lock);
1170 }
1171 
1172 /*
1173  * When a vCPU is suspended for any reason, it calls
1174  * checkpoint_cpu_suspend(), which records that the vCPU is now idle.
1175  * Before returning from suspension, checkpoint_cpu_resume() is
1176  * called; it pauses the vCPU thread until the checkpoint is
1177  * complete.  The reason for the two-step process is that vCPUs
1178  * might already be stopped in the debug server when a checkpoint is
1179  * requested.  This approach allows us to account for and handle
1180  * those vCPUs.
1181  */
1182 void
1183 checkpoint_cpu_suspend(int vcpu)
1184 {
1185 
1186 	pthread_mutex_lock(&vcpu_lock);
1187 	CPU_SET(vcpu, &vcpus_suspended);
1188 	if (checkpoint_active && CPU_CMP(&vcpus_active, &vcpus_suspended) == 0)
1189 		pthread_cond_signal(&vcpus_idle);
1190 	pthread_mutex_unlock(&vcpu_lock);
1191 }
1192 
1193 void
1194 checkpoint_cpu_resume(int vcpu)
1195 {
1196 
1197 	pthread_mutex_lock(&vcpu_lock);
1198 	while (checkpoint_active)
1199 		pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
1200 	CPU_CLR(vcpu, &vcpus_suspended);
1201 	pthread_mutex_unlock(&vcpu_lock);
1202 }
1203 
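/*
 * Quiesce the guest before taking a checkpoint: mark the checkpoint as
 * active, ask the kernel to suspend every vCPU and wait until all active
 * vCPUs have reported themselves suspended.
 */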
1204 static void
1205 vm_vcpu_pause(struct vmctx *ctx)
1206 {
1207 
1208 	pthread_mutex_lock(&vcpu_lock);
1209 	checkpoint_active = true;
1210 	vm_suspend_all_cpus(ctx);
1211 	while (CPU_CMP(&vcpus_active, &vcpus_suspended) != 0)
1212 		pthread_cond_wait(&vcpus_idle, &vcpu_lock);
1213 	pthread_mutex_unlock(&vcpu_lock);
1214 }
1215 
1216 static void
1217 vm_vcpu_resume(struct vmctx *ctx)
1218 {
1219 
1220 	pthread_mutex_lock(&vcpu_lock);
1221 	checkpoint_active = false;
1222 	pthread_mutex_unlock(&vcpu_lock);
1223 	vm_resume_all_cpus(ctx);
1224 	pthread_cond_broadcast(&vcpus_can_run);
1225 }
1226 
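/*
 * Write a full checkpoint relative to the directory descriptor 'fddir':
 * guest memory to 'checkpoint_file', kernel and device state to
 * '<checkpoint_file>.kern' and JSON metadata to '<checkpoint_file>.meta'.
 * The vCPUs and devices are paused for the duration; if 'stop_vm' is set
 * the VM is destroyed and the process exits instead of resuming.
 */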
1227 static int
1228 vm_checkpoint(struct vmctx *ctx, int fddir, const char *checkpoint_file,
1229     bool stop_vm)
1230 {
1231 	int fd_checkpoint = 0, kdata_fd = 0, fd_meta;
1232 	int ret = 0;
1233 	int error = 0;
1234 	size_t memsz;
1235 	xo_handle_t *xop = NULL;
1236 	char *meta_filename = NULL;
1237 	char *kdata_filename = NULL;
1238 	FILE *meta_file = NULL;
1239 
1240 	kdata_filename = strcat_extension(checkpoint_file, ".kern");
1241 	if (kdata_filename == NULL) {
1242 		fprintf(stderr, "Failed to construct kernel data filename.\n");
1243 		return (-1);
1244 	}
1245 
1246 	kdata_fd = openat(fddir, kdata_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
1247 	if (kdata_fd < 0) {
1248 		perror("Failed to open kernel data snapshot file.");
1249 		error = -1;
1250 		goto done;
1251 	}
1252 
1253 	fd_checkpoint = openat(fddir, checkpoint_file, O_RDWR | O_CREAT | O_TRUNC, 0700);
1254 
1255 	if (fd_checkpoint < 0) {
1256 		perror("Failed to create checkpoint file");
1257 		error = -1;
1258 		goto done;
1259 	}
1260 
1261 	meta_filename = strcat_extension(checkpoint_file, ".meta");
1262 	if (meta_filename == NULL) {
1263 		fprintf(stderr, "Failed to construct vm metadata filename.\n");
		error = -1;
1264 		goto done;
1265 	}
1266 
1267 	fd_meta = openat(fddir, meta_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
1268 	if (fd_meta != -1)
1269 		meta_file = fdopen(fd_meta, "w");
1270 	if (meta_file == NULL) {
1271 		perror("Failed to open vm metadata snapshot file.");
1272 		if (fd_meta != -1)
			close(fd_meta);
		error = -1;
1273 		goto done;
1274 	}
1275 
1276 	xop = xo_create_to_file(meta_file, XO_STYLE_JSON, XOF_PRETTY);
1277 	if (xop == NULL) {
1278 		perror("Failed to get libxo handle on metadata file.");
		error = -1;
1279 		goto done;
1280 	}
1281 
1282 	vm_vcpu_pause(ctx);
1283 
1284 	ret = vm_pause_devices();
1285 	if (ret != 0) {
1286 		fprintf(stderr, "Could not pause devices\r\n");
1287 		error = ret;
1288 		goto done;
1289 	}
1290 
1291 	memsz = vm_snapshot_mem(ctx, fd_checkpoint, 0, true);
1292 	if (memsz == 0) {
1293 		perror("Could not write guest memory to file");
1294 		error = -1;
1295 		goto done;
1296 	}
1297 
1298 	ret = vm_snapshot_basic_metadata(ctx, xop, memsz);
1299 	if (ret != 0) {
1300 		fprintf(stderr, "Failed to snapshot vm basic metadata.\n");
1301 		error = -1;
1302 		goto done;
1303 	}
1304 
1305 	ret = vm_save_kern_structs(ctx, kdata_fd, xop);
1306 	if (ret != 0) {
1307 		fprintf(stderr, "Failed to snapshot vm kernel data.\n");
1308 		error = -1;
1309 		goto done;
1310 	}
1311 
1312 	ret = vm_snapshot_devices(kdata_fd, xop);
1313 	if (ret != 0) {
1314 		fprintf(stderr, "Failed to snapshot device state.\n");
1315 		error = -1;
1316 		goto done;
1317 	}
1318 
1319 	xo_finish_h(xop);
1320 
1321 	if (stop_vm) {
1322 		vm_destroy(ctx);
1323 		exit(0);
1324 	}
1325 
1326 done:
1327 	ret = vm_resume_devices();
1328 	if (ret != 0)
1329 		fprintf(stderr, "Could not resume devices\r\n");
1330 	vm_vcpu_resume(ctx);
1331 	if (fd_checkpoint > 0)
1332 		close(fd_checkpoint);
1333 	if (meta_filename != NULL)
1334 		free(meta_filename);
1335 	if (kdata_filename != NULL)
1336 		free(kdata_filename);
1337 	if (xop != NULL)
1338 		xo_destroy(xop);
1339 	if (meta_file != NULL)
1340 		fclose(meta_file);
1341 	if (kdata_fd > 0)
1342 		close(kdata_fd);
1343 	return (error);
1344 }
1345 
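/*
 * Dispatch an nvlist received over the IPC socket: the "cmd" string
 * selects a handler registered in 'ipc_cmd_set'.
 */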
1346 static int
1347 handle_message(struct vmctx *ctx, nvlist_t *nvl)
1348 {
1349 	const char *cmd;
1350 	struct ipc_command **ipc_cmd;
1351 
1352 	if (!nvlist_exists_string(nvl, "cmd"))
1353 		return (EINVAL);
1354 
1355 	cmd = nvlist_get_string(nvl, "cmd");
1356 	IPC_COMMAND_FOREACH(ipc_cmd, ipc_cmd_set) {
1357 		if (strcmp(cmd, (*ipc_cmd)->name) == 0)
1358 			return ((*ipc_cmd)->handler(ctx, nvl));
1359 	}
1360 
1361 	return (EOPNOTSUPP);
1362 }
1363 
1364 /*
1365  * Listen for commands from bhyvectl
1366  */
1367 void *
1368 checkpoint_thread(void *param)
1369 {
1370 	int fd;
1371 	struct checkpoint_thread_info *thread_info;
1372 	nvlist_t *nvl;
1373 
1374 	pthread_set_name_np(pthread_self(), "checkpoint thread");
1375 	thread_info = (struct checkpoint_thread_info *)param;
1376 
1377 	while ((fd = accept(thread_info->socket_fd, NULL, NULL)) != -1) {
1378 		nvl = nvlist_recv(fd, 0);
1379 		if (nvl != NULL)
1380 			handle_message(thread_info->ctx, nvl);
1381 		else
1382 			EPRINTLN("nvlist_recv() failed: %s", strerror(errno));
1383 
1384 		close(fd);
1385 		nvlist_destroy(nvl);
1386 	}
1387 
1388 	return (NULL);
1389 }
1390 
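/*
 * IPC handler for the "checkpoint" command sent by bhyvectl.  The nvlist
 * is expected to carry a "filename" string, a "suspend" bool and an open
 * directory descriptor "fddir" naming where the snapshot files go.
 */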
1391 static int
1392 vm_do_checkpoint(struct vmctx *ctx, const nvlist_t *nvl)
1393 {
1394 	int error;
1395 
1396 	if (!nvlist_exists_string(nvl, "filename") ||
1397 	    !nvlist_exists_bool(nvl, "suspend") ||
1398 	    !nvlist_exists_descriptor(nvl, "fddir"))
1399 		error = EINVAL;
1400 	else
1401 		error = vm_checkpoint(ctx,
1402 		    nvlist_get_descriptor(nvl, "fddir"),
1403 		    nvlist_get_string(nvl, "filename"),
1404 		    nvlist_get_bool(nvl, "suspend"));
1405 
1406 	return (error);
1407 }
1408 IPC_COMMAND(ipc_cmd_set, checkpoint, vm_do_checkpoint);
1409 
1410 void
1411 init_snapshot(void)
1412 {
1413 	int err;
1414 
1415 	err = pthread_mutex_init(&vcpu_lock, NULL);
1416 	if (err != 0)
1417 		errc(1, err, "checkpoint mutex init");
1418 	err = pthread_cond_init(&vcpus_idle, NULL);
1419 	if (err != 0)
1420 		errc(1, err, "checkpoint cv init (vcpus_idle)");
1421 	err = pthread_cond_init(&vcpus_can_run, NULL);
1422 	if (err != 0)
1423 		errc(1, err, "checkpoint cv init (vcpus_can_run)");
1424 }
1425 
1426 /*
1427  * Create the listening socket for IPC with bhyvectl
1428  */
1429 int
1430 init_checkpoint_thread(struct vmctx *ctx)
1431 {
1432 	struct checkpoint_thread_info *checkpoint_info = NULL;
1433 	struct sockaddr_un addr;
1434 	int socket_fd;
1435 	pthread_t checkpoint_pthread;
1436 	int err;
1437 #ifndef WITHOUT_CAPSICUM
1438 	cap_rights_t rights;
1439 #endif
1440 
1441 	memset(&addr, 0, sizeof(addr));
1442 
1443 	socket_fd = socket(PF_UNIX, SOCK_STREAM, 0);
1444 	if (socket_fd < 0) {
1445 		EPRINTLN("Socket creation failed: %s", strerror(errno));
1446 		err = -1;
1447 		goto fail;
1448 	}
1449 
1450 	addr.sun_family = AF_UNIX;
1451 
1452 	snprintf(addr.sun_path, sizeof(addr.sun_path), "%s%s",
1453 		 BHYVE_RUN_DIR, vm_get_name(ctx));
1454 	addr.sun_len = SUN_LEN(&addr);
1455 	unlink(addr.sun_path);
1456 
1457 	if (bind(socket_fd, (struct sockaddr *)&addr, addr.sun_len) != 0) {
1458 		EPRINTLN("Failed to bind socket \"%s\": %s",
1459 		    addr.sun_path, strerror(errno));
1460 		err = -1;
1461 		goto fail;
1462 	}
1463 
1464 	if (listen(socket_fd, 10) < 0) {
1465 		EPRINTLN("ipc socket listen: %s", strerror(errno));
1466 		err = errno;
1467 		goto fail;
1468 	}
1469 
1470 #ifndef WITHOUT_CAPSICUM
1471 	cap_rights_init(&rights, CAP_ACCEPT, CAP_READ, CAP_RECV, CAP_WRITE,
1472 	    CAP_SEND, CAP_GETSOCKOPT);
1473 
1474 	if (caph_rights_limit(socket_fd, &rights) == -1)
1475 		errx(EX_OSERR, "Unable to apply rights for sandbox");
1476 #endif
1477 	checkpoint_info = calloc(1, sizeof(*checkpoint_info));
	if (checkpoint_info == NULL) {
		err = ENOMEM;
		goto fail;
	}
1478 	checkpoint_info->ctx = ctx;
1479 	checkpoint_info->socket_fd = socket_fd;
1480 
1481 	err = pthread_create(&checkpoint_pthread, NULL, checkpoint_thread,
1482 		checkpoint_info);
1483 	if (err != 0)
1484 		goto fail;
1485 
1486 	return (0);
1487 fail:
1488 	free(checkpoint_info);
1489 	if (socket_fd > 0)
1490 		close(socket_fd);
1491 	unlink(addr.sun_path);
1492 
1493 	return (err);
1494 }
1495 
1496 void
1497 vm_snapshot_buf_err(const char *bufname, const enum vm_snapshot_op op)
1498 {
1499 	const char *__op;
1500 
1501 	if (op == VM_SNAPSHOT_SAVE)
1502 		__op = "save";
1503 	else if (op == VM_SNAPSHOT_RESTORE)
1504 		__op = "restore";
1505 	else
1506 		__op = "unknown";
1507 
1508 	fprintf(stderr, "%s: snapshot-%s failed for %s\r\n",
1509 		__func__, __op, bufname);
1510 }
1511 
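/*
 * Copy 'data_size' bytes between caller memory and the snapshot buffer:
 * into the buffer for VM_SNAPSHOT_SAVE, out of it for VM_SNAPSHOT_RESTORE.
 * The buffer cursor and remaining-space counter are advanced on success.
 */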
1512 int
1513 vm_snapshot_buf(void *data, size_t data_size, struct vm_snapshot_meta *meta)
1514 {
1515 	struct vm_snapshot_buffer *buffer;
1516 	int op;
1517 
1518 	buffer = &meta->buffer;
1519 	op = meta->op;
1520 
1521 	if (buffer->buf_rem < data_size) {
1522 		fprintf(stderr, "%s: buffer too small\r\n", __func__);
1523 		return (E2BIG);
1524 	}
1525 
1526 	if (op == VM_SNAPSHOT_SAVE)
1527 		memcpy(buffer->buf, data, data_size);
1528 	else if (op == VM_SNAPSHOT_RESTORE)
1529 		memcpy(data, buffer->buf, data_size);
1530 	else
1531 		return (EINVAL);
1532 
1533 	buffer->buf += data_size;
1534 	buffer->buf_rem -= data_size;
1535 
1536 	return (0);
1537 }
1538 
1539 size_t
1540 vm_get_snapshot_size(struct vm_snapshot_meta *meta)
1541 {
1542 	size_t length;
1543 	struct vm_snapshot_buffer *buffer;
1544 
1545 	buffer = &meta->buffer;
1546 
1547 	if (buffer->buf_size < buffer->buf_rem) {
1548 		fprintf(stderr, "%s: Invalid buffer: size = %zu, rem = %zu\r\n",
1549 			__func__, buffer->buf_size, buffer->buf_rem);
1550 		length = 0;
1551 	} else {
1552 		length = buffer->buf_size - buffer->buf_rem;
1553 	}
1554 
1555 	return (length);
1556 }
1557 
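/*
 * Snapshot helper for pointers into guest memory: on save, the host
 * pointer '*addrp' is converted to a guest-physical address and stored;
 * on restore, the saved address is converted back to a host pointer.
 * 'restore_null' permits a NULL pointer to round-trip as an invalid
 * address instead of being treated as an error.
 */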
1558 int
1559 vm_snapshot_guest2host_addr(struct vmctx *ctx, void **addrp, size_t len,
1560     bool restore_null, struct vm_snapshot_meta *meta)
1561 {
1562 	int ret;
1563 	vm_paddr_t gaddr;
1564 
1565 	if (meta->op == VM_SNAPSHOT_SAVE) {
1566 		gaddr = paddr_host2guest(ctx, *addrp);
1567 		if (gaddr == (vm_paddr_t) -1) {
1568 			if (!restore_null || *addrp != NULL) {
1570 				ret = EFAULT;
1571 				goto done;
1572 			}
1573 		}
1574 
1575 		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
1576 	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
1577 		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
1578 		if (gaddr == (vm_paddr_t) -1) {
1579 			if (!restore_null) {
1580 				ret = EFAULT;
1581 				goto done;
1582 			}
1583 		}
1584 
1585 		*addrp = paddr_guest2host(ctx, gaddr, len);
1586 	} else {
1587 		ret = EINVAL;
1588 	}
1589 
1590 done:
1591 	return (ret);
1592 }
1593 
1594 int
1595 vm_snapshot_buf_cmp(void *data, size_t data_size, struct vm_snapshot_meta *meta)
1596 {
1597 	struct vm_snapshot_buffer *buffer;
1598 	int op;
1599 	int ret;
1600 
1601 	buffer = &meta->buffer;
1602 	op = meta->op;
1603 
1604 	if (buffer->buf_rem < data_size) {
1605 		fprintf(stderr, "%s: buffer too small\r\n", __func__);
1606 		ret = E2BIG;
1607 		goto done;
1608 	}
1609 
1610 	if (op == VM_SNAPSHOT_SAVE) {
1611 		ret = 0;
1612 		memcpy(buffer->buf, data, data_size);
1613 	} else if (op == VM_SNAPSHOT_RESTORE) {
1614 		ret = memcmp(data, buffer->buf, data_size);
1615 	} else {
1616 		ret = EINVAL;
1617 		goto done;
1618 	}
1619 
1620 	buffer->buf += data_size;
1621 	buffer->buf_rem -= data_size;
1622 
1623 done:
1624 	return (ret);
1625 }
1626