xref: /freebsd/usr.sbin/bhyve/snapshot.c (revision 63f537551380d2dab29fa402ad1269feae17e594)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2016 Flavius Anton
5  * Copyright (c) 2016 Mihai Tiganus
6  * Copyright (c) 2016-2019 Mihai Carabas
7  * Copyright (c) 2017-2019 Darius Mihai
8  * Copyright (c) 2017-2019 Elena Mihailescu
9  * Copyright (c) 2018-2019 Sergiu Weisz
10  * All rights reserved.
11  * The bhyve-snapshot feature was developed under sponsorships
12  * from Matthew Grooms.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 #include <sys/types.h>
38 #ifndef WITHOUT_CAPSICUM
39 #include <sys/capsicum.h>
40 #endif
41 #include <sys/mman.h>
42 #include <sys/socket.h>
43 #include <sys/stat.h>
44 #include <sys/time.h>
45 #include <sys/un.h>
46 
47 #ifndef WITHOUT_CAPSICUM
48 #include <capsicum_helpers.h>
49 #endif
50 #include <stdio.h>
51 #include <stdlib.h>
52 #include <string.h>
53 #include <err.h>
54 #include <errno.h>
55 #include <fcntl.h>
56 #include <libgen.h>
57 #include <signal.h>
58 #include <unistd.h>
59 #include <assert.h>
61 #include <pthread.h>
62 #include <pthread_np.h>
63 #include <sysexits.h>
64 #include <stdbool.h>
65 #include <sys/ioctl.h>
66 
67 #include <machine/vmm.h>
68 #ifndef WITHOUT_CAPSICUM
69 #include <machine/vmm_dev.h>
70 #endif
71 #include <machine/vmm_snapshot.h>
72 #include <vmmapi.h>
73 
74 #include "bhyverun.h"
75 #include "acpi.h"
76 #ifdef __amd64__
77 #include "amd64/atkbdc.h"
78 #endif
79 #include "debug.h"
80 #include "ipc.h"
81 #include "mem.h"
82 #include "pci_emul.h"
83 #include "snapshot.h"
84 
85 #include <libxo/xo.h>
86 #include <ucl.h>
87 
88 struct spinner_info {
89 	const size_t *crtval;
90 	const size_t maxval;
91 	const size_t total;
92 };
93 
94 extern int guest_ncpus;
95 
96 static struct winsize winsize;
97 static sig_t old_winch_handler;
98 
99 #define	KB		(1024UL)
100 #define	MB		(1024UL * KB)
101 #define	GB		(1024UL * MB)
102 
103 #define	SNAPSHOT_CHUNK	(4 * MB)
104 #define	PROG_BUF_SZ	(8192)
105 
106 #define	SNAPSHOT_BUFFER_SIZE (20 * MB)
107 
108 #define	JSON_KERNEL_ARR_KEY		"kern_structs"
109 #define	JSON_DEV_ARR_KEY		"devices"
110 #define	JSON_BASIC_METADATA_KEY 	"basic metadata"
111 #define	JSON_SNAPSHOT_REQ_KEY		"device"
112 #define	JSON_SIZE_KEY			"size"
113 #define	JSON_FILE_OFFSET_KEY		"file_offset"
114 
115 #define	JSON_NCPUS_KEY			"ncpus"
116 #define	JSON_VMNAME_KEY 		"vmname"
117 #define	JSON_MEMSIZE_KEY		"memsize"
118 #define	JSON_MEMFLAGS_KEY		"memflags"
119 
120 #define min(a,b)		\
121 ({				\
122  __typeof__ (a) _a = (a);	\
123  __typeof__ (b) _b = (b); 	\
124  _a < _b ? _a : _b;       	\
125  })
126 
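/*
 * Kernel-side structures saved and restored through vm_snapshot_req().
 * Each struct_name also ends up as the "device" key of the matching
 * entry in the snapshot metadata file.
 */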
127 static const struct vm_snapshot_kern_info snapshot_kern_structs[] = {
128 	{ "vhpet",	STRUCT_VHPET	},
129 	{ "vm",		STRUCT_VM	},
130 	{ "vioapic",	STRUCT_VIOAPIC	},
131 	{ "vlapic",	STRUCT_VLAPIC	},
132 	{ "vmcx",	STRUCT_VMCX	},
133 	{ "vatpit",	STRUCT_VATPIT	},
134 	{ "vatpic",	STRUCT_VATPIC	},
135 	{ "vpmtmr",	STRUCT_VPMTMR	},
136 	{ "vrtc",	STRUCT_VRTC	},
137 };
138 
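/*
 * vCPU/checkpoint synchronization state: vcpus_active tracks vCPUs that
 * have started running, vcpus_suspended tracks vCPUs that are currently
 * idle, and checkpoint_active gates vCPU execution while a checkpoint
 * is in progress.
 */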
139 static cpuset_t vcpus_active, vcpus_suspended;
140 static pthread_mutex_t vcpu_lock = PTHREAD_MUTEX_INITIALIZER;
141 static pthread_cond_t vcpus_idle = PTHREAD_COND_INITIALIZER;
142 static pthread_cond_t vcpus_can_run = PTHREAD_COND_INITIALIZER;
143 static bool checkpoint_active;
144 
145 /*
146  * TODO: Harden this function and all of its callers since 'base_str' is a user
147  * provided string.
148  */
149 static char *
150 strcat_extension(const char *base_str, const char *ext)
151 {
152 	char *res;
153 	size_t base_len, ext_len;
154 
155 	base_len = strnlen(base_str, NAME_MAX);
156 	ext_len = strnlen(ext, NAME_MAX);
157 
158 	if (base_len + ext_len > NAME_MAX) {
159 		EPRINTLN("Filename exceeds maximum length.");
160 		return (NULL);
161 	}
162 
163 	res = malloc(base_len + ext_len + 1);
164 	if (res == NULL) {
165 		EPRINTLN("Failed to allocate memory: %s", strerror(errno));
166 		return (NULL);
167 	}
168 
169 	memcpy(res, base_str, base_len);
170 	memcpy(res + base_len, ext, ext_len);
171 	res[base_len + ext_len] = 0;
172 
173 	return (res);
174 }
175 
176 void
177 destroy_restore_state(struct restore_state *rstate)
178 {
179 	if (rstate == NULL) {
180 		EPRINTLN("Attempting to destroy NULL restore struct.");
181 		return;
182 	}
183 
184 	if (rstate->kdata_map != MAP_FAILED)
185 		munmap(rstate->kdata_map, rstate->kdata_len);
186 
187 	if (rstate->kdata_fd > 0)
188 		close(rstate->kdata_fd);
189 	if (rstate->vmmem_fd > 0)
190 		close(rstate->vmmem_fd);
191 
192 	if (rstate->meta_root_obj != NULL)
193 		ucl_object_unref(rstate->meta_root_obj);
194 	if (rstate->meta_parser != NULL)
195 		ucl_parser_free(rstate->meta_parser);
196 }
197 
198 static int
199 load_vmmem_file(const char *filename, struct restore_state *rstate)
200 {
201 	struct stat sb;
202 	int err;
203 
204 	rstate->vmmem_fd = open(filename, O_RDONLY);
205 	if (rstate->vmmem_fd < 0) {
206 		perror("Failed to open restore file");
207 		return (-1);
208 	}
209 
210 	err = fstat(rstate->vmmem_fd, &sb);
211 	if (err < 0) {
212 		perror("Failed to stat restore file");
213 		goto err_load_vmmem;
214 	}
215 
216 	if (sb.st_size == 0) {
217 		fprintf(stderr, "Restore file is empty.\n");
218 		goto err_load_vmmem;
219 	}
220 
221 	rstate->vmmem_len = sb.st_size;
222 
223 	return (0);
224 
225 err_load_vmmem:
226 	if (rstate->vmmem_fd > 0)
227 		close(rstate->vmmem_fd);
228 	return (-1);
229 }
230 
231 static int
232 load_kdata_file(const char *filename, struct restore_state *rstate)
233 {
234 	struct stat sb;
235 	int err;
236 
237 	rstate->kdata_fd = open(filename, O_RDONLY);
238 	if (rstate->kdata_fd < 0) {
239 		perror("Failed to open kernel data file");
240 		return (-1);
241 	}
242 
243 	err = fstat(rstate->kdata_fd, &sb);
244 	if (err < 0) {
245 		perror("Failed to stat kernel data file");
246 		goto err_load_kdata;
247 	}
248 
249 	if (sb.st_size == 0) {
250 		fprintf(stderr, "Kernel data file is empty.\n");
251 		goto err_load_kdata;
252 	}
253 
254 	rstate->kdata_len = sb.st_size;
255 	rstate->kdata_map = mmap(NULL, rstate->kdata_len, PROT_READ,
256 				 MAP_SHARED, rstate->kdata_fd, 0);
257 	if (rstate->kdata_map == MAP_FAILED) {
258 		perror("Failed to map restore file");
259 		goto err_load_kdata;
260 	}
261 
262 	return (0);
263 
264 err_load_kdata:
265 	if (rstate->kdata_fd > 0)
266 		close(rstate->kdata_fd);
267 	return (-1);
268 }
269 
270 static int
271 load_metadata_file(const char *filename, struct restore_state *rstate)
272 {
273 	ucl_object_t *obj;
274 	struct ucl_parser *parser;
275 	int err;
276 
277 	parser = ucl_parser_new(UCL_PARSER_DEFAULT);
278 	if (parser == NULL) {
279 		fprintf(stderr, "Failed to initialize UCL parser.\n");
280 		err = -1;
281 		goto err_load_metadata;
282 	}
283 
284 	err = ucl_parser_add_file(parser, filename);
285 	if (err == 0) {
286 		fprintf(stderr, "Failed to parse metadata file: '%s'\n",
287 			filename);
288 		err = -1;
289 		goto err_load_metadata;
290 	}
291 
292 	obj = ucl_parser_get_object(parser);
293 	if (obj == NULL) {
294 		fprintf(stderr, "Failed to parse object.\n");
295 		err = -1;
296 		goto err_load_metadata;
297 	}
298 
299 	rstate->meta_parser = parser;
300 	rstate->meta_root_obj = (ucl_object_t *)obj;
301 
302 	return (0);
303 
304 err_load_metadata:
305 	if (parser != NULL)
306 		ucl_parser_free(parser);
307 	return (err);
308 }
309 
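/*
 * Open the three files that make up a snapshot: the guest memory file
 * named by 'filename', plus the "<filename>.kern" kernel data file and
 * the "<filename>.meta" JSON metadata file derived from it.
 */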
310 int
311 load_restore_file(const char *filename, struct restore_state *rstate)
312 {
313 	int err = 0;
314 	char *kdata_filename = NULL, *meta_filename = NULL;
315 
316 	assert(filename != NULL);
317 	assert(rstate != NULL);
318 
319 	memset(rstate, 0, sizeof(*rstate));
320 	rstate->kdata_map = MAP_FAILED;
321 
322 	err = load_vmmem_file(filename, rstate);
323 	if (err != 0) {
324 		fprintf(stderr, "Failed to load guest RAM file.\n");
325 		goto err_restore;
326 	}
327 
328 	kdata_filename = strcat_extension(filename, ".kern");
329 	if (kdata_filename == NULL) {
330 		fprintf(stderr, "Failed to construct kernel data filename.\n");
331 		goto err_restore;
332 	}
333 
334 	err = load_kdata_file(kdata_filename, rstate);
335 	if (err != 0) {
336 		fprintf(stderr, "Failed to load guest kernel data file.\n");
337 		goto err_restore;
338 	}
339 
340 	meta_filename = strcat_extension(filename, ".meta");
341 	if (meta_filename == NULL) {
342 		fprintf(stderr, "Failed to construct kernel metadata filename.\n");
343 		goto err_restore;
344 	}
345 
346 	err = load_metadata_file(meta_filename, rstate);
347 	if (err != 0) {
348 		fprintf(stderr, "Failed to load guest metadata file.\n");
349 		goto err_restore;
350 	}
351 
352 	return (0);
353 
354 err_restore:
355 	destroy_restore_state(rstate);
356 	if (kdata_filename != NULL)
357 		free(kdata_filename);
358 	if (meta_filename != NULL)
359 		free(meta_filename);
360 	return (-1);
361 }
362 
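/*
 * Helpers for extracting typed values from a parsed UCL/JSON object;
 * both return 'ret' from the enclosing function if the key is missing
 * or cannot be converted.  Typical use (illustrative only):
 *
 *	int64_t size;
 *	JSON_GET_INT_OR_RETURN(JSON_SIZE_KEY, obj, &size, NULL);
 */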
363 #define JSON_GET_INT_OR_RETURN(key, obj, result_ptr, ret)			\
364 do {										\
365 	const ucl_object_t *obj__;						\
366 	obj__ = ucl_object_lookup(obj, key);					\
367 	if (obj__ == NULL) {							\
368 		fprintf(stderr, "Missing key: '%s'\n", key);			\
369 		return (ret);							\
370 	}									\
371 	if (!ucl_object_toint_safe(obj__, result_ptr)) {			\
372 		fprintf(stderr, "Cannot convert '%s' value to int.\n", key);	\
373 		return (ret);							\
374 	}									\
375 } while(0)
376 
377 #define JSON_GET_STRING_OR_RETURN(key, obj, result_ptr, ret)			\
378 do {										\
379 	const ucl_object_t *obj__;						\
380 	obj__ = ucl_object_lookup(obj, key);					\
381 	if (obj__ == NULL) {							\
382 		fprintf(stderr, "Missing key: '%s'\n", key);			\
383 		return (ret);							\
384 	}									\
385 	if (!ucl_object_tostring_safe(obj__, result_ptr)) {			\
386 		fprintf(stderr, "Cannot convert '%s' value to string.\n", key);	\
387 		return (ret);							\
388 	}									\
389 } while(0)
390 
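/*
 * If the metadata entry 'obj' describes the device or kernel structure
 * named 'dev_name', return a pointer into the mapped kernel data file
 * where its snapshot data starts and store the data length in
 * 'data_size'; return NULL otherwise.
 */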
391 static void *
392 lookup_check_dev(const char *dev_name, struct restore_state *rstate,
393 		 const ucl_object_t *obj, size_t *data_size)
394 {
395 	const char *snapshot_req;
396 	int64_t size, file_offset;
397 
398 	snapshot_req = NULL;
399 	JSON_GET_STRING_OR_RETURN(JSON_SNAPSHOT_REQ_KEY, obj,
400 				  &snapshot_req, NULL);
401 	assert(snapshot_req != NULL);
402 	if (!strcmp(snapshot_req, dev_name)) {
403 		JSON_GET_INT_OR_RETURN(JSON_SIZE_KEY, obj,
404 				       &size, NULL);
405 		assert(size >= 0);
406 
407 		JSON_GET_INT_OR_RETURN(JSON_FILE_OFFSET_KEY, obj,
408 				       &file_offset, NULL);
409 		assert(file_offset >= 0);
410 		assert((uint64_t)file_offset + size <= rstate->kdata_len);
411 
412 		*data_size = (size_t)size;
413 		return ((uint8_t *)rstate->kdata_map + file_offset);
414 	}
415 
416 	return (NULL);
417 }
418 
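/*
 * Walk the metadata array identified by 'key' (devices or kernel
 * structs) and return the snapshot data of the entry named 'dev_name',
 * if present.
 */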
419 static void *
420 lookup_dev(const char *dev_name, const char *key, struct restore_state *rstate,
421     size_t *data_size)
422 {
423 	const ucl_object_t *devs = NULL, *obj = NULL;
424 	ucl_object_iter_t it = NULL;
425 	void *ret;
426 
427 	devs = ucl_object_lookup(rstate->meta_root_obj, key);
428 	if (devs == NULL) {
429 		fprintf(stderr, "Failed to find '%s' object.\n",
430 			key);
431 		return (NULL);
432 	}
433 
434 	if (ucl_object_type(devs) != UCL_ARRAY) {
435 		fprintf(stderr, "Object '%s' is not an array.\n",
436 			key);
437 		return (NULL);
438 	}
439 
440 	while ((obj = ucl_object_iterate(devs, &it, true)) != NULL) {
441 		ret = lookup_check_dev(dev_name, rstate, obj, data_size);
442 		if (ret != NULL)
443 			return (ret);
444 	}
445 
446 	return (NULL);
447 }
448 
449 static const ucl_object_t *
450 lookup_basic_metadata_object(struct restore_state *rstate)
451 {
452 	const ucl_object_t *basic_meta_obj = NULL;
453 
454 	basic_meta_obj = ucl_object_lookup(rstate->meta_root_obj,
455 					   JSON_BASIC_METADATA_KEY);
456 	if (basic_meta_obj == NULL) {
457 		fprintf(stderr, "Failed to find '%s' object.\n",
458 			JSON_BASIC_METADATA_KEY);
459 		return (NULL);
460 	}
461 
462 	if (ucl_object_type(basic_meta_obj) != UCL_OBJECT) {
463 		fprintf(stderr, "Object '%s' is not a JSON object.\n",
464 		JSON_BASIC_METADATA_KEY);
465 		return (NULL);
466 	}
467 
468 	return (basic_meta_obj);
469 }
470 
471 const char *
472 lookup_vmname(struct restore_state *rstate)
473 {
474 	const char *vmname;
475 	const ucl_object_t *obj;
476 
477 	obj = lookup_basic_metadata_object(rstate);
478 	if (obj == NULL)
479 		return (NULL);
480 
481 	JSON_GET_STRING_OR_RETURN(JSON_VMNAME_KEY, obj, &vmname, NULL);
482 	return (vmname);
483 }
484 
485 int
486 lookup_memflags(struct restore_state *rstate)
487 {
488 	int64_t memflags;
489 	const ucl_object_t *obj;
490 
491 	obj = lookup_basic_metadata_object(rstate);
492 	if (obj == NULL)
493 		return (0);
494 
495 	JSON_GET_INT_OR_RETURN(JSON_MEMFLAGS_KEY, obj, &memflags, 0);
496 
497 	return ((int)memflags);
498 }
499 
500 size_t
501 lookup_memsize(struct restore_state *rstate)
502 {
503 	int64_t memsize;
504 	const ucl_object_t *obj;
505 
506 	obj = lookup_basic_metadata_object(rstate);
507 	if (obj == NULL)
508 		return (0);
509 
510 	JSON_GET_INT_OR_RETURN(JSON_MEMSIZE_KEY, obj, &memsize, 0);
511 	if (memsize < 0)
512 		memsize = 0;
513 
514 	return ((size_t)memsize);
515 }
516 
517 
518 int
519 lookup_guest_ncpus(struct restore_state *rstate)
520 {
521 	int64_t ncpus;
522 	const ucl_object_t *obj;
523 
524 	obj = lookup_basic_metadata_object(rstate);
525 	if (obj == NULL)
526 		return (0);
527 
528 	JSON_GET_INT_OR_RETURN(JSON_NCPUS_KEY, obj, &ncpus, 0);
529 	return ((int)ncpus);
530 }
531 
532 static void
533 winch_handler(int signal __unused)
534 {
535 #ifdef TIOCGWINSZ
536 	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
537 #endif /* TIOCGWINSZ */
538 }
539 
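/*
 * Render a single-line progress bar scaled to the terminal width, e.g.
 * "[1.500GiB / 4.000GiB] |####/____|" (layout approximate).
 */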
540 static int
541 print_progress(size_t crtval, const size_t maxval)
542 {
543 	size_t rc;
544 	double crtval_gb, maxval_gb;
545 	size_t i, win_width, prog_start, prog_done, prog_end;
546 	int mval_len;
547 
548 	static char prog_buf[PROG_BUF_SZ];
549 	static const size_t len = sizeof(prog_buf);
550 
551 	static size_t div;
552 	static const char *div_str;
553 
554 	static char wip_bar[] = { '/', '-', '\\', '|' };
555 	static int wip_idx = 0;
556 
557 	if (maxval == 0) {
558 		printf("[0B / 0B]\r\n");
559 		return (0);
560 	}
561 
562 	if (crtval > maxval)
563 		crtval = maxval;
564 
565 	if (maxval > 10 * GB) {
566 		div = GB;
567 		div_str = "GiB";
568 	} else if (maxval > 10 * MB) {
569 		div = MB;
570 		div_str = "MiB";
571 	} else {
572 		div = KB;
573 		div_str = "KiB";
574 	}
575 
576 	crtval_gb = (double) crtval / div;
577 	maxval_gb = (double) maxval / div;
578 
579 	rc = snprintf(prog_buf, len, "%.03lf", maxval_gb);
580 	if (rc >= len) {
581 		fprintf(stderr, "Maxval too big\n");
582 		return (-1);
583 	}
584 	mval_len = rc;
585 
586 	rc = snprintf(prog_buf, len, "\r[%*.03lf%s / %.03lf%s] |",
587 		mval_len, crtval_gb, div_str, maxval_gb, div_str);
588 
589 	if (rc >= len) {
590 		fprintf(stderr, "Buffer too small to print progress\n");
591 		return (-1);
592 	}
593 
594 	win_width = min(winsize.ws_col, len);
595 	prog_start = rc;
596 
597 	if (prog_start < (win_width - 2)) {
598 		prog_end = win_width - prog_start - 2;
599 		prog_done = prog_end * (crtval_gb / maxval_gb);
600 
601 		for (i = prog_start; i < prog_start + prog_done; i++)
602 			prog_buf[i] = '#';
603 
604 		if (crtval != maxval) {
605 			prog_buf[i] = wip_bar[wip_idx];
606 			wip_idx = (wip_idx + 1) % sizeof(wip_bar);
607 			i++;
608 		} else {
609 			prog_buf[i++] = '#';
610 		}
611 
612 		for (; i < win_width - 2; i++)
613 			prog_buf[i] = '_';
614 
615 		prog_buf[win_width - 2] = '|';
616 	}
617 
618 	prog_buf[win_width - 1] = '\0';
619 	write(STDOUT_FILENO, prog_buf, win_width);
620 
621 	return (0);
622 }
623 
624 static void *
625 snapshot_spinner_cb(void *arg)
626 {
627 	int rc;
628 	size_t crtval, maxval, total;
629 	struct spinner_info *si;
630 	struct timespec ts;
631 
632 	si = arg;
633 	if (si == NULL)
634 		pthread_exit(NULL);
635 
636 	ts.tv_sec = 0;
637 	ts.tv_nsec = 50 * 1000 * 1000; /* 50 ms sleep time */
638 
639 	do {
640 		crtval = *si->crtval;
641 		maxval = si->maxval;
642 		total = si->total;
643 
644 		rc = print_progress(crtval, total);
645 		if (rc < 0) {
646 			fprintf(stderr, "Failed to print progress\n");
647 			break;
648 		}
649 
650 		nanosleep(&ts, NULL);
651 	} while (crtval < maxval);
652 
653 	pthread_exit(NULL);
654 	return (NULL);
655 }
656 
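/*
 * Copy one contiguous range of guest memory between 'src' and the
 * snapshot file: write it out when 'op_wr' is true, read it back in
 * otherwise.  'foff' is the offset of this range within the file and
 * 'totalmem' is used only to scale the optional progress display.
 */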
657 static int
658 vm_snapshot_mem_part(const int snapfd, const size_t foff, void *src,
659 		     const size_t len, const size_t totalmem, const bool op_wr)
660 {
661 	int rc;
662 	size_t part_done, todo, rem;
663 	ssize_t done;
664 	bool show_progress;
665 	pthread_t spinner_th;
666 	struct spinner_info *si;
667 
668 	if (lseek(snapfd, foff, SEEK_SET) < 0) {
669 		perror("Failed to change file offset");
670 		return (-1);
671 	}
672 
673 	show_progress = false;
674 	if (isatty(STDIN_FILENO) && (winsize.ws_col != 0))
675 		show_progress = true;
676 
677 	part_done = foff;
678 	rem = len;
679 
680 	if (show_progress) {
681 		si = &(struct spinner_info) {
682 			.crtval = &part_done,
683 			.maxval = foff + len,
684 			.total = totalmem
685 		};
686 
687 		rc = pthread_create(&spinner_th, 0, snapshot_spinner_cb, si);
688 		if (rc) {
689 			perror("Unable to create spinner thread");
690 			show_progress = false;
691 		}
692 	}
693 
694 	while (rem > 0) {
695 		if (show_progress)
696 			todo = min(SNAPSHOT_CHUNK, rem);
697 		else
698 			todo = rem;
699 
700 		if (op_wr)
701 			done = write(snapfd, src, todo);
702 		else
703 			done = read(snapfd, src, todo);
704 		if (done < 0) {
705 			perror(op_wr ? "Failed to write to file" : "Failed to read from file");
706 			return (-1);
707 		}
708 
709 		src = (uint8_t *)src + done;
710 		part_done += done;
711 		rem -= done;
712 	}
713 
714 	if (show_progress) {
715 		rc = pthread_join(spinner_th, NULL);
716 		if (rc)
717 			perror("Unable to end spinner thread");
718 	}
719 
720 	return (0);
721 }
722 
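/*
 * Save or restore all of guest memory (lowmem below 4 GiB plus highmem
 * above it) and return the number of bytes handled, or 0 on error.
 */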
723 static size_t
724 vm_snapshot_mem(struct vmctx *ctx, int snapfd, size_t memsz, const bool op_wr)
725 {
726 	int ret;
727 	size_t lowmem, highmem, totalmem;
728 	char *baseaddr;
729 
730 	ret = vm_get_guestmem_from_ctx(ctx, &baseaddr, &lowmem, &highmem);
731 	if (ret) {
732 		fprintf(stderr, "%s: unable to retrieve guest memory size\r\n",
733 			__func__);
734 		return (0);
735 	}
736 	totalmem = lowmem + highmem;
737 
738 	if ((op_wr == false) && (totalmem != memsz)) {
739 		fprintf(stderr, "%s: mem size mismatch: %ld vs %ld\r\n",
740 			__func__, totalmem, memsz);
741 		return (0);
742 	}
743 
744 	winsize.ws_col = 80;
745 #ifdef TIOCGWINSZ
746 	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
747 #endif /* TIOCGWINSZ */
748 	old_winch_handler = signal(SIGWINCH, winch_handler);
749 
750 	ret = vm_snapshot_mem_part(snapfd, 0, baseaddr, lowmem,
751 		totalmem, op_wr);
752 	if (ret) {
753 		fprintf(stderr, "%s: Could not %s lowmem\r\n",
754 			__func__, op_wr ? "write" : "read");
755 		totalmem = 0;
756 		goto done;
757 	}
758 
759 	if (highmem == 0)
760 		goto done;
761 
762 	ret = vm_snapshot_mem_part(snapfd, lowmem, baseaddr + 4*GB,
763 		highmem, totalmem, op_wr);
764 	if (ret) {
765 		fprintf(stderr, "%s: Could not %s highmem\r\n",
766 		        __func__, op_wr ? "write" : "read");
767 		totalmem = 0;
768 		goto done;
769 	}
770 
771 done:
772 	printf("\r\n");
773 	signal(SIGWINCH, old_winch_handler);
774 
775 	return (totalmem);
776 }
777 
778 int
779 restore_vm_mem(struct vmctx *ctx, struct restore_state *rstate)
780 {
781 	size_t restored;
782 
783 	restored = vm_snapshot_mem(ctx, rstate->vmmem_fd, rstate->vmmem_len,
784 				   false);
785 
786 	if (restored != rstate->vmmem_len)
787 		return (-1);
788 
789 	return (0);
790 }
791 
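/*
 * Restore every kernel structure listed in snapshot_kern_structs from
 * the mapped kernel data file, exiting on a missing or zero-sized
 * entry or on a failed restore.
 */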
792 int
793 vm_restore_kern_structs(struct vmctx *ctx, struct restore_state *rstate)
794 {
795 	for (unsigned i = 0; i < nitems(snapshot_kern_structs); i++) {
796 		const struct vm_snapshot_kern_info *info;
797 		struct vm_snapshot_meta *meta;
798 		void *data;
799 		size_t size;
800 
801 		info = &snapshot_kern_structs[i];
802 		data = lookup_dev(info->struct_name, JSON_KERNEL_ARR_KEY, rstate, &size);
803 		if (data == NULL)
804 			errx(EX_DATAERR, "Cannot find kern struct %s",
805 			    info->struct_name);
806 
807 		if (size == 0)
808 			errx(EX_DATAERR, "data with zero size for %s",
809 			    info->struct_name);
810 
811 		meta = &(struct vm_snapshot_meta) {
812 			.dev_name = info->struct_name,
813 			.dev_req  = info->req,
814 
815 			.buffer.buf_start = data,
816 			.buffer.buf_size = size,
817 
818 			.buffer.buf = data,
819 			.buffer.buf_rem = size,
820 
821 			.op = VM_SNAPSHOT_RESTORE,
822 		};
823 
824 		if (vm_snapshot_req(ctx, meta))
825 			err(EX_DATAERR, "Failed to restore %s",
826 			    info->struct_name);
827 	}
828 	return (0);
829 }
830 
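/*
 * Restore a single emulated device: locate its data in the kernel data
 * file via the metadata, then hand that buffer to the device's snapshot
 * callback with op set to VM_SNAPSHOT_RESTORE.
 */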
831 static int
832 vm_restore_device(struct restore_state *rstate, vm_snapshot_dev_cb func,
833     const char *name, void *data)
834 {
835 	void *dev_ptr;
836 	size_t dev_size;
837 	int ret;
838 	struct vm_snapshot_meta *meta;
839 
840 	dev_ptr = lookup_dev(name, JSON_DEV_ARR_KEY, rstate, &dev_size);
841 
842 	if (dev_ptr == NULL) {
843 		EPRINTLN("Failed to lookup dev: %s", name);
844 		return (EINVAL);
845 	}
846 
847 	if (dev_size == 0) {
848 		EPRINTLN("Restore device size is 0: %s", name);
849 		return (EINVAL);
850 	}
851 
852 	meta = &(struct vm_snapshot_meta) {
853 		.dev_name = name,
854 		.dev_data = data,
855 
856 		.buffer.buf_start = dev_ptr,
857 		.buffer.buf_size = dev_size,
858 
859 		.buffer.buf = dev_ptr,
860 		.buffer.buf_rem = dev_size,
861 
862 		.op = VM_SNAPSHOT_RESTORE,
863 	};
864 
865 	ret = func(meta);
866 	if (ret != 0) {
867 		EPRINTLN("Failed to restore dev: %s %d", name, ret);
868 		return (ret);
869 	}
870 
871 	return (0);
872 }
873 
874 int
875 vm_restore_devices(struct restore_state *rstate)
876 {
877 	int ret;
878 	struct pci_devinst *pdi = NULL;
879 
880 	while ((pdi = pci_next(pdi)) != NULL) {
881 		ret = vm_restore_device(rstate, pci_snapshot, pdi->pi_name, pdi);
882 		if (ret)
883 			return (ret);
884 	}
885 
886 #ifdef __amd64__
887 	ret = vm_restore_device(rstate, atkbdc_snapshot, "atkbdc", NULL);
888 #else
889 	ret = 0;
890 #endif
891 	return (ret);
892 }
893 
894 int
895 vm_pause_devices(void)
896 {
897 	int ret;
898 	struct pci_devinst *pdi = NULL;
899 
900 	while ((pdi = pci_next(pdi)) != NULL) {
901 		ret = pci_pause(pdi);
902 		if (ret) {
903 			EPRINTLN("Cannot pause dev %s: %d", pdi->pi_name, ret);
904 			return (ret);
905 		}
906 	}
907 
908 	return (0);
909 }
910 
911 int
912 vm_resume_devices(void)
913 {
914 	int ret;
915 	struct pci_devinst *pdi = NULL;
916 
917 	while ((pdi = pci_next(pdi)) != NULL) {
918 		ret = pci_resume(pdi);
919 		if (ret) {
920 			EPRINTLN("Cannot resume '%s': %d", pdi->pi_name, ret);
921 			return (ret);
922 		}
923 	}
924 
925 	return (0);
926 }
927 
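/*
 * Snapshot one kernel structure into 'meta', append the resulting data
 * to the kernel data file and emit its metadata entry (name, size and
 * file offset) through the libxo handle.
 */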
928 static int
929 vm_save_kern_struct(struct vmctx *ctx, int data_fd, xo_handle_t *xop,
930     const char *array_key, struct vm_snapshot_meta *meta, off_t *offset)
931 {
932 	int ret;
933 	size_t data_size;
934 	ssize_t write_cnt;
935 
936 	ret = vm_snapshot_req(ctx, meta);
937 	if (ret != 0) {
938 		fprintf(stderr, "%s: Failed to snapshot struct %s\r\n",
939 			__func__, meta->dev_name);
940 		ret = -1;
941 		goto done;
942 	}
943 
944 	data_size = vm_get_snapshot_size(meta);
945 
946 	/* XXX-MJ no handling for short writes. */
947 	write_cnt = write(data_fd, meta->buffer.buf_start, data_size);
948 	if (write_cnt < 0 || (size_t)write_cnt != data_size) {
949 		perror("Failed to write all snapshotted data.");
950 		ret = -1;
951 		goto done;
952 	}
953 
954 	/* Write metadata. */
955 	xo_open_instance_h(xop, array_key);
956 	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n",
957 	    meta->dev_name);
958 	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
959 	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
960 	xo_close_instance_h(xop, array_key);
961 
962 	*offset += data_size;
963 
964 done:
965 	return (ret);
966 }
967 
968 static int
969 vm_save_kern_structs(struct vmctx *ctx, int data_fd, xo_handle_t *xop)
970 {
971 	int ret, error;
972 	size_t buf_size, i, offset;
973 	char *buffer;
974 	struct vm_snapshot_meta *meta;
975 
976 	error = 0;
977 	offset = 0;
978 	buf_size = SNAPSHOT_BUFFER_SIZE;
979 
980 	buffer = malloc(SNAPSHOT_BUFFER_SIZE * sizeof(char));
981 	if (buffer == NULL) {
982 		error = ENOMEM;
983 		perror("Failed to allocate memory for snapshot buffer");
984 		goto err_vm_snapshot_kern_data;
985 	}
986 
987 	meta = &(struct vm_snapshot_meta) {
988 		.buffer.buf_start = buffer,
989 		.buffer.buf_size = buf_size,
990 
991 		.op = VM_SNAPSHOT_SAVE,
992 	};
993 
994 	xo_open_list_h(xop, JSON_KERNEL_ARR_KEY);
995 	for (i = 0; i < nitems(snapshot_kern_structs); i++) {
996 		meta->dev_name = snapshot_kern_structs[i].struct_name;
997 		meta->dev_req  = snapshot_kern_structs[i].req;
998 
999 		memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
1000 		meta->buffer.buf = meta->buffer.buf_start;
1001 		meta->buffer.buf_rem = meta->buffer.buf_size;
1002 
1003 		ret = vm_save_kern_struct(ctx, data_fd, xop,
1004 		    JSON_KERNEL_ARR_KEY, meta, &offset);
1005 		if (ret != 0) {
1006 			error = -1;
1007 			goto err_vm_snapshot_kern_data;
1008 		}
1009 	}
1010 	xo_close_list_h(xop, JSON_KERNEL_ARR_KEY);
1011 
1012 err_vm_snapshot_kern_data:
1013 	if (buffer != NULL)
1014 		free(buffer);
1015 	return (error);
1016 }
1017 
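/*
 * Emit the "basic metadata" container describing the VM.  The resulting
 * JSON looks roughly like this (values are illustrative):
 *
 *	"basic metadata": {
 *	    "ncpus": 2,
 *	    "vmname": "myvm",
 *	    "memsize": 1073741824,
 *	    "memflags": 0
 *	}
 */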
1018 static int
1019 vm_snapshot_basic_metadata(struct vmctx *ctx, xo_handle_t *xop, size_t memsz)
1020 {
1021 
1022 	xo_open_container_h(xop, JSON_BASIC_METADATA_KEY);
1023 	xo_emit_h(xop, "{:" JSON_NCPUS_KEY "/%d}\n", guest_ncpus);
1024 	xo_emit_h(xop, "{:" JSON_VMNAME_KEY "/%s}\n", vm_get_name(ctx));
1025 	xo_emit_h(xop, "{:" JSON_MEMSIZE_KEY "/%lu}\n", memsz);
1026 	xo_emit_h(xop, "{:" JSON_MEMFLAGS_KEY "/%d}\n", vm_get_memflags(ctx));
1027 	xo_close_container_h(xop, JSON_BASIC_METADATA_KEY);
1028 
1029 	return (0);
1030 }
1031 
1032 static int
1033 vm_snapshot_dev_write_data(int data_fd, xo_handle_t *xop, const char *array_key,
1034 			   struct vm_snapshot_meta *meta, off_t *offset)
1035 {
1036 	ssize_t ret;
1037 	size_t data_size;
1038 
1039 	data_size = vm_get_snapshot_size(meta);
1040 
1041 	/* XXX-MJ no handling for short writes. */
1042 	ret = write(data_fd, meta->buffer.buf_start, data_size);
1043 	if (ret < 0 || (size_t)ret != data_size) {
1044 		perror("Failed to write all snapshotted data.");
1045 		return (-1);
1046 	}
1047 
1048 	/* Write metadata. */
1049 	xo_open_instance_h(xop, array_key);
1050 	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n", meta->dev_name);
1051 	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
1052 	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
1053 	xo_close_instance_h(xop, array_key);
1054 
1055 	*offset += data_size;
1056 
1057 	return (0);
1058 }
1059 
1060 static int
1061 vm_snapshot_device(vm_snapshot_dev_cb func, const char *dev_name,
1062     void *devdata, int data_fd, xo_handle_t *xop,
1063     struct vm_snapshot_meta *meta, off_t *offset)
1064 {
1065 	int ret;
1066 
1067 	memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
1068 	meta->buffer.buf = meta->buffer.buf_start;
1069 	meta->buffer.buf_rem = meta->buffer.buf_size;
1070 	meta->dev_name = dev_name;
1071 	meta->dev_data = devdata;
1072 
1073 	ret = func(meta);
1074 	if (ret != 0) {
1075 		EPRINTLN("Failed to snapshot %s; ret=%d", dev_name, ret);
1076 		return (ret);
1077 	}
1078 
1079 	ret = vm_snapshot_dev_write_data(data_fd, xop, JSON_DEV_ARR_KEY, meta,
1080 					 offset);
1081 	if (ret != 0)
1082 		return (ret);
1083 
1084 	return (0);
1085 }
1086 
1087 static int
1088 vm_snapshot_devices(int data_fd, xo_handle_t *xop)
1089 {
1090 	int ret;
1091 	off_t offset;
1092 	void *buffer;
1093 	size_t buf_size;
1094 	struct vm_snapshot_meta *meta;
1095 	struct pci_devinst *pdi;
1096 
1097 	buf_size = SNAPSHOT_BUFFER_SIZE;
1098 
1099 	offset = lseek(data_fd, 0, SEEK_CUR);
1100 	if (offset < 0) {
1101 		perror("Failed to get data file current offset.");
1102 		return (-1);
1103 	}
1104 
1105 	buffer = malloc(buf_size);
1106 	if (buffer == NULL) {
1107 		perror("Failed to allocate memory for snapshot buffer");
1108 		ret = ENOMEM;
1109 		goto snapshot_err;
1110 	}
1111 
1112 	meta = &(struct vm_snapshot_meta) {
1113 		.buffer.buf_start = buffer,
1114 		.buffer.buf_size = buf_size,
1115 
1116 		.op = VM_SNAPSHOT_SAVE,
1117 	};
1118 
1119 	xo_open_list_h(xop, JSON_DEV_ARR_KEY);
1120 
1121 	/* Save PCI devices */
1122 	pdi = NULL;
1123 	while ((pdi = pci_next(pdi)) != NULL) {
1124 		ret = vm_snapshot_device(pci_snapshot, pdi->pi_name, pdi,
1125 		    data_fd, xop, meta, &offset);
1126 		if (ret != 0)
1127 			goto snapshot_err;
1128 	}
1129 
1130 #ifdef __amd64__
1131 	ret = vm_snapshot_device(atkbdc_snapshot, "atkbdc", NULL,
1132 	    data_fd, xop, meta, &offset);
1133 #else
1134 	ret = 0;
1135 #endif
1136 
1137 	xo_close_list_h(xop, JSON_DEV_ARR_KEY);
1138 
1139 snapshot_err:
1140 	if (buffer != NULL)
1141 		free(buffer);
1142 	return (ret);
1143 }
1144 
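/*
 * Called when a vCPU starts running.  Mark it active and, if a
 * checkpoint is already in progress, park it until the checkpoint
 * completes so that it is accounted for in vcpus_suspended.
 */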
1145 void
1146 checkpoint_cpu_add(int vcpu)
1147 {
1148 
1149 	pthread_mutex_lock(&vcpu_lock);
1150 	CPU_SET(vcpu, &vcpus_active);
1151 
1152 	if (checkpoint_active) {
1153 		CPU_SET(vcpu, &vcpus_suspended);
1154 		while (checkpoint_active)
1155 			pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
1156 		CPU_CLR(vcpu, &vcpus_suspended);
1157 	}
1158 	pthread_mutex_unlock(&vcpu_lock);
1159 }
1160 
1161 /*
1162  * When a vCPU is suspended for any reason, it calls
1163  * checkpoint_cpu_suspend(), which records that the vCPU is idle.
1164  * Before returning from suspension, checkpoint_cpu_resume() is
1165  * called; it pauses the vCPU thread until the checkpoint is
1166  * complete.  The reason for this two-step process is that vCPUs
1167  * might already be stopped in the debug server when a checkpoint
1168  * is requested.  This approach allows us to account for and handle
1169  * those vCPUs as well.
1170  */
1171 void
1172 checkpoint_cpu_suspend(int vcpu)
1173 {
1174 
1175 	pthread_mutex_lock(&vcpu_lock);
1176 	CPU_SET(vcpu, &vcpus_suspended);
1177 	if (checkpoint_active && CPU_CMP(&vcpus_active, &vcpus_suspended) == 0)
1178 		pthread_cond_signal(&vcpus_idle);
1179 	pthread_mutex_unlock(&vcpu_lock);
1180 }
1181 
1182 void
1183 checkpoint_cpu_resume(int vcpu)
1184 {
1185 
1186 	pthread_mutex_lock(&vcpu_lock);
1187 	while (checkpoint_active)
1188 		pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
1189 	CPU_CLR(vcpu, &vcpus_suspended);
1190 	pthread_mutex_unlock(&vcpu_lock);
1191 }
1192 
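/*
 * Suspend all vCPUs and wait until every active vCPU has reported
 * itself idle before letting the checkpoint proceed.
 */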
1193 static void
1194 vm_vcpu_pause(struct vmctx *ctx)
1195 {
1196 
1197 	pthread_mutex_lock(&vcpu_lock);
1198 	checkpoint_active = true;
1199 	vm_suspend_all_cpus(ctx);
1200 	while (CPU_CMP(&vcpus_active, &vcpus_suspended) != 0)
1201 		pthread_cond_wait(&vcpus_idle, &vcpu_lock);
1202 	pthread_mutex_unlock(&vcpu_lock);
1203 }
1204 
1205 static void
1206 vm_vcpu_resume(struct vmctx *ctx)
1207 {
1208 
1209 	pthread_mutex_lock(&vcpu_lock);
1210 	checkpoint_active = false;
1211 	pthread_mutex_unlock(&vcpu_lock);
1212 	vm_resume_all_cpus(ctx);
1213 	pthread_cond_broadcast(&vcpus_can_run);
1214 }
1215 
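/*
 * Write a full checkpoint: guest memory goes to 'checkpoint_file',
 * kernel and device state to "<checkpoint_file>.kern" and the JSON
 * metadata to "<checkpoint_file>.meta", all created relative to
 * 'fddir'.  vCPUs and devices are paused for the duration.  If
 * 'stop_vm' is true, the VM is destroyed and bhyve exits once the
 * checkpoint has been written.
 */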
1216 static int
1217 vm_checkpoint(struct vmctx *ctx, int fddir, const char *checkpoint_file,
1218     bool stop_vm)
1219 {
1220 	int fd_checkpoint = 0, kdata_fd = 0, fd_meta;
1221 	int ret = 0;
1222 	int error = 0;
1223 	size_t memsz;
1224 	xo_handle_t *xop = NULL;
1225 	char *meta_filename = NULL;
1226 	char *kdata_filename = NULL;
1227 	FILE *meta_file = NULL;
1228 
1229 	kdata_filename = strcat_extension(checkpoint_file, ".kern");
1230 	if (kdata_filename == NULL) {
1231 		fprintf(stderr, "Failed to construct kernel data filename.\n");
1232 		return (-1);
1233 	}
1234 
1235 	kdata_fd = openat(fddir, kdata_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
1236 	if (kdata_fd < 0) {
1237 		perror("Failed to open kernel data snapshot file.");
1238 		error = -1;
1239 		goto done;
1240 	}
1241 
1242 	fd_checkpoint = openat(fddir, checkpoint_file, O_RDWR | O_CREAT | O_TRUNC, 0700);
1243 
1244 	if (fd_checkpoint < 0) {
1245 		perror("Failed to create checkpoint file");
1246 		error = -1;
1247 		goto done;
1248 	}
1249 
1250 	meta_filename = strcat_extension(checkpoint_file, ".meta");
1251 	if (meta_filename == NULL) {
1252 		fprintf(stderr, "Failed to construct vm metadata filename.\n");
		error = -1;
1253 		goto done;
1254 	}
1255 
1256 	fd_meta = openat(fddir, meta_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
1257 	if (fd_meta != -1)
1258 		meta_file = fdopen(fd_meta, "w");
1259 	if (meta_file == NULL) {
1260 		perror("Failed to open vm metadata snapshot file.");
1261 		close(fd_meta);
		error = -1;
1262 		goto done;
1263 	}
1264 
1265 	xop = xo_create_to_file(meta_file, XO_STYLE_JSON, XOF_PRETTY);
1266 	if (xop == NULL) {
1267 		perror("Failed to get libxo handle on metadata file.");
		error = -1;
1268 		goto done;
1269 	}
1270 
1271 	vm_vcpu_pause(ctx);
1272 
1273 	ret = vm_pause_devices();
1274 	if (ret != 0) {
1275 		fprintf(stderr, "Could not pause devices\r\n");
1276 		error = ret;
1277 		goto done;
1278 	}
1279 
1280 	memsz = vm_snapshot_mem(ctx, fd_checkpoint, 0, true);
1281 	if (memsz == 0) {
1282 		perror("Could not write guest memory to file");
1283 		error = -1;
1284 		goto done;
1285 	}
1286 
1287 	ret = vm_snapshot_basic_metadata(ctx, xop, memsz);
1288 	if (ret != 0) {
1289 		fprintf(stderr, "Failed to snapshot vm basic metadata.\n");
1290 		error = -1;
1291 		goto done;
1292 	}
1293 
1294 	ret = vm_save_kern_structs(ctx, kdata_fd, xop);
1295 	if (ret != 0) {
1296 		fprintf(stderr, "Failed to snapshot vm kernel data.\n");
1297 		error = -1;
1298 		goto done;
1299 	}
1300 
1301 	ret = vm_snapshot_devices(kdata_fd, xop);
1302 	if (ret != 0) {
1303 		fprintf(stderr, "Failed to snapshot device state.\n");
1304 		error = -1;
1305 		goto done;
1306 	}
1307 
1308 	xo_finish_h(xop);
1309 
1310 	if (stop_vm) {
1311 		vm_destroy(ctx);
1312 		exit(0);
1313 	}
1314 
1315 done:
1316 	ret = vm_resume_devices();
1317 	if (ret != 0)
1318 		fprintf(stderr, "Could not resume devices\r\n");
1319 	vm_vcpu_resume(ctx);
1320 	if (fd_checkpoint > 0)
1321 		close(fd_checkpoint);
1322 	if (meta_filename != NULL)
1323 		free(meta_filename);
1324 	if (kdata_filename != NULL)
1325 		free(kdata_filename);
1326 	if (xop != NULL)
1327 		xo_destroy(xop);
1328 	if (meta_file != NULL)
1329 		fclose(meta_file);
1330 	if (kdata_fd > 0)
1331 		close(kdata_fd);
1332 	return (error);
1333 }
1334 
1335 static int
1336 handle_message(struct vmctx *ctx, nvlist_t *nvl)
1337 {
1338 	const char *cmd;
1339 	struct ipc_command **ipc_cmd;
1340 
1341 	if (!nvlist_exists_string(nvl, "cmd"))
1342 		return (EINVAL);
1343 
1344 	cmd = nvlist_get_string(nvl, "cmd");
1345 	IPC_COMMAND_FOREACH(ipc_cmd, ipc_cmd_set) {
1346 		if (strcmp(cmd, (*ipc_cmd)->name) == 0)
1347 			return ((*ipc_cmd)->handler(ctx, nvl));
1348 	}
1349 
1350 	return (EOPNOTSUPP);
1351 }
1352 
1353 /*
1354  * Listen for commands from bhyvectl
1355  */
1356 void *
1357 checkpoint_thread(void *param)
1358 {
1359 	int fd;
1360 	struct checkpoint_thread_info *thread_info;
1361 	nvlist_t *nvl;
1362 
1363 	pthread_set_name_np(pthread_self(), "checkpoint thread");
1364 	thread_info = (struct checkpoint_thread_info *)param;
1365 
1366 	while ((fd = accept(thread_info->socket_fd, NULL, NULL)) != -1) {
1367 		nvl = nvlist_recv(fd, 0);
1368 		if (nvl != NULL)
1369 			handle_message(thread_info->ctx, nvl);
1370 		else
1371 			EPRINTLN("nvlist_recv() failed: %s", strerror(errno));
1372 
1373 		close(fd);
1374 		nvlist_destroy(nvl);
1375 	}
1376 
1377 	return (NULL);
1378 }
1379 
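/*
 * IPC handler for the "checkpoint" command sent by bhyvectl (e.g. via
 * its --checkpoint/--suspend options).  The request nvlist carries the
 * snapshot file name, a "suspend" flag and a descriptor for the
 * directory in which the files are created.
 */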
1380 static int
1381 vm_do_checkpoint(struct vmctx *ctx, const nvlist_t *nvl)
1382 {
1383 	int error;
1384 
1385 	if (!nvlist_exists_string(nvl, "filename") ||
1386 	    !nvlist_exists_bool(nvl, "suspend") ||
1387 	    !nvlist_exists_descriptor(nvl, "fddir"))
1388 		error = EINVAL;
1389 	else
1390 		error = vm_checkpoint(ctx,
1391 		    nvlist_get_descriptor(nvl, "fddir"),
1392 		    nvlist_get_string(nvl, "filename"),
1393 		    nvlist_get_bool(nvl, "suspend"));
1394 
1395 	return (error);
1396 }
1397 IPC_COMMAND(ipc_cmd_set, checkpoint, vm_do_checkpoint);
1398 
1399 /*
1400  * Create the listening socket for IPC with bhyvectl
1401  */
1402 int
1403 init_checkpoint_thread(struct vmctx *ctx)
1404 {
1405 	struct checkpoint_thread_info *checkpoint_info = NULL;
1406 	struct sockaddr_un addr;
1407 	int socket_fd;
1408 	pthread_t checkpoint_pthread;
1409 	int err;
1410 #ifndef WITHOUT_CAPSICUM
1411 	cap_rights_t rights;
1412 #endif
1413 
1414 	memset(&addr, 0, sizeof(addr));
1415 
1416 	socket_fd = socket(PF_UNIX, SOCK_STREAM, 0);
1417 	if (socket_fd < 0) {
1418 		EPRINTLN("Socket creation failed: %s", strerror(errno));
1419 		err = -1;
1420 		goto fail;
1421 	}
1422 
1423 	addr.sun_family = AF_UNIX;
1424 
1425 	snprintf(addr.sun_path, sizeof(addr.sun_path), "%s%s",
1426 		 BHYVE_RUN_DIR, vm_get_name(ctx));
1427 	addr.sun_len = SUN_LEN(&addr);
1428 	unlink(addr.sun_path);
1429 
1430 	if (bind(socket_fd, (struct sockaddr *)&addr, addr.sun_len) != 0) {
1431 		EPRINTLN("Failed to bind socket \"%s\": %s\n",
1432 		    addr.sun_path, strerror(errno));
1433 		err = -1;
1434 		goto fail;
1435 	}
1436 
1437 	if (listen(socket_fd, 10) < 0) {
1438 		EPRINTLN("ipc socket listen: %s\n", strerror(errno));
1439 		err = errno;
1440 		goto fail;
1441 	}
1442 
1443 #ifndef WITHOUT_CAPSICUM
1444 	cap_rights_init(&rights, CAP_ACCEPT, CAP_READ, CAP_RECV, CAP_WRITE,
1445 	    CAP_SEND, CAP_GETSOCKOPT);
1446 
1447 	if (caph_rights_limit(socket_fd, &rights) == -1)
1448 		errx(EX_OSERR, "Unable to apply rights for sandbox");
1449 #endif
1450 	checkpoint_info = calloc(1, sizeof(*checkpoint_info));
1451 	checkpoint_info->ctx = ctx;
1452 	checkpoint_info->socket_fd = socket_fd;
1453 
1454 	err = pthread_create(&checkpoint_pthread, NULL, checkpoint_thread,
1455 		checkpoint_info);
1456 	if (err != 0)
1457 		goto fail;
1458 
1459 	return (0);
1460 fail:
1461 	free(checkpoint_info);
1462 	if (socket_fd > 0)
1463 		close(socket_fd);
1464 	unlink(addr.sun_path);
1465 
1466 	return (err);
1467 }
1468 
1469 void
1470 vm_snapshot_buf_err(const char *bufname, const enum vm_snapshot_op op)
1471 {
1472 	const char *__op;
1473 
1474 	if (op == VM_SNAPSHOT_SAVE)
1475 		__op = "save";
1476 	else if (op == VM_SNAPSHOT_RESTORE)
1477 		__op = "restore";
1478 	else
1479 		__op = "unknown";
1480 
1481 	fprintf(stderr, "%s: snapshot-%s failed for %s\r\n",
1482 		__func__, __op, bufname);
1483 }
1484 
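/*
 * Copy 'data_size' bytes between caller memory and the snapshot buffer,
 * in the direction given by meta->op, and advance the buffer cursor.
 */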
1485 int
1486 vm_snapshot_buf(void *data, size_t data_size, struct vm_snapshot_meta *meta)
1487 {
1488 	struct vm_snapshot_buffer *buffer;
1489 	int op;
1490 
1491 	buffer = &meta->buffer;
1492 	op = meta->op;
1493 
1494 	if (buffer->buf_rem < data_size) {
1495 		fprintf(stderr, "%s: buffer too small\r\n", __func__);
1496 		return (E2BIG);
1497 	}
1498 
1499 	if (op == VM_SNAPSHOT_SAVE)
1500 		memcpy(buffer->buf, data, data_size);
1501 	else if (op == VM_SNAPSHOT_RESTORE)
1502 		memcpy(data, buffer->buf, data_size);
1503 	else
1504 		return (EINVAL);
1505 
1506 	buffer->buf += data_size;
1507 	buffer->buf_rem -= data_size;
1508 
1509 	return (0);
1510 }
1511 
1512 size_t
1513 vm_get_snapshot_size(struct vm_snapshot_meta *meta)
1514 {
1515 	size_t length;
1516 	struct vm_snapshot_buffer *buffer;
1517 
1518 	buffer = &meta->buffer;
1519 
1520 	if (buffer->buf_size < buffer->buf_rem) {
1521 		fprintf(stderr, "%s: Invalid buffer: size = %zu, rem = %zu\r\n",
1522 			__func__, buffer->buf_size, buffer->buf_rem);
1523 		length = 0;
1524 	} else {
1525 		length = buffer->buf_size - buffer->buf_rem;
1526 	}
1527 
1528 	return (length);
1529 }
1530 
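/*
 * Translate a host pointer into guest memory to a guest-physical
 * address on save, and back to a host pointer on restore, so that
 * pointers into guest memory remain valid across a restore.  A pointer
 * with no guest-physical translation is tolerated only when
 * 'restore_null' is set and the pointer is NULL.
 */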
1531 int
1532 vm_snapshot_guest2host_addr(struct vmctx *ctx, void **addrp, size_t len,
1533     bool restore_null, struct vm_snapshot_meta *meta)
1534 {
1535 	int ret;
1536 	vm_paddr_t gaddr;
1537 
1538 	if (meta->op == VM_SNAPSHOT_SAVE) {
1539 		gaddr = paddr_host2guest(ctx, *addrp);
1540 		if (gaddr == (vm_paddr_t) -1) {
1541 			if (!restore_null ||
1542 			    *addrp != NULL) {
1543 				ret = EFAULT;
1544 				goto done;
1545 			}
1546 		}
1547 
1548 		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
1549 	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
1550 		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
1551 		if (gaddr == (vm_paddr_t) -1) {
1552 			if (!restore_null) {
1553 				ret = EFAULT;
1554 				goto done;
1555 			}
1556 		}
1557 
1558 		*addrp = paddr_guest2host(ctx, gaddr, len);
1559 	} else {
1560 		ret = EINVAL;
1561 	}
1562 
1563 done:
1564 	return (ret);
1565 }
1566 
1567 int
1568 vm_snapshot_buf_cmp(void *data, size_t data_size, struct vm_snapshot_meta *meta)
1569 {
1570 	struct vm_snapshot_buffer *buffer;
1571 	int op;
1572 	int ret;
1573 
1574 	buffer = &meta->buffer;
1575 	op = meta->op;
1576 
1577 	if (buffer->buf_rem < data_size) {
1578 		fprintf(stderr, "%s: buffer too small\r\n", __func__);
1579 		ret = E2BIG;
1580 		goto done;
1581 	}
1582 
1583 	if (op == VM_SNAPSHOT_SAVE) {
1584 		ret = 0;
1585 		memcpy(buffer->buf, data, data_size);
1586 	} else if (op == VM_SNAPSHOT_RESTORE) {
1587 		ret = memcmp(data, buffer->buf, data_size);
1588 	} else {
1589 		ret = EINVAL;
1590 		goto done;
1591 	}
1592 
1593 	buffer->buf += data_size;
1594 	buffer->buf_rem -= data_size;
1595 
1596 done:
1597 	return (ret);
1598 }
1599