/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016 Flavius Anton
 * Copyright (c) 2016 Mihai Tiganus
 * Copyright (c) 2016-2019 Mihai Carabas
 * Copyright (c) 2017-2019 Darius Mihai
 * Copyright (c) 2017-2019 Elena Mihailescu
 * Copyright (c) 2018-2019 Sergiu Weisz
 * All rights reserved.
 * The bhyve-snapshot feature was developed under sponsorships
 * from Matthew Grooms.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/un.h>

#include <machine/atomic.h>
#include <machine/segments.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <signal.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <stdbool.h>
#include <sys/ioctl.h>

#include <machine/vmm.h>
#ifndef WITHOUT_CAPSICUM
#include <machine/vmm_dev.h>
#endif
#include <machine/vmm_snapshot.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "atkbdc.h"
#include "inout.h"
#include "fwctl.h"
#include "ioapic.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_irq.h"
#include "pci_lpc.h"
#include "smbiostbl.h"
#include "snapshot.h"
#include "xmsr.h"
#include "spinup_ap.h"
#include "rtc.h"

#include <libxo/xo.h>
#include <ucl.h>

struct spinner_info {
	const size_t *crtval;
	const size_t maxval;
	const size_t total;
};

extern int guest_ncpus;

static struct winsize winsize;
static sig_t old_winch_handler;

#define	KB		(1024UL)
#define	MB		(1024UL * KB)
#define	GB		(1024UL * MB)

#define	SNAPSHOT_CHUNK	(4 * MB)
#define	PROG_BUF_SZ	(8192)

#define	BHYVE_RUN_DIR "/var/run/bhyve"
#define	CHECKPOINT_RUN_DIR BHYVE_RUN_DIR "/checkpoint"
#define	MAX_VMNAME 100

#define	MAX_MSG_SIZE 1024

#define	SNAPSHOT_BUFFER_SIZE (20 * MB)

#define	JSON_STRUCT_ARR_KEY		"structs"
#define	JSON_DEV_ARR_KEY		"devices"
#define	JSON_BASIC_METADATA_KEY 	"basic metadata"
#define	JSON_SNAPSHOT_REQ_KEY		"snapshot_req"
#define	JSON_SIZE_KEY			"size"
#define	JSON_FILE_OFFSET_KEY		"file_offset"

#define	JSON_NCPUS_KEY			"ncpus"
#define	JSON_VMNAME_KEY 		"vmname"
#define	JSON_MEMSIZE_KEY		"memsize"
#define	JSON_MEMFLAGS_KEY		"memflags"

#define min(a,b)			\
({					\
	__typeof__ (a) _a = (a);	\
	__typeof__ (b) _b = (b);	\
	_a < _b ? _a : _b;		\
})

/*
 * Emulated devices that can be snapshotted: each entry names a device and
 * provides its snapshot callback plus optional pause/resume callbacks.
 */
const struct vm_snapshot_dev_info snapshot_devs[] = {
	{ "atkbdc",	atkbdc_snapshot,	NULL,		NULL		},
	{ "virtio-net",	pci_snapshot,		pci_pause,	pci_resume	},
	{ "virtio-blk",	pci_snapshot,		pci_pause,	pci_resume	},
	{ "virtio-rnd",	pci_snapshot,		NULL,		NULL		},
	{ "lpc",	pci_snapshot,		NULL,		NULL		},
	{ "fbuf",	pci_snapshot,		NULL,		NULL		},
	{ "xhci",	pci_snapshot,		NULL,		NULL		},
	{ "e1000",	pci_snapshot,		NULL,		NULL		},
	{ "ahci",	pci_snapshot,		pci_pause,	pci_resume	},
	{ "ahci-hd",	pci_snapshot,		pci_pause,	pci_resume	},
	{ "ahci-cd",	pci_snapshot,		pci_pause,	pci_resume	},
};

/*
 * In-kernel state that is saved and restored through vm_snapshot_req(); each
 * entry maps a name to the corresponding snapshot request id.
 */
const struct vm_snapshot_kern_info snapshot_kern_structs[] = {
	{ "vhpet",	STRUCT_VHPET	},
	{ "vm",		STRUCT_VM	},
	{ "vmx",	STRUCT_VMX	},
	{ "vioapic",	STRUCT_VIOAPIC	},
	{ "vlapic",	STRUCT_VLAPIC	},
	{ "vmcx",	STRUCT_VMCX	},
	{ "vatpit",	STRUCT_VATPIT	},
	{ "vatpic",	STRUCT_VATPIC	},
	{ "vpmtmr",	STRUCT_VPMTMR	},
	{ "vrtc",	STRUCT_VRTC	},
};

static cpuset_t vcpus_active, vcpus_suspended;
static pthread_mutex_t vcpu_lock;
static pthread_cond_t vcpus_idle, vcpus_can_run;
static bool checkpoint_active;
/*
 * TODO: Harden this function and all of its callers since 'base_str' is a user
 * provided string.
 */
static char *
strcat_extension(const char *base_str, const char *ext)
{
	char *res;
	size_t base_len, ext_len;

	base_len = strnlen(base_str, MAX_VMNAME);
	ext_len = strnlen(ext, MAX_VMNAME);

	if (base_len + ext_len > MAX_VMNAME) {
		fprintf(stderr, "Filename exceeds maximum length.\n");
		return (NULL);
	}

	res = malloc(base_len + ext_len + 1);
	if (res == NULL) {
		perror("Failed to allocate memory.");
		return (NULL);
	}

	memcpy(res, base_str, base_len);
	memcpy(res + base_len, ext, ext_len);
	res[base_len + ext_len] = 0;

	return (res);
}

void
destroy_restore_state(struct restore_state *rstate)
{
	if (rstate == NULL) {
		fprintf(stderr, "Attempting to destroy NULL restore struct.\n");
		return;
	}

	if (rstate->kdata_map != MAP_FAILED)
		munmap(rstate->kdata_map, rstate->kdata_len);

	if (rstate->kdata_fd > 0)
		close(rstate->kdata_fd);
	if (rstate->vmmem_fd > 0)
		close(rstate->vmmem_fd);

	if (rstate->meta_root_obj != NULL)
		ucl_object_unref(rstate->meta_root_obj);
	if (rstate->meta_parser != NULL)
		ucl_parser_free(rstate->meta_parser);
}

static int
load_vmmem_file(const char *filename, struct restore_state *rstate)
{
	struct stat sb;
	int err;

	rstate->vmmem_fd = open(filename, O_RDONLY);
	if (rstate->vmmem_fd < 0) {
		perror("Failed to open restore file");
		return (-1);
	}

	err = fstat(rstate->vmmem_fd, &sb);
	if (err < 0) {
		perror("Failed to stat restore file");
		goto err_load_vmmem;
	}

	if (sb.st_size == 0) {
		fprintf(stderr, "Restore file is empty.\n");
		goto err_load_vmmem;
	}

	rstate->vmmem_len = sb.st_size;

	return (0);

err_load_vmmem:
	if (rstate->vmmem_fd > 0)
		close(rstate->vmmem_fd);
	return (-1);
}

static int
load_kdata_file(const char *filename, struct restore_state *rstate)
{
	struct stat sb;
	int err;

	rstate->kdata_fd = open(filename, O_RDONLY);
	if (rstate->kdata_fd < 0) {
		perror("Failed to open kernel data file");
		return (-1);
	}

	err = fstat(rstate->kdata_fd, &sb);
	if (err < 0) {
		perror("Failed to stat kernel data file");
		goto err_load_kdata;
	}

	if (sb.st_size == 0) {
		fprintf(stderr, "Kernel data file is empty.\n");
		goto err_load_kdata;
	}

	rstate->kdata_len = sb.st_size;
	rstate->kdata_map = mmap(NULL, rstate->kdata_len, PROT_READ,
				 MAP_SHARED, rstate->kdata_fd, 0);
	if (rstate->kdata_map == MAP_FAILED) {
		perror("Failed to map restore file");
		goto err_load_kdata;
	}

	return (0);

err_load_kdata:
	if (rstate->kdata_fd > 0)
		close(rstate->kdata_fd);
	return (-1);
}
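/*
 * For reference, an illustrative sketch of the ".meta" file parsed below.
 * The key names come from the JSON_*_KEY defines above; the concrete values
 * here are made up:
 *
 *	{
 *	  "basic metadata": {
 *	    "ncpus": 2, "vmname": "vm0", "memsize": 1073741824, "memflags": 0
 *	  },
 *	  "structs": [
 *	    { "debug_name": "vlapic", "snapshot_req": 5,
 *	      "size": 168, "file_offset": 0 }, ...
 *	  ],
 *	  "devices": [
 *	    { "snapshot_req": "virtio-blk", "size": 512, "file_offset": 4096 }, ...
 *	  ]
 *	}
 *
 * The "size"/"file_offset" pairs locate each blob inside the ".kern" data
 * file.
 */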
static int
load_metadata_file(const char *filename, struct restore_state *rstate)
{
	const ucl_object_t *obj;
	struct ucl_parser *parser;
	int err;

	parser = ucl_parser_new(UCL_PARSER_DEFAULT);
	if (parser == NULL) {
		fprintf(stderr, "Failed to initialize UCL parser.\n");
		err = -1;
		goto err_load_metadata;
	}

	err = ucl_parser_add_file(parser, filename);
	if (err == 0) {
		fprintf(stderr, "Failed to parse metadata file: '%s'\n",
			filename);
		err = -1;
		goto err_load_metadata;
	}

	obj = ucl_parser_get_object(parser);
	if (obj == NULL) {
		fprintf(stderr, "Failed to parse object.\n");
		err = -1;
		goto err_load_metadata;
	}

	rstate->meta_parser = parser;
	rstate->meta_root_obj = (ucl_object_t *)obj;

	return (0);

err_load_metadata:
	if (parser != NULL)
		ucl_parser_free(parser);
	return (err);
}

int
load_restore_file(const char *filename, struct restore_state *rstate)
{
	int err = 0;
	char *kdata_filename = NULL, *meta_filename = NULL;

	assert(filename != NULL);
	assert(rstate != NULL);

	memset(rstate, 0, sizeof(*rstate));
	rstate->kdata_map = MAP_FAILED;

	err = load_vmmem_file(filename, rstate);
	if (err != 0) {
		fprintf(stderr, "Failed to load guest RAM file.\n");
		goto err_restore;
	}

	kdata_filename = strcat_extension(filename, ".kern");
	if (kdata_filename == NULL) {
		fprintf(stderr, "Failed to construct kernel data filename.\n");
		goto err_restore;
	}

	err = load_kdata_file(kdata_filename, rstate);
	if (err != 0) {
		fprintf(stderr, "Failed to load guest kernel data file.\n");
		goto err_restore;
	}

	meta_filename = strcat_extension(filename, ".meta");
	if (meta_filename == NULL) {
		fprintf(stderr, "Failed to construct kernel metadata filename.\n");
		goto err_restore;
	}

	err = load_metadata_file(meta_filename, rstate);
	if (err != 0) {
		fprintf(stderr, "Failed to load guest metadata file.\n");
		goto err_restore;
	}

	return (0);

err_restore:
	destroy_restore_state(rstate);
	if (kdata_filename != NULL)
		free(kdata_filename);
	if (meta_filename != NULL)
		free(meta_filename);
	return (-1);
}

#define JSON_GET_INT_OR_RETURN(key, obj, result_ptr, ret)			\
do {										\
	const ucl_object_t *obj__;						\
	obj__ = ucl_object_lookup(obj, key);					\
	if (obj__ == NULL) {							\
		fprintf(stderr, "Missing key: '%s'", key);			\
		return (ret);							\
	}									\
	if (!ucl_object_toint_safe(obj__, result_ptr)) {			\
		fprintf(stderr, "Cannot convert '%s' value to int.", key);	\
		return (ret);							\
	}									\
} while(0)

#define JSON_GET_STRING_OR_RETURN(key, obj, result_ptr, ret)			\
do {										\
	const ucl_object_t *obj__;						\
	obj__ = ucl_object_lookup(obj, key);					\
	if (obj__ == NULL) {							\
		fprintf(stderr, "Missing key: '%s'", key);			\
		return (ret);							\
	}									\
	if (!ucl_object_tostring_safe(obj__, result_ptr)) {			\
		fprintf(stderr, "Cannot convert '%s' value to string.", key);	\
		return (ret);							\
	}									\
} while(0)
static void *
lookup_struct(enum snapshot_req struct_id, struct restore_state *rstate,
	      size_t *struct_size)
{
	const ucl_object_t *structs = NULL, *obj = NULL;
	ucl_object_iter_t it = NULL;
	int64_t snapshot_req, size, file_offset;

	structs = ucl_object_lookup(rstate->meta_root_obj, JSON_STRUCT_ARR_KEY);
	if (structs == NULL) {
		fprintf(stderr, "Failed to find '%s' object.\n",
			JSON_STRUCT_ARR_KEY);
		return (NULL);
	}

	if (ucl_object_type((ucl_object_t *)structs) != UCL_ARRAY) {
		fprintf(stderr, "Object '%s' is not an array.\n",
			JSON_STRUCT_ARR_KEY);
		return (NULL);
	}

	while ((obj = ucl_object_iterate(structs, &it, true)) != NULL) {
		snapshot_req = -1;
		JSON_GET_INT_OR_RETURN(JSON_SNAPSHOT_REQ_KEY, obj,
				       &snapshot_req, NULL);
		assert(snapshot_req >= 0);
		if ((enum snapshot_req) snapshot_req == struct_id) {
			JSON_GET_INT_OR_RETURN(JSON_SIZE_KEY, obj,
					       &size, NULL);
			assert(size >= 0);

			JSON_GET_INT_OR_RETURN(JSON_FILE_OFFSET_KEY, obj,
					       &file_offset, NULL);
			assert(file_offset >= 0);
			assert(file_offset + size <= rstate->kdata_len);

			*struct_size = (size_t)size;
			return (rstate->kdata_map + file_offset);
		}
	}

	return (NULL);
}

static void *
lookup_check_dev(const char *dev_name, struct restore_state *rstate,
		 const ucl_object_t *obj, size_t *data_size)
{
	const char *snapshot_req;
	int64_t size, file_offset;

	snapshot_req = NULL;
	JSON_GET_STRING_OR_RETURN(JSON_SNAPSHOT_REQ_KEY, obj,
				  &snapshot_req, NULL);
	assert(snapshot_req != NULL);
	if (!strcmp(snapshot_req, dev_name)) {
		JSON_GET_INT_OR_RETURN(JSON_SIZE_KEY, obj,
				       &size, NULL);
		assert(size >= 0);

		JSON_GET_INT_OR_RETURN(JSON_FILE_OFFSET_KEY, obj,
				       &file_offset, NULL);
		assert(file_offset >= 0);
		assert(file_offset + size <= rstate->kdata_len);

		*data_size = (size_t)size;
		return (rstate->kdata_map + file_offset);
	}

	return (NULL);
}

static void *
lookup_dev(const char *dev_name, struct restore_state *rstate,
	   size_t *data_size)
{
	const ucl_object_t *devs = NULL, *obj = NULL;
	ucl_object_iter_t it = NULL;
	void *ret;

	devs = ucl_object_lookup(rstate->meta_root_obj, JSON_DEV_ARR_KEY);
	if (devs == NULL) {
		fprintf(stderr, "Failed to find '%s' object.\n",
			JSON_DEV_ARR_KEY);
		return (NULL);
	}

	if (ucl_object_type((ucl_object_t *)devs) != UCL_ARRAY) {
		fprintf(stderr, "Object '%s' is not an array.\n",
			JSON_DEV_ARR_KEY);
		return (NULL);
	}

	while ((obj = ucl_object_iterate(devs, &it, true)) != NULL) {
		ret = lookup_check_dev(dev_name, rstate, obj, data_size);
		if (ret != NULL)
			return (ret);
	}

	return (NULL);
}

static const ucl_object_t *
lookup_basic_metadata_object(struct restore_state *rstate)
{
	const ucl_object_t *basic_meta_obj = NULL;

	basic_meta_obj = ucl_object_lookup(rstate->meta_root_obj,
					   JSON_BASIC_METADATA_KEY);
	if (basic_meta_obj == NULL) {
		fprintf(stderr, "Failed to find '%s' object.\n",
			JSON_BASIC_METADATA_KEY);
		return (NULL);
	}

	if (ucl_object_type((ucl_object_t *)basic_meta_obj) != UCL_OBJECT) {
		fprintf(stderr, "Object '%s' is not a JSON object.\n",
			JSON_BASIC_METADATA_KEY);
		return (NULL);
	}

	return (basic_meta_obj);
}

const char *
lookup_vmname(struct restore_state *rstate)
{
	const char *vmname;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (NULL);

	JSON_GET_STRING_OR_RETURN(JSON_VMNAME_KEY, obj, &vmname, NULL);
	return (vmname);
}

int
lookup_memflags(struct restore_state *rstate)
{
	int64_t memflags;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (0);

	JSON_GET_INT_OR_RETURN(JSON_MEMFLAGS_KEY, obj, &memflags, 0);

	return ((int)memflags);
}

size_t
lookup_memsize(struct restore_state *rstate)
{
	int64_t memsize;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (0);

	JSON_GET_INT_OR_RETURN(JSON_MEMSIZE_KEY, obj, &memsize, 0);
	if (memsize < 0)
		memsize = 0;

	return ((size_t)memsize);
}
int
lookup_guest_ncpus(struct restore_state *rstate)
{
	int64_t ncpus;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (0);

	JSON_GET_INT_OR_RETURN(JSON_NCPUS_KEY, obj, &ncpus, 0);
	return ((int)ncpus);
}

static void
winch_handler(int signal)
{
#ifdef TIOCGWINSZ
	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
#endif /* TIOCGWINSZ */
}

static int
print_progress(size_t crtval, const size_t maxval)
{
	size_t rc;
	double crtval_gb, maxval_gb;
	size_t i, win_width, prog_start, prog_done, prog_end;
	int mval_len;

	static char prog_buf[PROG_BUF_SZ];
	static const size_t len = sizeof(prog_buf);

	static size_t div;
	static char *div_str;

	static char wip_bar[] = { '/', '-', '\\', '|' };
	static int wip_idx = 0;

	if (maxval == 0) {
		printf("[0B / 0B]\r\n");
		return (0);
	}

	if (crtval > maxval)
		crtval = maxval;

	if (maxval > 10 * GB) {
		div = GB;
		div_str = "GiB";
	} else if (maxval > 10 * MB) {
		div = MB;
		div_str = "MiB";
	} else {
		div = KB;
		div_str = "KiB";
	}

	crtval_gb = (double) crtval / div;
	maxval_gb = (double) maxval / div;

	rc = snprintf(prog_buf, len, "%.03lf", maxval_gb);
	if (rc == len) {
		fprintf(stderr, "Maxval too big\n");
		return (-1);
	}
	mval_len = rc;

	rc = snprintf(prog_buf, len, "\r[%*.03lf%s / %.03lf%s] |",
		mval_len, crtval_gb, div_str, maxval_gb, div_str);

	if (rc == len) {
		fprintf(stderr, "Buffer too small to print progress\n");
		return (-1);
	}

	win_width = min(winsize.ws_col, len);
	prog_start = rc;

	if (prog_start < (win_width - 2)) {
		prog_end = win_width - prog_start - 2;
		prog_done = prog_end * (crtval_gb / maxval_gb);

		for (i = prog_start; i < prog_start + prog_done; i++)
			prog_buf[i] = '#';

		if (crtval != maxval) {
			prog_buf[i] = wip_bar[wip_idx];
			wip_idx = (wip_idx + 1) % sizeof(wip_bar);
			i++;
		} else {
			prog_buf[i++] = '#';
		}

		for (; i < win_width - 2; i++)
			prog_buf[i] = '_';

		prog_buf[win_width - 2] = '|';
	}

	prog_buf[win_width - 1] = '\0';
	write(STDOUT_FILENO, prog_buf, win_width);

	return (0);
}

static void *
snapshot_spinner_cb(void *arg)
{
	int rc;
	size_t crtval, maxval, total;
	struct spinner_info *si;
	struct timespec ts;

	si = arg;
	if (si == NULL)
		pthread_exit(NULL);

	ts.tv_sec = 0;
	ts.tv_nsec = 50 * 1000 * 1000;	/* 50 ms sleep time */

	do {
		crtval = *si->crtval;
		maxval = si->maxval;
		total = si->total;

		rc = print_progress(crtval, total);
		if (rc < 0) {
			fprintf(stderr, "Failed to print progress\n");
			break;
		}

		nanosleep(&ts, NULL);
	} while (crtval < maxval);

	pthread_exit(NULL);
	return NULL;
}
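/*
 * Write (or read, when restoring) one contiguous guest memory range to/from
 * the snapshot file at offset 'foff', optionally drawing the progress bar
 * from a helper thread while the copy is in flight.
 */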
static int
vm_snapshot_mem_part(const int snapfd, const size_t foff, void *src,
		     const size_t len, const size_t totalmem, const bool op_wr)
{
	int rc;
	size_t part_done, todo, rem;
	ssize_t done;
	bool show_progress;
	pthread_t spinner_th;
	struct spinner_info *si;

	if (lseek(snapfd, foff, SEEK_SET) < 0) {
		perror("Failed to change file offset");
		return (-1);
	}

	show_progress = false;
	if (isatty(STDIN_FILENO) && (winsize.ws_col != 0))
		show_progress = true;

	part_done = foff;
	rem = len;

	if (show_progress) {
		si = &(struct spinner_info) {
			.crtval = &part_done,
			.maxval = foff + len,
			.total = totalmem
		};

		rc = pthread_create(&spinner_th, 0, snapshot_spinner_cb, si);
		if (rc) {
			perror("Unable to create spinner thread");
			show_progress = false;
		}
	}

	while (rem > 0) {
		if (show_progress)
			todo = min(SNAPSHOT_CHUNK, rem);
		else
			todo = rem;

		if (op_wr)
			done = write(snapfd, src, todo);
		else
			done = read(snapfd, src, todo);
		if (done < 0) {
			perror("Failed to write in file");
			return (-1);
		}

		src += done;
		part_done += done;
		rem -= done;
	}

	if (show_progress) {
		rc = pthread_join(spinner_th, NULL);
		if (rc)
			perror("Unable to end spinner thread");
	}

	return (0);
}

/*
 * Save or restore all of guest memory.  The memory file mirrors the guest
 * layout: the segment below 4 GiB ("lowmem") is stored at offset 0 and the
 * segment starting at guest physical address 4 GiB ("highmem"), if any,
 * follows at file offset 'lowmem'.
 */
static size_t
vm_snapshot_mem(struct vmctx *ctx, int snapfd, size_t memsz, const bool op_wr)
{
	int ret;
	size_t lowmem, highmem, totalmem;
	char *baseaddr;

	ret = vm_get_guestmem_from_ctx(ctx, &baseaddr, &lowmem, &highmem);
	if (ret) {
		fprintf(stderr, "%s: unable to retrieve guest memory size\r\n",
			__func__);
		return (0);
	}
	totalmem = lowmem + highmem;

	if ((op_wr == false) && (totalmem != memsz)) {
		fprintf(stderr, "%s: mem size mismatch: %zu vs %zu\r\n",
			__func__, totalmem, memsz);
		return (0);
	}

	winsize.ws_col = 80;
#ifdef TIOCGWINSZ
	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
#endif /* TIOCGWINSZ */
	old_winch_handler = signal(SIGWINCH, winch_handler);

	ret = vm_snapshot_mem_part(snapfd, 0, baseaddr, lowmem,
		totalmem, op_wr);
	if (ret) {
		fprintf(stderr, "%s: Could not %s lowmem\r\n",
			__func__, op_wr ? "write" : "read");
		totalmem = 0;
		goto done;
	}

	if (highmem == 0)
		goto done;

	ret = vm_snapshot_mem_part(snapfd, lowmem, baseaddr + 4*GB,
		highmem, totalmem, op_wr);
	if (ret) {
		fprintf(stderr, "%s: Could not %s highmem\r\n",
			__func__, op_wr ? "write" : "read");
		totalmem = 0;
		goto done;
	}

done:
	printf("\r\n");
	signal(SIGWINCH, old_winch_handler);

	return (totalmem);
}

int
restore_vm_mem(struct vmctx *ctx, struct restore_state *rstate)
{
	size_t restored;

	restored = vm_snapshot_mem(ctx, rstate->vmmem_fd, rstate->vmmem_len,
				   false);

	if (restored != rstate->vmmem_len)
		return (-1);

	return (0);
}

static int
vm_restore_kern_struct(struct vmctx *ctx, struct restore_state *rstate,
		       const struct vm_snapshot_kern_info *info)
{
	void *struct_ptr;
	size_t struct_size;
	int ret;
	struct vm_snapshot_meta *meta;

	struct_ptr = lookup_struct(info->req, rstate, &struct_size);
	if (struct_ptr == NULL) {
		fprintf(stderr, "%s: Failed to lookup struct %s\r\n",
			__func__, info->struct_name);
		ret = -1;
		goto done;
	}

	if (struct_size == 0) {
		fprintf(stderr, "%s: Kernel struct size was 0 for: %s\r\n",
			__func__, info->struct_name);
		ret = -1;
		goto done;
	}

	meta = &(struct vm_snapshot_meta) {
		.ctx = ctx,
		.dev_name = info->struct_name,
		.dev_req = info->req,

		.buffer.buf_start = struct_ptr,
		.buffer.buf_size = struct_size,

		.buffer.buf = struct_ptr,
		.buffer.buf_rem = struct_size,

		.op = VM_SNAPSHOT_RESTORE,
	};

	ret = vm_snapshot_req(meta);
	if (ret != 0) {
		fprintf(stderr, "%s: Failed to restore struct: %s\r\n",
			__func__, info->struct_name);
		goto done;
	}

done:
	return (ret);
}

int
vm_restore_kern_structs(struct vmctx *ctx, struct restore_state *rstate)
{
	int ret;
	int i;

	for (i = 0; i < nitems(snapshot_kern_structs); i++) {
		ret = vm_restore_kern_struct(ctx, rstate,
					     &snapshot_kern_structs[i]);
		if (ret != 0)
			return (ret);
	}

	return (0);
}

int
vm_restore_user_dev(struct vmctx *ctx, struct restore_state *rstate,
		    const struct vm_snapshot_dev_info *info)
{
	void *dev_ptr;
	size_t dev_size;
	int ret;
	struct vm_snapshot_meta *meta;

	dev_ptr = lookup_dev(info->dev_name, rstate, &dev_size);
	if (dev_ptr == NULL) {
		fprintf(stderr, "Failed to lookup dev: %s\r\n", info->dev_name);
		fprintf(stderr, "Continuing the restore/migration process\r\n");
		return (0);
	}

	if (dev_size == 0) {
		fprintf(stderr, "%s: Device size is 0. "
			"Assuming %s is not used\r\n",
			__func__, info->dev_name);
		return (0);
	}

	meta = &(struct vm_snapshot_meta) {
		.ctx = ctx,
		.dev_name = info->dev_name,

		.buffer.buf_start = dev_ptr,
		.buffer.buf_size = dev_size,

		.buffer.buf = dev_ptr,
		.buffer.buf_rem = dev_size,

		.op = VM_SNAPSHOT_RESTORE,
	};

	ret = (*info->snapshot_cb)(meta);
	if (ret != 0) {
		fprintf(stderr, "Failed to restore dev: %s\r\n",
			info->dev_name);
		return (-1);
	}

	return (0);
}

int
vm_restore_user_devs(struct vmctx *ctx, struct restore_state *rstate)
{
	int ret;
	int i;

	for (i = 0; i < nitems(snapshot_devs); i++) {
		ret = vm_restore_user_dev(ctx, rstate, &snapshot_devs[i]);
		if (ret != 0)
			return (ret);
	}

	return (0);
}

int
vm_pause_user_devs(struct vmctx *ctx)
{
	const struct vm_snapshot_dev_info *info;
	int ret;
	int i;

	for (i = 0; i < nitems(snapshot_devs); i++) {
		info = &snapshot_devs[i];
		if (info->pause_cb == NULL)
			continue;

		ret = info->pause_cb(ctx, info->dev_name);
		if (ret != 0)
			return (ret);
	}

	return (0);
}

int
vm_resume_user_devs(struct vmctx *ctx)
{
	const struct vm_snapshot_dev_info *info;
	int ret;
	int i;

	for (i = 0; i < nitems(snapshot_devs); i++) {
		info = &snapshot_devs[i];
		if (info->resume_cb == NULL)
			continue;

		ret = info->resume_cb(ctx, info->dev_name);
		if (ret != 0)
			return (ret);
	}

	return (0);
}
/*
 * Save path: each kernel struct (and, later, each device) is serialized into
 * a scratch buffer, appended verbatim to the ".kern" data file, and an entry
 * recording its name/request id, size and file offset is emitted into the
 * ".meta" JSON file so the restore path can locate the blob again.
 */
static int
vm_snapshot_kern_struct(int data_fd, xo_handle_t *xop, const char *array_key,
			struct vm_snapshot_meta *meta, off_t *offset)
{
	int ret;
	size_t data_size;
	ssize_t write_cnt;

	ret = vm_snapshot_req(meta);
	if (ret != 0) {
		fprintf(stderr, "%s: Failed to snapshot struct %s\r\n",
			__func__, meta->dev_name);
		ret = -1;
		goto done;
	}

	data_size = vm_get_snapshot_size(meta);

	write_cnt = write(data_fd, meta->buffer.buf_start, data_size);
	if (write_cnt != data_size) {
		perror("Failed to write all snapshotted data.");
		ret = -1;
		goto done;
	}

	/* Write metadata. */
	xo_open_instance_h(xop, array_key);
	xo_emit_h(xop, "{:debug_name/%s}\n", meta->dev_name);
	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%d}\n",
		meta->dev_req);
	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
	xo_close_instance_h(xop, array_key);

	*offset += data_size;

done:
	return (ret);
}

static int
vm_snapshot_kern_structs(struct vmctx *ctx, int data_fd, xo_handle_t *xop)
{
	int ret, i, error;
	size_t offset, buf_size;
	char *buffer;
	struct vm_snapshot_meta *meta;

	error = 0;
	offset = 0;
	buf_size = SNAPSHOT_BUFFER_SIZE;

	buffer = malloc(SNAPSHOT_BUFFER_SIZE * sizeof(char));
	if (buffer == NULL) {
		error = ENOMEM;
		perror("Failed to allocate memory for snapshot buffer");
		goto err_vm_snapshot_kern_data;
	}

	meta = &(struct vm_snapshot_meta) {
		.ctx = ctx,

		.buffer.buf_start = buffer,
		.buffer.buf_size = buf_size,

		.op = VM_SNAPSHOT_SAVE,
	};

	xo_open_list_h(xop, JSON_STRUCT_ARR_KEY);
	for (i = 0; i < nitems(snapshot_kern_structs); i++) {
		meta->dev_name = snapshot_kern_structs[i].struct_name;
		meta->dev_req = snapshot_kern_structs[i].req;

		memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
		meta->buffer.buf = meta->buffer.buf_start;
		meta->buffer.buf_rem = meta->buffer.buf_size;

		ret = vm_snapshot_kern_struct(data_fd, xop, JSON_STRUCT_ARR_KEY,
					      meta, &offset);
		if (ret != 0) {
			error = -1;
			goto err_vm_snapshot_kern_data;
		}
	}
	xo_close_list_h(xop, JSON_STRUCT_ARR_KEY);

err_vm_snapshot_kern_data:
	if (buffer != NULL)
		free(buffer);
	return (error);
}

static int
vm_snapshot_basic_metadata(struct vmctx *ctx, xo_handle_t *xop, size_t memsz)
{
	int error;
	int memflags;
	char vmname_buf[MAX_VMNAME];

	memset(vmname_buf, 0, MAX_VMNAME);
	error = vm_get_name(ctx, vmname_buf, MAX_VMNAME - 1);
	if (error != 0) {
		perror("Failed to get VM name");
		goto err;
	}

	memflags = vm_get_memflags(ctx);

	xo_open_container_h(xop, JSON_BASIC_METADATA_KEY);
	xo_emit_h(xop, "{:" JSON_NCPUS_KEY "/%d}\n", guest_ncpus);
	xo_emit_h(xop, "{:" JSON_VMNAME_KEY "/%s}\n", vmname_buf);
	xo_emit_h(xop, "{:" JSON_MEMSIZE_KEY "/%lu}\n", memsz);
	xo_emit_h(xop, "{:" JSON_MEMFLAGS_KEY "/%d}\n", memflags);
	xo_close_container_h(xop, JSON_BASIC_METADATA_KEY);

err:
	return (error);
}
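/*
 * Unlike kernel structs, which are identified in the metadata by their
 * numeric snapshot request id, emulated devices are identified by name: the
 * "snapshot_req" field written below holds the device name string.
 */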
static int
vm_snapshot_dev_write_data(int data_fd, xo_handle_t *xop, const char *array_key,
			   struct vm_snapshot_meta *meta, off_t *offset)
{
	int ret;
	size_t data_size;

	data_size = vm_get_snapshot_size(meta);

	ret = write(data_fd, meta->buffer.buf_start, data_size);
	if (ret != data_size) {
		perror("Failed to write all snapshotted data.");
		return (-1);
	}

	/* Write metadata. */
	xo_open_instance_h(xop, array_key);
	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n", meta->dev_name);
	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
	xo_close_instance_h(xop, array_key);

	*offset += data_size;

	return (0);
}

static int
vm_snapshot_user_dev(const struct vm_snapshot_dev_info *info,
		     int data_fd, xo_handle_t *xop,
		     struct vm_snapshot_meta *meta, off_t *offset)
{
	int ret;

	ret = (*info->snapshot_cb)(meta);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot %s; ret=%d\r\n",
			meta->dev_name, ret);
		return (ret);
	}

	ret = vm_snapshot_dev_write_data(data_fd, xop, JSON_DEV_ARR_KEY, meta,
					 offset);
	if (ret != 0)
		return (ret);

	return (0);
}

static int
vm_snapshot_user_devs(struct vmctx *ctx, int data_fd, xo_handle_t *xop)
{
	int ret, i;
	off_t offset;
	void *buffer;
	size_t buf_size;
	struct vm_snapshot_meta *meta;

	buf_size = SNAPSHOT_BUFFER_SIZE;

	offset = lseek(data_fd, 0, SEEK_CUR);
	if (offset < 0) {
		perror("Failed to get data file current offset.");
		return (-1);
	}

	buffer = malloc(buf_size);
	if (buffer == NULL) {
		perror("Failed to allocate memory for snapshot buffer");
		ret = ENOMEM;
		goto snapshot_err;
	}

	meta = &(struct vm_snapshot_meta) {
		.ctx = ctx,

		.buffer.buf_start = buffer,
		.buffer.buf_size = buf_size,

		.op = VM_SNAPSHOT_SAVE,
	};

	xo_open_list_h(xop, JSON_DEV_ARR_KEY);

	/* Snapshot the devices that support this feature. */
	for (i = 0; i < nitems(snapshot_devs); i++) {
		meta->dev_name = snapshot_devs[i].dev_name;

		memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
		meta->buffer.buf = meta->buffer.buf_start;
		meta->buffer.buf_rem = meta->buffer.buf_size;

		ret = vm_snapshot_user_dev(&snapshot_devs[i], data_fd, xop,
					   meta, &offset);
		if (ret != 0)
			goto snapshot_err;
	}

	xo_close_list_h(xop, JSON_DEV_ARR_KEY);

snapshot_err:
	if (buffer != NULL)
		free(buffer);
	return (ret);
}

void
checkpoint_cpu_add(int vcpu)
{

	pthread_mutex_lock(&vcpu_lock);
	CPU_SET(vcpu, &vcpus_active);

	if (checkpoint_active) {
		CPU_SET(vcpu, &vcpus_suspended);
		while (checkpoint_active)
			pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
		CPU_CLR(vcpu, &vcpus_suspended);
	}
	pthread_mutex_unlock(&vcpu_lock);
}
/*
 * When a vCPU is suspended for any reason, it calls
 * checkpoint_cpu_suspend(), which records that the vCPU is idle.
 * Before returning from suspension, checkpoint_cpu_resume() is
 * called; it pauses the vCPU thread until the checkpoint is
 * complete.  The reason for the two-step process is that vCPUs
 * might already be stopped in the debug server when a checkpoint is
 * requested.  This approach allows those vCPUs to be accounted for
 * and handled as well.
 */
void
checkpoint_cpu_suspend(int vcpu)
{

	pthread_mutex_lock(&vcpu_lock);
	CPU_SET(vcpu, &vcpus_suspended);
	if (checkpoint_active && CPU_CMP(&vcpus_active, &vcpus_suspended) == 0)
		pthread_cond_signal(&vcpus_idle);
	pthread_mutex_unlock(&vcpu_lock);
}

void
checkpoint_cpu_resume(int vcpu)
{

	pthread_mutex_lock(&vcpu_lock);
	while (checkpoint_active)
		pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
	CPU_CLR(vcpu, &vcpus_suspended);
	pthread_mutex_unlock(&vcpu_lock);
}

static void
vm_vcpu_pause(struct vmctx *ctx)
{

	pthread_mutex_lock(&vcpu_lock);
	checkpoint_active = true;
	vm_suspend_cpu(ctx, -1);
	while (CPU_CMP(&vcpus_active, &vcpus_suspended) != 0)
		pthread_cond_wait(&vcpus_idle, &vcpu_lock);
	pthread_mutex_unlock(&vcpu_lock);
}

static void
vm_vcpu_resume(struct vmctx *ctx)
{

	pthread_mutex_lock(&vcpu_lock);
	checkpoint_active = false;
	pthread_mutex_unlock(&vcpu_lock);
	vm_resume_cpu(ctx, -1);
	pthread_cond_broadcast(&vcpus_can_run);
}
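/*
 * Take a checkpoint of a running guest into 'checkpoint_file'.  Roughly:
 * pause the vCPUs and the pausable devices, dump guest memory into the
 * checkpoint file, then write the kernel structs and device state into
 * '<checkpoint_file>.kern' with the describing JSON metadata in
 * '<checkpoint_file>.meta'.  Devices and vCPUs are resumed afterwards unless
 * 'stop_vm' is set, in which case the VM is destroyed and bhyve exits.
 */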
static int
vm_checkpoint(struct vmctx *ctx, char *checkpoint_file, bool stop_vm)
{
	int fd_checkpoint = 0, kdata_fd = 0;
	int ret = 0;
	int error = 0;
	size_t memsz;
	xo_handle_t *xop = NULL;
	char *meta_filename = NULL;
	char *kdata_filename = NULL;
	FILE *meta_file = NULL;

	kdata_filename = strcat_extension(checkpoint_file, ".kern");
	if (kdata_filename == NULL) {
		fprintf(stderr, "Failed to construct kernel data filename.\n");
		return (-1);
	}

	kdata_fd = open(kdata_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
	if (kdata_fd < 0) {
		perror("Failed to open kernel data snapshot file.");
		error = -1;
		goto done;
	}

	fd_checkpoint = open(checkpoint_file, O_RDWR | O_CREAT | O_TRUNC, 0700);

	if (fd_checkpoint < 0) {
		perror("Failed to create checkpoint file");
		error = -1;
		goto done;
	}

	meta_filename = strcat_extension(checkpoint_file, ".meta");
	if (meta_filename == NULL) {
		fprintf(stderr, "Failed to construct vm metadata filename.\n");
		error = -1;
		goto done;
	}

	meta_file = fopen(meta_filename, "w");
	if (meta_file == NULL) {
		perror("Failed to open vm metadata snapshot file.");
		error = -1;
		goto done;
	}

	xop = xo_create_to_file(meta_file, XO_STYLE_JSON, XOF_PRETTY);
	if (xop == NULL) {
		perror("Failed to get libxo handle on metadata file.");
		error = -1;
		goto done;
	}

	vm_vcpu_pause(ctx);

	ret = vm_pause_user_devs(ctx);
	if (ret != 0) {
		fprintf(stderr, "Could not pause devices\r\n");
		error = ret;
		goto done;
	}

	memsz = vm_snapshot_mem(ctx, fd_checkpoint, 0, true);
	if (memsz == 0) {
		perror("Could not write guest memory to file");
		error = -1;
		goto done;
	}

	ret = vm_snapshot_basic_metadata(ctx, xop, memsz);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot vm basic metadata.\n");
		error = -1;
		goto done;
	}

	ret = vm_snapshot_kern_structs(ctx, kdata_fd, xop);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot vm kernel data.\n");
		error = -1;
		goto done;
	}

	ret = vm_snapshot_user_devs(ctx, kdata_fd, xop);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot device state.\n");
		error = -1;
		goto done;
	}

	xo_finish_h(xop);

	if (stop_vm) {
		vm_destroy(ctx);
		exit(0);
	}

done:
	ret = vm_resume_user_devs(ctx);
	if (ret != 0)
		fprintf(stderr, "Could not resume devices\r\n");
	vm_vcpu_resume(ctx);
	if (fd_checkpoint > 0)
		close(fd_checkpoint);
	if (meta_filename != NULL)
		free(meta_filename);
	if (kdata_filename != NULL)
		free(kdata_filename);
	if (xop != NULL)
		xo_destroy(xop);
	if (meta_file != NULL)
		fclose(meta_file);
	if (kdata_fd > 0)
		close(kdata_fd);
	return (error);
}

int
get_checkpoint_msg(int conn_fd, struct vmctx *ctx)
{
	unsigned char buf[MAX_MSG_SIZE];
	struct checkpoint_op *checkpoint_op;
	int len, recv_len, total_recv = 0;
	int err = 0;

	len = sizeof(struct checkpoint_op);	/* expected length */
	while ((recv_len = recv(conn_fd, buf + total_recv, len - total_recv, 0)) > 0) {
		total_recv += recv_len;
	}
	if (recv_len < 0) {
		perror("Error while receiving data from bhyvectl");
		err = -1;
		goto done;
	}

	checkpoint_op = (struct checkpoint_op *)buf;
	switch (checkpoint_op->op) {
	case START_CHECKPOINT:
		err = vm_checkpoint(ctx, checkpoint_op->snapshot_filename, false);
		break;
	case START_SUSPEND:
		err = vm_checkpoint(ctx, checkpoint_op->snapshot_filename, true);
		break;
	default:
		fprintf(stderr, "Unrecognized checkpoint operation.\n");
		err = -1;
	}

done:
	close(conn_fd);
	return (err);
}

/*
 * Listen for commands from bhyvectl
 */
void *
checkpoint_thread(void *param)
{
	struct checkpoint_thread_info *thread_info;
	int conn_fd, ret;

	pthread_set_name_np(pthread_self(), "checkpoint thread");
	thread_info = (struct checkpoint_thread_info *)param;

	while ((conn_fd = accept(thread_info->socket_fd, NULL, NULL)) > -1) {
		ret = get_checkpoint_msg(conn_fd, thread_info->ctx);
		if (ret != 0) {
			fprintf(stderr, "Failed to read message on checkpoint "
					"socket. Retrying.\n");
		}
	}
	if (conn_fd < 0) {
		perror("Failed to accept connection");
	}

	return (NULL);
}
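/*
 * The checkpoint thread above is driven by bhyvectl: it connects to the UNIX
 * socket created in init_checkpoint_thread() (CHECKPOINT_RUN_DIR/<vmname>)
 * and sends a struct checkpoint_op whose 'op' field selects START_CHECKPOINT
 * or START_SUSPEND, e.g. (options as provided by bhyvectl's checkpoint
 * support):
 *
 *	bhyvectl --vm=<vmname> --checkpoint=<filename>
 *	bhyvectl --vm=<vmname> --suspend=<filename>
 */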
/*
 * Create the directory tree to store runtime specific information:
 * i.e. UNIX sockets for IPC with bhyvectl.
 */
static int
make_checkpoint_dir(void)
{
	int err;

	err = mkdir(BHYVE_RUN_DIR, 0755);
	if (err < 0 && errno != EEXIST)
		return (err);

	err = mkdir(CHECKPOINT_RUN_DIR, 0755);
	if (err < 0 && errno != EEXIST)
		return (err);

	return (0);
}

/*
 * Create the listening socket for IPC with bhyvectl
 */
int
init_checkpoint_thread(struct vmctx *ctx)
{
	struct checkpoint_thread_info *checkpoint_info = NULL;
	struct sockaddr_un addr;
	int socket_fd;
	pthread_t checkpoint_pthread;
	char vmname_buf[MAX_VMNAME];
	int ret, err = 0;

	memset(&addr, 0, sizeof(addr));

	err = pthread_mutex_init(&vcpu_lock, NULL);
	if (err != 0)
		errc(1, err, "checkpoint mutex init");
	err = pthread_cond_init(&vcpus_idle, NULL);
	if (err == 0)
		err = pthread_cond_init(&vcpus_can_run, NULL);
	if (err != 0)
		errc(1, err, "checkpoint cv init");

	socket_fd = socket(PF_UNIX, SOCK_STREAM, 0);
	if (socket_fd < 0) {
		perror("Socket creation failed (IPC with bhyvectl)");
		err = -1;
		goto fail;
	}

	err = make_checkpoint_dir();
	if (err < 0) {
		perror("Failed to create checkpoint runtime directory");
		goto fail;
	}

	addr.sun_family = AF_UNIX;

	err = vm_get_name(ctx, vmname_buf, MAX_VMNAME - 1);
	if (err != 0) {
		perror("Failed to get VM name");
		goto fail;
	}

	snprintf(addr.sun_path, sizeof(addr.sun_path), "%s/%s",
		 CHECKPOINT_RUN_DIR, vmname_buf);
	addr.sun_len = SUN_LEN(&addr);
	unlink(addr.sun_path);

	if (bind(socket_fd, (struct sockaddr *)&addr, addr.sun_len) != 0) {
		perror("Failed to bind socket (IPC with bhyvectl)");
		err = -1;
		goto fail;
	}

	if (listen(socket_fd, 10) < 0) {
		perror("Failed to listen on socket (IPC with bhyvectl)");
		err = -1;
		goto fail;
	}

	checkpoint_info = calloc(1, sizeof(*checkpoint_info));
	checkpoint_info->ctx = ctx;
	checkpoint_info->socket_fd = socket_fd;

	ret = pthread_create(&checkpoint_pthread, NULL, checkpoint_thread,
		checkpoint_info);
	if (ret != 0) {
		err = ret;
		goto fail;
	}

	return (0);
fail:
	free(checkpoint_info);
	if (socket_fd > 0)
		close(socket_fd);
	unlink(addr.sun_path);

	return (err);
}

void
vm_snapshot_buf_err(const char *bufname, const enum vm_snapshot_op op)
{
	const char *__op;

	if (op == VM_SNAPSHOT_SAVE)
		__op = "save";
	else if (op == VM_SNAPSHOT_RESTORE)
		__op = "restore";
	else
		__op = "unknown";

	fprintf(stderr, "%s: snapshot-%s failed for %s\r\n",
		__func__, __op, bufname);
}

int
vm_snapshot_buf(volatile void *data, size_t data_size,
		struct vm_snapshot_meta *meta)
{
	struct vm_snapshot_buffer *buffer;
	int op;

	buffer = &meta->buffer;
	op = meta->op;

	if (buffer->buf_rem < data_size) {
		fprintf(stderr, "%s: buffer too small\r\n", __func__);
		return (E2BIG);
	}

	if (op == VM_SNAPSHOT_SAVE)
		memcpy(buffer->buf, (uint8_t *) data, data_size);
	else if (op == VM_SNAPSHOT_RESTORE)
		memcpy((uint8_t *) data, buffer->buf, data_size);
	else
		return (EINVAL);

	buffer->buf += data_size;
	buffer->buf_rem -= data_size;

	return (0);
}
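/*
 * For context, a minimal sketch of how a device model's snapshot callback is
 * expected to use the helpers in this file ('mydev_softc' and its fields are
 * hypothetical; SNAPSHOT_VAR_OR_LEAVE and SNAPSHOT_BUF_OR_LEAVE are the
 * wrappers from <machine/vmm_snapshot.h> around vm_snapshot_buf()):
 *
 *	static int
 *	mydev_snapshot(struct vm_snapshot_meta *meta)
 *	{
 *		struct mydev_softc *sc = mydev_get_softc(meta);	// hypothetical
 *		int ret;
 *
 *		SNAPSHOT_VAR_OR_LEAVE(sc->regs, meta, ret, done);
 *		SNAPSHOT_BUF_OR_LEAVE(sc->fifo, sizeof(sc->fifo), meta,
 *		    ret, done);
 *	done:
 *		return (ret);
 *	}
 *
 * On save the data accumulates in meta->buffer and is written out by
 * vm_snapshot_dev_write_data(); on restore the same calls copy it back.
 */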
size_t
vm_get_snapshot_size(struct vm_snapshot_meta *meta)
{
	size_t length;
	struct vm_snapshot_buffer *buffer;

	buffer = &meta->buffer;

	if (buffer->buf_size < buffer->buf_rem) {
		fprintf(stderr, "%s: Invalid buffer: size = %zu, rem = %zu\r\n",
			__func__, buffer->buf_size, buffer->buf_rem);
		length = 0;
	} else {
		length = buffer->buf_size - buffer->buf_rem;
	}

	return (length);
}

int
vm_snapshot_guest2host_addr(void **addrp, size_t len, bool restore_null,
			    struct vm_snapshot_meta *meta)
{
	int ret;
	vm_paddr_t gaddr;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		gaddr = paddr_host2guest(meta->ctx, *addrp);
		if (gaddr == (vm_paddr_t) -1) {
			if (!restore_null ||
			    (restore_null && (*addrp != NULL))) {
				ret = EFAULT;
				goto done;
			}
		}

		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
		if (gaddr == (vm_paddr_t) -1) {
			if (!restore_null) {
				ret = EFAULT;
				goto done;
			}
		}

		*addrp = paddr_guest2host(meta->ctx, gaddr, len);
	} else {
		ret = EINVAL;
	}

done:
	return (ret);
}

int
vm_snapshot_buf_cmp(volatile void *data, size_t data_size,
		    struct vm_snapshot_meta *meta)
{
	struct vm_snapshot_buffer *buffer;
	int op;
	int ret;

	buffer = &meta->buffer;
	op = meta->op;

	if (buffer->buf_rem < data_size) {
		fprintf(stderr, "%s: buffer too small\r\n", __func__);
		ret = E2BIG;
		goto done;
	}

	if (op == VM_SNAPSHOT_SAVE) {
		ret = 0;
		memcpy(buffer->buf, (uint8_t *) data, data_size);
	} else if (op == VM_SNAPSHOT_RESTORE) {
		ret = memcmp((uint8_t *) data, buffer->buf, data_size);
	} else {
		ret = EINVAL;
		goto done;
	}

	buffer->buf += data_size;
	buffer->buf_rem -= data_size;

done:
	return (ret);
}