/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016 Flavius Anton
 * Copyright (c) 2016 Mihai Tiganus
 * Copyright (c) 2016-2019 Mihai Carabas
 * Copyright (c) 2017-2019 Darius Mihai
 * Copyright (c) 2017-2019 Elena Mihailescu
 * Copyright (c) 2018-2019 Sergiu Weisz
 * All rights reserved.
 * The bhyve-snapshot feature was developed under sponsorships
 * from Matthew Grooms.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/un.h>

#include <machine/atomic.h>
#include <machine/segments.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <signal.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <stdbool.h>
#include <sys/ioctl.h>

#include <machine/vmm.h>
#ifndef WITHOUT_CAPSICUM
#include <machine/vmm_dev.h>
#endif
#include <machine/vmm_snapshot.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "atkbdc.h"
#include "inout.h"
#include "dbgport.h"
#include "fwctl.h"
#include "ioapic.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_irq.h"
#include "pci_lpc.h"
#include "smbiostbl.h"
#include "snapshot.h"
#include "xmsr.h"
#include "spinup_ap.h"
#include "rtc.h"

#include <libxo/xo.h>
#include <ucl.h>

struct spinner_info {
	const size_t *crtval;
	const size_t maxval;
	const size_t total;
};

extern int guest_ncpus;

static struct winsize winsize;
static sig_t old_winch_handler;

#define	KB		(1024UL)
#define	MB		(1024UL * KB)
#define	GB		(1024UL * MB)

#define	SNAPSHOT_CHUNK	(4 * MB)
#define	PROG_BUF_SZ	(8192)

/* Runtime directories; the per-VM bhyvectl IPC socket is created under
 * CHECKPOINT_RUN_DIR (see init_checkpoint_thread()). */
#define	BHYVE_RUN_DIR "/var/run/bhyve"
#define	CHECKPOINT_RUN_DIR BHYVE_RUN_DIR "/checkpoint"
#define	MAX_VMNAME 100

#define	MAX_MSG_SIZE 1024

#define	SNAPSHOT_BUFFER_SIZE (20 * MB)

#define	JSON_STRUCT_ARR_KEY		"structs"
#define	JSON_DEV_ARR_KEY		"devices"
#define	JSON_BASIC_METADATA_KEY		"basic metadata"
#define	JSON_SNAPSHOT_REQ_KEY		"snapshot_req"
#define	JSON_SIZE_KEY			"size"
#define	JSON_FILE_OFFSET_KEY		"file_offset"

#define	JSON_NCPUS_KEY			"ncpus"
#define	JSON_VMNAME_KEY			"vmname"
#define	JSON_MEMSIZE_KEY		"memsize"
#define	JSON_MEMFLAGS_KEY		"memflags"

#define min(a,b)			\
({					\
	__typeof__ (a) _a = (a);	\
	__typeof__ (b) _b = (b);	\
	_a < _b ? _a : _b;		\
})

const struct vm_snapshot_dev_info snapshot_devs[] = {
	{ "atkbdc",	atkbdc_snapshot,	NULL,		NULL	   },
	{ "virtio-net",	pci_snapshot,		pci_pause,	pci_resume },
	{ "virtio-blk",	pci_snapshot,		pci_pause,	pci_resume },
	{ "virtio-rnd",	pci_snapshot,		NULL,		NULL	   },
	{ "lpc",	pci_snapshot,		NULL,		NULL	   },
	{ "fbuf",	pci_snapshot,		NULL,		NULL	   },
	{ "xhci",	pci_snapshot,		NULL,		NULL	   },
	{ "e1000",	pci_snapshot,		NULL,		NULL	   },
	{ "ahci",	pci_snapshot,		pci_pause,	pci_resume },
	{ "ahci-hd",	pci_snapshot,		pci_pause,	pci_resume },
	{ "ahci-cd",	pci_snapshot,		pci_pause,	pci_resume },
};

const struct vm_snapshot_kern_info snapshot_kern_structs[] = {
	{ "vhpet",	STRUCT_VHPET	},
	{ "vm",		STRUCT_VM	},
	{ "vmx",	STRUCT_VMX	},
	{ "vioapic",	STRUCT_VIOAPIC	},
	{ "vlapic",	STRUCT_VLAPIC	},
	{ "vmcx",	STRUCT_VMCX	},
	{ "vatpit",	STRUCT_VATPIT	},
	{ "vatpic",	STRUCT_VATPIC	},
	{ "vpmtmr",	STRUCT_VPMTMR	},
	{ "vrtc",	STRUCT_VRTC	},
};

static cpuset_t vcpus_active, vcpus_suspended;
static pthread_mutex_t vcpu_lock;
static pthread_cond_t vcpus_idle, vcpus_can_run;
static bool checkpoint_active;

/*
 * TODO: Harden this function and all of its callers since 'base_str' is a user
 * provided string.
 */
static char *
strcat_extension(const char *base_str, const char *ext)
{
	char *res;
	size_t base_len, ext_len;

	base_len = strnlen(base_str, MAX_VMNAME);
	ext_len = strnlen(ext, MAX_VMNAME);

	if (base_len + ext_len > MAX_VMNAME) {
		fprintf(stderr, "Filename exceeds maximum length.\n");
		return (NULL);
	}

	res = malloc(base_len + ext_len + 1);
	if (res == NULL) {
		perror("Failed to allocate memory.");
		return (NULL);
	}

	memcpy(res, base_str, base_len);
	memcpy(res + base_len, ext, ext_len);
	res[base_len + ext_len] = 0;

	return (res);
}

void
destroy_restore_state(struct restore_state *rstate)
{
	if (rstate == NULL) {
		fprintf(stderr, "Attempting to destroy NULL restore struct.\n");
		return;
	}

	if (rstate->kdata_map != MAP_FAILED)
		munmap(rstate->kdata_map, rstate->kdata_len);

	if (rstate->kdata_fd > 0)
		close(rstate->kdata_fd);
	if (rstate->vmmem_fd > 0)
		close(rstate->vmmem_fd);

	if (rstate->meta_root_obj != NULL)
		ucl_object_unref(rstate->meta_root_obj);
	if (rstate->meta_parser != NULL)
		ucl_parser_free(rstate->meta_parser);
}

static int
load_vmmem_file(const char *filename, struct restore_state *rstate)
{
	struct stat sb;
	int err;

	rstate->vmmem_fd = open(filename, O_RDONLY);
	if (rstate->vmmem_fd < 0) {
		perror("Failed to open restore file");
		return (-1);
	}

	err = fstat(rstate->vmmem_fd, &sb);
	if (err < 0) {
		perror("Failed to stat restore file");
		goto err_load_vmmem;
	}

	if (sb.st_size == 0) {
		fprintf(stderr, "Restore file is empty.\n");
		goto err_load_vmmem;
	}

	rstate->vmmem_len = sb.st_size;

	return (0);

err_load_vmmem:
	if (rstate->vmmem_fd > 0)
		close(rstate->vmmem_fd);
	return (-1);
}

static int
load_kdata_file(const char *filename, struct restore_state *rstate)
{
	struct stat sb;
	int err;

	rstate->kdata_fd = open(filename, O_RDONLY);
	if (rstate->kdata_fd < 0) {
		perror("Failed to open kernel data file");
		return (-1);
	}

	err = fstat(rstate->kdata_fd, &sb);
	if (err < 0) {
		perror("Failed to stat kernel data file");
		goto err_load_kdata;
	}

	if (sb.st_size == 0) {
		fprintf(stderr, "Kernel data file is empty.\n");
		goto err_load_kdata;
	}

	rstate->kdata_len = sb.st_size;
	rstate->kdata_map = mmap(NULL, rstate->kdata_len, PROT_READ,
				 MAP_SHARED, rstate->kdata_fd, 0);
	if (rstate->kdata_map == MAP_FAILED) {
		perror("Failed to map restore file");
		goto err_load_kdata;
	}

	return (0);

err_load_kdata:
	if (rstate->kdata_fd > 0)
		close(rstate->kdata_fd);
	return (-1);
}

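/*
 * Parse the JSON/UCL metadata file.  Both the parser and its root object are
 * kept in 'rstate'; destroy_restore_state() releases them.
 */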
static int
load_metadata_file(const char *filename, struct restore_state *rstate)
{
	const ucl_object_t *obj;
	struct ucl_parser *parser;
	int err;

	parser = ucl_parser_new(UCL_PARSER_DEFAULT);
	if (parser == NULL) {
		fprintf(stderr, "Failed to initialize UCL parser.\n");
		goto err_load_metadata;
	}

	err = ucl_parser_add_file(parser, filename);
	if (err == 0) {
		fprintf(stderr, "Failed to parse metadata file: '%s'\n",
			filename);
		err = -1;
		goto err_load_metadata;
	}

	obj = ucl_parser_get_object(parser);
	if (obj == NULL) {
		fprintf(stderr, "Failed to parse object.\n");
		err = -1;
		goto err_load_metadata;
	}

	rstate->meta_parser = parser;
	rstate->meta_root_obj = (ucl_object_t *)obj;

	return (0);

err_load_metadata:
	if (parser != NULL)
		ucl_parser_free(parser);
	return (err);
}

int
load_restore_file(const char *filename, struct restore_state *rstate)
{
	int err = 0;
	char *kdata_filename = NULL, *meta_filename = NULL;

	assert(filename != NULL);
	assert(rstate != NULL);

	memset(rstate, 0, sizeof(*rstate));
	rstate->kdata_map = MAP_FAILED;

	err = load_vmmem_file(filename, rstate);
	if (err != 0) {
		fprintf(stderr, "Failed to load guest RAM file.\n");
		goto err_restore;
	}

	kdata_filename = strcat_extension(filename, ".kern");
	if (kdata_filename == NULL) {
		fprintf(stderr, "Failed to construct kernel data filename.\n");
		goto err_restore;
	}

	err = load_kdata_file(kdata_filename, rstate);
	if (err != 0) {
		fprintf(stderr, "Failed to load guest kernel data file.\n");
		goto err_restore;
	}

	meta_filename = strcat_extension(filename, ".meta");
	if (meta_filename == NULL) {
		fprintf(stderr, "Failed to construct kernel metadata filename.\n");
		goto err_restore;
	}

	err = load_metadata_file(meta_filename, rstate);
	if (err != 0) {
		fprintf(stderr, "Failed to load guest metadata file.\n");
		goto err_restore;
	}

	return (0);

err_restore:
	destroy_restore_state(rstate);
	if (kdata_filename != NULL)
		free(kdata_filename);
	if (meta_filename != NULL)
		free(meta_filename);
	return (-1);
}

#define JSON_GET_INT_OR_RETURN(key, obj, result_ptr, ret)			\
do {										\
	const ucl_object_t *obj__;						\
	obj__ = ucl_object_lookup(obj, key);					\
	if (obj__ == NULL) {							\
		fprintf(stderr, "Missing key: '%s'", key);			\
		return (ret);							\
	}									\
	if (!ucl_object_toint_safe(obj__, result_ptr)) {			\
		fprintf(stderr, "Cannot convert '%s' value to int.", key);	\
		return (ret);							\
	}									\
} while(0)

#define JSON_GET_STRING_OR_RETURN(key, obj, result_ptr, ret)			\
do {										\
	const ucl_object_t *obj__;						\
	obj__ = ucl_object_lookup(obj, key);					\
	if (obj__ == NULL) {							\
		fprintf(stderr, "Missing key: '%s'", key);			\
		return (ret);							\
	}									\
	if (!ucl_object_tostring_safe(obj__, result_ptr)) {			\
		fprintf(stderr, "Cannot convert '%s' value to string.", key);	\
		return (ret);							\
	}									\
} while(0)

static void *
lookup_struct(enum snapshot_req struct_id, struct restore_state *rstate,
	      size_t *struct_size)
{
	const ucl_object_t *structs = NULL, *obj = NULL;
	ucl_object_iter_t it = NULL;
	int64_t snapshot_req, size, file_offset;

	structs = ucl_object_lookup(rstate->meta_root_obj, JSON_STRUCT_ARR_KEY);
	if (structs == NULL) {
		fprintf(stderr, "Failed to find '%s' object.\n",
			JSON_STRUCT_ARR_KEY);
		return (NULL);
	}

	if (ucl_object_type((ucl_object_t *)structs) != UCL_ARRAY) {
		fprintf(stderr, "Object '%s' is not an array.\n",
			JSON_STRUCT_ARR_KEY);
		return (NULL);
	}

	while ((obj = ucl_object_iterate(structs, &it, true)) != NULL) {
		snapshot_req = -1;
		JSON_GET_INT_OR_RETURN(JSON_SNAPSHOT_REQ_KEY, obj,
				       &snapshot_req, NULL);
		assert(snapshot_req >= 0);
		if ((enum snapshot_req) snapshot_req == struct_id) {
			JSON_GET_INT_OR_RETURN(JSON_SIZE_KEY, obj,
					       &size, NULL);
			assert(size >= 0);

			JSON_GET_INT_OR_RETURN(JSON_FILE_OFFSET_KEY, obj,
					       &file_offset, NULL);
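			/* Sanity-check the metadata-provided values before using them. */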
			assert(file_offset >= 0);
			assert(file_offset + size <= rstate->kdata_len);

			*struct_size = (size_t)size;
			return (rstate->kdata_map + file_offset);
		}
	}

	return (NULL);
}

static void *
lookup_check_dev(const char *dev_name, struct restore_state *rstate,
		 const ucl_object_t *obj, size_t *data_size)
{
	const char *snapshot_req;
	int64_t size, file_offset;

	snapshot_req = NULL;
	JSON_GET_STRING_OR_RETURN(JSON_SNAPSHOT_REQ_KEY, obj,
				  &snapshot_req, NULL);
	assert(snapshot_req != NULL);
	if (!strcmp(snapshot_req, dev_name)) {
		JSON_GET_INT_OR_RETURN(JSON_SIZE_KEY, obj,
				       &size, NULL);
		assert(size >= 0);

		JSON_GET_INT_OR_RETURN(JSON_FILE_OFFSET_KEY, obj,
				       &file_offset, NULL);
		assert(file_offset >= 0);
		assert(file_offset + size <= rstate->kdata_len);

		*data_size = (size_t)size;
		return (rstate->kdata_map + file_offset);
	}

	return (NULL);
}

static void*
lookup_dev(const char *dev_name, struct restore_state *rstate,
	   size_t *data_size)
{
	const ucl_object_t *devs = NULL, *obj = NULL;
	ucl_object_iter_t it = NULL;
	void *ret;

	devs = ucl_object_lookup(rstate->meta_root_obj, JSON_DEV_ARR_KEY);
	if (devs == NULL) {
		fprintf(stderr, "Failed to find '%s' object.\n",
			JSON_DEV_ARR_KEY);
		return (NULL);
	}

	if (ucl_object_type((ucl_object_t *)devs) != UCL_ARRAY) {
		fprintf(stderr, "Object '%s' is not an array.\n",
			JSON_DEV_ARR_KEY);
		return (NULL);
	}

	while ((obj = ucl_object_iterate(devs, &it, true)) != NULL) {
		ret = lookup_check_dev(dev_name, rstate, obj, data_size);
		if (ret != NULL)
			return (ret);
	}

	return (NULL);
}

static const ucl_object_t *
lookup_basic_metadata_object(struct restore_state *rstate)
{
	const ucl_object_t *basic_meta_obj = NULL;

	basic_meta_obj = ucl_object_lookup(rstate->meta_root_obj,
					   JSON_BASIC_METADATA_KEY);
	if (basic_meta_obj == NULL) {
		fprintf(stderr, "Failed to find '%s' object.\n",
			JSON_BASIC_METADATA_KEY);
		return (NULL);
	}

	if (ucl_object_type((ucl_object_t *)basic_meta_obj) != UCL_OBJECT) {
		fprintf(stderr, "Object '%s' is not a JSON object.\n",
			JSON_BASIC_METADATA_KEY);
		return (NULL);
	}

	return (basic_meta_obj);
}

const char *
lookup_vmname(struct restore_state *rstate)
{
	const char *vmname;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (NULL);

	JSON_GET_STRING_OR_RETURN(JSON_VMNAME_KEY, obj, &vmname, NULL);
	return (vmname);
}

int
lookup_memflags(struct restore_state *rstate)
{
	int64_t memflags;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (0);

	JSON_GET_INT_OR_RETURN(JSON_MEMFLAGS_KEY, obj, &memflags, 0);

	return ((int)memflags);
}

size_t
lookup_memsize(struct restore_state *rstate)
{
	int64_t memsize;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (0);

	JSON_GET_INT_OR_RETURN(JSON_MEMSIZE_KEY, obj, &memsize, 0);
	if (memsize < 0)
		memsize = 0;

	return ((size_t)memsize);
}

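/* Number of vCPUs recorded in the snapshot's basic metadata. */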
int
lookup_guest_ncpus(struct restore_state *rstate)
{
	int64_t ncpus;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (0);

	JSON_GET_INT_OR_RETURN(JSON_NCPUS_KEY, obj, &ncpus, 0);
	return ((int)ncpus);
}

static void
winch_handler(int signal)
{
#ifdef TIOCGWINSZ
	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
#endif /* TIOCGWINSZ */
}

static int
print_progress(size_t crtval, const size_t maxval)
{
	size_t rc;
	double crtval_gb, maxval_gb;
	size_t i, win_width, prog_start, prog_done, prog_end;
	int mval_len;

	static char prog_buf[PROG_BUF_SZ];
	static const size_t len = sizeof(prog_buf);

	static size_t div;
	static char *div_str;

	static char wip_bar[] = { '/', '-', '\\', '|' };
	static int wip_idx = 0;

	if (maxval == 0) {
		printf("[0B / 0B]\r\n");
		return (0);
	}

	if (crtval > maxval)
		crtval = maxval;

	if (maxval > 10 * GB) {
		div = GB;
		div_str = "GiB";
	} else if (maxval > 10 * MB) {
		div = MB;
		div_str = "MiB";
	} else {
		div = KB;
		div_str = "KiB";
	}

	crtval_gb = (double) crtval / div;
	maxval_gb = (double) maxval / div;

	rc = snprintf(prog_buf, len, "%.03lf", maxval_gb);
	if (rc == len) {
		fprintf(stderr, "Maxval too big\n");
		return (-1);
	}
	mval_len = rc;

	rc = snprintf(prog_buf, len, "\r[%*.03lf%s / %.03lf%s] |",
		mval_len, crtval_gb, div_str, maxval_gb, div_str);

	if (rc == len) {
		fprintf(stderr, "Buffer too small to print progress\n");
		return (-1);
	}

	win_width = min(winsize.ws_col, len);
	prog_start = rc;

	if (prog_start < (win_width - 2)) {
		prog_end = win_width - prog_start - 2;
		prog_done = prog_end * (crtval_gb / maxval_gb);

		for (i = prog_start; i < prog_start + prog_done; i++)
			prog_buf[i] = '#';

		if (crtval != maxval) {
			prog_buf[i] = wip_bar[wip_idx];
			wip_idx = (wip_idx + 1) % sizeof(wip_bar);
			i++;
		} else {
			prog_buf[i++] = '#';
		}

		for (; i < win_width - 2; i++)
			prog_buf[i] = '_';

		prog_buf[win_width - 2] = '|';
	}

	prog_buf[win_width - 1] = '\0';
	write(STDOUT_FILENO, prog_buf, win_width);

	return (0);
}

static void *
snapshot_spinner_cb(void *arg)
{
	int rc;
	size_t crtval, maxval, total;
	struct spinner_info *si;
	struct timespec ts;

	si = arg;
	if (si == NULL)
		pthread_exit(NULL);

	ts.tv_sec = 0;
	ts.tv_nsec = 50 * 1000 * 1000; /* 50 ms sleep time */

	do {
		crtval = *si->crtval;
		maxval = si->maxval;
		total = si->total;

		rc = print_progress(crtval, total);
		if (rc < 0) {
			fprintf(stderr, "Failed to parse progress\n");
			break;
		}

		nanosleep(&ts, NULL);
	} while (crtval < maxval);

	pthread_exit(NULL);
	return NULL;
}

static int
vm_snapshot_mem_part(const int snapfd, const size_t foff, void *src,
		     const size_t len, const size_t totalmem, const bool op_wr)
{
	int rc;
	size_t part_done, todo, rem;
	ssize_t done;
	bool show_progress;
	pthread_t spinner_th;
	struct spinner_info *si;

	if (lseek(snapfd, foff, SEEK_SET) < 0) {
		perror("Failed to change file offset");
		return (-1);
	}

	show_progress = false;
	if (isatty(STDIN_FILENO) && (winsize.ws_col != 0))
		show_progress = true;

	part_done = foff;
	rem = len;

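	/* A helper thread draws the progress bar while this thread copies memory. */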
	if (show_progress) {
		si = &(struct spinner_info) {
			.crtval = &part_done,
			.maxval = foff + len,
			.total = totalmem
		};

		rc = pthread_create(&spinner_th, 0, snapshot_spinner_cb, si);
		if (rc) {
			perror("Unable to create spinner thread");
			show_progress = false;
		}
	}

	while (rem > 0) {
		if (show_progress)
			todo = min(SNAPSHOT_CHUNK, rem);
		else
			todo = rem;

		if (op_wr)
			done = write(snapfd, src, todo);
		else
			done = read(snapfd, src, todo);
		if (done < 0) {
			perror("Failed to write in file");
			return (-1);
		}

		src += done;
		part_done += done;
		rem -= done;
	}

	if (show_progress) {
		rc = pthread_join(spinner_th, NULL);
		if (rc)
			perror("Unable to end spinner thread");
	}

	return (0);
}

static size_t
vm_snapshot_mem(struct vmctx *ctx, int snapfd, size_t memsz, const bool op_wr)
{
	int ret;
	size_t lowmem, highmem, totalmem;
	char *baseaddr;

	ret = vm_get_guestmem_from_ctx(ctx, &baseaddr, &lowmem, &highmem);
	if (ret) {
		fprintf(stderr, "%s: unable to retrieve guest memory size\r\n",
			__func__);
		return (0);
	}
	totalmem = lowmem + highmem;

	if ((op_wr == false) && (totalmem != memsz)) {
		fprintf(stderr, "%s: mem size mismatch: %ld vs %ld\r\n",
			__func__, totalmem, memsz);
		return (0);
	}

	winsize.ws_col = 80;
#ifdef TIOCGWINSZ
	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
#endif /* TIOCGWINSZ */
	old_winch_handler = signal(SIGWINCH, winch_handler);

	ret = vm_snapshot_mem_part(snapfd, 0, baseaddr, lowmem,
		totalmem, op_wr);
	if (ret) {
		fprintf(stderr, "%s: Could not %s lowmem\r\n",
			__func__, op_wr ? "write" : "read");
		totalmem = 0;
		goto done;
	}

	if (highmem == 0)
		goto done;

	/* The high memory segment is mapped at the 4 GiB boundary. */
	ret = vm_snapshot_mem_part(snapfd, lowmem, baseaddr + 4*GB,
		highmem, totalmem, op_wr);
	if (ret) {
		fprintf(stderr, "%s: Could not %s highmem\r\n",
			__func__, op_wr ? "write" : "read");
		totalmem = 0;
		goto done;
	}

done:
	printf("\r\n");
	signal(SIGWINCH, old_winch_handler);

	return (totalmem);
}

int
restore_vm_mem(struct vmctx *ctx, struct restore_state *rstate)
{
	size_t restored;

	restored = vm_snapshot_mem(ctx, rstate->vmmem_fd, rstate->vmmem_len,
				   false);

	if (restored != rstate->vmmem_len)
		return (-1);

	return (0);
}

static int
vm_restore_kern_struct(struct vmctx *ctx, struct restore_state *rstate,
		       const struct vm_snapshot_kern_info *info)
{
	void *struct_ptr;
	size_t struct_size;
	int ret;
	struct vm_snapshot_meta *meta;

	struct_ptr = lookup_struct(info->req, rstate, &struct_size);
	if (struct_ptr == NULL) {
		fprintf(stderr, "%s: Failed to lookup struct %s\r\n",
			__func__, info->struct_name);
		ret = -1;
		goto done;
	}

	if (struct_size == 0) {
		fprintf(stderr, "%s: Kernel struct size was 0 for: %s\r\n",
			__func__, info->struct_name);
		ret = -1;
		goto done;
	}

	meta = &(struct vm_snapshot_meta) {
		.ctx = ctx,
		.dev_name = info->struct_name,
		.dev_req  = info->req,

		.buffer.buf_start = struct_ptr,
		.buffer.buf_size = struct_size,

		.buffer.buf = struct_ptr,
		.buffer.buf_rem = struct_size,

		.op = VM_SNAPSHOT_RESTORE,
	};

	ret = vm_snapshot_req(meta);
	if (ret != 0) {
		fprintf(stderr, "%s: Failed to restore struct: %s\r\n",
			__func__, info->struct_name);
		goto done;
	}

done:
	return (ret);
}

int
vm_restore_kern_structs(struct vmctx *ctx, struct restore_state *rstate)
{
	int ret;
	int i;

	for (i = 0; i < nitems(snapshot_kern_structs); i++) {
		ret = vm_restore_kern_struct(ctx, rstate,
					     &snapshot_kern_structs[i]);
		if (ret != 0)
			return (ret);
	}

	return (0);
}

int
vm_restore_user_dev(struct vmctx *ctx, struct restore_state *rstate,
		    const struct vm_snapshot_dev_info *info)
{
	void *dev_ptr;
	size_t dev_size;
	int ret;
	struct vm_snapshot_meta *meta;

	dev_ptr = lookup_dev(info->dev_name, rstate, &dev_size);
	if (dev_ptr == NULL) {
		fprintf(stderr, "Failed to lookup dev: %s\r\n", info->dev_name);
		fprintf(stderr, "Continuing the restore/migration process\r\n");
		return (0);
	}

	if (dev_size == 0) {
		fprintf(stderr, "%s: Device size is 0. "
			"Assuming %s is not used\r\n",
			__func__, info->dev_name);
		return (0);
	}

	meta = &(struct vm_snapshot_meta) {
		.ctx = ctx,
		.dev_name = info->dev_name,

		.buffer.buf_start = dev_ptr,
		.buffer.buf_size = dev_size,

		.buffer.buf = dev_ptr,
		.buffer.buf_rem = dev_size,

		.op = VM_SNAPSHOT_RESTORE,
	};

	ret = (*info->snapshot_cb)(meta);
	if (ret != 0) {
		fprintf(stderr, "Failed to restore dev: %s\r\n",
			info->dev_name);
		return (-1);
	}

	return (0);
}


int
vm_restore_user_devs(struct vmctx *ctx, struct restore_state *rstate)
{
	int ret;
	int i;

	for (i = 0; i < nitems(snapshot_devs); i++) {
		ret = vm_restore_user_dev(ctx, rstate, &snapshot_devs[i]);
		if (ret != 0)
			return (ret);
	}

	return 0;
}

int
vm_pause_user_devs(struct vmctx *ctx)
{
	const struct vm_snapshot_dev_info *info;
	int ret;
	int i;

	for (i = 0; i < nitems(snapshot_devs); i++) {
		info = &snapshot_devs[i];
		if (info->pause_cb == NULL)
			continue;

		ret = info->pause_cb(ctx, info->dev_name);
		if (ret != 0)
			return (ret);
	}

	return (0);
}

int
vm_resume_user_devs(struct vmctx *ctx)
{
	const struct vm_snapshot_dev_info *info;
	int ret;
	int i;

	for (i = 0; i < nitems(snapshot_devs); i++) {
		info = &snapshot_devs[i];
		if (info->resume_cb == NULL)
			continue;

		ret = info->resume_cb(ctx, info->dev_name);
		if (ret != 0)
			return (ret);
	}

	return (0);
}

/*
 * Snapshot a single kernel structure: vm_snapshot_req() serializes it into
 * the bounce buffer, the result is appended to the kernel data file and its
 * request id, size and file offset are recorded in the JSON metadata.
 */
static int
vm_snapshot_kern_struct(int data_fd, xo_handle_t *xop, const char *array_key,
			struct vm_snapshot_meta *meta, off_t *offset)
{
	int ret;
	size_t data_size;
	ssize_t write_cnt;

	ret = vm_snapshot_req(meta);
	if (ret != 0) {
		fprintf(stderr, "%s: Failed to snapshot struct %s\r\n",
			__func__, meta->dev_name);
		ret = -1;
		goto done;
	}

	data_size = vm_get_snapshot_size(meta);

	write_cnt = write(data_fd, meta->buffer.buf_start, data_size);
	if (write_cnt != data_size) {
		perror("Failed to write all snapshotted data.");
		ret = -1;
		goto done;
	}

	/* Write metadata. */
	xo_open_instance_h(xop, array_key);
	xo_emit_h(xop, "{:debug_name/%s}\n", meta->dev_name);
	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%d}\n",
		meta->dev_req);
	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
	xo_close_instance_h(xop, array_key);

	*offset += data_size;

done:
	return (ret);
}

static int
vm_snapshot_kern_structs(struct vmctx *ctx, int data_fd, xo_handle_t *xop)
{
	int ret, i, error;
	size_t offset, buf_size;
	char *buffer;
	struct vm_snapshot_meta *meta;

	error = 0;
	offset = 0;
	buf_size = SNAPSHOT_BUFFER_SIZE;

	buffer = malloc(SNAPSHOT_BUFFER_SIZE * sizeof(char));
	if (buffer == NULL) {
		error = ENOMEM;
		perror("Failed to allocate memory for snapshot buffer");
		goto err_vm_snapshot_kern_data;
	}

	meta = &(struct vm_snapshot_meta) {
		.ctx = ctx,

		.buffer.buf_start = buffer,
		.buffer.buf_size = buf_size,

		.op = VM_SNAPSHOT_SAVE,
	};

	xo_open_list_h(xop, JSON_STRUCT_ARR_KEY);
	for (i = 0; i < nitems(snapshot_kern_structs); i++) {
		meta->dev_name = snapshot_kern_structs[i].struct_name;
		meta->dev_req  = snapshot_kern_structs[i].req;

		memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
		meta->buffer.buf = meta->buffer.buf_start;
		meta->buffer.buf_rem = meta->buffer.buf_size;

		ret = vm_snapshot_kern_struct(data_fd, xop, JSON_STRUCT_ARR_KEY,
					      meta, &offset);
		if (ret != 0) {
			error = -1;
			goto err_vm_snapshot_kern_data;
		}
	}
	xo_close_list_h(xop, JSON_STRUCT_ARR_KEY);

err_vm_snapshot_kern_data:
	if (buffer != NULL)
		free(buffer);
	return (error);
}

static int
vm_snapshot_basic_metadata(struct vmctx *ctx, xo_handle_t *xop, size_t memsz)
{
	int error;
	int memflags;
	char vmname_buf[MAX_VMNAME];

	memset(vmname_buf, 0, MAX_VMNAME);
	error = vm_get_name(ctx, vmname_buf, MAX_VMNAME - 1);
	if (error != 0) {
		perror("Failed to get VM name");
		goto err;
	}

	memflags = vm_get_memflags(ctx);

	xo_open_container_h(xop, JSON_BASIC_METADATA_KEY);
	xo_emit_h(xop, "{:" JSON_NCPUS_KEY "/%ld}\n", guest_ncpus);
	xo_emit_h(xop, "{:" JSON_VMNAME_KEY "/%s}\n", vmname_buf);
	xo_emit_h(xop, "{:" JSON_MEMSIZE_KEY "/%lu}\n", memsz);
	xo_emit_h(xop, "{:" JSON_MEMFLAGS_KEY "/%d}\n", memflags);
	xo_close_container_h(xop, JSON_BASIC_METADATA_KEY);

err:
	return (error);
}

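/*
 * Append a device's serialized state to the data file and describe it
 * (name, size, file offset) in the JSON metadata.
 */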
static int
vm_snapshot_dev_write_data(int data_fd, xo_handle_t *xop, const char *array_key,
			   struct vm_snapshot_meta *meta, off_t *offset)
{
	int ret;
	size_t data_size;

	data_size = vm_get_snapshot_size(meta);

	ret = write(data_fd, meta->buffer.buf_start, data_size);
	if (ret != data_size) {
		perror("Failed to write all snapshotted data.");
		return (-1);
	}

	/* Write metadata. */
	xo_open_instance_h(xop, array_key);
	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n", meta->dev_name);
	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
	xo_close_instance_h(xop, array_key);

	*offset += data_size;

	return (0);
}

static int
vm_snapshot_user_dev(const struct vm_snapshot_dev_info *info,
		     int data_fd, xo_handle_t *xop,
		     struct vm_snapshot_meta *meta, off_t *offset)
{
	int ret;

	ret = (*info->snapshot_cb)(meta);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot %s; ret=%d\r\n",
			meta->dev_name, ret);
		return (ret);
	}

	ret = vm_snapshot_dev_write_data(data_fd, xop, JSON_DEV_ARR_KEY, meta,
					 offset);
	if (ret != 0)
		return (ret);

	return (0);
}

static int
vm_snapshot_user_devs(struct vmctx *ctx, int data_fd, xo_handle_t *xop)
{
	int ret, i;
	off_t offset;
	void *buffer;
	size_t buf_size;
	struct vm_snapshot_meta *meta;

	buf_size = SNAPSHOT_BUFFER_SIZE;

	offset = lseek(data_fd, 0, SEEK_CUR);
	if (offset < 0) {
		perror("Failed to get data file current offset.");
		return (-1);
	}

	buffer = malloc(buf_size);
	if (buffer == NULL) {
		perror("Failed to allocate memory for snapshot buffer");
		ret = ENOSPC;
		goto snapshot_err;
	}

	meta = &(struct vm_snapshot_meta) {
		.ctx = ctx,

		.buffer.buf_start = buffer,
		.buffer.buf_size = buf_size,

		.op = VM_SNAPSHOT_SAVE,
	};

	xo_open_list_h(xop, JSON_DEV_ARR_KEY);

	/* Restore other devices that support this feature */
	for (i = 0; i < nitems(snapshot_devs); i++) {
		meta->dev_name = snapshot_devs[i].dev_name;

		memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
		meta->buffer.buf = meta->buffer.buf_start;
		meta->buffer.buf_rem = meta->buffer.buf_size;

		ret = vm_snapshot_user_dev(&snapshot_devs[i], data_fd, xop,
					   meta, &offset);
		if (ret != 0)
			goto snapshot_err;
	}

	xo_close_list_h(xop, JSON_DEV_ARR_KEY);

snapshot_err:
	if (buffer != NULL)
		free(buffer);
	return (ret);
}

void
checkpoint_cpu_add(int vcpu)
{

	pthread_mutex_lock(&vcpu_lock);
	CPU_SET(vcpu, &vcpus_active);

	if (checkpoint_active) {
		CPU_SET(vcpu, &vcpus_suspended);
		while (checkpoint_active)
			pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
		CPU_CLR(vcpu, &vcpus_suspended);
	}
	pthread_mutex_unlock(&vcpu_lock);
}

/*
 * When a vCPU is suspended for any reason, it calls
 * checkpoint_cpu_suspend().  This records that the vCPU is idle.
 * Before returning from suspension, checkpoint_cpu_resume() is
 * called.  In suspend we note that the vCPU is idle.  In resume we
 * pause the vCPU thread until the checkpoint is complete.  The reason
 * for the two-step process is that vCPUs might already be stopped in
 * the debug server when a checkpoint is requested.  This approach
 * allows us to account for and handle those vCPUs.
 */
void
checkpoint_cpu_suspend(int vcpu)
{

	pthread_mutex_lock(&vcpu_lock);
	CPU_SET(vcpu, &vcpus_suspended);
	if (checkpoint_active && CPU_CMP(&vcpus_active, &vcpus_suspended) == 0)
		pthread_cond_signal(&vcpus_idle);
	pthread_mutex_unlock(&vcpu_lock);
}

void
checkpoint_cpu_resume(int vcpu)
{

	pthread_mutex_lock(&vcpu_lock);
	while (checkpoint_active)
		pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
	CPU_CLR(vcpu, &vcpus_suspended);
	pthread_mutex_unlock(&vcpu_lock);
}

static void
vm_vcpu_pause(struct vmctx *ctx)
{

	pthread_mutex_lock(&vcpu_lock);
	checkpoint_active = true;
	vm_suspend_cpu(ctx, -1);
	while (CPU_CMP(&vcpus_active, &vcpus_suspended) != 0)
		pthread_cond_wait(&vcpus_idle, &vcpu_lock);
	pthread_mutex_unlock(&vcpu_lock);
}

static void
vm_vcpu_resume(struct vmctx *ctx)
{

	pthread_mutex_lock(&vcpu_lock);
	checkpoint_active = false;
	pthread_mutex_unlock(&vcpu_lock);
	vm_resume_cpu(ctx, -1);
	pthread_cond_broadcast(&vcpus_can_run);
}

static int
vm_checkpoint(struct vmctx *ctx, char *checkpoint_file, bool stop_vm)
{
	int fd_checkpoint = 0, kdata_fd = 0;
	int ret = 0;
	int error = 0;
	size_t memsz;
	xo_handle_t *xop = NULL;
	char *meta_filename = NULL;
	char *kdata_filename = NULL;
	FILE *meta_file = NULL;

	kdata_filename = strcat_extension(checkpoint_file, ".kern");
	if (kdata_filename == NULL) {
		fprintf(stderr, "Failed to construct kernel data filename.\n");
		return (-1);
	}

	kdata_fd = open(kdata_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
	if (kdata_fd < 0) {
		perror("Failed to open kernel data snapshot file.");
		error = -1;
		goto done;
	}

	fd_checkpoint = open(checkpoint_file, O_RDWR | O_CREAT | O_TRUNC, 0700);

	if (fd_checkpoint < 0) {
		perror("Failed to create checkpoint file");
		error = -1;
		goto done;
	}

	meta_filename = strcat_extension(checkpoint_file, ".meta");
	if (meta_filename == NULL) {
		fprintf(stderr, "Failed to construct vm metadata filename.\n");
		goto done;
	}

	meta_file = fopen(meta_filename, "w");
	if (meta_file == NULL) {
		perror("Failed to open vm metadata snapshot file.");
		goto done;
	}

	xop = xo_create_to_file(meta_file, XO_STYLE_JSON, XOF_PRETTY);
	if (xop == NULL) {
		perror("Failed to get libxo handle on metadata file.");
		goto done;
	}

	vm_vcpu_pause(ctx);

	ret = vm_pause_user_devs(ctx);
	if (ret != 0) {
		fprintf(stderr, "Could not pause devices\r\n");
		error = ret;
		goto done;
	}

	memsz = vm_snapshot_mem(ctx, fd_checkpoint, 0, true);
	if (memsz == 0) {
		perror("Could not write guest memory to file");
		error = -1;
		goto done;
	}

	ret = vm_snapshot_basic_metadata(ctx, xop, memsz);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot vm basic metadata.\n");
		error = -1;
		goto done;
	}


	ret = vm_snapshot_kern_structs(ctx, kdata_fd, xop);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot vm kernel data.\n");
		error = -1;
		goto done;
	}

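	/* Emulated device state is appended to the same file as the kernel data. */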
	ret = vm_snapshot_user_devs(ctx, kdata_fd, xop);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot device state.\n");
		error = -1;
		goto done;
	}

	xo_finish_h(xop);

	if (stop_vm) {
		vm_destroy(ctx);
		exit(0);
	}

done:
	ret = vm_resume_user_devs(ctx);
	if (ret != 0)
		fprintf(stderr, "Could not resume devices\r\n");
	vm_vcpu_resume(ctx);
	if (fd_checkpoint > 0)
		close(fd_checkpoint);
	if (meta_filename != NULL)
		free(meta_filename);
	if (kdata_filename != NULL)
		free(kdata_filename);
	if (xop != NULL)
		xo_destroy(xop);
	if (meta_file != NULL)
		fclose(meta_file);
	if (kdata_fd > 0)
		close(kdata_fd);
	return (error);
}

int
get_checkpoint_msg(int conn_fd, struct vmctx *ctx)
{
	unsigned char buf[MAX_MSG_SIZE];
	struct checkpoint_op *checkpoint_op;
	int len, recv_len, total_recv = 0;
	int err = 0;

	len = sizeof(struct checkpoint_op); /* expected length */
	while ((recv_len = recv(conn_fd, buf + total_recv, len - total_recv, 0)) > 0) {
		total_recv += recv_len;
	}
	if (recv_len < 0) {
		perror("Error while receiving data from bhyvectl");
		err = -1;
		goto done;
	}

	checkpoint_op = (struct checkpoint_op *)buf;
	switch (checkpoint_op->op) {
		case START_CHECKPOINT:
			err = vm_checkpoint(ctx, checkpoint_op->snapshot_filename, false);
			break;
		case START_SUSPEND:
			err = vm_checkpoint(ctx, checkpoint_op->snapshot_filename, true);
			break;
		default:
			fprintf(stderr, "Unrecognized checkpoint operation.\n");
			err = -1;
	}

done:
	close(conn_fd);
	return (err);
}

/*
 * Listen for commands from bhyvectl
 */
void *
checkpoint_thread(void *param)
{
	struct checkpoint_thread_info *thread_info;
	int conn_fd, ret;

	pthread_set_name_np(pthread_self(), "checkpoint thread");
	thread_info = (struct checkpoint_thread_info *)param;

	while ((conn_fd = accept(thread_info->socket_fd, NULL, NULL)) > -1) {
		ret = get_checkpoint_msg(conn_fd, thread_info->ctx);
		if (ret != 0) {
			fprintf(stderr, "Failed to read message on checkpoint "
					"socket. Retrying.\n");
		}
	}
	if (conn_fd < -1) {
		perror("Failed to accept connection");
	}

	return (NULL);
}

/*
 * Create directory tree to store runtime specific information:
 * i.e. UNIX sockets for IPC with bhyvectl.
 */
static int
make_checkpoint_dir(void)
{
	int err;

	err = mkdir(BHYVE_RUN_DIR, 0755);
	if (err < 0 && errno != EEXIST)
		return (err);

	err = mkdir(CHECKPOINT_RUN_DIR, 0755);
	if (err < 0 && errno != EEXIST)
		return (err);

	return 0;
}

/*
 * Create the listening socket for IPC with bhyvectl
 */
int
init_checkpoint_thread(struct vmctx *ctx)
{
	struct checkpoint_thread_info *checkpoint_info = NULL;
	struct sockaddr_un addr;
	int socket_fd;
	pthread_t checkpoint_pthread;
	char vmname_buf[MAX_VMNAME];
	int ret, err = 0;

	memset(&addr, 0, sizeof(addr));

	err = pthread_mutex_init(&vcpu_lock, NULL);
	if (err != 0)
		errc(1, err, "checkpoint mutex init");
	err = pthread_cond_init(&vcpus_idle, NULL);
	if (err == 0)
		err = pthread_cond_init(&vcpus_can_run, NULL);
	if (err != 0)
		errc(1, err, "checkpoint cv init");

	socket_fd = socket(PF_UNIX, SOCK_STREAM, 0);
	if (socket_fd < 0) {
		perror("Socket creation failed (IPC with bhyvectl)");
		err = -1;
		goto fail;
	}

	err = make_checkpoint_dir();
	if (err < 0) {
		perror("Failed to create checkpoint runtime directory");
		goto fail;
	}

	addr.sun_family = AF_UNIX;

	err = vm_get_name(ctx, vmname_buf, MAX_VMNAME - 1);
	if (err != 0) {
		perror("Failed to get VM name");
		goto fail;
	}

	snprintf(addr.sun_path, sizeof(addr.sun_path), "%s/%s",
		 CHECKPOINT_RUN_DIR, vmname_buf);
	addr.sun_len = SUN_LEN(&addr);
	unlink(addr.sun_path);

	if (bind(socket_fd, (struct sockaddr *)&addr, addr.sun_len) != 0) {
		perror("Failed to bind socket (IPC with bhyvectl)");
		err = -1;
		goto fail;
	}

	if (listen(socket_fd, 10) < 0) {
		perror("Failed to listen on socket (IPC with bhyvectl)");
		err = -1;
		goto fail;
	}

	checkpoint_info = calloc(1, sizeof(*checkpoint_info));
	checkpoint_info->ctx = ctx;
	checkpoint_info->socket_fd = socket_fd;

	ret = pthread_create(&checkpoint_pthread, NULL, checkpoint_thread,
		checkpoint_info);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	return (0);
fail:
	free(checkpoint_info);
	if (socket_fd > 0)
		close(socket_fd);
	unlink(addr.sun_path);

	return (err);
}

void
vm_snapshot_buf_err(const char *bufname, const enum vm_snapshot_op op)
{
	const char *__op;

	if (op == VM_SNAPSHOT_SAVE)
		__op = "save";
	else if (op == VM_SNAPSHOT_RESTORE)
		__op = "restore";
	else
		__op = "unknown";

	fprintf(stderr, "%s: snapshot-%s failed for %s\r\n",
		__func__, __op, bufname);
}

int
vm_snapshot_buf(volatile void *data, size_t data_size,
		struct vm_snapshot_meta *meta)
{
	struct vm_snapshot_buffer *buffer;
	int op;

	buffer = &meta->buffer;
	op = meta->op;

	if (buffer->buf_rem < data_size) {
		fprintf(stderr, "%s: buffer too small\r\n", __func__);
		return (E2BIG);
	}

	if (op == VM_SNAPSHOT_SAVE)
		memcpy(buffer->buf, (uint8_t *) data, data_size);
	else if (op == VM_SNAPSHOT_RESTORE)
		memcpy((uint8_t *) data, buffer->buf, data_size);
	else
		return (EINVAL);

	buffer->buf += data_size;
	buffer->buf_rem -= data_size;

	return (0);
}

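/*
 * Number of bytes consumed so far in the snapshot buffer, i.e. how much data
 * the current structure or device has serialized.
 */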
size_t
vm_get_snapshot_size(struct vm_snapshot_meta *meta)
{
	size_t length;
	struct vm_snapshot_buffer *buffer;

	buffer = &meta->buffer;

	if (buffer->buf_size < buffer->buf_rem) {
		fprintf(stderr, "%s: Invalid buffer: size = %zu, rem = %zu\r\n",
			__func__, buffer->buf_size, buffer->buf_rem);
		length = 0;
	} else {
		length = buffer->buf_size - buffer->buf_rem;
	}

	return (length);
}

int
vm_snapshot_guest2host_addr(void **addrp, size_t len, bool restore_null,
			    struct vm_snapshot_meta *meta)
{
	int ret;
	vm_paddr_t gaddr;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		gaddr = paddr_host2guest(meta->ctx, *addrp);
		if (gaddr == (vm_paddr_t) -1) {
			if (!restore_null ||
			    (restore_null && (*addrp != NULL))) {
				ret = EFAULT;
				goto done;
			}
		}

		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
		if (gaddr == (vm_paddr_t) -1) {
			if (!restore_null) {
				ret = EFAULT;
				goto done;
			}
		}

		*addrp = paddr_guest2host(meta->ctx, gaddr, len);
	} else {
		ret = EINVAL;
	}

done:
	return (ret);
}

int
vm_snapshot_buf_cmp(volatile void *data, size_t data_size,
		    struct vm_snapshot_meta *meta)
{
	struct vm_snapshot_buffer *buffer;
	int op;
	int ret;

	buffer = &meta->buffer;
	op = meta->op;

	if (buffer->buf_rem < data_size) {
		fprintf(stderr, "%s: buffer too small\r\n", __func__);
		ret = E2BIG;
		goto done;
	}

	if (op == VM_SNAPSHOT_SAVE) {
		ret = 0;
		memcpy(buffer->buf, (uint8_t *) data, data_size);
	} else if (op == VM_SNAPSHOT_RESTORE) {
		ret = memcmp((uint8_t *) data, buffer->buf, data_size);
	} else {
		ret = EINVAL;
		goto done;
	}

	buffer->buf += data_size;
	buffer->buf_rem -= data_size;

done:
	return (ret);
}