/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Flavius Anton
 * Copyright (c) 2016 Mihai Tiganus
 * Copyright (c) 2016-2019 Mihai Carabas
 * Copyright (c) 2017-2019 Darius Mihai
 * Copyright (c) 2017-2019 Elena Mihailescu
 * Copyright (c) 2018-2019 Sergiu Weisz
 * All rights reserved.
 * The bhyve-snapshot feature was developed under sponsorships
 * from Matthew Grooms.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/un.h>

#include <machine/atomic.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <signal.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <stdbool.h>
#include <sys/ioctl.h>

#include <machine/vmm.h>
#ifndef WITHOUT_CAPSICUM
#include <machine/vmm_dev.h>
#endif
#include <machine/vmm_snapshot.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "atkbdc.h"
#include "debug.h"
#include "inout.h"
#include "ipc.h"
#include "fwctl.h"
#include "ioapic.h"
#include "mem.h"
#include "mevent.h"
#include "migration.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_irq.h"
#include "pci_lpc.h"
#include "smbiostbl.h"
#include "snapshot.h"
#include "xmsr.h"
#include "spinup_ap.h"
#include "rtc.h"

#include <libxo/xo.h>
#include <ucl.h>

struct spinner_info {
	const size_t *crtval;
	const size_t maxval;
	const size_t total;
};

extern int guest_ncpus;

static struct winsize winsize;
static sig_t old_winch_handler;

#define KB		(1024UL)
#define MB		(1024UL * KB)
#define GB		(1024UL * MB)

#define SNAPSHOT_CHUNK		(4 * MB)
#define PROG_BUF_SZ		(8192)

#define SNAPSHOT_BUFFER_SIZE	(20 * MB)

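/*
 * A checkpoint consists of three files: the checkpoint file itself holds the
 * guest memory image, "<name>.kern" holds the raw kernel-struct and device
 * snapshot buffers, and "<name>.meta" holds the JSON metadata describing
 * them.  The keys below name the objects and fields written to the metadata
 * file.
 */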
#define JSON_KERNEL_ARR_KEY		"kern_structs"
#define JSON_DEV_ARR_KEY		"devices"
#define JSON_BASIC_METADATA_KEY		"basic metadata"
#define JSON_SNAPSHOT_REQ_KEY		"device"
#define JSON_SIZE_KEY			"size"
#define JSON_FILE_OFFSET_KEY		"file_offset"

#define JSON_NCPUS_KEY			"ncpus"
#define JSON_VMNAME_KEY			"vmname"
#define JSON_MEMSIZE_KEY		"memsize"
#define JSON_MEMFLAGS_KEY		"memflags"

#define min(a,b)			\
({					\
	__typeof__ (a) _a = (a);	\
	__typeof__ (b) _b = (b);	\
	_a < _b ? _a : _b;		\
})

static const struct vm_snapshot_kern_info snapshot_kern_structs[] = {
	{ "vhpet",	STRUCT_VHPET },
	{ "vm",		STRUCT_VM },
	{ "vioapic",	STRUCT_VIOAPIC },
	{ "vlapic",	STRUCT_VLAPIC },
	{ "vmcx",	STRUCT_VMCX },
	{ "vatpit",	STRUCT_VATPIT },
	{ "vatpic",	STRUCT_VATPIC },
	{ "vpmtmr",	STRUCT_VPMTMR },
	{ "vrtc",	STRUCT_VRTC },
};

static cpuset_t vcpus_active, vcpus_suspended;
static pthread_mutex_t vcpu_lock;
static pthread_cond_t vcpus_idle, vcpus_can_run;
static bool checkpoint_active;

/*
 * TODO: Harden this function and all of its callers since 'base_str' is a user
 * provided string.
 */
static char *
strcat_extension(const char *base_str, const char *ext)
{
	char *res;
	size_t base_len, ext_len;

	base_len = strnlen(base_str, NAME_MAX);
	ext_len = strnlen(ext, NAME_MAX);

	if (base_len + ext_len > NAME_MAX) {
		fprintf(stderr, "Filename exceeds maximum length.\n");
		return (NULL);
	}

	res = malloc(base_len + ext_len + 1);
	if (res == NULL) {
		perror("Failed to allocate memory.");
		return (NULL);
	}

	memcpy(res, base_str, base_len);
	memcpy(res + base_len, ext, ext_len);
	res[base_len + ext_len] = 0;

	return (res);
}

void
destroy_restore_state(struct restore_state *rstate)
{
	if (rstate == NULL) {
		fprintf(stderr, "Attempting to destroy NULL restore struct.\n");
		return;
	}

	if (rstate->kdata_map != MAP_FAILED)
		munmap(rstate->kdata_map, rstate->kdata_len);

	if (rstate->kdata_fd > 0)
		close(rstate->kdata_fd);
	if (rstate->vmmem_fd > 0)
		close(rstate->vmmem_fd);

	if (rstate->meta_root_obj != NULL)
		ucl_object_unref(rstate->meta_root_obj);
	if (rstate->meta_parser != NULL)
		ucl_parser_free(rstate->meta_parser);
}

static int
load_vmmem_file(const char *filename, struct restore_state *rstate)
{
	struct stat sb;
	int err;

	rstate->vmmem_fd = open(filename, O_RDONLY);
	if (rstate->vmmem_fd < 0) {
		perror("Failed to open restore file");
		return (-1);
	}

	err = fstat(rstate->vmmem_fd, &sb);
	if (err < 0) {
		perror("Failed to stat restore file");
		goto err_load_vmmem;
	}

	if (sb.st_size == 0) {
		fprintf(stderr, "Restore file is empty.\n");
		goto err_load_vmmem;
	}

	rstate->vmmem_len = sb.st_size;

	return (0);

err_load_vmmem:
	if (rstate->vmmem_fd > 0)
		close(rstate->vmmem_fd);
	return (-1);
}

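/*
 * Open the "<name>.kern" file and map it read-only; restored kernel structs
 * and device state are later located inside this mapping via the offsets
 * recorded in the metadata file.
 */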
static int
load_kdata_file(const char *filename, struct restore_state *rstate)
{
	struct stat sb;
	int err;

	rstate->kdata_fd = open(filename, O_RDONLY);
	if (rstate->kdata_fd < 0) {
		perror("Failed to open kernel data file");
		return (-1);
	}

	err = fstat(rstate->kdata_fd, &sb);
	if (err < 0) {
		perror("Failed to stat kernel data file");
		goto err_load_kdata;
	}

	if (sb.st_size == 0) {
		fprintf(stderr, "Kernel data file is empty.\n");
		goto err_load_kdata;
	}

	rstate->kdata_len = sb.st_size;
	rstate->kdata_map = mmap(NULL, rstate->kdata_len, PROT_READ,
	    MAP_SHARED, rstate->kdata_fd, 0);
	if (rstate->kdata_map == MAP_FAILED) {
		perror("Failed to map restore file");
		goto err_load_kdata;
	}

	return (0);

err_load_kdata:
	if (rstate->kdata_fd > 0)
		close(rstate->kdata_fd);
	return (-1);
}

static int
load_metadata_file(const char *filename, struct restore_state *rstate)
{
	ucl_object_t *obj;
	struct ucl_parser *parser;
	int err;

	parser = ucl_parser_new(UCL_PARSER_DEFAULT);
	if (parser == NULL) {
		fprintf(stderr, "Failed to initialize UCL parser.\n");
		err = -1;
		goto err_load_metadata;
	}

	err = ucl_parser_add_file(parser, filename);
	if (err == 0) {
		fprintf(stderr, "Failed to parse metadata file: '%s'\n",
		    filename);
		err = -1;
		goto err_load_metadata;
	}

	obj = ucl_parser_get_object(parser);
	if (obj == NULL) {
		fprintf(stderr, "Failed to parse object.\n");
		err = -1;
		goto err_load_metadata;
	}

	rstate->meta_parser = parser;
	rstate->meta_root_obj = (ucl_object_t *)obj;

	return (0);

err_load_metadata:
	if (parser != NULL)
		ucl_parser_free(parser);
	return (err);
}

int
load_restore_file(const char *filename, struct restore_state *rstate)
{
	int err = 0;
	char *kdata_filename = NULL, *meta_filename = NULL;

	assert(filename != NULL);
	assert(rstate != NULL);

	memset(rstate, 0, sizeof(*rstate));
	rstate->kdata_map = MAP_FAILED;

	err = load_vmmem_file(filename, rstate);
	if (err != 0) {
		fprintf(stderr, "Failed to load guest RAM file.\n");
		goto err_restore;
	}

	kdata_filename = strcat_extension(filename, ".kern");
	if (kdata_filename == NULL) {
		fprintf(stderr, "Failed to construct kernel data filename.\n");
		goto err_restore;
	}

	err = load_kdata_file(kdata_filename, rstate);
	if (err != 0) {
		fprintf(stderr, "Failed to load guest kernel data file.\n");
		goto err_restore;
	}

	meta_filename = strcat_extension(filename, ".meta");
	if (meta_filename == NULL) {
		fprintf(stderr, "Failed to construct kernel metadata filename.\n");
		goto err_restore;
	}

	err = load_metadata_file(meta_filename, rstate);
	if (err != 0) {
		fprintf(stderr, "Failed to load guest metadata file.\n");
		goto err_restore;
	}

	return (0);

err_restore:
	destroy_restore_state(rstate);
	if (kdata_filename != NULL)
		free(kdata_filename);
	if (meta_filename != NULL)
		free(meta_filename);
	return (-1);
}

#define JSON_GET_INT_OR_RETURN(key, obj, result_ptr, ret)		\
do {									\
	const ucl_object_t *obj__;					\
	obj__ = ucl_object_lookup(obj, key);				\
	if (obj__ == NULL) {						\
		fprintf(stderr, "Missing key: '%s'", key);		\
		return (ret);						\
	}								\
	if (!ucl_object_toint_safe(obj__, result_ptr)) {		\
		fprintf(stderr, "Cannot convert '%s' value to int.", key); \
		return (ret);						\
	}								\
} while(0)

#define JSON_GET_STRING_OR_RETURN(key, obj, result_ptr, ret)		\
do {									\
	const ucl_object_t *obj__;					\
	obj__ = ucl_object_lookup(obj, key);				\
	if (obj__ == NULL) {						\
		fprintf(stderr, "Missing key: '%s'", key);		\
		return (ret);						\
	}								\
	if (!ucl_object_tostring_safe(obj__, result_ptr)) {		\
		fprintf(stderr, "Cannot convert '%s' value to string.", key); \
		return (ret);						\
	}								\
} while(0)

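/*
 * Each entry of a metadata array ("kern_structs" or "devices") records the
 * device name, the size of its snapshot buffer and its offset inside the
 * mapped "<name>.kern" file.  lookup_check_dev() matches a single entry,
 * while lookup_dev() walks an entire array looking for 'dev_name'.
 */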
static void *
lookup_check_dev(const char *dev_name, struct restore_state *rstate,
    const ucl_object_t *obj, size_t *data_size)
{
	const char *snapshot_req;
	int64_t size, file_offset;

	snapshot_req = NULL;
	JSON_GET_STRING_OR_RETURN(JSON_SNAPSHOT_REQ_KEY, obj,
	    &snapshot_req, NULL);
	assert(snapshot_req != NULL);
	if (!strcmp(snapshot_req, dev_name)) {
		JSON_GET_INT_OR_RETURN(JSON_SIZE_KEY, obj,
		    &size, NULL);
		assert(size >= 0);

		JSON_GET_INT_OR_RETURN(JSON_FILE_OFFSET_KEY, obj,
		    &file_offset, NULL);
		assert(file_offset >= 0);
		assert((uint64_t)file_offset + size <= rstate->kdata_len);

		*data_size = (size_t)size;
		return ((uint8_t *)rstate->kdata_map + file_offset);
	}

	return (NULL);
}

static void *
lookup_dev(const char *dev_name, const char *key, struct restore_state *rstate,
    size_t *data_size)
{
	const ucl_object_t *devs = NULL, *obj = NULL;
	ucl_object_iter_t it = NULL;
	void *ret;

	devs = ucl_object_lookup(rstate->meta_root_obj, key);
	if (devs == NULL) {
		fprintf(stderr, "Failed to find '%s' object.\n", key);
		return (NULL);
	}

	if (ucl_object_type(devs) != UCL_ARRAY) {
		fprintf(stderr, "Object '%s' is not an array.\n", key);
		return (NULL);
	}

	while ((obj = ucl_object_iterate(devs, &it, true)) != NULL) {
		ret = lookup_check_dev(dev_name, rstate, obj, data_size);
		if (ret != NULL)
			return (ret);
	}

	return (NULL);
}

static const ucl_object_t *
lookup_basic_metadata_object(struct restore_state *rstate)
{
	const ucl_object_t *basic_meta_obj = NULL;

	basic_meta_obj = ucl_object_lookup(rstate->meta_root_obj,
	    JSON_BASIC_METADATA_KEY);
	if (basic_meta_obj == NULL) {
		fprintf(stderr, "Failed to find '%s' object.\n",
		    JSON_BASIC_METADATA_KEY);
		return (NULL);
	}

	if (ucl_object_type(basic_meta_obj) != UCL_OBJECT) {
		fprintf(stderr, "Object '%s' is not a JSON object.\n",
		    JSON_BASIC_METADATA_KEY);
		return (NULL);
	}

	return (basic_meta_obj);
}

const char *
lookup_vmname(struct restore_state *rstate)
{
	const char *vmname;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (NULL);

	JSON_GET_STRING_OR_RETURN(JSON_VMNAME_KEY, obj, &vmname, NULL);
	return (vmname);
}

int
lookup_memflags(struct restore_state *rstate)
{
	int64_t memflags;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (0);

	JSON_GET_INT_OR_RETURN(JSON_MEMFLAGS_KEY, obj, &memflags, 0);

	return ((int)memflags);
}

size_t
lookup_memsize(struct restore_state *rstate)
{
	int64_t memsize;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (0);

	JSON_GET_INT_OR_RETURN(JSON_MEMSIZE_KEY, obj, &memsize, 0);
	if (memsize < 0)
		memsize = 0;

	return ((size_t)memsize);
}

int
lookup_guest_ncpus(struct restore_state *rstate)
{
	int64_t ncpus;
	const ucl_object_t *obj;

	obj = lookup_basic_metadata_object(rstate);
	if (obj == NULL)
		return (0);

	JSON_GET_INT_OR_RETURN(JSON_NCPUS_KEY, obj, &ncpus, 0);
	return ((int)ncpus);
}

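/*
 * Progress reporting for guest-memory transfers: winch_handler() keeps the
 * cached window size up to date on SIGWINCH, and print_progress() renders a
 * bar of the form "[cur / max] |####/____|" sized to the terminal width.
 */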
static void
winch_handler(int signal __unused)
{
#ifdef TIOCGWINSZ
	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
#endif /* TIOCGWINSZ */
}

static int
print_progress(size_t crtval, const size_t maxval)
{
	size_t rc;
	double crtval_gb, maxval_gb;
	size_t i, win_width, prog_start, prog_done, prog_end;
	int mval_len;

	static char prog_buf[PROG_BUF_SZ];
	static const size_t len = sizeof(prog_buf);

	static size_t div;
	static const char *div_str;

	static char wip_bar[] = { '/', '-', '\\', '|' };
	static int wip_idx = 0;

	if (maxval == 0) {
		printf("[0B / 0B]\r\n");
		return (0);
	}

	if (crtval > maxval)
		crtval = maxval;

	if (maxval > 10 * GB) {
		div = GB;
		div_str = "GiB";
	} else if (maxval > 10 * MB) {
		div = MB;
		div_str = "MiB";
	} else {
		div = KB;
		div_str = "KiB";
	}

	crtval_gb = (double) crtval / div;
	maxval_gb = (double) maxval / div;

	rc = snprintf(prog_buf, len, "%.03lf", maxval_gb);
	if (rc == len) {
		fprintf(stderr, "Maxval too big\n");
		return (-1);
	}
	mval_len = rc;

	rc = snprintf(prog_buf, len, "\r[%*.03lf%s / %.03lf%s] |",
	    mval_len, crtval_gb, div_str, maxval_gb, div_str);

	if (rc == len) {
		fprintf(stderr, "Buffer too small to print progress\n");
		return (-1);
	}

	win_width = min(winsize.ws_col, len);
	prog_start = rc;

	if (prog_start < (win_width - 2)) {
		prog_end = win_width - prog_start - 2;
		prog_done = prog_end * (crtval_gb / maxval_gb);

		for (i = prog_start; i < prog_start + prog_done; i++)
			prog_buf[i] = '#';

		if (crtval != maxval) {
			prog_buf[i] = wip_bar[wip_idx];
			wip_idx = (wip_idx + 1) % sizeof(wip_bar);
			i++;
		} else {
			prog_buf[i++] = '#';
		}

		for (; i < win_width - 2; i++)
			prog_buf[i] = '_';

		prog_buf[win_width - 2] = '|';
	}

	prog_buf[win_width - 1] = '\0';
	write(STDOUT_FILENO, prog_buf, win_width);

	return (0);
}

static void *
snapshot_spinner_cb(void *arg)
{
	int rc;
	size_t crtval, maxval, total;
	struct spinner_info *si;
	struct timespec ts;

	si = arg;
	if (si == NULL)
		pthread_exit(NULL);

	ts.tv_sec = 0;
	ts.tv_nsec = 50 * 1000 * 1000; /* 50 ms sleep time */

	do {
		crtval = *si->crtval;
		maxval = si->maxval;
		total = si->total;

		rc = print_progress(crtval, total);
		if (rc < 0) {
			fprintf(stderr, "Failed to print progress\n");
			break;
		}

		nanosleep(&ts, NULL);
	} while (crtval < maxval);

	pthread_exit(NULL);
	return NULL;
}

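/*
 * Copy one guest memory segment to or from the snapshot file, starting at
 * file offset 'foff'.  When attached to a terminal the copy is done in
 * SNAPSHOT_CHUNK pieces so the spinner thread can report progress.
 */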
static int
vm_snapshot_mem_part(const int snapfd, const size_t foff, void *src,
    const size_t len, const size_t totalmem, const bool op_wr)
{
	int rc;
	size_t part_done, todo, rem;
	ssize_t done;
	bool show_progress;
	pthread_t spinner_th;
	struct spinner_info *si;

	if (lseek(snapfd, foff, SEEK_SET) < 0) {
		perror("Failed to change file offset");
		return (-1);
	}

	show_progress = false;
	if (isatty(STDIN_FILENO) && (winsize.ws_col != 0))
		show_progress = true;

	part_done = foff;
	rem = len;

	if (show_progress) {
		si = &(struct spinner_info) {
			.crtval = &part_done,
			.maxval = foff + len,
			.total = totalmem
		};

		rc = pthread_create(&spinner_th, 0, snapshot_spinner_cb, si);
		if (rc) {
			perror("Unable to create spinner thread");
			show_progress = false;
		}
	}

	while (rem > 0) {
		if (show_progress)
			todo = min(SNAPSHOT_CHUNK, rem);
		else
			todo = rem;

		if (op_wr)
			done = write(snapfd, src, todo);
		else
			done = read(snapfd, src, todo);
		if (done < 0) {
			perror(op_wr ? "Failed to write to file" :
			    "Failed to read from file");
			return (-1);
		}

		src = (uint8_t *)src + done;
		part_done += done;
		rem -= done;
	}

	if (show_progress) {
		rc = pthread_join(spinner_th, NULL);
		if (rc)
			perror("Unable to end spinner thread");
	}

	return (0);
}

static size_t
vm_snapshot_mem(struct vmctx *ctx, int snapfd, size_t memsz, const bool op_wr)
{
	int ret;
	size_t lowmem, highmem, totalmem;
	char *baseaddr;

	ret = vm_get_guestmem_from_ctx(ctx, &baseaddr, &lowmem, &highmem);
	if (ret) {
		fprintf(stderr, "%s: unable to retrieve guest memory size\r\n",
		    __func__);
		return (0);
	}
	totalmem = lowmem + highmem;

	if ((op_wr == false) && (totalmem != memsz)) {
		fprintf(stderr, "%s: mem size mismatch: %ld vs %ld\r\n",
		    __func__, totalmem, memsz);
		return (0);
	}

	winsize.ws_col = 80;
#ifdef TIOCGWINSZ
	ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize);
#endif /* TIOCGWINSZ */
	old_winch_handler = signal(SIGWINCH, winch_handler);

	ret = vm_snapshot_mem_part(snapfd, 0, baseaddr, lowmem,
	    totalmem, op_wr);
	if (ret) {
		fprintf(stderr, "%s: Could not %s lowmem\r\n",
		    __func__, op_wr ? "write" : "read");
		totalmem = 0;
		goto done;
	}

	if (highmem == 0)
		goto done;

	ret = vm_snapshot_mem_part(snapfd, lowmem, baseaddr + 4*GB,
	    highmem, totalmem, op_wr);
	if (ret) {
		fprintf(stderr, "%s: Could not %s highmem\r\n",
		    __func__, op_wr ? "write" : "read");
		totalmem = 0;
		goto done;
	}

done:
	printf("\r\n");
	signal(SIGWINCH, old_winch_handler);

	return (totalmem);
}

int
restore_vm_mem(struct vmctx *ctx, struct restore_state *rstate)
{
	size_t restored;

	restored = vm_snapshot_mem(ctx, rstate->vmmem_fd, rstate->vmmem_len,
	    false);

	if (restored != rstate->vmmem_len)
		return (-1);

	return (0);
}

int
vm_restore_kern_structs(struct vmctx *ctx, struct restore_state *rstate)
{
	for (unsigned i = 0; i < nitems(snapshot_kern_structs); i++) {
		const struct vm_snapshot_kern_info *info;
		struct vm_snapshot_meta *meta;
		void *data;
		size_t size;

		info = &snapshot_kern_structs[i];
		data = lookup_dev(info->struct_name, JSON_KERNEL_ARR_KEY, rstate, &size);
		if (data == NULL)
			errx(EX_DATAERR, "Cannot find kern struct %s",
			    info->struct_name);

		if (size == 0)
			errx(EX_DATAERR, "data with zero size for %s",
			    info->struct_name);

		meta = &(struct vm_snapshot_meta) {
			.dev_name = info->struct_name,
			.dev_req = info->req,

			.buffer.buf_start = data,
			.buffer.buf_size = size,

			.buffer.buf = data,
			.buffer.buf_rem = size,

			.op = VM_SNAPSHOT_RESTORE,
		};

		if (vm_snapshot_req(ctx, meta))
			err(EX_DATAERR, "Failed to restore %s",
			    info->struct_name);
	}
	return (0);
}

static int
vm_restore_device(struct restore_state *rstate, vm_snapshot_dev_cb func,
    const char *name, void *data)
{
	void *dev_ptr;
	size_t dev_size;
	int ret;
	struct vm_snapshot_meta *meta;

	dev_ptr = lookup_dev(name, JSON_DEV_ARR_KEY, rstate, &dev_size);

	if (dev_ptr == NULL) {
		EPRINTLN("Failed to lookup dev: %s", name);
		return (EINVAL);
	}

	if (dev_size == 0) {
		EPRINTLN("Restore device size is 0: %s", name);
		return (EINVAL);
	}

	meta = &(struct vm_snapshot_meta) {
		.dev_name = name,
		.dev_data = data,

		.buffer.buf_start = dev_ptr,
		.buffer.buf_size = dev_size,

		.buffer.buf = dev_ptr,
		.buffer.buf_rem = dev_size,

		.op = VM_SNAPSHOT_RESTORE,
	};

	ret = func(meta);
	if (ret != 0) {
		EPRINTLN("Failed to restore dev: %s %d", name, ret);
		return (ret);
	}

	return (0);
}

int
vm_restore_devices(struct restore_state *rstate)
{
	int ret;
	struct pci_devinst *pdi = NULL;

	while ((pdi = pci_next(pdi)) != NULL) {
		ret = vm_restore_device(rstate, pci_snapshot, pdi->pi_name, pdi);
		if (ret)
			return (ret);
	}

	return (vm_restore_device(rstate, atkbdc_snapshot, "atkbdc", NULL));
}

int
vm_pause_devices(void)
{
	int ret;
	struct pci_devinst *pdi = NULL;

	while ((pdi = pci_next(pdi)) != NULL) {
		ret = pci_pause(pdi);
		if (ret) {
			EPRINTLN("Cannot pause dev %s: %d", pdi->pi_name, ret);
			return (ret);
		}
	}

	return (0);
}

int
vm_resume_devices(void)
{
	int ret;
	struct pci_devinst *pdi = NULL;

	while ((pdi = pci_next(pdi)) != NULL) {
		ret = pci_resume(pdi);
		if (ret) {
			EPRINTLN("Cannot resume '%s': %d", pdi->pi_name, ret);
			return (ret);
		}
	}

	return (0);
}

static int
vm_save_kern_struct(struct vmctx *ctx, int data_fd, xo_handle_t *xop,
    const char *array_key, struct vm_snapshot_meta *meta, off_t *offset)
{
	int ret;
	size_t data_size;
	ssize_t write_cnt;

	ret = vm_snapshot_req(ctx, meta);
	if (ret != 0) {
		fprintf(stderr, "%s: Failed to snapshot struct %s\r\n",
		    __func__, meta->dev_name);
		ret = -1;
		goto done;
	}

	data_size = vm_get_snapshot_size(meta);

	/* XXX-MJ no handling for short writes. */
	write_cnt = write(data_fd, meta->buffer.buf_start, data_size);
	if (write_cnt < 0 || (size_t)write_cnt != data_size) {
		perror("Failed to write all snapshotted data.");
		ret = -1;
		goto done;
	}

	/* Write metadata. */
	xo_open_instance_h(xop, array_key);
	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n",
	    meta->dev_name);
	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
	xo_close_instance_h(xop, array_key);

	*offset += data_size;

done:
	return (ret);
}

static int
vm_save_kern_structs(struct vmctx *ctx, int data_fd, xo_handle_t *xop)
{
	int ret, error;
	size_t buf_size, i, offset;
	char *buffer;
	struct vm_snapshot_meta *meta;

	error = 0;
	offset = 0;
	buf_size = SNAPSHOT_BUFFER_SIZE;

	buffer = malloc(SNAPSHOT_BUFFER_SIZE * sizeof(char));
	if (buffer == NULL) {
		error = ENOMEM;
		perror("Failed to allocate memory for snapshot buffer");
		goto err_vm_snapshot_kern_data;
	}

	meta = &(struct vm_snapshot_meta) {
		.buffer.buf_start = buffer,
		.buffer.buf_size = buf_size,

		.op = VM_SNAPSHOT_SAVE,
	};

	xo_open_list_h(xop, JSON_KERNEL_ARR_KEY);
	for (i = 0; i < nitems(snapshot_kern_structs); i++) {
		meta->dev_name = snapshot_kern_structs[i].struct_name;
		meta->dev_req = snapshot_kern_structs[i].req;

		memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
		meta->buffer.buf = meta->buffer.buf_start;
		meta->buffer.buf_rem = meta->buffer.buf_size;

		ret = vm_save_kern_struct(ctx, data_fd, xop,
		    JSON_KERNEL_ARR_KEY, meta, &offset);
		if (ret != 0) {
			error = -1;
			goto err_vm_snapshot_kern_data;
		}
	}
	xo_close_list_h(xop, JSON_KERNEL_ARR_KEY);

err_vm_snapshot_kern_data:
	if (buffer != NULL)
		free(buffer);
	return (error);
}

static int
vm_snapshot_basic_metadata(struct vmctx *ctx, xo_handle_t *xop, size_t memsz)
{

	xo_open_container_h(xop, JSON_BASIC_METADATA_KEY);
	xo_emit_h(xop, "{:" JSON_NCPUS_KEY "/%ld}\n", guest_ncpus);
	xo_emit_h(xop, "{:" JSON_VMNAME_KEY "/%s}\n", vm_get_name(ctx));
	xo_emit_h(xop, "{:" JSON_MEMSIZE_KEY "/%lu}\n", memsz);
	xo_emit_h(xop, "{:" JSON_MEMFLAGS_KEY "/%d}\n", vm_get_memflags(ctx));
	xo_close_container_h(xop, JSON_BASIC_METADATA_KEY);

	return (0);
}

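/*
 * Append a device's snapshot buffer to the kernel data file and emit the
 * matching metadata record ({device, size, file_offset}) under 'array_key'.
 */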
static int
vm_snapshot_dev_write_data(int data_fd, xo_handle_t *xop, const char *array_key,
    struct vm_snapshot_meta *meta, off_t *offset)
{
	ssize_t ret;
	size_t data_size;

	data_size = vm_get_snapshot_size(meta);

	/* XXX-MJ no handling for short writes. */
	ret = write(data_fd, meta->buffer.buf_start, data_size);
	if (ret < 0 || (size_t)ret != data_size) {
		perror("Failed to write all snapshotted data.");
		return (-1);
	}

	/* Write metadata. */
	xo_open_instance_h(xop, array_key);
	xo_emit_h(xop, "{:" JSON_SNAPSHOT_REQ_KEY "/%s}\n", meta->dev_name);
	xo_emit_h(xop, "{:" JSON_SIZE_KEY "/%lu}\n", data_size);
	xo_emit_h(xop, "{:" JSON_FILE_OFFSET_KEY "/%lu}\n", *offset);
	xo_close_instance_h(xop, array_key);

	*offset += data_size;

	return (0);
}

static int
vm_snapshot_device(vm_snapshot_dev_cb func, const char *dev_name,
    void *devdata, int data_fd, xo_handle_t *xop,
    struct vm_snapshot_meta *meta, off_t *offset)
{
	int ret;

	memset(meta->buffer.buf_start, 0, meta->buffer.buf_size);
	meta->buffer.buf = meta->buffer.buf_start;
	meta->buffer.buf_rem = meta->buffer.buf_size;
	meta->dev_name = dev_name;
	meta->dev_data = devdata;

	ret = func(meta);
	if (ret != 0) {
		EPRINTLN("Failed to snapshot %s; ret=%d", dev_name, ret);
		return (ret);
	}

	ret = vm_snapshot_dev_write_data(data_fd, xop, JSON_DEV_ARR_KEY, meta,
	    offset);
	if (ret != 0)
		return (ret);

	return (0);
}

static int
vm_snapshot_devices(int data_fd, xo_handle_t *xop)
{
	int ret;
	off_t offset;
	void *buffer;
	size_t buf_size;
	struct vm_snapshot_meta *meta;
	struct pci_devinst *pdi;

	buf_size = SNAPSHOT_BUFFER_SIZE;

	offset = lseek(data_fd, 0, SEEK_CUR);
	if (offset < 0) {
		perror("Failed to get data file current offset.");
		return (-1);
	}

	buffer = malloc(buf_size);
	if (buffer == NULL) {
		perror("Failed to allocate memory for snapshot buffer");
		ret = ENOSPC;
		goto snapshot_err;
	}

	meta = &(struct vm_snapshot_meta) {
		.buffer.buf_start = buffer,
		.buffer.buf_size = buf_size,

		.op = VM_SNAPSHOT_SAVE,
	};

	xo_open_list_h(xop, JSON_DEV_ARR_KEY);

	/* Save PCI devices */
	pdi = NULL;
	while ((pdi = pci_next(pdi)) != NULL) {
		ret = vm_snapshot_device(pci_snapshot, pdi->pi_name, pdi,
		    data_fd, xop, meta, &offset);
		if (ret != 0)
			goto snapshot_err;
	}

	ret = vm_snapshot_device(atkbdc_snapshot, "atkbdc", NULL,
	    data_fd, xop, meta, &offset);

	xo_close_list_h(xop, JSON_DEV_ARR_KEY);

snapshot_err:
	if (buffer != NULL)
		free(buffer);
	return (ret);
}

void
checkpoint_cpu_add(int vcpu)
{

	pthread_mutex_lock(&vcpu_lock);
	CPU_SET(vcpu, &vcpus_active);

	if (checkpoint_active) {
		CPU_SET(vcpu, &vcpus_suspended);
		while (checkpoint_active)
			pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
		CPU_CLR(vcpu, &vcpus_suspended);
	}
	pthread_mutex_unlock(&vcpu_lock);
}

/*
 * When a vCPU is suspended for any reason, it calls
 * checkpoint_cpu_suspend().  This records that the vCPU is idle.
 * Before returning from suspension, checkpoint_cpu_resume() is
 * called.  In resume we pause the vCPU thread until the checkpoint
 * is complete.  The reason for the two-step process is that vCPUs
 * might already be stopped in the debug server when a checkpoint is
 * requested.  This approach allows us to account for and handle
 * those vCPUs.
 */
void
checkpoint_cpu_suspend(int vcpu)
{

	pthread_mutex_lock(&vcpu_lock);
	CPU_SET(vcpu, &vcpus_suspended);
	if (checkpoint_active && CPU_CMP(&vcpus_active, &vcpus_suspended) == 0)
		pthread_cond_signal(&vcpus_idle);
	pthread_mutex_unlock(&vcpu_lock);
}

void
checkpoint_cpu_resume(int vcpu)
{

	pthread_mutex_lock(&vcpu_lock);
	while (checkpoint_active)
		pthread_cond_wait(&vcpus_can_run, &vcpu_lock);
	CPU_CLR(vcpu, &vcpus_suspended);
	pthread_mutex_unlock(&vcpu_lock);
}

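/*
 * Bring the VM to a quiescent state for checkpointing: mark the checkpoint
 * as active, suspend all vCPUs and wait until every active vCPU has reported
 * itself suspended.  vm_vcpu_resume() undoes this and releases any vCPU
 * threads blocked in checkpoint_cpu_add()/checkpoint_cpu_resume().
 */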
static void
vm_vcpu_pause(struct vmctx *ctx)
{

	pthread_mutex_lock(&vcpu_lock);
	checkpoint_active = true;
	vm_suspend_all_cpus(ctx);
	while (CPU_CMP(&vcpus_active, &vcpus_suspended) != 0)
		pthread_cond_wait(&vcpus_idle, &vcpu_lock);
	pthread_mutex_unlock(&vcpu_lock);
}

static void
vm_vcpu_resume(struct vmctx *ctx)
{

	pthread_mutex_lock(&vcpu_lock);
	checkpoint_active = false;
	pthread_mutex_unlock(&vcpu_lock);
	vm_resume_all_cpus(ctx);
	pthread_cond_broadcast(&vcpus_can_run);
}

static int
vm_checkpoint(struct vmctx *ctx, int fddir, const char *checkpoint_file,
    bool stop_vm)
{
	int fd_checkpoint = 0, kdata_fd = 0, fd_meta;
	int ret = 0;
	int error = 0;
	size_t memsz;
	xo_handle_t *xop = NULL;
	char *meta_filename = NULL;
	char *kdata_filename = NULL;
	FILE *meta_file = NULL;

	kdata_filename = strcat_extension(checkpoint_file, ".kern");
	if (kdata_filename == NULL) {
		fprintf(stderr, "Failed to construct kernel data filename.\n");
		return (-1);
	}

	kdata_fd = openat(fddir, kdata_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
	if (kdata_fd < 0) {
		perror("Failed to open kernel data snapshot file.");
		error = -1;
		goto done;
	}

	fd_checkpoint = openat(fddir, checkpoint_file, O_RDWR | O_CREAT | O_TRUNC, 0700);

	if (fd_checkpoint < 0) {
		perror("Failed to create checkpoint file");
		error = -1;
		goto done;
	}

	meta_filename = strcat_extension(checkpoint_file, ".meta");
	if (meta_filename == NULL) {
		fprintf(stderr, "Failed to construct vm metadata filename.\n");
		error = -1;
		goto done;
	}

	fd_meta = openat(fddir, meta_filename, O_WRONLY | O_CREAT | O_TRUNC, 0700);
	if (fd_meta != -1)
		meta_file = fdopen(fd_meta, "w");
	if (meta_file == NULL) {
		perror("Failed to open vm metadata snapshot file.");
		close(fd_meta);
		error = -1;
		goto done;
	}

	xop = xo_create_to_file(meta_file, XO_STYLE_JSON, XOF_PRETTY);
	if (xop == NULL) {
		perror("Failed to get libxo handle on metadata file.");
		error = -1;
		goto done;
	}

	vm_vcpu_pause(ctx);

	ret = vm_pause_devices();
	if (ret != 0) {
		fprintf(stderr, "Could not pause devices\r\n");
		error = ret;
		goto done;
	}

	memsz = vm_snapshot_mem(ctx, fd_checkpoint, 0, true);
	if (memsz == 0) {
		perror("Could not write guest memory to file");
		error = -1;
		goto done;
	}

	ret = vm_snapshot_basic_metadata(ctx, xop, memsz);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot vm basic metadata.\n");
		error = -1;
		goto done;
	}

	ret = vm_save_kern_structs(ctx, kdata_fd, xop);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot vm kernel data.\n");
		error = -1;
		goto done;
	}

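	/*
	 * Device state is appended to the same kernel data file, after the
	 * kernel structs saved above.
	 */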
	ret = vm_snapshot_devices(kdata_fd, xop);
	if (ret != 0) {
		fprintf(stderr, "Failed to snapshot device state.\n");
		error = -1;
		goto done;
	}

	xo_finish_h(xop);

	if (stop_vm) {
		vm_destroy(ctx);
		exit(0);
	}

done:
	ret = vm_resume_devices();
	if (ret != 0)
		fprintf(stderr, "Could not resume devices\r\n");
	vm_vcpu_resume(ctx);
	if (fd_checkpoint > 0)
		close(fd_checkpoint);
	if (meta_filename != NULL)
		free(meta_filename);
	if (kdata_filename != NULL)
		free(kdata_filename);
	if (xop != NULL)
		xo_destroy(xop);
	if (meta_file != NULL)
		fclose(meta_file);
	if (kdata_fd > 0)
		close(kdata_fd);
	return (error);
}

static int
handle_message(struct vmctx *ctx, nvlist_t *nvl)
{
	const char *cmd;
	struct ipc_command **ipc_cmd;

	if (!nvlist_exists_string(nvl, "cmd"))
		return (EINVAL);

	cmd = nvlist_get_string(nvl, "cmd");
	IPC_COMMAND_FOREACH(ipc_cmd, ipc_cmd_set) {
		if (strcmp(cmd, (*ipc_cmd)->name) == 0)
			return ((*ipc_cmd)->handler(ctx, nvl));
	}

	return (EOPNOTSUPP);
}

/*
 * Listen for commands from bhyvectl
 */
void *
checkpoint_thread(void *param)
{
	int fd;
	struct checkpoint_thread_info *thread_info;
	nvlist_t *nvl;

	pthread_set_name_np(pthread_self(), "checkpoint thread");
	thread_info = (struct checkpoint_thread_info *)param;

	while ((fd = accept(thread_info->socket_fd, NULL, NULL)) != -1) {
		nvl = nvlist_recv(fd, 0);
		if (nvl != NULL)
			handle_message(thread_info->ctx, nvl);
		else
			EPRINTLN("nvlist_recv() failed: %s", strerror(errno));

		close(fd);
		nvlist_destroy(nvl);
	}

	return (NULL);
}

static int
vm_do_checkpoint(struct vmctx *ctx, const nvlist_t *nvl)
{
	int error;

	if (!nvlist_exists_string(nvl, "filename") ||
	    !nvlist_exists_bool(nvl, "suspend") ||
	    !nvlist_exists_descriptor(nvl, "fddir"))
		error = EINVAL;
	else
		error = vm_checkpoint(ctx,
		    nvlist_get_descriptor(nvl, "fddir"),
		    nvlist_get_string(nvl, "filename"),
		    nvlist_get_bool(nvl, "suspend"));

	return (error);
}
IPC_COMMAND(ipc_cmd_set, checkpoint, vm_do_checkpoint);

static int
vm_do_migrate(struct vmctx *ctx __unused, const nvlist_t *nvl)
{
	size_t len;
	struct migrate_req req;

	if (!nvlist_exists_string(nvl, "hostname") ||
	    !nvlist_exists_number(nvl, "port"))
		return (EINVAL);

	memset(&req, 0, sizeof(struct migrate_req));
	req.port = nvlist_get_number(nvl, "port");

	len = strlen(nvlist_get_string(nvl, "hostname"));
	if (len > MAXHOSTNAMELEN - 1) {
		EPRINTLN("Hostname length %lu bigger than maximum allowed %d",
		    len, MAXHOSTNAMELEN - 1);
		return (EINVAL);
	}

	strlcpy(req.host, nvlist_get_string(nvl, "hostname"), MAXHOSTNAMELEN);

	printf("%s: IP address used for migration: %s;\n"
	    "Port used for migration: %d\n",
	    __func__,
	    req.host,
	    req.port);

	// return (vm_send_migrate_req(ctx, req, nvlist_get_bool(nvl, "live")));
	EPRINTLN("Migration operation not implemented yet\n");
	return (EOPNOTSUPP);
}
IPC_COMMAND(ipc_cmd_set, migrate, vm_do_migrate);

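/*
 * Initialize the mutex and condition variables used to coordinate vCPU
 * threads with the checkpoint thread.
 */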
void
init_snapshot(void)
{
	int err;

	err = pthread_mutex_init(&vcpu_lock, NULL);
	if (err != 0)
		errc(1, err, "checkpoint mutex init");
	err = pthread_cond_init(&vcpus_idle, NULL);
	if (err != 0)
		errc(1, err, "checkpoint cv init (vcpus_idle)");
	err = pthread_cond_init(&vcpus_can_run, NULL);
	if (err != 0)
		errc(1, err, "checkpoint cv init (vcpus_can_run)");
}

/*
 * Create the listening socket for IPC with bhyvectl
 */
int
init_checkpoint_thread(struct vmctx *ctx)
{
	struct checkpoint_thread_info *checkpoint_info = NULL;
	struct sockaddr_un addr;
	int socket_fd;
	pthread_t checkpoint_pthread;
	int err;
#ifndef WITHOUT_CAPSICUM
	cap_rights_t rights;
#endif

	memset(&addr, 0, sizeof(addr));

	socket_fd = socket(PF_UNIX, SOCK_STREAM, 0);
	if (socket_fd < 0) {
		EPRINTLN("Socket creation failed: %s", strerror(errno));
		err = -1;
		goto fail;
	}

	addr.sun_family = AF_UNIX;

	snprintf(addr.sun_path, sizeof(addr.sun_path), "%s%s",
	    BHYVE_RUN_DIR, vm_get_name(ctx));
	addr.sun_len = SUN_LEN(&addr);
	unlink(addr.sun_path);

	if (bind(socket_fd, (struct sockaddr *)&addr, addr.sun_len) != 0) {
		EPRINTLN("Failed to bind socket \"%s\": %s\n",
		    addr.sun_path, strerror(errno));
		err = -1;
		goto fail;
	}

	if (listen(socket_fd, 10) < 0) {
		EPRINTLN("ipc socket listen: %s\n", strerror(errno));
		err = errno;
		goto fail;
	}

#ifndef WITHOUT_CAPSICUM
	cap_rights_init(&rights, CAP_ACCEPT, CAP_READ, CAP_RECV, CAP_WRITE,
	    CAP_SEND, CAP_GETSOCKOPT);

	if (caph_rights_limit(socket_fd, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
#endif
	checkpoint_info = calloc(1, sizeof(*checkpoint_info));
	checkpoint_info->ctx = ctx;
	checkpoint_info->socket_fd = socket_fd;

	err = pthread_create(&checkpoint_pthread, NULL, checkpoint_thread,
	    checkpoint_info);
	if (err != 0)
		goto fail;

	return (0);
fail:
	free(checkpoint_info);
	if (socket_fd > 0)
		close(socket_fd);
	unlink(addr.sun_path);

	return (err);
}

void
vm_snapshot_buf_err(const char *bufname, const enum vm_snapshot_op op)
{
	const char *__op;

	if (op == VM_SNAPSHOT_SAVE)
		__op = "save";
	else if (op == VM_SNAPSHOT_RESTORE)
		__op = "restore";
	else
		__op = "unknown";

	fprintf(stderr, "%s: snapshot-%s failed for %s\r\n",
	    __func__, __op, bufname);
}

int
vm_snapshot_buf(void *data, size_t data_size, struct vm_snapshot_meta *meta)
{
	struct vm_snapshot_buffer *buffer;
	int op;

	buffer = &meta->buffer;
	op = meta->op;

	if (buffer->buf_rem < data_size) {
		fprintf(stderr, "%s: buffer too small\r\n", __func__);
		return (E2BIG);
	}

	if (op == VM_SNAPSHOT_SAVE)
		memcpy(buffer->buf, data, data_size);
	else if (op == VM_SNAPSHOT_RESTORE)
		memcpy(data, buffer->buf, data_size);
	else
		return (EINVAL);

	buffer->buf += data_size;
	buffer->buf_rem -= data_size;

	return (0);
}

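/*
 * Number of bytes already consumed in the snapshot buffer, i.e. how much
 * data the save or restore operation has produced so far.
 */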
size_t
vm_get_snapshot_size(struct vm_snapshot_meta *meta)
{
	size_t length;
	struct vm_snapshot_buffer *buffer;

	buffer = &meta->buffer;

	if (buffer->buf_size < buffer->buf_rem) {
		fprintf(stderr, "%s: Invalid buffer: size = %zu, rem = %zu\r\n",
		    __func__, buffer->buf_size, buffer->buf_rem);
		length = 0;
	} else {
		length = buffer->buf_size - buffer->buf_rem;
	}

	return (length);
}

int
vm_snapshot_guest2host_addr(struct vmctx *ctx, void **addrp, size_t len,
    bool restore_null, struct vm_snapshot_meta *meta)
{
	int ret;
	vm_paddr_t gaddr;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		gaddr = paddr_host2guest(ctx, *addrp);
		if (gaddr == (vm_paddr_t) -1) {
			if (!restore_null ||
			    (restore_null && (*addrp != NULL))) {
				ret = EFAULT;
				goto done;
			}
		}

		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(gaddr, meta, ret, done);
		if (gaddr == (vm_paddr_t) -1) {
			if (!restore_null) {
				ret = EFAULT;
				goto done;
			}
		}

		*addrp = paddr_guest2host(ctx, gaddr, len);
	} else {
		ret = EINVAL;
	}

done:
	return (ret);
}

int
vm_snapshot_buf_cmp(void *data, size_t data_size, struct vm_snapshot_meta *meta)
{
	struct vm_snapshot_buffer *buffer;
	int op;
	int ret;

	buffer = &meta->buffer;
	op = meta->op;

	if (buffer->buf_rem < data_size) {
		fprintf(stderr, "%s: buffer too small\r\n", __func__);
		ret = E2BIG;
		goto done;
	}

	if (op == VM_SNAPSHOT_SAVE) {
		ret = 0;
		memcpy(buffer->buf, data, data_size);
	} else if (op == VM_SNAPSHOT_RESTORE) {
		ret = memcmp(data, buffer->buf, data_size);
	} else {
		ret = EINVAL;
		goto done;
	}

	buffer->buf += data_size;
	buffer->buf_rem -= data_size;

done:
	return (ret);
}