/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/endian.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <machine/atomic.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <netinet/in.h>
#include <assert.h>
#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <netdb.h>
#include <pthread.h>
#include <pthread_np.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "gdb.h"
#include "mem.h"
#include "mevent.h"

/*
 * GDB_SIGNAL_* numbers are part of the GDB remote protocol.  Most stops
 * use SIGTRAP.
 */
#define	GDB_SIGNAL_TRAP		5

#define	GDB_BP_SIZE		1
#define	GDB_BP_INSTR		(uint8_t []){0xcc}
#define	GDB_PC_REGNAME		VM_REG_GUEST_RIP

_Static_assert(sizeof(GDB_BP_INSTR) == GDB_BP_SIZE,
    "GDB_BP_INSTR has wrong size");

static void gdb_resume_vcpus(void);
static void check_command(int fd);

static struct mevent *read_event, *write_event;

static cpuset_t vcpus_active, vcpus_suspended, vcpus_waiting;
static pthread_mutex_t gdb_lock;
static pthread_cond_t idle_vcpus;
static bool first_stop, report_next_stop, swbreak_enabled;
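/*
 * For orientation, a sketch of the wire format handled below (GDB
 * Remote Serial Protocol): each packet is framed as
 * "$<payload>#<checksum>", where the checksum is the modulo-256 sum
 * of the payload bytes rendered as two hex digits, and the receiver
 * answers '+' (ACK) or '-' (retransmit request).  For example, the
 * reply "OK" travels as:
 *
 *	-> $OK#9a	('O' 0x4f + 'K' 0x4b = 0x9a)
 *	<- +
 *
 * A raw 0x03 byte outside any packet is the debugger's Ctrl-C
 * interrupt request.
 */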
/*
 * An I/O buffer contains 'capacity' bytes of room at 'data'.  For a
 * read buffer, 'start' is unused and 'len' contains the number of
 * valid bytes in the buffer.  For a write buffer, 'start' is set to
 * the index of the next byte in 'data' to send, and 'len' contains
 * the remaining number of valid bytes to send.
 */
struct io_buffer {
	uint8_t *data;
	size_t capacity;
	size_t start;
	size_t len;
};

struct breakpoint {
	uint64_t gpa;
	uint8_t shadow_inst[GDB_BP_SIZE];
	TAILQ_ENTRY(breakpoint) link;
};

/*
 * When a vCPU stops due to an event that should be reported to the
 * debugger, information about the event is stored in this structure.
 * The vCPU thread then sets 'stopped_vcpu' if it is not already set
 * and stops other vCPUs so the event can be reported.  The
 * report_stop() function reports the event for the 'stopped_vcpu'
 * vCPU.  When the debugger resumes execution via continue or step,
 * the event for 'stopped_vcpu' is cleared.  vCPUs will loop in their
 * event handlers until the associated event is reported or disabled.
 *
 * An idle vCPU will have all of the boolean fields set to false.
 *
 * When a vCPU is stepped, 'stepping' is set to true when the vCPU is
 * released to execute the stepped instruction.  When the vCPU reports
 * the stepping trap, 'stepped' is set.
 *
 * When a vCPU hits a breakpoint set by the debug server,
 * 'hit_swbreak' is set to true.
 */
struct vcpu_state {
	bool stepping;
	bool stepped;
	bool hit_swbreak;
};

static struct io_buffer cur_comm, cur_resp;
static uint8_t cur_csum;
static struct vmctx *ctx;
static int cur_fd = -1;
static TAILQ_HEAD(, breakpoint) breakpoints;
static struct vcpu_state *vcpu_state;
static struct vcpu **vcpus;
static int cur_vcpu, stopped_vcpu;
static bool gdb_active = false;

static const struct gdb_reg {
	enum vm_reg_name id;
	int size;
} gdb_regset[] = {
	{ .id = VM_REG_GUEST_RAX, .size = 8 },
	{ .id = VM_REG_GUEST_RBX, .size = 8 },
	{ .id = VM_REG_GUEST_RCX, .size = 8 },
	{ .id = VM_REG_GUEST_RDX, .size = 8 },
	{ .id = VM_REG_GUEST_RSI, .size = 8 },
	{ .id = VM_REG_GUEST_RDI, .size = 8 },
	{ .id = VM_REG_GUEST_RBP, .size = 8 },
	{ .id = VM_REG_GUEST_RSP, .size = 8 },
	{ .id = VM_REG_GUEST_R8, .size = 8 },
	{ .id = VM_REG_GUEST_R9, .size = 8 },
	{ .id = VM_REG_GUEST_R10, .size = 8 },
	{ .id = VM_REG_GUEST_R11, .size = 8 },
	{ .id = VM_REG_GUEST_R12, .size = 8 },
	{ .id = VM_REG_GUEST_R13, .size = 8 },
	{ .id = VM_REG_GUEST_R14, .size = 8 },
	{ .id = VM_REG_GUEST_R15, .size = 8 },
	{ .id = VM_REG_GUEST_RIP, .size = 8 },
	{ .id = VM_REG_GUEST_RFLAGS, .size = 4 },
	{ .id = VM_REG_GUEST_CS, .size = 4 },
	{ .id = VM_REG_GUEST_SS, .size = 4 },
	{ .id = VM_REG_GUEST_DS, .size = 4 },
	{ .id = VM_REG_GUEST_ES, .size = 4 },
	{ .id = VM_REG_GUEST_FS, .size = 4 },
	{ .id = VM_REG_GUEST_GS, .size = 4 },
};

#ifdef GDB_LOG
#include <stdarg.h>
#include <stdio.h>

static void __printflike(1, 2)
debug(const char *fmt, ...)
{
	static FILE *logfile;
	va_list ap;

	if (logfile == NULL) {
		logfile = fopen("/tmp/bhyve_gdb.log", "w");
		if (logfile == NULL)
			return;
#ifndef WITHOUT_CAPSICUM
		if (caph_limit_stream(fileno(logfile), CAPH_WRITE) == -1) {
			fclose(logfile);
			logfile = NULL;
			return;
		}
#endif
		setlinebuf(logfile);
	}
	va_start(ap, fmt);
	vfprintf(logfile, fmt, ap);
	va_end(ap);
}
#else
#define debug(...)
#endif

static void remove_all_sw_breakpoints(void);

static int
guest_paging_info(struct vcpu *vcpu, struct vm_guest_paging *paging)
{
	uint64_t regs[4];
	const int regset[4] = {
		VM_REG_GUEST_CR0,
		VM_REG_GUEST_CR3,
		VM_REG_GUEST_CR4,
		VM_REG_GUEST_EFER
	};

	if (vm_get_register_set(vcpu, nitems(regset), regset, regs) == -1)
		return (-1);

	/*
	 * For the debugger, always pretend to be the kernel (CPL 0),
	 * and if long-mode is enabled, always parse addresses as if
	 * in 64-bit mode.
	 */
	paging->cr3 = regs[1];
	paging->cpl = 0;
	if (regs[3] & EFER_LMA)
		paging->cpu_mode = CPU_MODE_64BIT;
	else if (regs[0] & CR0_PE)
		paging->cpu_mode = CPU_MODE_PROTECTED;
	else
		paging->cpu_mode = CPU_MODE_REAL;
	if (!(regs[0] & CR0_PG))
		paging->paging_mode = PAGING_MODE_FLAT;
	else if (!(regs[2] & CR4_PAE))
		paging->paging_mode = PAGING_MODE_32;
	else if (regs[3] & EFER_LME)
		paging->paging_mode = (regs[2] & CR4_LA57) ?
		    PAGING_MODE_64_LA57 : PAGING_MODE_64;
	else
		paging->paging_mode = PAGING_MODE_PAE;
	return (0);
}

/*
 * Map a guest virtual address to a physical address (for a given vcpu).
 * If a guest virtual address is valid, return 1.  If the address is
 * not valid, return 0.  If an error occurs obtaining the mapping,
 * return -1.
 */
static int
guest_vaddr2paddr(struct vcpu *vcpu, uint64_t vaddr, uint64_t *paddr)
{
	struct vm_guest_paging paging;
	int fault;

	if (guest_paging_info(vcpu, &paging) == -1)
		return (-1);

	/*
	 * Always use PROT_READ.  We really care if the VA is
	 * accessible, not if the current vCPU can write.
	 */
	if (vm_gla2gpa_nofault(vcpu, &paging, vaddr, PROT_READ, paddr,
	    &fault) == -1)
		return (-1);
	if (fault)
		return (0);
	return (1);
}

static uint64_t
guest_pc(struct vm_exit *vme)
{
	return (vme->rip);
}

static void
io_buffer_reset(struct io_buffer *io)
{

	io->start = 0;
	io->len = 0;
}

/* Available room for adding data. */
static size_t
io_buffer_avail(struct io_buffer *io)
{

	return (io->capacity - (io->start + io->len));
}

static uint8_t *
io_buffer_head(struct io_buffer *io)
{

	return (io->data + io->start);
}

static uint8_t *
io_buffer_tail(struct io_buffer *io)
{

	return (io->data + io->start + io->len);
}

static void
io_buffer_advance(struct io_buffer *io, size_t amount)
{

	assert(amount <= io->len);
	io->start += amount;
	io->len -= amount;
}

static void
io_buffer_consume(struct io_buffer *io, size_t amount)
{

	io_buffer_advance(io, amount);
	if (io->len == 0) {
		io->start = 0;
		return;
	}

	/*
	 * XXX: Consider making this move optional and compacting on a
	 * future read() before realloc().
	 */
	memmove(io->data, io_buffer_head(io), io->len);
	io->start = 0;
}
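/*
 * A worked example of the buffer arithmetic above (illustrative
 * values): with capacity = 8, start = 2, and len = 3, the valid bytes
 * are data[2..4], io_buffer_head() returns data + 2, io_buffer_tail()
 * returns data + 5, and io_buffer_avail() reports 8 - (2 + 3) = 3
 * bytes of room.  io_buffer_consume(io, 2) would then shift the one
 * remaining valid byte back to data[0].
 */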
static void
io_buffer_grow(struct io_buffer *io, size_t newsize)
{
	uint8_t *new_data;
	size_t avail, new_cap;

	avail = io_buffer_avail(io);
	if (newsize <= avail)
		return;

	new_cap = io->capacity + (newsize - avail);
	new_data = realloc(io->data, new_cap);
	if (new_data == NULL)
		err(1, "Failed to grow GDB I/O buffer");
	io->data = new_data;
	io->capacity = new_cap;
}

static bool
response_pending(void)
{

	if (cur_resp.start == 0 && cur_resp.len == 0)
		return (false);
	if (cur_resp.start + cur_resp.len == 1 && cur_resp.data[0] == '+')
		return (false);
	return (true);
}

static void
close_connection(void)
{

	/*
	 * XXX: This triggers a warning because mevent does the close
	 * before the EV_DELETE.
	 */
	pthread_mutex_lock(&gdb_lock);
	mevent_delete(write_event);
	mevent_delete_close(read_event);
	write_event = NULL;
	read_event = NULL;
	io_buffer_reset(&cur_comm);
	io_buffer_reset(&cur_resp);
	cur_fd = -1;

	remove_all_sw_breakpoints();

	/* Clear any pending events. */
	memset(vcpu_state, 0, guest_ncpus * sizeof(*vcpu_state));

	/* Resume any stopped vCPUs. */
	gdb_resume_vcpus();
	pthread_mutex_unlock(&gdb_lock);
}

static uint8_t
hex_digit(uint8_t nibble)
{

	if (nibble <= 9)
		return (nibble + '0');
	else
		return (nibble + 'a' - 10);
}

static uint8_t
parse_digit(uint8_t v)
{

	if (v >= '0' && v <= '9')
		return (v - '0');
	if (v >= 'a' && v <= 'f')
		return (v - 'a' + 10);
	if (v >= 'A' && v <= 'F')
		return (v - 'A' + 10);
	return (0xF);
}

/* Parses big-endian hexadecimal. */
static uintmax_t
parse_integer(const uint8_t *p, size_t len)
{
	uintmax_t v;

	v = 0;
	while (len > 0) {
		v <<= 4;
		v |= parse_digit(*p);
		p++;
		len--;
	}
	return (v);
}

static uint8_t
parse_byte(const uint8_t *p)
{

	return (parse_digit(p[0]) << 4 | parse_digit(p[1]));
}

static void
send_pending_data(int fd)
{
	ssize_t nwritten;

	if (cur_resp.len == 0) {
		mevent_disable(write_event);
		return;
	}
	nwritten = write(fd, io_buffer_head(&cur_resp), cur_resp.len);
	if (nwritten == -1) {
		warn("Write to GDB socket failed");
		close_connection();
	} else {
		io_buffer_advance(&cur_resp, nwritten);
		if (cur_resp.len == 0)
			mevent_disable(write_event);
		else
			mevent_enable(write_event);
	}
}

/* Append a single character to the output buffer. */
static void
send_char(uint8_t data)
{
	io_buffer_grow(&cur_resp, 1);
	*io_buffer_tail(&cur_resp) = data;
	cur_resp.len++;
}

/* Append an array of bytes to the output buffer. */
static void
send_data(const uint8_t *data, size_t len)
{

	io_buffer_grow(&cur_resp, len);
	memcpy(io_buffer_tail(&cur_resp), data, len);
	cur_resp.len += len;
}

static void
format_byte(uint8_t v, uint8_t *buf)
{

	buf[0] = hex_digit(v >> 4);
	buf[1] = hex_digit(v & 0xf);
}
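/*
 * Example behavior of the hex helpers above (illustrative):
 * format_byte(0x5e, buf) stores the two characters "5e", parse_byte()
 * on "5e" recovers 0x5e, and parse_integer() on the three characters
 * "1f0" yields 0x1f0.
 */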
/*
 * Append a single byte (formatted as two hex characters) to the
 * output buffer.
 */
static void
send_byte(uint8_t v)
{
	uint8_t buf[2];

	format_byte(v, buf);
	send_data(buf, sizeof(buf));
}

static void
start_packet(void)
{

	send_char('$');
	cur_csum = 0;
}

static void
finish_packet(void)
{

	send_char('#');
	send_byte(cur_csum);
	debug("-> %.*s\n", (int)cur_resp.len, io_buffer_head(&cur_resp));
}

/*
 * Append a single character (for the packet payload) and update the
 * checksum.
 */
static void
append_char(uint8_t v)
{

	send_char(v);
	cur_csum += v;
}

/*
 * Append an array of bytes (for the packet payload) and update the
 * checksum.
 */
static void
append_packet_data(const uint8_t *data, size_t len)
{

	send_data(data, len);
	while (len > 0) {
		cur_csum += *data;
		data++;
		len--;
	}
}

static void
append_string(const char *str)
{

	append_packet_data((const uint8_t *)str, strlen(str));
}

static void
append_byte(uint8_t v)
{
	uint8_t buf[2];

	format_byte(v, buf);
	append_packet_data(buf, sizeof(buf));
}

static void
append_unsigned_native(uintmax_t value, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		append_byte(value);
		value >>= 8;
	}
}

static void
append_unsigned_be(uintmax_t value, size_t len)
{
	uint8_t buf[len * 2];
	size_t i;

	for (i = 0; i < len; i++) {
		format_byte(value, buf + (len - i - 1) * 2);
		value >>= 8;
	}
	append_packet_data(buf, sizeof(buf));
}

static void
append_integer(unsigned int value)
{

	if (value == 0)
		append_char('0');
	else
		append_unsigned_be(value, (fls(value) + 7) / 8);
}

static void
append_asciihex(const char *str)
{

	while (*str != '\0') {
		append_byte(*str);
		str++;
	}
}

static void
send_empty_response(void)
{

	start_packet();
	finish_packet();
}

static void
send_error(int error)
{

	start_packet();
	append_char('E');
	append_byte(error);
	finish_packet();
}

static void
send_ok(void)
{

	start_packet();
	append_string("OK");
	finish_packet();
}

static int
parse_threadid(const uint8_t *data, size_t len)
{

	if (len == 1 && *data == '0')
		return (0);
	if (len == 2 && memcmp(data, "-1", 2) == 0)
		return (-1);
	if (len == 0)
		return (-2);
	return (parse_integer(data, len));
}
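/*
 * Example stop replies built by report_stop() below (checksums shown
 * where computed, vCPU numbers illustrative):
 *
 *	$S05#b8				stop not tied to a specific vCPU
 *	$T05thread:1;#d7		vCPU 0 stopped (GDB ids are 1-based)
 *	$T05thread:2;swbreak:;#...	vCPU 1 hit a software breakpoint
 */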
/*
 * Report the current stop event to the debugger.  If the stop is due
 * to an event triggered on a specific vCPU such as a breakpoint or
 * stepping trap, stopped_vcpu will be set to the vCPU triggering the
 * stop.  If 'set_cur_vcpu' is true, then cur_vcpu will be updated to
 * the reporting vCPU for vCPU events.
 */
static void
report_stop(bool set_cur_vcpu)
{
	struct vcpu_state *vs;

	start_packet();
	if (stopped_vcpu == -1) {
		append_char('S');
		append_byte(GDB_SIGNAL_TRAP);
	} else {
		vs = &vcpu_state[stopped_vcpu];
		if (set_cur_vcpu)
			cur_vcpu = stopped_vcpu;
		append_char('T');
		append_byte(GDB_SIGNAL_TRAP);
		append_string("thread:");
		append_integer(stopped_vcpu + 1);
		append_char(';');
		if (vs->hit_swbreak) {
			debug("$vCPU %d reporting swbreak\n", stopped_vcpu);
			if (swbreak_enabled)
				append_string("swbreak:;");
		} else if (vs->stepped)
			debug("$vCPU %d reporting step\n", stopped_vcpu);
		else
			debug("$vCPU %d reporting ???\n", stopped_vcpu);
	}
	finish_packet();
	report_next_stop = false;
}

/*
 * If this stop is due to a vCPU event, clear that event to mark it as
 * acknowledged.
 */
static void
discard_stop(void)
{
	struct vcpu_state *vs;

	if (stopped_vcpu != -1) {
		vs = &vcpu_state[stopped_vcpu];
		vs->hit_swbreak = false;
		vs->stepped = false;
		stopped_vcpu = -1;
	}
	report_next_stop = true;
}

static void
gdb_finish_suspend_vcpus(void)
{

	if (first_stop) {
		first_stop = false;
		stopped_vcpu = -1;
	} else if (report_next_stop) {
		assert(!response_pending());
		report_stop(true);
		send_pending_data(cur_fd);
	}
}

/*
 * vCPU threads invoke this function whenever the vCPU enters the
 * debug server to pause or report an event.  vCPU threads wait here
 * as long as the debug server keeps them suspended.
 */
static void
_gdb_cpu_suspend(struct vcpu *vcpu, bool report_stop)
{
	int vcpuid = vcpu_id(vcpu);

	debug("$vCPU %d suspending\n", vcpuid);
	CPU_SET(vcpuid, &vcpus_waiting);
	if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
		gdb_finish_suspend_vcpus();
	while (CPU_ISSET(vcpuid, &vcpus_suspended))
		pthread_cond_wait(&idle_vcpus, &gdb_lock);
	CPU_CLR(vcpuid, &vcpus_waiting);
	debug("$vCPU %d resuming\n", vcpuid);
}

/*
 * Requests vCPU single-stepping using a VMEXIT suitable for the host
 * platform.
 */
static int
_gdb_set_step(struct vcpu *vcpu, int val)
{
	int error;

	/*
	 * If the MTRAP cap fails, we are running on an AMD host.
	 * In that case, we request DB exits caused by RFLAGS.TF.
	 */
	error = vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, val);
	if (error != 0)
		error = vm_set_capability(vcpu, VM_CAP_RFLAGS_TF, val);
	if (error == 0)
		(void)vm_set_capability(vcpu, VM_CAP_MASK_HWINTR, val);

	return (error);
}

/*
 * Checks whether single-stepping is enabled for a given vCPU.
 */
static int
_gdb_check_step(struct vcpu *vcpu)
{
	int val;

	if (vm_get_capability(vcpu, VM_CAP_MTRAP_EXIT, &val) != 0) {
		if (vm_get_capability(vcpu, VM_CAP_RFLAGS_TF, &val) != 0)
			return (-1);
	}
	return (0);
}
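/*
 * The single-step flow implemented by the helpers above and
 * gdb_cpu_step() below, end to end: the debugger sends 's', the
 * server sets vs->stepping and releases the vCPU, the vCPU executes
 * one instruction and takes an MTRAP (Intel VT-x) or RFLAGS.TF #DB
 * (AMD SVM) exit, gdb_cpu_step() flips 'stepping' to 'stepped', all
 * vCPUs are suspended, and the stop is reported as a T05 packet.
 */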
/*
 * Invoked at the start of a vCPU thread's execution to inform the
 * debug server about the new thread.
 */
void
gdb_cpu_add(struct vcpu *vcpu)
{
	int vcpuid;

	if (!gdb_active)
		return;
	vcpuid = vcpu_id(vcpu);
	debug("$vCPU %d starting\n", vcpuid);
	pthread_mutex_lock(&gdb_lock);
	assert(vcpuid < guest_ncpus);
	assert(vcpus[vcpuid] == NULL);
	vcpus[vcpuid] = vcpu;
	CPU_SET(vcpuid, &vcpus_active);
	if (!TAILQ_EMPTY(&breakpoints)) {
		vm_set_capability(vcpu, VM_CAP_BPT_EXIT, 1);
		debug("$vCPU %d enabled breakpoint exits\n", vcpuid);
	}

	/*
	 * If a vcpu is added while vcpus are stopped, suspend the new
	 * vcpu so that it will pop back out with a debug exit before
	 * executing the first instruction.
	 */
	if (!CPU_EMPTY(&vcpus_suspended)) {
		CPU_SET(vcpuid, &vcpus_suspended);
		_gdb_cpu_suspend(vcpu, false);
	}
	pthread_mutex_unlock(&gdb_lock);
}

/*
 * Invoked by vCPU before resuming execution.  This enables stepping
 * if the vCPU is marked as stepping.
 */
static void
gdb_cpu_resume(struct vcpu *vcpu)
{
	struct vcpu_state *vs;
	int error;

	vs = &vcpu_state[vcpu_id(vcpu)];

	/*
	 * Any pending event should already be reported before
	 * resuming.
	 */
	assert(vs->hit_swbreak == false);
	assert(vs->stepped == false);
	if (vs->stepping) {
		error = _gdb_set_step(vcpu, 1);
		assert(error == 0);
	}
}

/*
 * Handler for VM_EXITCODE_DEBUG used to suspend a vCPU when the guest
 * has been suspended due to an event on a different vCPU or in response
 * to a guest-wide suspend such as Ctrl-C or the stop on attach.
 */
void
gdb_cpu_suspend(struct vcpu *vcpu)
{

	if (!gdb_active)
		return;
	pthread_mutex_lock(&gdb_lock);
	_gdb_cpu_suspend(vcpu, true);
	gdb_cpu_resume(vcpu);
	pthread_mutex_unlock(&gdb_lock);
}

static void
gdb_suspend_vcpus(void)
{

	assert(pthread_mutex_isowned_np(&gdb_lock));
	debug("suspending all CPUs\n");
	vcpus_suspended = vcpus_active;
	vm_suspend_all_cpus(ctx);
	if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
		gdb_finish_suspend_vcpus();
}

/*
 * Invoked each time a vmexit handler needs to step a vCPU.
 * Handles MTRAP and RFLAGS.TF vmexits.
 */
static void
gdb_cpu_step(struct vcpu *vcpu)
{
	struct vcpu_state *vs;
	int vcpuid = vcpu_id(vcpu);
	int error;

	debug("$vCPU %d stepped\n", vcpuid);
	pthread_mutex_lock(&gdb_lock);
	vs = &vcpu_state[vcpuid];
	if (vs->stepping) {
		vs->stepping = false;
		vs->stepped = true;
		error = _gdb_set_step(vcpu, 0);
		assert(error == 0);

		while (vs->stepped) {
			if (stopped_vcpu == -1) {
				debug("$vCPU %d reporting step\n", vcpuid);
				stopped_vcpu = vcpuid;
				gdb_suspend_vcpus();
			}
			_gdb_cpu_suspend(vcpu, true);
		}
		gdb_cpu_resume(vcpu);
	}
	pthread_mutex_unlock(&gdb_lock);
}

/*
 * A general handler for VM_EXITCODE_DB.
 * Handles RFLAGS.TF exits on AMD SVM.
 */
void
gdb_cpu_debug(struct vcpu *vcpu, struct vm_exit *vmexit)
{
	if (!gdb_active)
		return;

	/* RFLAGS.TF exit? */
	if (vmexit->u.dbg.trace_trap) {
		gdb_cpu_step(vcpu);
	}
}
/*
 * Handler for VM_EXITCODE_MTRAP reported when a vCPU single-steps via
 * the VT-x-specific MTRAP exit.
 */
void
gdb_cpu_mtrap(struct vcpu *vcpu)
{
	if (!gdb_active)
		return;
	gdb_cpu_step(vcpu);
}

static struct breakpoint *
find_breakpoint(uint64_t gpa)
{
	struct breakpoint *bp;

	TAILQ_FOREACH(bp, &breakpoints, link) {
		if (bp->gpa == gpa)
			return (bp);
	}
	return (NULL);
}

void
gdb_cpu_breakpoint(struct vcpu *vcpu, struct vm_exit *vmexit)
{
	struct breakpoint *bp;
	struct vcpu_state *vs;
	uint64_t gpa;
	int error, vcpuid;

	if (!gdb_active) {
		EPRINTLN("vm_loop: unexpected VMEXIT_DEBUG");
		exit(4);
	}
	vcpuid = vcpu_id(vcpu);
	pthread_mutex_lock(&gdb_lock);
	error = guest_vaddr2paddr(vcpu, guest_pc(vmexit), &gpa);
	assert(error == 1);
	bp = find_breakpoint(gpa);
	if (bp != NULL) {
		vs = &vcpu_state[vcpuid];
		assert(vs->stepping == false);
		assert(vs->stepped == false);
		assert(vs->hit_swbreak == false);
		vs->hit_swbreak = true;
		vm_set_register(vcpu, GDB_PC_REGNAME, guest_pc(vmexit));
		for (;;) {
			if (stopped_vcpu == -1) {
				debug("$vCPU %d reporting breakpoint at rip %#lx\n",
				    vcpuid, guest_pc(vmexit));
				stopped_vcpu = vcpuid;
				gdb_suspend_vcpus();
			}
			_gdb_cpu_suspend(vcpu, true);
			if (!vs->hit_swbreak) {
				/* Breakpoint reported. */
				break;
			}
			bp = find_breakpoint(gpa);
			if (bp == NULL) {
				/* Breakpoint was removed. */
				vs->hit_swbreak = false;
				break;
			}
		}
		gdb_cpu_resume(vcpu);
	} else {
		debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpuid,
		    guest_pc(vmexit));
		error = vm_set_register(vcpu, VM_REG_GUEST_ENTRY_INST_LENGTH,
		    vmexit->u.bpt.inst_length);
		assert(error == 0);
		error = vm_inject_exception(vcpu, IDT_BP, 0, 0, 0);
		assert(error == 0);
	}
	pthread_mutex_unlock(&gdb_lock);
}

static bool
gdb_step_vcpu(struct vcpu *vcpu)
{
	int error, vcpuid;

	vcpuid = vcpu_id(vcpu);
	debug("$vCPU %d step\n", vcpuid);
	error = _gdb_check_step(vcpu);
	if (error < 0)
		return (false);

	discard_stop();
	vcpu_state[vcpuid].stepping = true;
	vm_resume_cpu(vcpu);
	CPU_CLR(vcpuid, &vcpus_suspended);
	pthread_cond_broadcast(&idle_vcpus);
	return (true);
}

static void
gdb_resume_vcpus(void)
{

	assert(pthread_mutex_isowned_np(&gdb_lock));
	vm_resume_all_cpus(ctx);
	debug("resuming all CPUs\n");
	CPU_ZERO(&vcpus_suspended);
	pthread_cond_broadcast(&idle_vcpus);
}

static void
gdb_read_regs(void)
{
	uint64_t regvals[nitems(gdb_regset)];
	int regnums[nitems(gdb_regset)];

	for (size_t i = 0; i < nitems(gdb_regset); i++)
		regnums[i] = gdb_regset[i].id;
	if (vm_get_register_set(vcpus[cur_vcpu], nitems(gdb_regset),
	    regnums, regvals) == -1) {
		send_error(errno);
		return;
	}
	start_packet();
	for (size_t i = 0; i < nitems(gdb_regset); i++)
		append_unsigned_native(regvals[i], gdb_regset[i].size);
	finish_packet();
}
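/*
 * The 'g' reply built above is each register from gdb_regset in
 * order, least-significant byte first, as hex pairs.  Illustrative
 * fragment: if RAX were 0x1234, the reply would begin
 * "3412000000000000" (the 8 bytes of RAX), followed by RBX, RCX, and
 * so on.
 */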
static void
gdb_read_mem(const uint8_t *data, size_t len)
{
	uint64_t gpa, gva, val;
	uint8_t *cp;
	size_t resid, todo, bytes;
	bool started;
	int error;

	assert(len >= 1);

	/* Skip 'm' */
	data += 1;
	len -= 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse length. */
	resid = parse_integer(data, len);

	started = false;
	while (resid > 0) {
		error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
		if (error == -1) {
			if (started)
				finish_packet();
			else
				send_error(errno);
			return;
		}
		if (error == 0) {
			if (started)
				finish_packet();
			else
				send_error(EFAULT);
			return;
		}

		/* Read bytes from current page. */
		todo = getpagesize() - gpa % getpagesize();
		if (todo > resid)
			todo = resid;

		cp = paddr_guest2host(ctx, gpa, todo);
		if (cp != NULL) {
			/*
			 * If this page is guest RAM, read it a byte
			 * at a time.
			 */
			if (!started) {
				start_packet();
				started = true;
			}
			while (todo > 0) {
				append_byte(*cp);
				cp++;
				gpa++;
				gva++;
				resid--;
				todo--;
			}
		} else {
			/*
			 * If this page isn't guest RAM, try to handle
			 * it via MMIO.  For MMIO requests, use
			 * aligned reads of words when possible.
			 */
			while (todo > 0) {
				if (gpa & 1 || todo == 1)
					bytes = 1;
				else if (gpa & 2 || todo == 2)
					bytes = 2;
				else
					bytes = 4;
				error = read_mem(vcpus[cur_vcpu], gpa, &val,
				    bytes);
				if (error == 0) {
					if (!started) {
						start_packet();
						started = true;
					}
					gpa += bytes;
					gva += bytes;
					resid -= bytes;
					todo -= bytes;
					while (bytes > 0) {
						append_byte(val);
						val >>= 8;
						bytes--;
					}
				} else {
					if (started)
						finish_packet();
					else
						send_error(EFAULT);
					return;
				}
			}
		}
		assert(resid == 0 || gpa % getpagesize() == 0);
	}
	if (!started)
		start_packet();
	finish_packet();
}
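/*
 * Illustrative 'm'/'M' exchanges handled by the functions adjacent to
 * this comment (address and data made up):
 *
 *	<- $m201000,4#f0	read 4 bytes at guest VA 0x201000
 *	-> $90909090#a4		4 NOP bytes, as hex pairs
 *
 *	<- $M201000,1:cc#cd	write one byte (0xcc) at 0x201000
 *	-> $OK#9a
 *
 * Address and length are big-endian hex.
 */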
static void
gdb_write_mem(const uint8_t *data, size_t len)
{
	uint64_t gpa, gva, val;
	uint8_t *cp;
	size_t resid, todo, bytes;
	int error;

	assert(len >= 1);

	/* Skip 'M' */
	data += 1;
	len -= 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume length. */
	cp = memchr(data, ':', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	resid = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Verify the available bytes match the length. */
	if (len != resid * 2) {
		send_error(EINVAL);
		return;
	}

	while (resid > 0) {
		error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
		if (error == -1) {
			send_error(errno);
			return;
		}
		if (error == 0) {
			send_error(EFAULT);
			return;
		}

		/* Write bytes to current page. */
		todo = getpagesize() - gpa % getpagesize();
		if (todo > resid)
			todo = resid;

		cp = paddr_guest2host(ctx, gpa, todo);
		if (cp != NULL) {
			/*
			 * If this page is guest RAM, write it a byte
			 * at a time.
			 */
			while (todo > 0) {
				assert(len >= 2);
				*cp = parse_byte(data);
				data += 2;
				len -= 2;
				cp++;
				gpa++;
				gva++;
				resid--;
				todo--;
			}
		} else {
			/*
			 * If this page isn't guest RAM, try to handle
			 * it via MMIO.  For MMIO requests, use
			 * aligned writes of words when possible.
			 */
			while (todo > 0) {
				if (gpa & 1 || todo == 1) {
					bytes = 1;
					val = parse_byte(data);
				} else if (gpa & 2 || todo == 2) {
					bytes = 2;
					val = be16toh(parse_integer(data, 4));
				} else {
					bytes = 4;
					val = be32toh(parse_integer(data, 8));
				}
				error = write_mem(vcpus[cur_vcpu], gpa, val,
				    bytes);
				if (error == 0) {
					gpa += bytes;
					gva += bytes;
					resid -= bytes;
					todo -= bytes;
					data += 2 * bytes;
					len -= 2 * bytes;
				} else {
					send_error(EFAULT);
					return;
				}
			}
		}
		assert(resid == 0 || gpa % getpagesize() == 0);
	}
	assert(len == 0);
	send_ok();
}

static bool
set_breakpoint_caps(bool enable)
{
	cpuset_t mask;
	int vcpu;

	mask = vcpus_active;
	while (!CPU_EMPTY(&mask)) {
		vcpu = CPU_FFS(&mask) - 1;
		CPU_CLR(vcpu, &mask);
		if (vm_set_capability(vcpus[vcpu], VM_CAP_BPT_EXIT,
		    enable ? 1 : 0) < 0)
			return (false);
		debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
		    enable ? "en" : "dis");
	}
	return (true);
}

static void
remove_all_sw_breakpoints(void)
{
	struct breakpoint *bp, *nbp;
	uint8_t *cp;

	if (TAILQ_EMPTY(&breakpoints))
		return;

	TAILQ_FOREACH_SAFE(bp, &breakpoints, link, nbp) {
		debug("remove breakpoint at %#lx\n", bp->gpa);
		cp = paddr_guest2host(ctx, bp->gpa, sizeof(bp->shadow_inst));
		memcpy(cp, bp->shadow_inst, sizeof(bp->shadow_inst));
		TAILQ_REMOVE(&breakpoints, bp, link);
		free(bp);
	}
	TAILQ_INIT(&breakpoints);
	set_breakpoint_caps(false);
}
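/*
 * Illustrative breakpoint exchange handled by update_sw_breakpoint()
 * below (address made up):
 *
 *	<- $Z0,ffffffff80321000,1#...	insert sw breakpoint, kind 1
 *	-> $OK#9a
 *
 * Inserting saves the original byte in 'shadow_inst' and patches the
 * guest page with GDB_BP_INSTR (0xcc, INT3); removal restores the
 * saved byte.  A 'z0' packet with the same arguments undoes the
 * insert.
 */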
static void
update_sw_breakpoint(uint64_t gva, int kind, bool insert)
{
	struct breakpoint *bp;
	uint64_t gpa;
	uint8_t *cp;
	int error;

	if (kind != GDB_BP_SIZE) {
		send_error(EINVAL);
		return;
	}

	error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
	if (error == -1) {
		send_error(errno);
		return;
	}
	if (error == 0) {
		send_error(EFAULT);
		return;
	}

	cp = paddr_guest2host(ctx, gpa, sizeof(bp->shadow_inst));

	/* Only permit breakpoints in guest RAM. */
	if (cp == NULL) {
		send_error(EFAULT);
		return;
	}

	/* Find any existing breakpoint. */
	bp = find_breakpoint(gpa);

	/*
	 * Silently ignore duplicate commands since the protocol
	 * requires these packets to be idempotent.
	 */
	if (insert) {
		if (bp == NULL) {
			if (TAILQ_EMPTY(&breakpoints) &&
			    !set_breakpoint_caps(true)) {
				send_empty_response();
				return;
			}
			bp = malloc(sizeof(*bp));
			bp->gpa = gpa;
			memcpy(bp->shadow_inst, cp, sizeof(bp->shadow_inst));
			memcpy(cp, GDB_BP_INSTR, sizeof(bp->shadow_inst));
			TAILQ_INSERT_TAIL(&breakpoints, bp, link);
			debug("new breakpoint at %#lx\n", gpa);
		}
	} else {
		if (bp != NULL) {
			debug("remove breakpoint at %#lx\n", gpa);
			memcpy(cp, bp->shadow_inst, sizeof(bp->shadow_inst));
			TAILQ_REMOVE(&breakpoints, bp, link);
			free(bp);
			if (TAILQ_EMPTY(&breakpoints))
				set_breakpoint_caps(false);
		}
	}
	send_ok();
}

static void
parse_breakpoint(const uint8_t *data, size_t len)
{
	uint64_t gva;
	uint8_t *cp;
	bool insert;
	int kind, type;

	insert = data[0] == 'Z';

	/* Skip 'Z/z' */
	data += 1;
	len -= 1;

	/* Parse and consume type. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	type = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume kind. */
	cp = memchr(data, ';', len);
	if (cp == data) {
		send_error(EINVAL);
		return;
	}
	if (cp != NULL) {
		/*
		 * We do not advertise support for either the
		 * ConditionalBreakpoints or BreakpointCommands
		 * features, so we should not be getting conditions or
		 * commands from the remote end.
		 */
		send_empty_response();
		return;
	}
	kind = parse_integer(data, len);
	data += len;
	len = 0;

	switch (type) {
	case 0:
		update_sw_breakpoint(gva, kind, insert);
		break;
	default:
		send_empty_response();
		break;
	}
}

static bool
command_equals(const uint8_t *data, size_t len, const char *cmd)
{

	if (strlen(cmd) > len)
		return (false);
	return (memcmp(data, cmd, strlen(cmd)) == 0);
}
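/*
 * Illustrative feature negotiation handled by check_features() below
 * (debugger-side feature list abbreviated):
 *
 *	<- $qSupported:multiprocess+;swbreak+;hwbreak+;...#...
 *	-> $PacketSize=4096;swbreak+#...
 *
 * Only 'swbreak' is tracked; unknown features are ignored and
 * malformed ones are skipped rather than rejected.
 */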
static void
check_features(const uint8_t *data, size_t len)
{
	char *feature, *next_feature, *str, *value;
	bool supported;

	str = malloc(len + 1);
	memcpy(str, data, len);
	str[len] = '\0';
	next_feature = str;

	while ((feature = strsep(&next_feature, ";")) != NULL) {
		/*
		 * Null features shouldn't exist, but skip if they
		 * do.
		 */
		if (strcmp(feature, "") == 0)
			continue;

		/*
		 * Look for the value or supported / not supported
		 * flag.
		 */
		value = strchr(feature, '=');
		if (value != NULL) {
			*value = '\0';
			value++;
			supported = true;
		} else {
			value = feature + strlen(feature) - 1;
			switch (*value) {
			case '+':
				supported = true;
				break;
			case '-':
				supported = false;
				break;
			default:
				/*
				 * This is really a protocol error,
				 * but we just ignore malformed
				 * features for ease of
				 * implementation.
				 */
				continue;
			}
			value = NULL;
		}

		if (strcmp(feature, "swbreak") == 0)
			swbreak_enabled = supported;
	}
	free(str);

	start_packet();

	/* This is an arbitrary limit. */
	append_string("PacketSize=4096");
	append_string(";swbreak+");
	finish_packet();
}

static void
gdb_query(const uint8_t *data, size_t len)
{

	/*
	 * TODO:
	 * - qSearch
	 */
	if (command_equals(data, len, "qAttached")) {
		start_packet();
		append_char('1');
		finish_packet();
	} else if (command_equals(data, len, "qC")) {
		start_packet();
		append_string("QC");
		append_integer(cur_vcpu + 1);
		finish_packet();
	} else if (command_equals(data, len, "qfThreadInfo")) {
		cpuset_t mask;
		bool first;
		int vcpu;

		if (CPU_EMPTY(&vcpus_active)) {
			send_error(EINVAL);
			return;
		}
		mask = vcpus_active;
		start_packet();
		append_char('m');
		first = true;
		while (!CPU_EMPTY(&mask)) {
			vcpu = CPU_FFS(&mask) - 1;
			CPU_CLR(vcpu, &mask);
			if (first)
				first = false;
			else
				append_char(',');
			append_integer(vcpu + 1);
		}
		finish_packet();
	} else if (command_equals(data, len, "qsThreadInfo")) {
		start_packet();
		append_char('l');
		finish_packet();
	} else if (command_equals(data, len, "qSupported")) {
		data += strlen("qSupported");
		len -= strlen("qSupported");
		check_features(data, len);
	} else if (command_equals(data, len, "qThreadExtraInfo")) {
		char buf[16];
		int tid;

		data += strlen("qThreadExtraInfo");
		len -= strlen("qThreadExtraInfo");
		if (len == 0 || *data != ',') {
			send_error(EINVAL);
			return;
		}
		tid = parse_threadid(data + 1, len - 1);
		if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
			send_error(EINVAL);
			return;
		}

		snprintf(buf, sizeof(buf), "vCPU %d", tid - 1);
		start_packet();
		append_asciihex(buf);
		finish_packet();
	} else
		send_empty_response();
}
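/*
 * Thread-id convention used by handle_command() below: GDB thread ids
 * are 1-based while vCPUs are 0-based, so vCPU N is thread N + 1.
 * For example, "$Hg2#e1" selects vCPU 1 for subsequent 'g'/'m'
 * packets, while tid 0 means "any thread" and tid -1 means "all
 * threads".
 */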
static void
handle_command(const uint8_t *data, size_t len)
{

	/* Reject packets with a sequence-id. */
	if (len >= 3 && data[0] >= '0' && data[0] <= '9' &&
	    data[1] >= '0' && data[1] <= '9' && data[2] == ':') {
		send_empty_response();
		return;
	}

	switch (*data) {
	case 'c':
		if (len != 1) {
			send_error(EINVAL);
			break;
		}

		discard_stop();
		gdb_resume_vcpus();
		break;
	case 'D':
		send_ok();

		/* TODO: Resume any stopped CPUs. */
		break;
	case 'g':
		gdb_read_regs();
		break;
	case 'H': {
		int tid;

		if (len < 2 || (data[1] != 'g' && data[1] != 'c')) {
			send_error(EINVAL);
			break;
		}
		tid = parse_threadid(data + 2, len - 2);
		if (tid == -2) {
			send_error(EINVAL);
			break;
		}

		if (CPU_EMPTY(&vcpus_active)) {
			send_error(EINVAL);
			break;
		}
		if (tid == -1 || tid == 0)
			cur_vcpu = CPU_FFS(&vcpus_active) - 1;
		else if (CPU_ISSET(tid - 1, &vcpus_active))
			cur_vcpu = tid - 1;
		else {
			send_error(EINVAL);
			break;
		}
		send_ok();
		break;
	}
	case 'm':
		gdb_read_mem(data, len);
		break;
	case 'M':
		gdb_write_mem(data, len);
		break;
	case 'T': {
		int tid;

		tid = parse_threadid(data + 1, len - 1);
		if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
			send_error(EINVAL);
			return;
		}
		send_ok();
		break;
	}
	case 'q':
		gdb_query(data, len);
		break;
	case 's':
		if (len != 1) {
			send_error(EINVAL);
			break;
		}

		/* Don't send a reply until a stop occurs. */
		if (!gdb_step_vcpu(vcpus[cur_vcpu])) {
			send_error(EOPNOTSUPP);
			break;
		}
		break;
	case 'z':
	case 'Z':
		parse_breakpoint(data, len);
		break;
	case '?':
		report_stop(false);
		break;
	case 'G': /* TODO */
	case 'v':
		/* Handle 'vCont' */
		/* 'vCtrlC' */
	case 'p': /* TODO */
	case 'P': /* TODO */
	case 'Q': /* TODO */
	case 't': /* TODO */
	case 'X': /* TODO */
	default:
		send_empty_response();
	}
}
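/*
 * check_command() below consumes the raw inbound byte stream, which
 * interleaves ACKs, packets, and interrupt bytes.  An illustrative
 * stream, possibly split across several read()s:
 *
 *	+ $m201000,4#f0 <0x03>
 *
 * i.e. an ACK of our previous response, a complete memory-read
 * packet, then a Ctrl-C interrupt.  An incomplete packet stays
 * buffered in cur_comm until its trailing "#xx" checksum arrives.
 */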
/* Check for a valid packet in the command buffer. */
static void
check_command(int fd)
{
	uint8_t *head, *hash, *p, sum;
	size_t avail, plen;

	for (;;) {
		avail = cur_comm.len;
		if (avail == 0)
			return;
		head = io_buffer_head(&cur_comm);
		switch (*head) {
		case 0x03:
			debug("<- Ctrl-C\n");
			io_buffer_consume(&cur_comm, 1);

			gdb_suspend_vcpus();
			break;
		case '+':
			/* ACK of previous response. */
			debug("<- +\n");
			if (response_pending())
				io_buffer_reset(&cur_resp);
			io_buffer_consume(&cur_comm, 1);
			if (stopped_vcpu != -1 && report_next_stop) {
				report_stop(true);
				send_pending_data(fd);
			}
			break;
		case '-':
			/* NACK of previous response. */
			debug("<- -\n");
			if (response_pending()) {
				cur_resp.len += cur_resp.start;
				cur_resp.start = 0;
				if (cur_resp.data[0] == '+')
					io_buffer_advance(&cur_resp, 1);
				debug("-> %.*s\n", (int)cur_resp.len,
				    io_buffer_head(&cur_resp));
			}
			io_buffer_consume(&cur_comm, 1);
			send_pending_data(fd);
			break;
		case '$':
			/* Packet. */

			if (response_pending()) {
				warnx("New GDB command while response in "
				    "progress");
				io_buffer_reset(&cur_resp);
			}

			/* Is packet complete? */
			hash = memchr(head, '#', avail);
			if (hash == NULL)
				return;
			plen = (hash - head + 1) + 2;
			if (avail < plen)
				return;
			debug("<- %.*s\n", (int)plen, head);

			/* Verify checksum. */
			for (sum = 0, p = head + 1; p < hash; p++)
				sum += *p;
			if (sum != parse_byte(hash + 1)) {
				io_buffer_consume(&cur_comm, plen);
				debug("-> -\n");
				send_char('-');
				send_pending_data(fd);
				break;
			}
			send_char('+');

			handle_command(head + 1, hash - (head + 1));
			io_buffer_consume(&cur_comm, plen);
			if (!response_pending())
				debug("-> +\n");
			send_pending_data(fd);
			break;
		default:
			/* XXX: Possibly drop connection instead. */
			debug("<- %02x\n", *head);
			io_buffer_consume(&cur_comm, 1);
			break;
		}
	}
}

static void
gdb_readable(int fd, enum ev_type event __unused, void *arg __unused)
{
	size_t pending;
	ssize_t nread;
	int n;

	if (ioctl(fd, FIONREAD, &n) == -1) {
		warn("FIONREAD on GDB socket");
		return;
	}
	assert(n >= 0);
	pending = n;

	/*
	 * 'pending' might be zero due to EOF.  We need to call read
	 * with a non-zero length to detect EOF.
	 */
	if (pending == 0)
		pending = 1;

	/* Ensure there is room in the command buffer. */
	io_buffer_grow(&cur_comm, pending);
	assert(io_buffer_avail(&cur_comm) >= pending);

	nread = read(fd, io_buffer_tail(&cur_comm), io_buffer_avail(&cur_comm));
	if (nread == 0) {
		close_connection();
	} else if (nread == -1) {
		if (errno == EAGAIN)
			return;

		warn("Read from GDB socket");
		close_connection();
	} else {
		cur_comm.len += nread;
		pthread_mutex_lock(&gdb_lock);
		check_command(fd);
		pthread_mutex_unlock(&gdb_lock);
	}
}

static void
gdb_writable(int fd, enum ev_type event __unused, void *arg __unused)
{

	send_pending_data(fd);
}
static void
new_connection(int fd, enum ev_type event __unused, void *arg)
{
	int optval, s;

	s = accept4(fd, NULL, NULL, SOCK_NONBLOCK);
	if (s == -1) {
		if (arg != NULL)
			err(1, "Failed accepting initial GDB connection");

		/* Silently ignore errors post-startup. */
		return;
	}

	optval = 1;
	if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)) ==
	    -1) {
		warn("Failed to disable SIGPIPE for GDB connection");
		close(s);
		return;
	}

	pthread_mutex_lock(&gdb_lock);
	if (cur_fd != -1) {
		close(s);
		warnx("Ignoring additional GDB connection.");
		pthread_mutex_unlock(&gdb_lock);
		return;
	}

	read_event = mevent_add(s, EVF_READ, gdb_readable, NULL);
	if (read_event == NULL) {
		if (arg != NULL)
			err(1, "Failed to setup initial GDB connection");
		pthread_mutex_unlock(&gdb_lock);
		return;
	}
	write_event = mevent_add(s, EVF_WRITE, gdb_writable, NULL);
	if (write_event == NULL) {
		if (arg != NULL)
			err(1, "Failed to setup initial GDB connection");
		mevent_delete_close(read_event);
		read_event = NULL;
	}

	cur_fd = s;
	cur_vcpu = 0;
	stopped_vcpu = -1;

	/* Break on attach. */
	first_stop = true;
	report_next_stop = false;
	gdb_suspend_vcpus();
	pthread_mutex_unlock(&gdb_lock);
}

#ifndef WITHOUT_CAPSICUM
static void
limit_gdb_socket(int s)
{
	cap_rights_t rights;
	unsigned long ioctls[] = { FIONREAD };

	cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE,
	    CAP_SETSOCKOPT, CAP_IOCTL);
	if (caph_rights_limit(s, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	if (caph_ioctls_limit(s, ioctls, nitems(ioctls)) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
}
#endif

void
init_gdb(struct vmctx *_ctx)
{
	int error, flags, optval, s;
	struct addrinfo hints;
	struct addrinfo *gdbaddr;
	const char *saddr, *value;
	char *sport;
	bool wait;

	value = get_config_value("gdb.port");
	if (value == NULL)
		return;
	sport = strdup(value);
	if (sport == NULL)
		errx(4, "Failed to allocate memory");

	wait = get_config_bool_default("gdb.wait", false);

	saddr = get_config_value("gdb.address");
	if (saddr == NULL)
		saddr = "localhost";

	debug("==> starting on %s:%s, %swaiting\n",
	    saddr, sport, wait ? "" : "not ");

	error = pthread_mutex_init(&gdb_lock, NULL);
	if (error != 0)
		errc(1, error, "gdb mutex init");
	error = pthread_cond_init(&idle_vcpus, NULL);
	if (error != 0)
		errc(1, error, "gdb cv init");

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_flags = AI_NUMERICSERV | AI_PASSIVE;

	error = getaddrinfo(saddr, sport, &hints, &gdbaddr);
	if (error != 0)
		errx(1, "gdb address resolution: %s", gai_strerror(error));

	ctx = _ctx;
	s = socket(gdbaddr->ai_family, gdbaddr->ai_socktype, 0);
	if (s < 0)
		err(1, "gdb socket create");

	optval = 1;
	(void)setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));

	if (bind(s, gdbaddr->ai_addr, gdbaddr->ai_addrlen) < 0)
		err(1, "gdb socket bind");

	if (listen(s, 1) < 0)
		err(1, "gdb socket listen");

	stopped_vcpu = -1;
	TAILQ_INIT(&breakpoints);
	vcpus = calloc(guest_ncpus, sizeof(*vcpus));
	vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
	if (wait) {
		/*
		 * Set vcpu 0 in vcpus_suspended.  This will trigger the
		 * logic in gdb_cpu_add() to suspend the first vcpu before
		 * it starts execution.  The vcpu will remain suspended
		 * until a debugger connects.
		 */
		CPU_SET(0, &vcpus_suspended);
		stopped_vcpu = 0;
	}

	flags = fcntl(s, F_GETFL);
	if (fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1)
		err(1, "Failed to mark gdb socket non-blocking");

#ifndef WITHOUT_CAPSICUM
	limit_gdb_socket(s);
#endif
	mevent_add(s, EVF_READ, new_connection, NULL);
	gdb_active = true;
	freeaddrinfo(gdbaddr);
	free(sport);
}
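/*
 * Usage sketch (illustrative, not part of the build): the stub is
 * configured through the gdb.port / gdb.address / gdb.wait config
 * values read in init_gdb() above, most commonly via bhyve's -G flag,
 * e.g.
 *
 *	bhyve -G 1234 ... vmname	# listen on localhost:1234
 *	bhyve -G w1234 ... vmname	# also wait for the debugger
 *
 * and then, from a debugger on the host:
 *
 *	(gdb) target remote localhost:1234
 */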