/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/endian.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/stat.h>

#ifdef __aarch64__
#include <machine/armreg.h>
#endif
#include <machine/atomic.h>
#ifdef __amd64__
#include <machine/specialreg.h>
#endif
#include <machine/vmm.h>

#include <netinet/in.h>

#include <assert.h>
#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <netdb.h>
#include <pthread.h>
#include <pthread_np.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "gdb.h"
#include "mem.h"
#include "mevent.h"

#define	_PATH_GDB_XML		"/usr/share/bhyve/gdb"

/*
 * GDB_SIGNAL_* numbers are part of the GDB remote protocol.  Most stops
 * use SIGTRAP.
 */
#define	GDB_SIGNAL_TRAP		5

#if defined(__amd64__)
#define	GDB_BP_SIZE		1
#define	GDB_BP_INSTR		(uint8_t []){0xcc}
#define	GDB_PC_REGNAME		VM_REG_GUEST_RIP
#define	GDB_BREAKPOINT_CAP	VM_CAP_BPT_EXIT
#elif defined(__aarch64__)
#define	GDB_BP_SIZE		4
#define	GDB_BP_INSTR		(uint8_t []){0x00, 0x00, 0x20, 0xd4}
#define	GDB_PC_REGNAME		VM_REG_GUEST_PC
#define	GDB_BREAKPOINT_CAP	VM_CAP_BRK_EXIT
#else
#error "Unsupported architecture"
#endif

_Static_assert(sizeof(GDB_BP_INSTR) == GDB_BP_SIZE,
    "GDB_BP_INSTR has wrong size");
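
/*
 * Illustrative note (not from the original source): the bytes above are
 * each architecture's canonical software breakpoint instruction.  On
 * amd64, 0xcc is the one-byte INT3 instruction.  On aarch64, "brk #0"
 * encodes as the 32-bit word 0xd4200000, stored little-endian as
 * {0x00, 0x00, 0x20, 0xd4}; hence GDB_BP_SIZE is 4 there, and the
 * breakpoint "kind" sent by the debugger must match that size.
 */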

static void gdb_resume_vcpus(void);
static void check_command(int fd);

static struct mevent *read_event, *write_event;

static cpuset_t vcpus_active, vcpus_suspended, vcpus_waiting;
static pthread_mutex_t gdb_lock;
static pthread_cond_t idle_vcpus;
static bool first_stop, report_next_stop, swbreak_enabled;
static int xml_dfd = -1;

/*
 * An I/O buffer contains 'capacity' bytes of room at 'data'.  For a
 * read buffer, 'start' is unused and 'len' contains the number of
 * valid bytes in the buffer.  For a write buffer, 'start' is set to
 * the index of the next byte in 'data' to send, and 'len' contains
 * the remaining number of valid bytes to send.
 */
struct io_buffer {
	uint8_t *data;
	size_t capacity;
	size_t start;
	size_t len;
};
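
/*
 * Illustrative example (not from the original source): for a write
 * buffer with capacity 8 holding the response "$OK#9a", of which the
 * leading '$' has already been sent, the fields would be:
 *
 *	data     -> { '$', 'O', 'K', '#', '9', 'a', ?, ? }
 *	capacity = 8
 *	start    = 1	(next byte to send is 'O')
 *	len      = 5	("OK#9a" remains to be sent)
 */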

struct breakpoint {
	uint64_t gpa;
	uint8_t shadow_inst[GDB_BP_SIZE];
	TAILQ_ENTRY(breakpoint) link;
};

/*
 * When a vCPU stops due to an event that should be reported to the
 * debugger, information about the event is stored in this structure.
 * The vCPU thread then sets 'stopped_vcpu' if it is not already set
 * and stops other vCPUs so the event can be reported.  The
 * report_stop() function reports the event for the 'stopped_vcpu'
 * vCPU.  When the debugger resumes execution via continue or step,
 * the event for 'stopped_vcpu' is cleared.  vCPUs will loop in their
 * event handlers until the associated event is reported or disabled.
 *
 * An idle vCPU will have all of the boolean fields set to false.
 *
 * When a vCPU is stepped, 'stepping' is set to true when the vCPU is
 * released to execute the stepped instruction.  When the vCPU reports
 * the stepping trap, 'stepped' is set.
 *
 * When a vCPU hits a breakpoint set by the debug server,
 * 'hit_swbreak' is set to true.
 */
struct vcpu_state {
	bool stepping;
	bool stepped;
	bool hit_swbreak;
};

static struct io_buffer cur_comm, cur_resp;
static uint8_t cur_csum;
static struct vmctx *ctx;
static int cur_fd = -1;
static TAILQ_HEAD(, breakpoint) breakpoints;
static struct vcpu_state *vcpu_state;
static struct vcpu **vcpus;
static int cur_vcpu, stopped_vcpu;
static bool gdb_active = false;

struct gdb_reg {
	enum vm_reg_name id;
	int size;
};

#ifdef __amd64__
static const struct gdb_reg gdb_regset[] = {
	{ .id = VM_REG_GUEST_RAX, .size = 8 },
	{ .id = VM_REG_GUEST_RBX, .size = 8 },
	{ .id = VM_REG_GUEST_RCX, .size = 8 },
	{ .id = VM_REG_GUEST_RDX, .size = 8 },
	{ .id = VM_REG_GUEST_RSI, .size = 8 },
	{ .id = VM_REG_GUEST_RDI, .size = 8 },
	{ .id = VM_REG_GUEST_RBP, .size = 8 },
	{ .id = VM_REG_GUEST_RSP, .size = 8 },
	{ .id = VM_REG_GUEST_R8, .size = 8 },
	{ .id = VM_REG_GUEST_R9, .size = 8 },
	{ .id = VM_REG_GUEST_R10, .size = 8 },
	{ .id = VM_REG_GUEST_R11, .size = 8 },
	{ .id = VM_REG_GUEST_R12, .size = 8 },
	{ .id = VM_REG_GUEST_R13, .size = 8 },
	{ .id = VM_REG_GUEST_R14, .size = 8 },
	{ .id = VM_REG_GUEST_R15, .size = 8 },
	{ .id = VM_REG_GUEST_RIP, .size = 8 },
	{ .id = VM_REG_GUEST_RFLAGS, .size = 4 },
	{ .id = VM_REG_GUEST_CS, .size = 4 },
	{ .id = VM_REG_GUEST_SS, .size = 4 },
	{ .id = VM_REG_GUEST_DS, .size = 4 },
	{ .id = VM_REG_GUEST_ES, .size = 4 },
	{ .id = VM_REG_GUEST_FS, .size = 4 },
	{ .id = VM_REG_GUEST_GS, .size = 4 },
	/*
	 * Registers past this point are not included in a reply to a 'g' query,
	 * to provide compatibility with debuggers that do not fetch a target
	 * description.  The debugger can query them individually with 'p' if it
	 * knows about them.
	 */
#define	GDB_REG_FIRST_EXT	VM_REG_GUEST_FS_BASE
	{ .id = VM_REG_GUEST_FS_BASE, .size = 8 },
	{ .id = VM_REG_GUEST_GS_BASE, .size = 8 },
	{ .id = VM_REG_GUEST_KGS_BASE, .size = 8 },
	{ .id = VM_REG_GUEST_CR0, .size = 8 },
	{ .id = VM_REG_GUEST_CR2, .size = 8 },
	{ .id = VM_REG_GUEST_CR3, .size = 8 },
	{ .id = VM_REG_GUEST_CR4, .size = 8 },
	{ .id = VM_REG_GUEST_TPR, .size = 8 },
	{ .id = VM_REG_GUEST_EFER, .size = 8 },
};
#else /* __aarch64__ */
static const struct gdb_reg gdb_regset[] = {
	{ .id = VM_REG_GUEST_X0, .size = 8 },
	{ .id = VM_REG_GUEST_X1, .size = 8 },
	{ .id = VM_REG_GUEST_X2, .size = 8 },
	{ .id = VM_REG_GUEST_X3, .size = 8 },
	{ .id = VM_REG_GUEST_X4, .size = 8 },
	{ .id = VM_REG_GUEST_X5, .size = 8 },
	{ .id = VM_REG_GUEST_X6, .size = 8 },
	{ .id = VM_REG_GUEST_X7, .size = 8 },
	{ .id = VM_REG_GUEST_X8, .size = 8 },
	{ .id = VM_REG_GUEST_X9, .size = 8 },
	{ .id = VM_REG_GUEST_X10, .size = 8 },
	{ .id = VM_REG_GUEST_X11, .size = 8 },
	{ .id = VM_REG_GUEST_X12, .size = 8 },
	{ .id = VM_REG_GUEST_X13, .size = 8 },
	{ .id = VM_REG_GUEST_X14, .size = 8 },
	{ .id = VM_REG_GUEST_X15, .size = 8 },
	{ .id = VM_REG_GUEST_X16, .size = 8 },
	{ .id = VM_REG_GUEST_X17, .size = 8 },
	{ .id = VM_REG_GUEST_X18, .size = 8 },
	{ .id = VM_REG_GUEST_X19, .size = 8 },
	{ .id = VM_REG_GUEST_X20, .size = 8 },
	{ .id = VM_REG_GUEST_X21, .size = 8 },
	{ .id = VM_REG_GUEST_X22, .size = 8 },
	{ .id = VM_REG_GUEST_X23, .size = 8 },
	{ .id = VM_REG_GUEST_X24, .size = 8 },
	{ .id = VM_REG_GUEST_X25, .size = 8 },
	{ .id = VM_REG_GUEST_X26, .size = 8 },
	{ .id = VM_REG_GUEST_X27, .size = 8 },
	{ .id = VM_REG_GUEST_X28, .size = 8 },
	{ .id = VM_REG_GUEST_X29, .size = 8 },
	{ .id = VM_REG_GUEST_LR, .size = 8 },
	{ .id = VM_REG_GUEST_SP, .size = 8 },
	{ .id = VM_REG_GUEST_PC, .size = 8 },
	{ .id = VM_REG_GUEST_CPSR, .size = 8 },
};
#endif
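
/*
 * Illustrative example (not from the original source): registers are
 * addressed by their index in gdb_regset.  On amd64 a 'g' reply covers
 * indices 0 (RAX) through 23 (GS) only; the extended registers are
 * fetched individually, e.g. the packet "p20" asks for register 0x20
 * (decimal 32, VM_REG_GUEST_EFER here), and the reply is that 8-byte
 * value in target byte order as 16 hex digits.
 */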

#ifdef GDB_LOG
#include <stdarg.h>
#include <stdio.h>

static void __printflike(1, 2)
debug(const char *fmt, ...)
{
	static FILE *logfile;
	va_list ap;

	if (logfile == NULL) {
		logfile = fopen("/tmp/bhyve_gdb.log", "w");
		if (logfile == NULL)
			return;
#ifndef WITHOUT_CAPSICUM
		if (caph_limit_stream(fileno(logfile), CAPH_WRITE) == -1) {
			fclose(logfile);
			logfile = NULL;
			return;
		}
#endif
		setlinebuf(logfile);
	}
	va_start(ap, fmt);
	vfprintf(logfile, fmt, ap);
	va_end(ap);
}
#else
#define debug(...)
#endif

static void remove_all_sw_breakpoints(void);

static int
guest_paging_info(struct vcpu *vcpu, struct vm_guest_paging *paging)
{
#ifdef __amd64__
	uint64_t regs[4];
	const int regset[4] = {
		VM_REG_GUEST_CR0,
		VM_REG_GUEST_CR3,
		VM_REG_GUEST_CR4,
		VM_REG_GUEST_EFER
	};

	if (vm_get_register_set(vcpu, nitems(regset), regset, regs) == -1)
		return (-1);

	/*
	 * For the debugger, always pretend to be the kernel (CPL 0),
	 * and if long-mode is enabled, always parse addresses as if
	 * in 64-bit mode.
	 */
	paging->cr3 = regs[1];
	paging->cpl = 0;
	if (regs[3] & EFER_LMA)
		paging->cpu_mode = CPU_MODE_64BIT;
	else if (regs[0] & CR0_PE)
		paging->cpu_mode = CPU_MODE_PROTECTED;
	else
		paging->cpu_mode = CPU_MODE_REAL;
	if (!(regs[0] & CR0_PG))
		paging->paging_mode = PAGING_MODE_FLAT;
	else if (!(regs[2] & CR4_PAE))
		paging->paging_mode = PAGING_MODE_32;
	else if (regs[3] & EFER_LME)
		paging->paging_mode = (regs[2] & CR4_LA57) ?
		    PAGING_MODE_64_LA57 : PAGING_MODE_64;
	else
		paging->paging_mode = PAGING_MODE_PAE;
	return (0);
#else /* __aarch64__ */
	uint64_t regs[6];
	const int regset[6] = {
		VM_REG_GUEST_TTBR0_EL1,
		VM_REG_GUEST_TTBR1_EL1,
		VM_REG_GUEST_TCR_EL1,
		VM_REG_GUEST_TCR2_EL1,
		VM_REG_GUEST_SCTLR_EL1,
		VM_REG_GUEST_CPSR,
	};

	if (vm_get_register_set(vcpu, nitems(regset), regset, regs) == -1)
		return (-1);

	memset(paging, 0, sizeof(*paging));
	paging->ttbr0_addr = regs[0] & ~(TTBR_ASID_MASK | TTBR_CnP);
	paging->ttbr1_addr = regs[1] & ~(TTBR_ASID_MASK | TTBR_CnP);
	paging->tcr_el1 = regs[2];
	paging->tcr2_el1 = regs[3];
	paging->flags = regs[5] & (PSR_M_MASK | PSR_M_32);
	if ((regs[4] & SCTLR_M) != 0)
		paging->flags |= VM_GP_MMU_ENABLED;

	return (0);
#endif /* __aarch64__ */
}

/*
 * Map a guest virtual address to a physical address (for a given vcpu).
 * If a guest virtual address is valid, return 1.  If the address is
 * not valid, return 0.  If an error occurs obtaining the mapping,
 * return -1.
 */
static int
guest_vaddr2paddr(struct vcpu *vcpu, uint64_t vaddr, uint64_t *paddr)
{
	struct vm_guest_paging paging;
	int fault;

	if (guest_paging_info(vcpu, &paging) == -1)
		return (-1);

	/*
	 * Always use PROT_READ.  We really care if the VA is
	 * accessible, not if the current vCPU can write.
	 */
	if (vm_gla2gpa_nofault(vcpu, &paging, vaddr, PROT_READ, paddr,
	    &fault) == -1)
		return (-1);
	if (fault)
		return (0);
	return (1);
}
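
/*
 * Illustrative sketch (not from the original source) of how callers
 * below consume the tri-state result:
 *
 *	switch (guest_vaddr2paddr(vcpu, gva, &gpa)) {
 *	case -1:	send_error(errno);	(lookup failed)
 *	case 0:		send_error(EFAULT);	(no valid mapping)
 *	case 1:		...use gpa...		(translation succeeded)
 *	}
 */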

static uint64_t
guest_pc(struct vm_exit *vme)
{
#ifdef __amd64__
	return (vme->rip);
#else /* __aarch64__ */
	return (vme->pc);
#endif
}

static void
io_buffer_reset(struct io_buffer *io)
{

	io->start = 0;
	io->len = 0;
}

/* Available room for adding data. */
static size_t
io_buffer_avail(struct io_buffer *io)
{

	return (io->capacity - (io->start + io->len));
}

static uint8_t *
io_buffer_head(struct io_buffer *io)
{

	return (io->data + io->start);
}

static uint8_t *
io_buffer_tail(struct io_buffer *io)
{

	return (io->data + io->start + io->len);
}

static void
io_buffer_advance(struct io_buffer *io, size_t amount)
{

	assert(amount <= io->len);
	io->start += amount;
	io->len -= amount;
}

static void
io_buffer_consume(struct io_buffer *io, size_t amount)
{

	io_buffer_advance(io, amount);
	if (io->len == 0) {
		io->start = 0;
		return;
	}

	/*
	 * XXX: Consider making this move optional and compacting on a
	 * future read() before realloc().
	 */
	memmove(io->data, io_buffer_head(io), io->len);
	io->start = 0;
}

static void
io_buffer_grow(struct io_buffer *io, size_t newsize)
{
	uint8_t *new_data;
	size_t avail, new_cap;

	avail = io_buffer_avail(io);
	if (newsize <= avail)
		return;

	new_cap = io->capacity + (newsize - avail);
	new_data = realloc(io->data, new_cap);
	if (new_data == NULL)
		err(1, "Failed to grow GDB I/O buffer");
	io->data = new_data;
	io->capacity = new_cap;
}

static bool
response_pending(void)
{

	if (cur_resp.start == 0 && cur_resp.len == 0)
		return (false);
	if (cur_resp.start + cur_resp.len == 1 && cur_resp.data[0] == '+')
		return (false);
	return (true);
}

static void
close_connection(void)
{

	/*
	 * XXX: This triggers a warning because mevent does the close
	 * before the EV_DELETE.
	 */
	pthread_mutex_lock(&gdb_lock);
	mevent_delete(write_event);
	mevent_delete_close(read_event);
	write_event = NULL;
	read_event = NULL;
	io_buffer_reset(&cur_comm);
	io_buffer_reset(&cur_resp);
	cur_fd = -1;

	remove_all_sw_breakpoints();

	/* Clear any pending events. */
	memset(vcpu_state, 0, guest_ncpus * sizeof(*vcpu_state));

	/* Resume any stopped vCPUs. */
	gdb_resume_vcpus();
	pthread_mutex_unlock(&gdb_lock);
}

static uint8_t
hex_digit(uint8_t nibble)
{

	if (nibble <= 9)
		return (nibble + '0');
	else
		return (nibble + 'a' - 10);
}

static uint8_t
parse_digit(uint8_t v)
{

	if (v >= '0' && v <= '9')
		return (v - '0');
	if (v >= 'a' && v <= 'f')
		return (v - 'a' + 10);
	if (v >= 'A' && v <= 'F')
		return (v - 'A' + 10);
	return (0xF);
}

/* Parses big-endian hexadecimal. */
static uintmax_t
parse_integer(const uint8_t *p, size_t len)
{
	uintmax_t v;

	v = 0;
	while (len > 0) {
		v <<= 4;
		v |= parse_digit(*p);
		p++;
		len--;
	}
	return (v);
}

static uint8_t
parse_byte(const uint8_t *p)
{

	return (parse_digit(p[0]) << 4 | parse_digit(p[1]));
}

static void
send_pending_data(int fd)
{
	ssize_t nwritten;

	if (cur_resp.len == 0) {
		mevent_disable(write_event);
		return;
	}
	nwritten = write(fd, io_buffer_head(&cur_resp), cur_resp.len);
	if (nwritten == -1) {
		warn("Write to GDB socket failed");
		close_connection();
	} else {
		io_buffer_advance(&cur_resp, nwritten);
		if (cur_resp.len == 0)
			mevent_disable(write_event);
		else
			mevent_enable(write_event);
	}
}

/* Append a single character to the output buffer. */
static void
send_char(uint8_t data)
{
	io_buffer_grow(&cur_resp, 1);
	*io_buffer_tail(&cur_resp) = data;
	cur_resp.len++;
}

/* Append an array of bytes to the output buffer. */
static void
send_data(const uint8_t *data, size_t len)
{

	io_buffer_grow(&cur_resp, len);
	memcpy(io_buffer_tail(&cur_resp), data, len);
	cur_resp.len += len;
}

static void
format_byte(uint8_t v, uint8_t *buf)
{

	buf[0] = hex_digit(v >> 4);
	buf[1] = hex_digit(v & 0xf);
}

/*
 * Append a single byte (formatted as two hex characters) to the
 * output buffer.
 */
static void
send_byte(uint8_t v)
{
	uint8_t buf[2];

	format_byte(v, buf);
	send_data(buf, sizeof(buf));
}

static void
start_packet(void)
{

	send_char('$');
	cur_csum = 0;
}

static void
finish_packet(void)
{

	send_char('#');
	send_byte(cur_csum);
	debug("-> %.*s\n", (int)cur_resp.len, io_buffer_head(&cur_resp));
}
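
/*
 * Illustrative example (not from the original source): a complete
 * remote-protocol packet is "$<payload>#<checksum>", where the
 * checksum is the modulo-256 sum of the payload bytes, sent as two
 * hex digits.  For the payload "OK", 'O' (0x4f) + 'K' (0x4b) = 0x9a,
 * so send_ok() below ultimately emits "$OK#9a".
 */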

/*
 * Append a single character (for the packet payload) and update the
 * checksum.
 */
static void
append_char(uint8_t v)
{

	send_char(v);
	cur_csum += v;
}

/*
 * Append an array of bytes (for the packet payload) and update the
 * checksum.
 */
static void
append_packet_data(const uint8_t *data, size_t len)
{

	send_data(data, len);
	while (len > 0) {
		cur_csum += *data;
		data++;
		len--;
	}
}

static void
append_binary_data(const uint8_t *data, size_t len)
{
	uint8_t buf[2];

	for (; len > 0; data++, len--) {
		switch (*data) {
		case '}':
		case '#':
		case '$':
		case '*':
			buf[0] = 0x7d;
			buf[1] = *data ^ 0x20;
			append_packet_data(buf, 2);
			break;
		default:
			append_packet_data(data, 1);
			break;
		}
	}
}
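
/*
 * Illustrative example (not from the original source): in the binary
 * transfer encoding above, the bytes '}', '#', '$', and '*' must be
 * escaped as 0x7d followed by the byte XORed with 0x20.  A literal
 * '#' (0x23) is therefore sent as the two bytes 0x7d 0x03, and a
 * literal '}' (0x7d) as 0x7d 0x5d.
 */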

static void
append_string(const char *str)
{

	append_packet_data(str, strlen(str));
}

static void
append_byte(uint8_t v)
{
	uint8_t buf[2];

	format_byte(v, buf);
	append_packet_data(buf, sizeof(buf));
}

static void
append_unsigned_native(uintmax_t value, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		append_byte(value);
		value >>= 8;
	}
}

static void
append_unsigned_be(uintmax_t value, size_t len)
{
	char buf[len * 2];
	size_t i;

	for (i = 0; i < len; i++) {
		format_byte(value, buf + (len - i - 1) * 2);
		value >>= 8;
	}
	append_packet_data(buf, sizeof(buf));
}

static void
append_integer(unsigned int value)
{

	if (value == 0)
		append_char('0');
	else
		append_unsigned_be(value, (fls(value) + 7) / 8);
}

static void
append_asciihex(const char *str)
{

	while (*str != '\0') {
		append_byte(*str);
		str++;
	}
}

static void
send_empty_response(void)
{

	start_packet();
	finish_packet();
}

static void
send_error(int error)
{

	start_packet();
	append_char('E');
	append_byte(error);
	finish_packet();
}

static void
send_ok(void)
{

	start_packet();
	append_string("OK");
	finish_packet();
}

static int
parse_threadid(const uint8_t *data, size_t len)
{

	if (len == 1 && *data == '0')
		return (0);
	if (len == 2 && memcmp(data, "-1", 2) == 0)
		return (-1);
	if (len == 0)
		return (-2);
	return (parse_integer(data, len));
}

/*
 * Report the current stop event to the debugger.  If the stop is due
 * to an event triggered on a specific vCPU such as a breakpoint or
 * stepping trap, stopped_vcpu will be set to the vCPU triggering the
 * stop.  If 'set_cur_vcpu' is true, then cur_vcpu will be updated to
 * the reporting vCPU for vCPU events.
 */
static void
report_stop(bool set_cur_vcpu)
{
	struct vcpu_state *vs;

	start_packet();
	if (stopped_vcpu == -1) {
		append_char('S');
		append_byte(GDB_SIGNAL_TRAP);
	} else {
		vs = &vcpu_state[stopped_vcpu];
		if (set_cur_vcpu)
			cur_vcpu = stopped_vcpu;
		append_char('T');
		append_byte(GDB_SIGNAL_TRAP);
		append_string("thread:");
		append_integer(stopped_vcpu + 1);
		append_char(';');
		if (vs->hit_swbreak) {
			debug("$vCPU %d reporting swbreak\n", stopped_vcpu);
			if (swbreak_enabled)
				append_string("swbreak:;");
		} else if (vs->stepped)
			debug("$vCPU %d reporting step\n", stopped_vcpu);
		else
			debug("$vCPU %d reporting ???\n", stopped_vcpu);
	}
	finish_packet();
	report_next_stop = false;
}

/*
 * If this stop is due to a vCPU event, clear that event to mark it as
 * acknowledged.
 */
static void
discard_stop(void)
{
	struct vcpu_state *vs;

	if (stopped_vcpu != -1) {
		vs = &vcpu_state[stopped_vcpu];
		vs->hit_swbreak = false;
		vs->stepped = false;
		stopped_vcpu = -1;
	}
	report_next_stop = true;
}
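
/*
 * Illustrative example (not from the original source): if vCPU 0 hits
 * a software breakpoint and the debugger negotiated "swbreak", the
 * stop reply built above is "T05thread:1;swbreak:;" (05 being
 * GDB_SIGNAL_TRAP, and thread IDs being the vCPU number plus one).  A
 * stop with no specific vCPU is reported in the short form "S05".
 */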

static void
gdb_finish_suspend_vcpus(void)
{

	if (first_stop) {
		first_stop = false;
		stopped_vcpu = -1;
	} else if (report_next_stop) {
		assert(!response_pending());
		report_stop(true);
		send_pending_data(cur_fd);
	}
}

/*
 * vCPU threads invoke this function whenever the vCPU enters the
 * debug server to pause or report an event.  vCPU threads wait here
 * as long as the debug server keeps them suspended.
 */
static void
_gdb_cpu_suspend(struct vcpu *vcpu, bool report_stop)
{
	int vcpuid = vcpu_id(vcpu);

	debug("$vCPU %d suspending\n", vcpuid);
	CPU_SET(vcpuid, &vcpus_waiting);
	if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
		gdb_finish_suspend_vcpus();
	while (CPU_ISSET(vcpuid, &vcpus_suspended))
		pthread_cond_wait(&idle_vcpus, &gdb_lock);
	CPU_CLR(vcpuid, &vcpus_waiting);
	debug("$vCPU %d resuming\n", vcpuid);
}

/*
 * Requests vCPU single-stepping using a VMEXIT suitable for the host
 * platform.
 */
static int
_gdb_set_step(struct vcpu *vcpu, int val)
{
	int error;

#ifdef __amd64__
	/*
	 * If the MTRAP cap fails, we are running on an AMD host.
	 * In that case, we request DB exits caused by RFLAGS.TF.
	 */
	error = vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, val);
	if (error != 0)
		error = vm_set_capability(vcpu, VM_CAP_RFLAGS_TF, val);
	if (error == 0)
		(void)vm_set_capability(vcpu, VM_CAP_MASK_HWINTR, val);
#else /* __aarch64__ */
	error = vm_set_capability(vcpu, VM_CAP_SS_EXIT, val);
	if (error == 0)
		error = vm_set_capability(vcpu, VM_CAP_MASK_HWINTR, val);
#endif
	return (error);
}

/*
 * Checks whether single-stepping is supported for a given vCPU.
 */
static int
_gdb_check_step(struct vcpu *vcpu)
{
#ifdef __amd64__
	int val;

	if (vm_get_capability(vcpu, VM_CAP_MTRAP_EXIT, &val) != 0) {
		if (vm_get_capability(vcpu, VM_CAP_RFLAGS_TF, &val) != 0)
			return (-1);
	}
#else /* __aarch64__ */
	(void)vcpu;
#endif
	return (0);
}

/*
 * Invoked at the start of a vCPU thread's execution to inform the
 * debug server about the new thread.
 */
void
gdb_cpu_add(struct vcpu *vcpu)
{
	int vcpuid;

	if (!gdb_active)
		return;
	vcpuid = vcpu_id(vcpu);
	debug("$vCPU %d starting\n", vcpuid);
	pthread_mutex_lock(&gdb_lock);
	assert(vcpuid < guest_ncpus);
	assert(vcpus[vcpuid] == NULL);
	vcpus[vcpuid] = vcpu;
	CPU_SET(vcpuid, &vcpus_active);
	if (!TAILQ_EMPTY(&breakpoints)) {
		vm_set_capability(vcpu, GDB_BREAKPOINT_CAP, 1);
		debug("$vCPU %d enabled breakpoint exits\n", vcpuid);
	}

	/*
	 * If a vcpu is added while vcpus are stopped, suspend the new
	 * vcpu so that it will pop back out with a debug exit before
	 * executing the first instruction.
	 */
	if (!CPU_EMPTY(&vcpus_suspended)) {
		CPU_SET(vcpuid, &vcpus_suspended);
		_gdb_cpu_suspend(vcpu, false);
	}
	pthread_mutex_unlock(&gdb_lock);
}

/*
 * Invoked by vCPU before resuming execution.  This enables stepping
 * if the vCPU is marked as stepping.
 */
static void
gdb_cpu_resume(struct vcpu *vcpu)
{
	struct vcpu_state *vs;
	int error;

	vs = &vcpu_state[vcpu_id(vcpu)];

	/*
	 * Any pending event should already be reported before
	 * resuming.
	 */
	assert(vs->hit_swbreak == false);
	assert(vs->stepped == false);
	if (vs->stepping) {
		error = _gdb_set_step(vcpu, 1);
		assert(error == 0);
	}
}

/*
 * Handler for VM_EXITCODE_DEBUG used to suspend a vCPU when the guest
 * has been suspended due to an event on a different vCPU or in response
 * to a guest-wide suspend such as Ctrl-C or the stop on attach.
 */
void
gdb_cpu_suspend(struct vcpu *vcpu)
{

	if (!gdb_active)
		return;
	pthread_mutex_lock(&gdb_lock);
	_gdb_cpu_suspend(vcpu, true);
	gdb_cpu_resume(vcpu);
	pthread_mutex_unlock(&gdb_lock);
}

static void
gdb_suspend_vcpus(void)
{

	assert(pthread_mutex_isowned_np(&gdb_lock));
	debug("suspending all CPUs\n");
	vcpus_suspended = vcpus_active;
	vm_suspend_all_cpus(ctx);
	if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
		gdb_finish_suspend_vcpus();
}

/*
 * Invoked each time a vmexit handler needs to step a vCPU.
 * Handles MTRAP and RFLAGS.TF vmexits.
 */
static void
gdb_cpu_step(struct vcpu *vcpu)
{
	struct vcpu_state *vs;
	int vcpuid = vcpu_id(vcpu);
	int error;

	debug("$vCPU %d stepped\n", vcpuid);
	pthread_mutex_lock(&gdb_lock);
	vs = &vcpu_state[vcpuid];
	if (vs->stepping) {
		vs->stepping = false;
		vs->stepped = true;
		error = _gdb_set_step(vcpu, 0);
		assert(error == 0);

		while (vs->stepped) {
			if (stopped_vcpu == -1) {
				debug("$vCPU %d reporting step\n", vcpuid);
				stopped_vcpu = vcpuid;
				gdb_suspend_vcpus();
			}
			_gdb_cpu_suspend(vcpu, true);
		}
		gdb_cpu_resume(vcpu);
	}
	pthread_mutex_unlock(&gdb_lock);
}

/*
 * A general handler for single-step exceptions.
 * Handles RFLAGS.TF exits on AMD SVM.
 */
void
gdb_cpu_debug(struct vcpu *vcpu, struct vm_exit *vmexit)
{
	if (!gdb_active)
		return;

#ifdef __amd64__
	/* RFLAGS.TF exit? */
	if (vmexit->u.dbg.trace_trap) {
		gdb_cpu_step(vcpu);
	}
#else /* __aarch64__ */
	(void)vmexit;
	gdb_cpu_step(vcpu);
#endif
}

/*
 * Handler for VM_EXITCODE_MTRAP reported when a vCPU single-steps via
 * the VT-x-specific MTRAP exit.
 */
void
gdb_cpu_mtrap(struct vcpu *vcpu)
{
	if (!gdb_active)
		return;
	gdb_cpu_step(vcpu);
}

static struct breakpoint *
find_breakpoint(uint64_t gpa)
{
	struct breakpoint *bp;

	TAILQ_FOREACH(bp, &breakpoints, link) {
		if (bp->gpa == gpa)
			return (bp);
	}
	return (NULL);
}

void
gdb_cpu_breakpoint(struct vcpu *vcpu, struct vm_exit *vmexit)
{
	struct breakpoint *bp;
	struct vcpu_state *vs;
	uint64_t gpa;
	int error, vcpuid;

	if (!gdb_active) {
		EPRINTLN("vm_loop: unexpected VMEXIT_DEBUG");
		exit(4);
	}
	vcpuid = vcpu_id(vcpu);
	pthread_mutex_lock(&gdb_lock);
	error = guest_vaddr2paddr(vcpu, guest_pc(vmexit), &gpa);
	assert(error == 1);
	bp = find_breakpoint(gpa);
	if (bp != NULL) {
		vs = &vcpu_state[vcpuid];
		assert(vs->stepping == false);
		assert(vs->stepped == false);
		assert(vs->hit_swbreak == false);
		vs->hit_swbreak = true;
		vm_set_register(vcpu, GDB_PC_REGNAME, guest_pc(vmexit));
		for (;;) {
			if (stopped_vcpu == -1) {
				debug("$vCPU %d reporting breakpoint at rip %#lx\n",
				    vcpuid, guest_pc(vmexit));
				stopped_vcpu = vcpuid;
				gdb_suspend_vcpus();
			}
			_gdb_cpu_suspend(vcpu, true);
			if (!vs->hit_swbreak) {
				/* Breakpoint reported. */
				break;
			}
			bp = find_breakpoint(gpa);
			if (bp == NULL) {
				/* Breakpoint was removed. */
				vs->hit_swbreak = false;
				break;
			}
		}
		gdb_cpu_resume(vcpu);
	} else {
		debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpuid,
		    guest_pc(vmexit));
#ifdef __amd64__
		error = vm_set_register(vcpu, VM_REG_GUEST_ENTRY_INST_LENGTH,
		    vmexit->u.bpt.inst_length);
		assert(error == 0);
		error = vm_inject_exception(vcpu, IDT_BP, 0, 0, 0);
		assert(error == 0);
#else /* __aarch64__ */
		uint64_t esr;

		esr = (EXCP_BRK << ESR_ELx_EC_SHIFT) | vmexit->u.hyp.esr_el2;
		error = vm_inject_exception(vcpu, esr, 0);
		assert(error == 0);
#endif
	}
	pthread_mutex_unlock(&gdb_lock);
}

static bool
gdb_step_vcpu(struct vcpu *vcpu)
{
	int error, vcpuid;

	vcpuid = vcpu_id(vcpu);
	debug("$vCPU %d step\n", vcpuid);
	error = _gdb_check_step(vcpu);
	if (error < 0)
		return (false);

	discard_stop();
	vcpu_state[vcpuid].stepping = true;
	vm_resume_cpu(vcpu);
	CPU_CLR(vcpuid, &vcpus_suspended);
	pthread_cond_broadcast(&idle_vcpus);
	return (true);
}

static void
gdb_resume_vcpus(void)
{

	assert(pthread_mutex_isowned_np(&gdb_lock));
	vm_resume_all_cpus(ctx);
	debug("resuming all CPUs\n");
	CPU_ZERO(&vcpus_suspended);
	pthread_cond_broadcast(&idle_vcpus);
}

static void
gdb_read_regs(void)
{
	uint64_t regvals[nitems(gdb_regset)];
	int regnums[nitems(gdb_regset)];

	for (size_t i = 0; i < nitems(gdb_regset); i++)
		regnums[i] = gdb_regset[i].id;
	if (vm_get_register_set(vcpus[cur_vcpu], nitems(gdb_regset),
	    regnums, regvals) == -1) {
		send_error(errno);
		return;
	}

	start_packet();
	for (size_t i = 0; i < nitems(gdb_regset); i++) {
#ifdef GDB_REG_FIRST_EXT
		if (gdb_regset[i].id == GDB_REG_FIRST_EXT)
			break;
#endif
		append_unsigned_native(regvals[i], gdb_regset[i].size);
	}
	finish_packet();
}

static void
gdb_read_one_reg(const uint8_t *data, size_t len)
{
	uint64_t regval;
	uintmax_t reg;

	reg = parse_integer(data, len);
	if (reg >= nitems(gdb_regset)) {
		send_error(EINVAL);
		return;
	}

	if (vm_get_register(vcpus[cur_vcpu], gdb_regset[reg].id, &regval) ==
	    -1) {
		send_error(errno);
		return;
	}

	start_packet();
	append_unsigned_native(regval, gdb_regset[reg].size);
	finish_packet();
}

static void
gdb_read_mem(const uint8_t *data, size_t len)
{
	uint64_t gpa, gva, val;
	uint8_t *cp;
	size_t resid, todo, bytes;
	bool started;
	int error;

	assert(len >= 1);

	/* Skip 'm' */
	data += 1;
	len -= 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse length. */
	resid = parse_integer(data, len);

	started = false;
	while (resid > 0) {
		error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
		if (error == -1) {
			if (started)
				finish_packet();
			else
				send_error(errno);
			return;
		}
		if (error == 0) {
			if (started)
				finish_packet();
			else
				send_error(EFAULT);
			return;
		}

		/* Read bytes from current page. */
		todo = getpagesize() - gpa % getpagesize();
		if (todo > resid)
			todo = resid;

		cp = paddr_guest2host(ctx, gpa, todo);
		if (cp != NULL) {
			/*
			 * If this page is guest RAM, read it a byte
			 * at a time.
			 */
			if (!started) {
				start_packet();
				started = true;
			}
			while (todo > 0) {
				append_byte(*cp);
				cp++;
				gpa++;
				gva++;
				resid--;
				todo--;
			}
		} else {
			/*
			 * If this page isn't guest RAM, try to handle
			 * it via MMIO.  For MMIO requests, use
			 * aligned reads of words when possible.
			 */
			while (todo > 0) {
				if (gpa & 1 || todo == 1)
					bytes = 1;
				else if (gpa & 2 || todo == 2)
					bytes = 2;
				else
					bytes = 4;
				error = read_mem(vcpus[cur_vcpu], gpa, &val,
				    bytes);
				if (error == 0) {
					if (!started) {
						start_packet();
						started = true;
					}
					gpa += bytes;
					gva += bytes;
					resid -= bytes;
					todo -= bytes;
					while (bytes > 0) {
						append_byte(val);
						val >>= 8;
						bytes--;
					}
				} else {
					if (started)
						finish_packet();
					else
						send_error(EFAULT);
					return;
				}
			}
		}
		assert(resid == 0 || gpa % getpagesize() == 0);
	}
	if (!started)
		start_packet();
	finish_packet();
}

static void
gdb_write_mem(const uint8_t *data, size_t len)
{
	uint64_t gpa, gva, val;
	uint8_t *cp;
	size_t resid, todo, bytes;
	int error;

	assert(len >= 1);

	/* Skip 'M' */
	data += 1;
	len -= 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume length. */
	cp = memchr(data, ':', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	resid = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Verify the available bytes match the length. */
	if (len != resid * 2) {
		send_error(EINVAL);
		return;
	}

	while (resid > 0) {
		error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
		if (error == -1) {
			send_error(errno);
			return;
		}
		if (error == 0) {
			send_error(EFAULT);
			return;
		}

		/* Write bytes to current page. */
		todo = getpagesize() - gpa % getpagesize();
		if (todo > resid)
			todo = resid;

		cp = paddr_guest2host(ctx, gpa, todo);
		if (cp != NULL) {
			/*
			 * If this page is guest RAM, write it a byte
			 * at a time.
			 */
			while (todo > 0) {
				assert(len >= 2);
				*cp = parse_byte(data);
				data += 2;
				len -= 2;
				cp++;
				gpa++;
				gva++;
				resid--;
				todo--;
			}
		} else {
			/*
			 * If this page isn't guest RAM, try to handle
			 * it via MMIO.  For MMIO requests, use
			 * aligned writes of words when possible.
			 */
			while (todo > 0) {
				if (gpa & 1 || todo == 1) {
					bytes = 1;
					val = parse_byte(data);
				} else if (gpa & 2 || todo == 2) {
					bytes = 2;
					val = be16toh(parse_integer(data, 4));
				} else {
					bytes = 4;
					val = be32toh(parse_integer(data, 8));
				}
				error = write_mem(vcpus[cur_vcpu], gpa, val,
				    bytes);
				if (error == 0) {
					gpa += bytes;
					gva += bytes;
					resid -= bytes;
					todo -= bytes;
					data += 2 * bytes;
					len -= 2 * bytes;
				} else {
					send_error(EFAULT);
					return;
				}
			}
		}
		assert(resid == 0 || gpa % getpagesize() == 0);
	}
	assert(len == 0);
	send_ok();
}
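
/*
 * Illustrative example (not from the original source): memory is read
 * with "m<addr>,<length>" (both hex), e.g. "mffffffff80000000,4"
 * requests 4 bytes at that guest virtual address, and a successful
 * reply is the bytes in memory order as hex pairs.  Writes use the
 * symmetric "M<addr>,<length>:<bytes>" form handled above, which
 * requires exactly length * 2 hex digits after the ':'.
 */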

static bool
set_breakpoint_caps(bool enable)
{
	cpuset_t mask;
	int vcpu;

	mask = vcpus_active;
	while (!CPU_EMPTY(&mask)) {
		vcpu = CPU_FFS(&mask) - 1;
		CPU_CLR(vcpu, &mask);
		if (vm_set_capability(vcpus[vcpu], GDB_BREAKPOINT_CAP,
		    enable ? 1 : 0) < 0)
			return (false);
		debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
		    enable ? "en" : "dis");
	}
	return (true);
}

static void
write_instr(uint8_t *dest, uint8_t *instr, size_t len)
{
	memcpy(dest, instr, len);
#ifdef __aarch64__
	__asm __volatile(
	    "dc cvau, %0\n"
	    "dsb ish\n"
	    "ic ialluis\n"
	    "dsb ish\n"
	    : : "r" (dest) : "memory");
#endif
}

static void
remove_all_sw_breakpoints(void)
{
	struct breakpoint *bp, *nbp;
	uint8_t *cp;

	if (TAILQ_EMPTY(&breakpoints))
		return;

	TAILQ_FOREACH_SAFE(bp, &breakpoints, link, nbp) {
		debug("remove breakpoint at %#lx\n", bp->gpa);
		cp = paddr_guest2host(ctx, bp->gpa, sizeof(bp->shadow_inst));
		write_instr(cp, bp->shadow_inst, sizeof(bp->shadow_inst));
		TAILQ_REMOVE(&breakpoints, bp, link);
		free(bp);
	}
	TAILQ_INIT(&breakpoints);
	set_breakpoint_caps(false);
}

static void
update_sw_breakpoint(uint64_t gva, int kind, bool insert)
{
	struct breakpoint *bp;
	uint64_t gpa;
	uint8_t *cp;
	int error;

	if (kind != GDB_BP_SIZE) {
		send_error(EINVAL);
		return;
	}

	error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
	if (error == -1) {
		send_error(errno);
		return;
	}
	if (error == 0) {
		send_error(EFAULT);
		return;
	}

	cp = paddr_guest2host(ctx, gpa, sizeof(bp->shadow_inst));

	/* Only permit breakpoints in guest RAM. */
	if (cp == NULL) {
		send_error(EFAULT);
		return;
	}

	/* Find any existing breakpoint. */
	bp = find_breakpoint(gpa);

	/*
	 * Silently ignore duplicate commands since the protocol
	 * requires these packets to be idempotent.
	 */
	if (insert) {
		if (bp == NULL) {
			if (TAILQ_EMPTY(&breakpoints) &&
			    !set_breakpoint_caps(true)) {
				send_empty_response();
				return;
			}
			bp = malloc(sizeof(*bp));
			bp->gpa = gpa;
			memcpy(bp->shadow_inst, cp, sizeof(bp->shadow_inst));
			write_instr(cp, GDB_BP_INSTR, sizeof(bp->shadow_inst));
			TAILQ_INSERT_TAIL(&breakpoints, bp, link);
			debug("new breakpoint at %#lx\n", gpa);
		}
	} else {
		if (bp != NULL) {
			debug("remove breakpoint at %#lx\n", gpa);
			write_instr(cp, bp->shadow_inst,
			    sizeof(bp->shadow_inst));
			TAILQ_REMOVE(&breakpoints, bp, link);
			free(bp);
			if (TAILQ_EMPTY(&breakpoints))
				set_breakpoint_caps(false);
		}
	}
	send_ok();
}

static void
parse_breakpoint(const uint8_t *data, size_t len)
{
	uint64_t gva;
	uint8_t *cp;
	bool insert;
	int kind, type;

	insert = data[0] == 'Z';

	/* Skip 'Z/z' */
	data += 1;
	len -= 1;

	/* Parse and consume type. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	type = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume kind. */
	cp = memchr(data, ';', len);
	if (cp == data) {
		send_error(EINVAL);
		return;
	}
	if (cp != NULL) {
		/*
		 * We do not advertise support for either the
		 * ConditionalBreakpoints or BreakpointCommands
		 * features, so we should not be getting conditions or
		 * commands from the remote end.
		 */
		send_empty_response();
		return;
	}
	kind = parse_integer(data, len);
	data += len;
	len = 0;

	switch (type) {
	case 0:
		update_sw_breakpoint(gva, kind, insert);
		break;
	default:
		send_empty_response();
		break;
	}
}

static bool
command_equals(const uint8_t *data, size_t len, const char *cmd)
{

	if (strlen(cmd) > len)
		return (false);
	return (memcmp(data, cmd, strlen(cmd)) == 0);
}

static void
check_features(const uint8_t *data, size_t len)
{
	char *feature, *next_feature, *str, *value;
	bool supported;

	str = malloc(len + 1);
	memcpy(str, data, len);
	str[len] = '\0';
	next_feature = str;

	while ((feature = strsep(&next_feature, ";")) != NULL) {
		/*
		 * Null features shouldn't exist, but skip if they
		 * do.
		 */
		if (strcmp(feature, "") == 0)
			continue;

		/*
		 * Look for the value or supported / not supported
		 * flag.
		 */
		value = strchr(feature, '=');
		if (value != NULL) {
			*value = '\0';
			value++;
			supported = true;
		} else {
			value = feature + strlen(feature) - 1;
			switch (*value) {
			case '+':
				supported = true;
				break;
			case '-':
				supported = false;
				break;
			default:
				/*
				 * This is really a protocol error,
				 * but we just ignore malformed
				 * features for ease of
				 * implementation.
				 */
				continue;
			}
			value = NULL;
		}

		if (strcmp(feature, "swbreak") == 0)
			swbreak_enabled = supported;
	}
	free(str);

	start_packet();

	/* This is an arbitrary limit. */
	append_string("PacketSize=4096");
	append_string(";swbreak+");
	append_string(";qXfer:features:read+");
	finish_packet();
}
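
/*
 * Illustrative exchange (not from the original source): a debugger
 * typically opens with something like
 *	qSupported:multiprocess+;swbreak+;hwbreak+
 * and check_features() above answers
 *	PacketSize=4096;swbreak+;qXfer:features:read+
 * while recording whether "swbreak" stop reasons may be reported.
 */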

static void
gdb_query(const uint8_t *data, size_t len)
{

	/*
	 * TODO:
	 * - qSearch
	 */
	if (command_equals(data, len, "qAttached")) {
		start_packet();
		append_char('1');
		finish_packet();
	} else if (command_equals(data, len, "qC")) {
		start_packet();
		append_string("QC");
		append_integer(cur_vcpu + 1);
		finish_packet();
	} else if (command_equals(data, len, "qfThreadInfo")) {
		cpuset_t mask;
		bool first;
		int vcpu;

		if (CPU_EMPTY(&vcpus_active)) {
			send_error(EINVAL);
			return;
		}
		mask = vcpus_active;
		start_packet();
		append_char('m');
		first = true;
		while (!CPU_EMPTY(&mask)) {
			vcpu = CPU_FFS(&mask) - 1;
			CPU_CLR(vcpu, &mask);
			if (first)
				first = false;
			else
				append_char(',');
			append_integer(vcpu + 1);
		}
		finish_packet();
	} else if (command_equals(data, len, "qsThreadInfo")) {
		start_packet();
		append_char('l');
		finish_packet();
	} else if (command_equals(data, len, "qSupported")) {
		data += strlen("qSupported");
		len -= strlen("qSupported");
		check_features(data, len);
	} else if (command_equals(data, len, "qThreadExtraInfo")) {
		char buf[16];
		int tid;

		data += strlen("qThreadExtraInfo");
		len -= strlen("qThreadExtraInfo");
		if (len == 0 || *data != ',') {
			send_error(EINVAL);
			return;
		}
		tid = parse_threadid(data + 1, len - 1);
		if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
			send_error(EINVAL);
			return;
		}

		snprintf(buf, sizeof(buf), "vCPU %d", tid - 1);
		start_packet();
		append_asciihex(buf);
		finish_packet();
	} else if (command_equals(data, len, "qXfer:features:read:")) {
		struct stat sb;
		const char *xml;
		const uint8_t *pathend;
		char buf[64], path[PATH_MAX];
		size_t xmllen;
		unsigned int doff, dlen;
		int fd;

		data += strlen("qXfer:features:read:");
		len -= strlen("qXfer:features:read:");

		pathend = memchr(data, ':', len);
		if (pathend == NULL ||
		    (size_t)(pathend - data) >= sizeof(path) - 1) {
			send_error(EINVAL);
			return;
		}
		memcpy(path, data, pathend - data);
		path[pathend - data] = '\0';
		len -= (pathend - data) + 1;
		data += (pathend - data) + 1;

		if (len > sizeof(buf) - 1) {
			send_error(EINVAL);
			return;
		}
		memcpy(buf, data, len);
		buf[len] = '\0';
		if (sscanf(buf, "%x,%x", &doff, &dlen) != 2) {
			send_error(EINVAL);
			return;
		}

		fd = openat(xml_dfd, path, O_RDONLY | O_RESOLVE_BENEATH);
		if (fd < 0) {
			send_error(errno);
			return;
		}
		if (fstat(fd, &sb) < 0) {
			send_error(errno);
			close(fd);
			return;
		}
		xml = mmap(NULL, sb.st_size, PROT_READ, MAP_SHARED, fd, 0);
		if (xml == MAP_FAILED) {
			send_error(errno);
			close(fd);
			return;
		}
		close(fd);
		xmllen = sb.st_size;

		start_packet();
		if (doff >= xmllen) {
			append_char('l');
		} else if (doff + dlen >= xmllen) {
			append_char('l');
			append_binary_data(xml + doff, xmllen - doff);
		} else {
			append_char('m');
			append_binary_data(xml + doff, dlen);
		}
		finish_packet();
		(void)munmap(__DECONST(void *, xml), xmllen);
	} else
		send_empty_response();
}
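
/*
 * Illustrative exchange (not from the original source): the debugger
 * fetches the target description in chunks, e.g.
 *	qXfer:features:read:target.xml:0,1000
 * asks for up to 0x1000 bytes at offset 0.  A reply beginning with
 * 'm' means more data remains; 'l' marks the final (or empty) chunk.
 */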

static void
handle_command(const uint8_t *data, size_t len)
{

	/* Reject packets with a sequence-id. */
	if (len >= 3 && data[0] >= '0' && data[0] <= '9' &&
	    data[1] >= '0' && data[1] <= '9' && data[2] == ':') {
		send_empty_response();
		return;
	}

	switch (*data) {
	case 'c':
		if (len != 1) {
			send_error(EINVAL);
			break;
		}

		discard_stop();
		gdb_resume_vcpus();
		break;
	case 'D':
		send_ok();

		/* TODO: Resume any stopped CPUs. */
		break;
	case 'g':
		gdb_read_regs();
		break;
	case 'p':
		gdb_read_one_reg(data + 1, len - 1);
		break;
	case 'H': {
		int tid;

		if (len < 2 || (data[1] != 'g' && data[1] != 'c')) {
			send_error(EINVAL);
			break;
		}
		tid = parse_threadid(data + 2, len - 2);
		if (tid == -2) {
			send_error(EINVAL);
			break;
		}

		if (CPU_EMPTY(&vcpus_active)) {
			send_error(EINVAL);
			break;
		}
		if (tid == -1 || tid == 0)
			cur_vcpu = CPU_FFS(&vcpus_active) - 1;
		else if (CPU_ISSET(tid - 1, &vcpus_active))
			cur_vcpu = tid - 1;
		else {
			send_error(EINVAL);
			break;
		}
		send_ok();
		break;
	}
	case 'm':
		gdb_read_mem(data, len);
		break;
	case 'M':
		gdb_write_mem(data, len);
		break;
	case 'T': {
		int tid;

		tid = parse_threadid(data + 1, len - 1);
		if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
			send_error(EINVAL);
			return;
		}
		send_ok();
		break;
	}
	case 'q':
		gdb_query(data, len);
		break;
	case 's':
		if (len != 1) {
			send_error(EINVAL);
			break;
		}

		/* Don't send a reply until a stop occurs. */
		if (!gdb_step_vcpu(vcpus[cur_vcpu])) {
			send_error(EOPNOTSUPP);
			break;
		}
		break;
	case 'z':
	case 'Z':
		parse_breakpoint(data, len);
		break;
	case '?':
		report_stop(false);
		break;
	case 'G': /* TODO */
	case 'v':
		/* Handle 'vCont' */
		/* 'vCtrlC' */
	case 'P': /* TODO */
	case 'Q': /* TODO */
	case 't': /* TODO */
	case 'X': /* TODO */
	default:
		send_empty_response();
	}
}
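
/*
 * Illustrative trace (not from the original source) of the ack
 * protocol handled below: each well-formed packet is answered with
 * '+' before its response, and a checksum mismatch is answered with
 * '-', prompting the peer to retransmit:
 *
 *	<- $m1000,4#00		(bad checksum)
 *	-> -
 *	<- $m1000,4#8e
 *	-> +
 *	-> $90e5f3cc#62
 */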

/* Check for a valid packet in the command buffer. */
static void
check_command(int fd)
{
	uint8_t *head, *hash, *p, sum;
	size_t avail, plen;

	for (;;) {
		avail = cur_comm.len;
		if (avail == 0)
			return;
		head = io_buffer_head(&cur_comm);
		switch (*head) {
		case 0x03:
			debug("<- Ctrl-C\n");
			io_buffer_consume(&cur_comm, 1);

			gdb_suspend_vcpus();
			break;
		case '+':
			/* ACK of previous response. */
			debug("<- +\n");
			if (response_pending())
				io_buffer_reset(&cur_resp);
			io_buffer_consume(&cur_comm, 1);
			if (stopped_vcpu != -1 && report_next_stop) {
				report_stop(true);
				send_pending_data(fd);
			}
			break;
		case '-':
			/* NACK of previous response. */
			debug("<- -\n");
			if (response_pending()) {
				cur_resp.len += cur_resp.start;
				cur_resp.start = 0;
				if (cur_resp.data[0] == '+')
					io_buffer_advance(&cur_resp, 1);
				debug("-> %.*s\n", (int)cur_resp.len,
				    io_buffer_head(&cur_resp));
			}
			io_buffer_consume(&cur_comm, 1);
			send_pending_data(fd);
			break;
		case '$':
			/* Packet. */

			if (response_pending()) {
				warnx("New GDB command while response in "
				    "progress");
				io_buffer_reset(&cur_resp);
			}

			/* Is packet complete? */
			hash = memchr(head, '#', avail);
			if (hash == NULL)
				return;
			plen = (hash - head + 1) + 2;
			if (avail < plen)
				return;
			debug("<- %.*s\n", (int)plen, head);

			/* Verify checksum. */
			for (sum = 0, p = head + 1; p < hash; p++)
				sum += *p;
			if (sum != parse_byte(hash + 1)) {
				io_buffer_consume(&cur_comm, plen);
				debug("-> -\n");
				send_char('-');
				send_pending_data(fd);
				break;
			}
			send_char('+');

			handle_command(head + 1, hash - (head + 1));
			io_buffer_consume(&cur_comm, plen);
			if (!response_pending())
				debug("-> +\n");
			send_pending_data(fd);
			break;
		default:
			/* XXX: Possibly drop connection instead. */
			debug("-> %02x\n", *head);
			io_buffer_consume(&cur_comm, 1);
			break;
		}
	}
}

static void
gdb_readable(int fd, enum ev_type event __unused, void *arg __unused)
{
	size_t pending;
	ssize_t nread;
	int n;

	if (ioctl(fd, FIONREAD, &n) == -1) {
		warn("FIONREAD on GDB socket");
		return;
	}
	assert(n >= 0);
	pending = n;

	/*
	 * 'pending' might be zero due to EOF.  We need to call read
	 * with a non-zero length to detect EOF.
	 */
	if (pending == 0)
		pending = 1;

	/* Ensure there is room in the command buffer. */
	io_buffer_grow(&cur_comm, pending);
	assert(io_buffer_avail(&cur_comm) >= pending);

	nread = read(fd, io_buffer_tail(&cur_comm), io_buffer_avail(&cur_comm));
	if (nread == 0) {
		close_connection();
	} else if (nread == -1) {
		if (errno == EAGAIN)
			return;

		warn("Read from GDB socket");
		close_connection();
	} else {
		cur_comm.len += nread;
		pthread_mutex_lock(&gdb_lock);
		check_command(fd);
		pthread_mutex_unlock(&gdb_lock);
	}
}

static void
gdb_writable(int fd, enum ev_type event __unused, void *arg __unused)
{

	send_pending_data(fd);
}

static void
new_connection(int fd, enum ev_type event __unused, void *arg)
{
	int optval, s;

	s = accept4(fd, NULL, NULL, SOCK_NONBLOCK);
	if (s == -1) {
		if (arg != NULL)
			err(1, "Failed accepting initial GDB connection");

		/* Silently ignore errors post-startup. */
		return;
	}

	optval = 1;
	if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)) ==
	    -1) {
		warn("Failed to disable SIGPIPE for GDB connection");
		close(s);
		return;
	}

	pthread_mutex_lock(&gdb_lock);
	if (cur_fd != -1) {
		close(s);
		warnx("Ignoring additional GDB connection.");
		pthread_mutex_unlock(&gdb_lock);
		return;
	}

	read_event = mevent_add(s, EVF_READ, gdb_readable, NULL);
	if (read_event == NULL) {
		if (arg != NULL)
			err(1, "Failed to setup initial GDB connection");
		pthread_mutex_unlock(&gdb_lock);
		return;
	}
	write_event = mevent_add(s, EVF_WRITE, gdb_writable, NULL);
	if (write_event == NULL) {
		if (arg != NULL)
			err(1, "Failed to setup initial GDB connection");
		mevent_delete_close(read_event);
		read_event = NULL;
	}

	cur_fd = s;
	cur_vcpu = 0;
	stopped_vcpu = -1;

	/* Break on attach. */
	first_stop = true;
	report_next_stop = false;
	gdb_suspend_vcpus();
	pthread_mutex_unlock(&gdb_lock);
}
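
/*
 * Illustrative usage (not from the original source): the debug server
 * is driven by the config keys read in init_gdb() below, e.g.
 *	bhyve -o gdb.port=1234 -o gdb.wait=true ...
 * after which a debugger attaches with
 *	(gdb) target remote localhost:1234
 * With gdb.wait=true, vCPU 0 is held suspended until that connection
 * arrives.
 */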

#ifndef WITHOUT_CAPSICUM
static void
limit_gdb_socket(int s)
{
	cap_rights_t rights;
	unsigned long ioctls[] = { FIONREAD };

	cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE,
	    CAP_SETSOCKOPT, CAP_IOCTL);
	if (caph_rights_limit(s, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	if (caph_ioctls_limit(s, ioctls, nitems(ioctls)) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
}
#endif

void
init_gdb(struct vmctx *_ctx)
{
#ifndef WITHOUT_CAPSICUM
	cap_rights_t rights;
#endif
	int error, flags, optval, s;
	struct addrinfo hints;
	struct addrinfo *gdbaddr;
	const char *saddr, *value;
	char *sport;
	bool wait;

	value = get_config_value("gdb.port");
	if (value == NULL)
		return;
	sport = strdup(value);
	if (sport == NULL)
		errx(4, "Failed to allocate memory");

	wait = get_config_bool_default("gdb.wait", false);

	saddr = get_config_value("gdb.address");
	if (saddr == NULL) {
		saddr = "localhost";
	}

	debug("==> starting on %s:%s, %swaiting\n",
	    saddr, sport, wait ? "" : "not ");

	error = pthread_mutex_init(&gdb_lock, NULL);
	if (error != 0)
		errc(1, error, "gdb mutex init");
	error = pthread_cond_init(&idle_vcpus, NULL);
	if (error != 0)
		errc(1, error, "gdb cv init");

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_flags = AI_NUMERICSERV | AI_PASSIVE;

	error = getaddrinfo(saddr, sport, &hints, &gdbaddr);
	if (error != 0)
		errx(1, "gdb address resolution: %s", gai_strerror(error));

	ctx = _ctx;
	s = socket(gdbaddr->ai_family, gdbaddr->ai_socktype, 0);
	if (s < 0)
		err(1, "gdb socket create");

	optval = 1;
	(void)setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));

	if (bind(s, gdbaddr->ai_addr, gdbaddr->ai_addrlen) < 0)
		err(1, "gdb socket bind");

	if (listen(s, 1) < 0)
		err(1, "gdb socket listen");

	stopped_vcpu = -1;
	TAILQ_INIT(&breakpoints);
	vcpus = calloc(guest_ncpus, sizeof(*vcpus));
	vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
	if (wait) {
		/*
		 * Set vcpu 0 in vcpus_suspended.  This will trigger the
		 * logic in gdb_cpu_add() to suspend the first vcpu before
		 * it starts execution.  The vcpu will remain suspended
		 * until a debugger connects.
		 */
		CPU_SET(0, &vcpus_suspended);
		stopped_vcpu = 0;
	}

	flags = fcntl(s, F_GETFL);
	if (fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1)
		err(1, "Failed to mark gdb socket non-blocking");

#ifndef WITHOUT_CAPSICUM
	limit_gdb_socket(s);
#endif
	mevent_add(s, EVF_READ, new_connection, NULL);
	gdb_active = true;
	freeaddrinfo(gdbaddr);
	free(sport);

	xml_dfd = open(_PATH_GDB_XML, O_DIRECTORY);
	if (xml_dfd == -1)
		err(1, "Failed to open gdb xml directory");
#ifndef WITHOUT_CAPSICUM
	cap_rights_init(&rights, CAP_FSTAT, CAP_LOOKUP, CAP_MMAP_R, CAP_PREAD);
	if (caph_rights_limit(xml_dfd, &rights) == -1)
		err(1, "cap_rights_init");
#endif
}