1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/param.h>
29 #ifndef WITHOUT_CAPSICUM
30 #include <sys/capsicum.h>
31 #endif
32 #include <sys/endian.h>
33 #include <sys/ioctl.h>
34 #include <sys/mman.h>
35 #include <sys/queue.h>
36 #include <sys/socket.h>
37 #include <sys/stat.h>
38
39 #ifdef __aarch64__
40 #include <machine/armreg.h>
41 #endif
42 #include <machine/atomic.h>
43 #ifdef __amd64__
44 #include <machine/specialreg.h>
45 #endif
46 #include <machine/vmm.h>
47
48 #include <netinet/in.h>
49
50 #include <assert.h>
51 #ifndef WITHOUT_CAPSICUM
52 #include <capsicum_helpers.h>
53 #endif
54 #include <err.h>
55 #include <errno.h>
56 #include <fcntl.h>
57 #include <netdb.h>
58 #include <pthread.h>
59 #include <pthread_np.h>
60 #include <stdbool.h>
61 #include <stdio.h>
62 #include <stdlib.h>
63 #include <string.h>
64 #include <sysexits.h>
65 #include <unistd.h>
66 #include <vmmapi.h>
67
68 #include "bhyverun.h"
69 #include "config.h"
70 #include "debug.h"
71 #include "gdb.h"
72 #include "mem.h"
73 #include "mevent.h"
74
75 #define _PATH_GDB_XML "/usr/share/bhyve/gdb"
76
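/*
 * The debug server is enabled via the "gdb.port" config value (with
 * optional "gdb.address" and "gdb.wait") and speaks the GDB remote
 * protocol; a debugger attaches with, e.g., "target remote <host>:<port>".
 */
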
77 /*
78 * GDB_SIGNAL_* numbers are part of the GDB remote protocol. Most stops
79 * use SIGTRAP.
80 */
81 #define GDB_SIGNAL_TRAP 5
82
83 #if defined(__amd64__)
84 #define GDB_BP_SIZE 1
85 #define GDB_BP_INSTR (uint8_t []){0xcc}
86 #define GDB_PC_REGNAME VM_REG_GUEST_RIP
87 #define GDB_BREAKPOINT_CAP VM_CAP_BPT_EXIT
88 #elif defined(__aarch64__)
89 #define GDB_BP_SIZE 4
90 #define GDB_BP_INSTR (uint8_t []){0x00, 0x00, 0x20, 0xd4}
91 #define GDB_PC_REGNAME VM_REG_GUEST_PC
92 #define GDB_BREAKPOINT_CAP VM_CAP_BRK_EXIT
93 #else
94 #error "Unsupported architecture"
95 #endif
96
97 _Static_assert(sizeof(GDB_BP_INSTR) == GDB_BP_SIZE,
98 "GDB_BP_INSTR has wrong size");
99
100 static void gdb_resume_vcpus(void);
101 static void check_command(int fd);
102
103 static struct mevent *read_event, *write_event;
104
105 static cpuset_t vcpus_active, vcpus_suspended, vcpus_waiting;
106 static pthread_mutex_t gdb_lock;
107 static pthread_cond_t idle_vcpus;
108 static bool first_stop, report_next_stop, swbreak_enabled;
109 static int xml_dfd = -1;
110
111 /*
112 * An I/O buffer contains 'capacity' bytes of room at 'data'. For a
113 * read buffer, 'start' is unused and 'len' contains the number of
114 * valid bytes in the buffer. For a write buffer, 'start' is set to
115 * the index of the next byte in 'data' to send, and 'len' contains
116 * the remaining number of valid bytes to send.
117 */
118 struct io_buffer {
119 uint8_t *data;
120 size_t capacity;
121 size_t start;
122 size_t len;
123 };
124
125 struct breakpoint {
126 uint64_t gpa;
127 uint8_t shadow_inst[GDB_BP_SIZE];
128 TAILQ_ENTRY(breakpoint) link;
129 };
130
131 /*
132 * When a vCPU stops due to an event that should be reported to the
133 * debugger, information about the event is stored in this structure.
134 * The vCPU thread then sets 'stopped_vcpu' if it is not already set
135 * and stops other vCPUs so the event can be reported. The
136 * report_stop() function reports the event for the 'stopped_vcpu'
137 * vCPU. When the debugger resumes execution via continue or step,
138 * the event for 'stopped_vcpu' is cleared. vCPUs will loop in their
139 * event handlers until the associated event is reported or disabled.
140 *
141 * An idle vCPU will have all of the boolean fields set to false.
142 *
143 * When a vCPU is stepped, 'stepping' is set to true when the vCPU is
144 * released to execute the stepped instruction. When the vCPU reports
145 * the stepping trap, 'stepped' is set.
146 *
147 * When a vCPU hits a breakpoint set by the debug server,
148 * 'hit_swbreak' is set to true.
149 */
150 struct vcpu_state {
151 bool stepping;
152 bool stepped;
153 bool hit_swbreak;
154 };
155
156 static struct io_buffer cur_comm, cur_resp;
157 static uint8_t cur_csum;
158 static struct vmctx *ctx;
159 static int cur_fd = -1;
160 static TAILQ_HEAD(, breakpoint) breakpoints;
161 static struct vcpu_state *vcpu_state;
162 static struct vcpu **vcpus;
163 static int cur_vcpu, stopped_vcpu;
164 static bool gdb_active = false;
165
166 struct gdb_reg {
167 enum vm_reg_name id;
168 int size;
169 };
170
171 #ifdef __amd64__
172 static const struct gdb_reg gdb_regset[] = {
173 { .id = VM_REG_GUEST_RAX, .size = 8 },
174 { .id = VM_REG_GUEST_RBX, .size = 8 },
175 { .id = VM_REG_GUEST_RCX, .size = 8 },
176 { .id = VM_REG_GUEST_RDX, .size = 8 },
177 { .id = VM_REG_GUEST_RSI, .size = 8 },
178 { .id = VM_REG_GUEST_RDI, .size = 8 },
179 { .id = VM_REG_GUEST_RBP, .size = 8 },
180 { .id = VM_REG_GUEST_RSP, .size = 8 },
181 { .id = VM_REG_GUEST_R8, .size = 8 },
182 { .id = VM_REG_GUEST_R9, .size = 8 },
183 { .id = VM_REG_GUEST_R10, .size = 8 },
184 { .id = VM_REG_GUEST_R11, .size = 8 },
185 { .id = VM_REG_GUEST_R12, .size = 8 },
186 { .id = VM_REG_GUEST_R13, .size = 8 },
187 { .id = VM_REG_GUEST_R14, .size = 8 },
188 { .id = VM_REG_GUEST_R15, .size = 8 },
189 { .id = VM_REG_GUEST_RIP, .size = 8 },
190 { .id = VM_REG_GUEST_RFLAGS, .size = 4 },
191 { .id = VM_REG_GUEST_CS, .size = 4 },
192 { .id = VM_REG_GUEST_SS, .size = 4 },
193 { .id = VM_REG_GUEST_DS, .size = 4 },
194 { .id = VM_REG_GUEST_ES, .size = 4 },
195 { .id = VM_REG_GUEST_FS, .size = 4 },
196 { .id = VM_REG_GUEST_GS, .size = 4 },
197 /*
198 * Registers past this point are not included in a reply to a 'g' query,
199 * to provide compatibility with debuggers that do not fetch a target
200 * description. The debugger can query them individually with 'p' if it
201 * knows about them.
202 */
203 #define GDB_REG_FIRST_EXT VM_REG_GUEST_FS_BASE
204 { .id = VM_REG_GUEST_FS_BASE, .size = 8 },
205 { .id = VM_REG_GUEST_GS_BASE, .size = 8 },
206 { .id = VM_REG_GUEST_KGS_BASE, .size = 8 },
207 { .id = VM_REG_GUEST_CR0, .size = 8 },
208 { .id = VM_REG_GUEST_CR2, .size = 8 },
209 { .id = VM_REG_GUEST_CR3, .size = 8 },
210 { .id = VM_REG_GUEST_CR4, .size = 8 },
211 { .id = VM_REG_GUEST_TPR, .size = 8 },
212 { .id = VM_REG_GUEST_EFER, .size = 8 },
213 };
214 #else /* __aarch64__ */
215 static const struct gdb_reg gdb_regset[] = {
216 { .id = VM_REG_GUEST_X0, .size = 8 },
217 { .id = VM_REG_GUEST_X1, .size = 8 },
218 { .id = VM_REG_GUEST_X2, .size = 8 },
219 { .id = VM_REG_GUEST_X3, .size = 8 },
220 { .id = VM_REG_GUEST_X4, .size = 8 },
221 { .id = VM_REG_GUEST_X5, .size = 8 },
222 { .id = VM_REG_GUEST_X6, .size = 8 },
223 { .id = VM_REG_GUEST_X7, .size = 8 },
224 { .id = VM_REG_GUEST_X8, .size = 8 },
225 { .id = VM_REG_GUEST_X9, .size = 8 },
226 { .id = VM_REG_GUEST_X10, .size = 8 },
227 { .id = VM_REG_GUEST_X11, .size = 8 },
228 { .id = VM_REG_GUEST_X12, .size = 8 },
229 { .id = VM_REG_GUEST_X13, .size = 8 },
230 { .id = VM_REG_GUEST_X14, .size = 8 },
231 { .id = VM_REG_GUEST_X15, .size = 8 },
232 { .id = VM_REG_GUEST_X16, .size = 8 },
233 { .id = VM_REG_GUEST_X17, .size = 8 },
234 { .id = VM_REG_GUEST_X18, .size = 8 },
235 { .id = VM_REG_GUEST_X19, .size = 8 },
236 { .id = VM_REG_GUEST_X20, .size = 8 },
237 { .id = VM_REG_GUEST_X21, .size = 8 },
238 { .id = VM_REG_GUEST_X22, .size = 8 },
239 { .id = VM_REG_GUEST_X23, .size = 8 },
240 { .id = VM_REG_GUEST_X24, .size = 8 },
241 { .id = VM_REG_GUEST_X25, .size = 8 },
242 { .id = VM_REG_GUEST_X26, .size = 8 },
243 { .id = VM_REG_GUEST_X27, .size = 8 },
244 { .id = VM_REG_GUEST_X28, .size = 8 },
245 { .id = VM_REG_GUEST_X29, .size = 8 },
246 { .id = VM_REG_GUEST_LR, .size = 8 },
247 { .id = VM_REG_GUEST_SP, .size = 8 },
248 { .id = VM_REG_GUEST_PC, .size = 8 },
249 { .id = VM_REG_GUEST_CPSR, .size = 8 },
250 };
251 #endif
252
253 #ifdef GDB_LOG
254 #include <stdarg.h>
255 #include <stdio.h>
256
257 static void __printflike(1, 2)
258 debug(const char *fmt, ...)
259 {
260 static FILE *logfile;
261 va_list ap;
262
263 if (logfile == NULL) {
264 logfile = fopen("/tmp/bhyve_gdb.log", "w");
265 if (logfile == NULL)
266 return;
267 #ifndef WITHOUT_CAPSICUM
268 if (caph_limit_stream(fileno(logfile), CAPH_WRITE) == -1) {
269 fclose(logfile);
270 logfile = NULL;
271 return;
272 }
273 #endif
274 setlinebuf(logfile);
275 }
276 va_start(ap, fmt);
277 vfprintf(logfile, fmt, ap);
278 va_end(ap);
279 }
280 #else
281 #define debug(...)
282 #endif
283
284 static void remove_all_sw_breakpoints(void);
285
286 static int
287 guest_paging_info(struct vcpu *vcpu, struct vm_guest_paging *paging)
288 {
289 #ifdef __amd64__
290 uint64_t regs[4];
291 const int regset[4] = {
292 VM_REG_GUEST_CR0,
293 VM_REG_GUEST_CR3,
294 VM_REG_GUEST_CR4,
295 VM_REG_GUEST_EFER
296 };
297
298 if (vm_get_register_set(vcpu, nitems(regset), regset, regs) == -1)
299 return (-1);
300
301 /*
302 * For the debugger, always pretend to be the kernel (CPL 0),
303 * and if long-mode is enabled, always parse addresses as if
304 * in 64-bit mode.
305 */
306 paging->cr3 = regs[1];
307 paging->cpl = 0;
308 if (regs[3] & EFER_LMA)
309 paging->cpu_mode = CPU_MODE_64BIT;
310 else if (regs[0] & CR0_PE)
311 paging->cpu_mode = CPU_MODE_PROTECTED;
312 else
313 paging->cpu_mode = CPU_MODE_REAL;
314 if (!(regs[0] & CR0_PG))
315 paging->paging_mode = PAGING_MODE_FLAT;
316 else if (!(regs[2] & CR4_PAE))
317 paging->paging_mode = PAGING_MODE_32;
318 else if (regs[3] & EFER_LME)
319 paging->paging_mode = (regs[2] & CR4_LA57) ?
320 PAGING_MODE_64_LA57 : PAGING_MODE_64;
321 else
322 paging->paging_mode = PAGING_MODE_PAE;
323 return (0);
324 #else /* __aarch64__ */
325 uint64_t regs[6];
326 const int regset[6] = {
327 VM_REG_GUEST_TTBR0_EL1,
328 VM_REG_GUEST_TTBR1_EL1,
329 VM_REG_GUEST_TCR_EL1,
330 VM_REG_GUEST_TCR2_EL1,
331 VM_REG_GUEST_SCTLR_EL1,
332 VM_REG_GUEST_CPSR,
333 };
334
335 if (vm_get_register_set(vcpu, nitems(regset), regset, regs) == -1)
336 return (-1);
337
338 memset(paging, 0, sizeof(*paging));
339 paging->ttbr0_addr = regs[0] & ~(TTBR_ASID_MASK | TTBR_CnP);
340 paging->ttbr1_addr = regs[1] & ~(TTBR_ASID_MASK | TTBR_CnP);
341 paging->tcr_el1 = regs[2];
342 paging->tcr2_el1 = regs[3];
343 paging->flags = regs[5] & (PSR_M_MASK | PSR_M_32);
344 if ((regs[4] & SCTLR_M) != 0)
345 paging->flags |= VM_GP_MMU_ENABLED;
346
347 return (0);
348 #endif /* __aarch64__ */
349 }
350
351 /*
352 * Map a guest virtual address to a physical address (for a given vcpu).
353 * If a guest virtual address is valid, return 1. If the address is
354 * not valid, return 0. If an error occurs obtaining the mapping,
355 * return -1.
356 */
357 static int
358 guest_vaddr2paddr(struct vcpu *vcpu, uint64_t vaddr, uint64_t *paddr)
359 {
360 struct vm_guest_paging paging;
361 int fault;
362
363 if (guest_paging_info(vcpu, &paging) == -1)
364 return (-1);
365
366 /*
367 * Always use PROT_READ. We really care if the VA is
368 * accessible, not if the current vCPU can write.
369 */
370 if (vm_gla2gpa_nofault(vcpu, &paging, vaddr, PROT_READ, paddr,
371 &fault) == -1)
372 return (-1);
373 if (fault)
374 return (0);
375 return (1);
376 }
377
378 static uint64_t
379 guest_pc(struct vm_exit *vme)
380 {
381 #ifdef __amd64__
382 return (vme->rip);
383 #else /* __aarch64__ */
384 return (vme->pc);
385 #endif
386 }
387
388 static void
389 io_buffer_reset(struct io_buffer *io)
390 {
391
392 io->start = 0;
393 io->len = 0;
394 }
395
396 /* Available room for adding data. */
397 static size_t
398 io_buffer_avail(struct io_buffer *io)
399 {
400
401 return (io->capacity - (io->start + io->len));
402 }
403
404 static uint8_t *
405 io_buffer_head(struct io_buffer *io)
406 {
407
408 return (io->data + io->start);
409 }
410
411 static uint8_t *
412 io_buffer_tail(struct io_buffer *io)
413 {
414
415 return (io->data + io->start + io->len);
416 }
417
418 static void
419 io_buffer_advance(struct io_buffer *io, size_t amount)
420 {
421
422 assert(amount <= io->len);
423 io->start += amount;
424 io->len -= amount;
425 }
426
427 static void
428 io_buffer_consume(struct io_buffer *io, size_t amount)
429 {
430
431 io_buffer_advance(io, amount);
432 if (io->len == 0) {
433 io->start = 0;
434 return;
435 }
436
437 /*
438 * XXX: Consider making this move optional and compacting on a
439 * future read() before realloc().
440 */
441 memmove(io->data, io_buffer_head(io), io->len);
442 io->start = 0;
443 }
444
445 static void
446 io_buffer_grow(struct io_buffer *io, size_t newsize)
447 {
448 uint8_t *new_data;
449 size_t avail, new_cap;
450
451 avail = io_buffer_avail(io);
452 if (newsize <= avail)
453 return;
454
455 new_cap = io->capacity + (newsize - avail);
456 new_data = realloc(io->data, new_cap);
457 if (new_data == NULL)
458 err(1, "Failed to grow GDB I/O buffer");
459 io->data = new_data;
460 io->capacity = new_cap;
461 }
462
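/*
 * A response is considered pending if the output buffer holds
 * anything other than a single unsent ACK ('+').
 */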
463 static bool
464 response_pending(void)
465 {
466
467 if (cur_resp.start == 0 && cur_resp.len == 0)
468 return (false);
469 if (cur_resp.start + cur_resp.len == 1 && cur_resp.data[0] == '+')
470 return (false);
471 return (true);
472 }
473
474 static void
475 close_connection(void)
476 {
477
478 /*
479 * XXX: This triggers a warning because mevent does the close
480 * before the EV_DELETE.
481 */
482 pthread_mutex_lock(&gdb_lock);
483 mevent_delete(write_event);
484 mevent_delete_close(read_event);
485 write_event = NULL;
486 read_event = NULL;
487 io_buffer_reset(&cur_comm);
488 io_buffer_reset(&cur_resp);
489 cur_fd = -1;
490
491 remove_all_sw_breakpoints();
492
493 /* Clear any pending events. */
494 memset(vcpu_state, 0, guest_ncpus * sizeof(*vcpu_state));
495
496 /* Resume any stopped vCPUs. */
497 gdb_resume_vcpus();
498 pthread_mutex_unlock(&gdb_lock);
499 }
500
501 static uint8_t
502 hex_digit(uint8_t nibble)
503 {
504
505 if (nibble <= 9)
506 return (nibble + '0');
507 else
508 return (nibble + 'a' - 10);
509 }
510
511 static uint8_t
512 parse_digit(uint8_t v)
513 {
514
515 if (v >= '0' && v <= '9')
516 return (v - '0');
517 if (v >= 'a' && v <= 'f')
518 return (v - 'a' + 10);
519 if (v >= 'A' && v <= 'F')
520 return (v - 'A' + 10);
521 return (0xF);
522 }
523
524 /* Parses big-endian hexadecimal. */
525 static uintmax_t
526 parse_integer(const uint8_t *p, size_t len)
527 {
528 uintmax_t v;
529
530 v = 0;
531 while (len > 0) {
532 v <<= 4;
533 v |= parse_digit(*p);
534 p++;
535 len--;
536 }
537 return (v);
538 }
539
540 static uint8_t
541 parse_byte(const uint8_t *p)
542 {
543
544 return (parse_digit(p[0]) << 4 | parse_digit(p[1]));
545 }
546
547 static void
548 send_pending_data(int fd)
549 {
550 ssize_t nwritten;
551
552 if (cur_resp.len == 0) {
553 mevent_disable(write_event);
554 return;
555 }
556 nwritten = write(fd, io_buffer_head(&cur_resp), cur_resp.len);
557 if (nwritten == -1) {
558 warn("Write to GDB socket failed");
559 close_connection();
560 } else {
561 io_buffer_advance(&cur_resp, nwritten);
562 if (cur_resp.len == 0)
563 mevent_disable(write_event);
564 else
565 mevent_enable(write_event);
566 }
567 }
568
569 /* Append a single character to the output buffer. */
570 static void
571 send_char(uint8_t data)
572 {
573 io_buffer_grow(&cur_resp, 1);
574 *io_buffer_tail(&cur_resp) = data;
575 cur_resp.len++;
576 }
577
578 /* Append an array of bytes to the output buffer. */
579 static void
580 send_data(const uint8_t *data, size_t len)
581 {
582
583 io_buffer_grow(&cur_resp, len);
584 memcpy(io_buffer_tail(&cur_resp), data, len);
585 cur_resp.len += len;
586 }
587
588 static void
589 format_byte(uint8_t v, uint8_t *buf)
590 {
591
592 buf[0] = hex_digit(v >> 4);
593 buf[1] = hex_digit(v & 0xf);
594 }
595
596 /*
597 * Append a single byte (formatted as two hex characters) to the
598 * output buffer.
599 */
600 static void
601 send_byte(uint8_t v)
602 {
603 uint8_t buf[2];
604
605 format_byte(v, buf);
606 send_data(buf, sizeof(buf));
607 }
608
609 static void
610 start_packet(void)
611 {
612
613 send_char('$');
614 cur_csum = 0;
615 }
616
617 static void
618 finish_packet(void)
619 {
620
621 send_char('#');
622 send_byte(cur_csum);
623 debug("-> %.*s\n", (int)cur_resp.len, io_buffer_head(&cur_resp));
624 }
625
626 /*
627 * Append a single character (for the packet payload) and update the
628 * checksum.
629 */
630 static void
631 append_char(uint8_t v)
632 {
633
634 send_char(v);
635 cur_csum += v;
636 }
637
638 /*
639 * Append an array of bytes (for the packet payload) and update the
640 * checksum.
641 */
642 static void
643 append_packet_data(const uint8_t *data, size_t len)
644 {
645
646 send_data(data, len);
647 while (len > 0) {
648 cur_csum += *data;
649 data++;
650 len--;
651 }
652 }
653
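/*
 * Append binary data to the packet payload using the remote
 * protocol's escaping: '}', '#', '$', and '*' are sent as '}'
 * followed by the original byte XOR'ed with 0x20.
 */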
654 static void
655 append_binary_data(const uint8_t *data, size_t len)
656 {
657 uint8_t buf[2];
658
659 for (; len > 0; data++, len--) {
660 switch (*data) {
661 case '}':
662 case '#':
663 case '$':
664 case '*':
665 buf[0] = 0x7d;
666 buf[1] = *data ^ 0x20;
667 append_packet_data(buf, 2);
668 break;
669 default:
670 append_packet_data(data, 1);
671 break;
672 }
673 }
674 }
675
676 static void
677 append_string(const char *str)
678 {
679
680 append_packet_data(str, strlen(str));
681 }
682
683 static void
684 append_byte(uint8_t v)
685 {
686 uint8_t buf[2];
687
688 format_byte(v, buf);
689 append_packet_data(buf, sizeof(buf));
690 }
691
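/*
 * Append an integer as hex-encoded bytes in target byte order
 * (least-significant byte first), as used for register values.
 */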
692 static void
693 append_unsigned_native(uintmax_t value, size_t len)
694 {
695 size_t i;
696
697 for (i = 0; i < len; i++) {
698 append_byte(value);
699 value >>= 8;
700 }
701 }
702
703 static void
704 append_unsigned_be(uintmax_t value, size_t len)
705 {
706 char buf[len * 2];
707 size_t i;
708
709 for (i = 0; i < len; i++) {
710 format_byte(value, buf + (len - i - 1) * 2);
711 value >>= 8;
712 }
713 append_packet_data(buf, sizeof(buf));
714 }
715
716 static void
717 append_integer(unsigned int value)
718 {
719
720 if (value == 0)
721 append_char('0');
722 else
723 append_unsigned_be(value, (fls(value) + 7) / 8);
724 }
725
726 static void
727 append_asciihex(const char *str)
728 {
729
730 while (*str != '\0') {
731 append_byte(*str);
732 str++;
733 }
734 }
735
736 static void
737 send_empty_response(void)
738 {
739
740 start_packet();
741 finish_packet();
742 }
743
744 static void
745 send_error(int error)
746 {
747
748 start_packet();
749 append_char('E');
750 append_byte(error);
751 finish_packet();
752 }
753
754 static void
755 send_ok(void)
756 {
757
758 start_packet();
759 append_string("OK");
760 finish_packet();
761 }
762
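/*
 * Parse a thread ID from a packet: "0" means any thread, "-1" means
 * all threads, an empty field is an error (-2), and anything else is
 * a hex thread ID (vCPU number + 1).
 */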
763 static int
764 parse_threadid(const uint8_t *data, size_t len)
765 {
766
767 if (len == 1 && *data == '0')
768 return (0);
769 if (len == 2 && memcmp(data, "-1", 2) == 0)
770 return (-1);
771 if (len == 0)
772 return (-2);
773 return (parse_integer(data, len));
774 }
775
776 /*
777 * Report the current stop event to the debugger. If the stop is due
778 * to an event triggered on a specific vCPU such as a breakpoint or
779 * stepping trap, stopped_vcpu will be set to the vCPU triggering the
780 * stop. If 'set_cur_vcpu' is true, then cur_vcpu will be updated to
781 * the reporting vCPU for vCPU events.
782 */
783 static void
784 report_stop(bool set_cur_vcpu)
785 {
786 struct vcpu_state *vs;
787
788 start_packet();
789 if (stopped_vcpu == -1) {
790 append_char('S');
791 append_byte(GDB_SIGNAL_TRAP);
792 } else {
793 vs = &vcpu_state[stopped_vcpu];
794 if (set_cur_vcpu)
795 cur_vcpu = stopped_vcpu;
796 append_char('T');
797 append_byte(GDB_SIGNAL_TRAP);
798 append_string("thread:");
799 append_integer(stopped_vcpu + 1);
800 append_char(';');
801 if (vs->hit_swbreak) {
802 debug("$vCPU %d reporting swbreak\n", stopped_vcpu);
803 if (swbreak_enabled)
804 append_string("swbreak:;");
805 } else if (vs->stepped)
806 debug("$vCPU %d reporting step\n", stopped_vcpu);
807 else
808 debug("$vCPU %d reporting ???\n", stopped_vcpu);
809 }
810 finish_packet();
811 report_next_stop = false;
812 }
813
814 /*
815 * If this stop is due to a vCPU event, clear that event to mark it as
816 * acknowledged.
817 */
818 static void
819 discard_stop(void)
820 {
821 struct vcpu_state *vs;
822
823 if (stopped_vcpu != -1) {
824 vs = &vcpu_state[stopped_vcpu];
825 vs->hit_swbreak = false;
826 vs->stepped = false;
827 stopped_vcpu = -1;
828 }
829 report_next_stop = true;
830 }
831
832 static void
833 gdb_finish_suspend_vcpus(void)
834 {
835
836 if (first_stop) {
837 first_stop = false;
838 stopped_vcpu = -1;
839 } else if (report_next_stop) {
840 assert(!response_pending());
841 report_stop(true);
842 send_pending_data(cur_fd);
843 }
844 }
845
846 /*
847 * vCPU threads invoke this function whenever the vCPU enters the
848 * debug server to pause or report an event. vCPU threads wait here
849 * as long as the debug server keeps them suspended.
850 */
851 static void
852 _gdb_cpu_suspend(struct vcpu *vcpu, bool report_stop)
853 {
854 int vcpuid = vcpu_id(vcpu);
855
856 debug("$vCPU %d suspending\n", vcpuid);
857 CPU_SET(vcpuid, &vcpus_waiting);
858 if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
859 gdb_finish_suspend_vcpus();
860 while (CPU_ISSET(vcpuid, &vcpus_suspended))
861 pthread_cond_wait(&idle_vcpus, &gdb_lock);
862 CPU_CLR(vcpuid, &vcpus_waiting);
863 debug("$vCPU %d resuming\n", vcpuid);
864 }
865
866 /*
867 * Requests vCPU single-stepping using a
868 * VMEXIT suitable for the host platform.
869 */
870 static int
871 _gdb_set_step(struct vcpu *vcpu, int val)
872 {
873 int error;
874
875 #ifdef __amd64__
876 /*
877 * If the MTRAP cap fails, we are running on an AMD host.
878 * In that case, we request DB exits caused by RFLAGS.TF.
879 */
880 error = vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, val);
881 if (error != 0)
882 error = vm_set_capability(vcpu, VM_CAP_RFLAGS_TF, val);
883 if (error == 0)
884 (void)vm_set_capability(vcpu, VM_CAP_MASK_HWINTR, val);
885 #else /* __aarch64__ */
886 error = vm_set_capability(vcpu, VM_CAP_SS_EXIT, val);
887 if (error == 0)
888 error = vm_set_capability(vcpu, VM_CAP_MASK_HWINTR, val);
889 #endif
890 return (error);
891 }
892
893 /*
894 * Checks whether single-stepping is supported for a given vCPU.
895 */
896 static int
897 _gdb_check_step(struct vcpu *vcpu)
898 {
899 #ifdef __amd64__
900 int val;
901
902 if (vm_get_capability(vcpu, VM_CAP_MTRAP_EXIT, &val) != 0) {
903 if (vm_get_capability(vcpu, VM_CAP_RFLAGS_TF, &val) != 0)
904 return (-1);
905 }
906 #else /* __aarch64__ */
907 (void)vcpu;
908 #endif
909 return (0);
910 }
911
912 /*
913 * Invoked at the start of a vCPU thread's execution to inform the
914 * debug server about the new thread.
915 */
916 void
917 gdb_cpu_add(struct vcpu *vcpu)
918 {
919 int vcpuid;
920
921 if (!gdb_active)
922 return;
923 vcpuid = vcpu_id(vcpu);
924 debug("$vCPU %d starting\n", vcpuid);
925 pthread_mutex_lock(&gdb_lock);
926 assert(vcpuid < guest_ncpus);
927 assert(vcpus[vcpuid] == NULL);
928 vcpus[vcpuid] = vcpu;
929 CPU_SET(vcpuid, &vcpus_active);
930 if (!TAILQ_EMPTY(&breakpoints)) {
931 vm_set_capability(vcpu, GDB_BREAKPOINT_CAP, 1);
932 debug("$vCPU %d enabled breakpoint exits\n", vcpuid);
933 }
934
935 /*
936 * If a vcpu is added while vcpus are stopped, suspend the new
937 * vcpu so that it will pop back out with a debug exit before
938 * executing the first instruction.
939 */
940 if (!CPU_EMPTY(&vcpus_suspended)) {
941 cpuset_t suspended;
942 int error;
943
944 error = vm_debug_cpus(ctx, &suspended);
945 assert(error == 0);
946
947 CPU_SET(vcpuid, &vcpus_suspended);
948 _gdb_cpu_suspend(vcpu, false);
949
950 /*
951 * In general, APs are started in a suspended mode such that
952 * they exit with VM_EXITCODE_DEBUG until the BSP starts them.
953 * In particular, this refers to the kernel's view of the vCPU
954 * state rather than our own. If the debugger resumes guest
955 * execution, vCPUs will be unsuspended from the kernel's point
956 * of view, so we should restore the previous state before
957 * continuing.
958 */
959 if (CPU_ISSET(vcpuid, &suspended)) {
960 error = vm_suspend_cpu(vcpu);
961 assert(error == 0);
962 }
963 }
964 pthread_mutex_unlock(&gdb_lock);
965 }
966
967 /*
968 * Invoked by a vCPU before resuming execution. This enables stepping
969 * if the vCPU is marked as stepping.
970 */
971 static void
972 gdb_cpu_resume(struct vcpu *vcpu)
973 {
974 struct vcpu_state *vs;
975 int error;
976
977 vs = &vcpu_state[vcpu_id(vcpu)];
978
979 /*
980 * Any pending event should already be reported before
981 * resuming.
982 */
983 assert(vs->hit_swbreak == false);
984 assert(vs->stepped == false);
985 if (vs->stepping) {
986 error = _gdb_set_step(vcpu, 1);
987 assert(error == 0);
988 }
989 }
990
991 /*
992 * Handler for VM_EXITCODE_DEBUG used to suspend a vCPU when the guest
993 * has been suspended due to an event on a different vCPU or in response
994 * to a guest-wide suspend such as Ctrl-C or the stop on attach.
995 */
996 void
997 gdb_cpu_suspend(struct vcpu *vcpu)
998 {
999
1000 if (!gdb_active)
1001 return;
1002 pthread_mutex_lock(&gdb_lock);
1003 _gdb_cpu_suspend(vcpu, true);
1004 gdb_cpu_resume(vcpu);
1005 pthread_mutex_unlock(&gdb_lock);
1006 }
1007
1008 static void
1009 gdb_suspend_vcpus(void)
1010 {
1011
1012 assert(pthread_mutex_isowned_np(&gdb_lock));
1013 debug("suspending all CPUs\n");
1014 vcpus_suspended = vcpus_active;
1015 vm_suspend_all_cpus(ctx);
1016 if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
1017 gdb_finish_suspend_vcpus();
1018 }
1019
1020 /*
1021 * Invoked each time a vmexit handler needs to step a vCPU.
1022 * Handles MTRAP and RFLAGS.TF vmexits.
1023 */
1024 static void
1025 gdb_cpu_step(struct vcpu *vcpu)
1026 {
1027 struct vcpu_state *vs;
1028 int vcpuid = vcpu_id(vcpu);
1029 int error;
1030
1031 debug("$vCPU %d stepped\n", vcpuid);
1032 pthread_mutex_lock(&gdb_lock);
1033 vs = &vcpu_state[vcpuid];
1034 if (vs->stepping) {
1035 vs->stepping = false;
1036 vs->stepped = true;
1037 error = _gdb_set_step(vcpu, 0);
1038 assert(error == 0);
1039
1040 while (vs->stepped) {
1041 if (stopped_vcpu == -1) {
1042 debug("$vCPU %d reporting step\n", vcpuid);
1043 stopped_vcpu = vcpuid;
1044 gdb_suspend_vcpus();
1045 }
1046 _gdb_cpu_suspend(vcpu, true);
1047 }
1048 gdb_cpu_resume(vcpu);
1049 }
1050 pthread_mutex_unlock(&gdb_lock);
1051 }
1052
1053 /*
1054 * A general handler for single-step exceptions.
1055 * Handles RFLAGS.TF exits on AMD SVM.
1056 */
1057 void
1058 gdb_cpu_debug(struct vcpu *vcpu, struct vm_exit *vmexit)
1059 {
1060 if (!gdb_active)
1061 return;
1062
1063 #ifdef __amd64__
1064 /* RFLAGS.TF exit? */
1065 if (vmexit->u.dbg.trace_trap) {
1066 gdb_cpu_step(vcpu);
1067 }
1068 #else /* __aarch64__ */
1069 (void)vmexit;
1070 gdb_cpu_step(vcpu);
1071 #endif
1072 }
1073
1074 /*
1075 * Handler for VM_EXITCODE_MTRAP reported when a vCPU single-steps via
1076 * the VT-x-specific MTRAP exit.
1077 */
1078 void
1079 gdb_cpu_mtrap(struct vcpu *vcpu)
1080 {
1081 if (!gdb_active)
1082 return;
1083 gdb_cpu_step(vcpu);
1084 }
1085
1086 static struct breakpoint *
1087 find_breakpoint(uint64_t gpa)
1088 {
1089 struct breakpoint *bp;
1090
1091 TAILQ_FOREACH(bp, &breakpoints, link) {
1092 if (bp->gpa == gpa)
1093 return (bp);
1094 }
1095 return (NULL);
1096 }
1097
1098 void
1099 gdb_cpu_breakpoint(struct vcpu *vcpu, struct vm_exit *vmexit)
1100 {
1101 struct breakpoint *bp;
1102 struct vcpu_state *vs;
1103 uint64_t gpa;
1104 int error, vcpuid;
1105
1106 if (!gdb_active) {
1107 EPRINTLN("vm_loop: unexpected VMEXIT_DEBUG");
1108 exit(4);
1109 }
1110 vcpuid = vcpu_id(vcpu);
1111 pthread_mutex_lock(&gdb_lock);
1112 error = guest_vaddr2paddr(vcpu, guest_pc(vmexit), &gpa);
1113 assert(error == 1);
1114 bp = find_breakpoint(gpa);
1115 if (bp != NULL) {
1116 vs = &vcpu_state[vcpuid];
1117 assert(vs->stepping == false);
1118 assert(vs->stepped == false);
1119 assert(vs->hit_swbreak == false);
1120 vs->hit_swbreak = true;
1121 vm_set_register(vcpu, GDB_PC_REGNAME, guest_pc(vmexit));
1122 for (;;) {
1123 if (stopped_vcpu == -1) {
1124 debug("$vCPU %d reporting breakpoint at rip %#lx\n",
1125 vcpuid, guest_pc(vmexit));
1126 stopped_vcpu = vcpuid;
1127 gdb_suspend_vcpus();
1128 }
1129 _gdb_cpu_suspend(vcpu, true);
1130 if (!vs->hit_swbreak) {
1131 /* Breakpoint reported. */
1132 break;
1133 }
1134 bp = find_breakpoint(gpa);
1135 if (bp == NULL) {
1136 /* Breakpoint was removed. */
1137 vs->hit_swbreak = false;
1138 break;
1139 }
1140 }
1141 gdb_cpu_resume(vcpu);
1142 } else {
1143 debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpuid,
1144 guest_pc(vmexit));
1145 #ifdef __amd64__
1146 error = vm_set_register(vcpu, VM_REG_GUEST_ENTRY_INST_LENGTH,
1147 vmexit->u.bpt.inst_length);
1148 assert(error == 0);
1149 error = vm_inject_exception(vcpu, IDT_BP, 0, 0, 0);
1150 assert(error == 0);
1151 #else /* __aarch64__ */
1152 uint64_t esr;
1153
1154 esr = (EXCP_BRK << ESR_ELx_EC_SHIFT) | vmexit->u.hyp.esr_el2;
1155 error = vm_inject_exception(vcpu, esr, 0);
1156 assert(error == 0);
1157 #endif
1158 }
1159 pthread_mutex_unlock(&gdb_lock);
1160 }
1161
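/*
 * Resume a single vCPU with single-stepping enabled in response to
 * an 's' command. Returns false if stepping is not supported.
 */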
1162 static bool
1163 gdb_step_vcpu(struct vcpu *vcpu)
1164 {
1165 int error, vcpuid;
1166
1167 vcpuid = vcpu_id(vcpu);
1168 debug("$vCPU %d step\n", vcpuid);
1169 error = _gdb_check_step(vcpu);
1170 if (error < 0)
1171 return (false);
1172
1173 discard_stop();
1174 vcpu_state[vcpuid].stepping = true;
1175 vm_resume_cpu(vcpu);
1176 CPU_CLR(vcpuid, &vcpus_suspended);
1177 pthread_cond_broadcast(&idle_vcpus);
1178 return (true);
1179 }
1180
1181 static void
1182 gdb_resume_vcpus(void)
1183 {
1184
1185 assert(pthread_mutex_isowned_np(&gdb_lock));
1186 vm_resume_all_cpus(ctx);
1187 debug("resuming all CPUs\n");
1188 CPU_ZERO(&vcpus_suspended);
1189 pthread_cond_broadcast(&idle_vcpus);
1190 }
1191
1192 static void
1193 gdb_read_regs(void)
1194 {
1195 uint64_t regvals[nitems(gdb_regset)];
1196 int regnums[nitems(gdb_regset)];
1197
1198 for (size_t i = 0; i < nitems(gdb_regset); i++)
1199 regnums[i] = gdb_regset[i].id;
1200 if (vm_get_register_set(vcpus[cur_vcpu], nitems(gdb_regset),
1201 regnums, regvals) == -1) {
1202 send_error(errno);
1203 return;
1204 }
1205
1206 start_packet();
1207 for (size_t i = 0; i < nitems(gdb_regset); i++) {
1208 #ifdef GDB_REG_FIRST_EXT
1209 if (gdb_regset[i].id == GDB_REG_FIRST_EXT)
1210 break;
1211 #endif
1212 append_unsigned_native(regvals[i], gdb_regset[i].size);
1213 }
1214 finish_packet();
1215 }
1216
1217 static void
1218 gdb_read_one_reg(const uint8_t *data, size_t len)
1219 {
1220 uint64_t regval;
1221 uintmax_t reg;
1222
1223 reg = parse_integer(data, len);
1224 if (reg >= nitems(gdb_regset)) {
1225 send_error(EINVAL);
1226 return;
1227 }
1228
1229 if (vm_get_register(vcpus[cur_vcpu], gdb_regset[reg].id, &regval) ==
1230 -1) {
1231 send_error(errno);
1232 return;
1233 }
1234
1235 start_packet();
1236 append_unsigned_native(regval, gdb_regset[reg].size);
1237 finish_packet();
1238 }
1239
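/*
 * Handle an 'm' packet: read guest memory at the given virtual
 * address and reply with the data as hex bytes.
 */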
1240 static void
1241 gdb_read_mem(const uint8_t *data, size_t len)
1242 {
1243 uint64_t gpa, gva, val;
1244 uint8_t *cp;
1245 size_t resid, todo, bytes;
1246 bool started;
1247 int error;
1248
1249 assert(len >= 1);
1250
1251 /* Skip 'm' */
1252 data += 1;
1253 len -= 1;
1254
1255 /* Parse and consume address. */
1256 cp = memchr(data, ',', len);
1257 if (cp == NULL || cp == data) {
1258 send_error(EINVAL);
1259 return;
1260 }
1261 gva = parse_integer(data, cp - data);
1262 len -= (cp - data) + 1;
1263 data += (cp - data) + 1;
1264
1265 /* Parse length. */
1266 resid = parse_integer(data, len);
1267
1268 started = false;
1269 while (resid > 0) {
1270 error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
1271 if (error == -1) {
1272 if (started)
1273 finish_packet();
1274 else
1275 send_error(errno);
1276 return;
1277 }
1278 if (error == 0) {
1279 if (started)
1280 finish_packet();
1281 else
1282 send_error(EFAULT);
1283 return;
1284 }
1285
1286 /* Read bytes from current page. */
1287 todo = getpagesize() - gpa % getpagesize();
1288 if (todo > resid)
1289 todo = resid;
1290
1291 cp = paddr_guest2host(ctx, gpa, todo);
1292 if (cp != NULL) {
1293 /*
1294 * If this page is guest RAM, read it a byte
1295 * at a time.
1296 */
1297 if (!started) {
1298 start_packet();
1299 started = true;
1300 }
1301 while (todo > 0) {
1302 append_byte(*cp);
1303 cp++;
1304 gpa++;
1305 gva++;
1306 resid--;
1307 todo--;
1308 }
1309 } else {
1310 /*
1311 * If this page isn't guest RAM, try to handle
1312 * it via MMIO. For MMIO requests, use
1313 * aligned reads of words when possible.
1314 */
1315 while (todo > 0) {
1316 if (gpa & 1 || todo == 1)
1317 bytes = 1;
1318 else if (gpa & 2 || todo == 2)
1319 bytes = 2;
1320 else
1321 bytes = 4;
1322 error = read_mem(vcpus[cur_vcpu], gpa, &val,
1323 bytes);
1324 if (error == 0) {
1325 if (!started) {
1326 start_packet();
1327 started = true;
1328 }
1329 gpa += bytes;
1330 gva += bytes;
1331 resid -= bytes;
1332 todo -= bytes;
1333 while (bytes > 0) {
1334 append_byte(val);
1335 val >>= 8;
1336 bytes--;
1337 }
1338 } else {
1339 if (started)
1340 finish_packet();
1341 else
1342 send_error(EFAULT);
1343 return;
1344 }
1345 }
1346 }
1347 assert(resid == 0 || gpa % getpagesize() == 0);
1348 }
1349 if (!started)
1350 start_packet();
1351 finish_packet();
1352 }
1353
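/*
 * Handle an 'M' packet: write hex-encoded data to guest memory at
 * the given virtual address.
 */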
1354 static void
1355 gdb_write_mem(const uint8_t *data, size_t len)
1356 {
1357 uint64_t gpa, gva, val;
1358 uint8_t *cp;
1359 size_t resid, todo, bytes;
1360 int error;
1361
1362 assert(len >= 1);
1363
1364 /* Skip 'M' */
1365 data += 1;
1366 len -= 1;
1367
1368 /* Parse and consume address. */
1369 cp = memchr(data, ',', len);
1370 if (cp == NULL || cp == data) {
1371 send_error(EINVAL);
1372 return;
1373 }
1374 gva = parse_integer(data, cp - data);
1375 len -= (cp - data) + 1;
1376 data += (cp - data) + 1;
1377
1378 /* Parse and consume length. */
1379 cp = memchr(data, ':', len);
1380 if (cp == NULL || cp == data) {
1381 send_error(EINVAL);
1382 return;
1383 }
1384 resid = parse_integer(data, cp - data);
1385 len -= (cp - data) + 1;
1386 data += (cp - data) + 1;
1387
1388 /* Verify the available bytes match the length. */
1389 if (len != resid * 2) {
1390 send_error(EINVAL);
1391 return;
1392 }
1393
1394 while (resid > 0) {
1395 error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
1396 if (error == -1) {
1397 send_error(errno);
1398 return;
1399 }
1400 if (error == 0) {
1401 send_error(EFAULT);
1402 return;
1403 }
1404
1405 /* Write bytes to current page. */
1406 todo = getpagesize() - gpa % getpagesize();
1407 if (todo > resid)
1408 todo = resid;
1409
1410 cp = paddr_guest2host(ctx, gpa, todo);
1411 if (cp != NULL) {
1412 /*
1413 * If this page is guest RAM, write it a byte
1414 * at a time.
1415 */
1416 while (todo > 0) {
1417 assert(len >= 2);
1418 *cp = parse_byte(data);
1419 data += 2;
1420 len -= 2;
1421 cp++;
1422 gpa++;
1423 gva++;
1424 resid--;
1425 todo--;
1426 }
1427 } else {
1428 /*
1429 * If this page isn't guest RAM, try to handle
1430 * it via MMIO. For MMIO requests, use
1431 * aligned writes of words when possible.
1432 */
1433 while (todo > 0) {
1434 if (gpa & 1 || todo == 1) {
1435 bytes = 1;
1436 val = parse_byte(data);
1437 } else if (gpa & 2 || todo == 2) {
1438 bytes = 2;
1439 val = be16toh(parse_integer(data, 4));
1440 } else {
1441 bytes = 4;
1442 val = be32toh(parse_integer(data, 8));
1443 }
1444 error = write_mem(vcpus[cur_vcpu], gpa, val,
1445 bytes);
1446 if (error == 0) {
1447 gpa += bytes;
1448 gva += bytes;
1449 resid -= bytes;
1450 todo -= bytes;
1451 data += 2 * bytes;
1452 len -= 2 * bytes;
1453 } else {
1454 send_error(EFAULT);
1455 return;
1456 }
1457 }
1458 }
1459 assert(resid == 0 || gpa % getpagesize() == 0);
1460 }
1461 assert(len == 0);
1462 send_ok();
1463 }
1464
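/*
 * Enable or disable breakpoint exits on all active vCPUs. Returns
 * false if the capability could not be set on a vCPU.
 */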
1465 static bool
1466 set_breakpoint_caps(bool enable)
1467 {
1468 cpuset_t mask;
1469 int vcpu;
1470
1471 mask = vcpus_active;
1472 while (!CPU_EMPTY(&mask)) {
1473 vcpu = CPU_FFS(&mask) - 1;
1474 CPU_CLR(vcpu, &mask);
1475 if (vm_set_capability(vcpus[vcpu], GDB_BREAKPOINT_CAP,
1476 enable ? 1 : 0) < 0)
1477 return (false);
1478 debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
1479 enable ? "en" : "dis");
1480 }
1481 return (true);
1482 }
1483
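/*
 * Patch an instruction in guest memory. On arm64, clean the data
 * cache and invalidate the instruction cache so the guest observes
 * the new instruction.
 */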
1484 static void
1485 write_instr(uint8_t *dest, uint8_t *instr, size_t len)
1486 {
1487 memcpy(dest, instr, len);
1488 #ifdef __aarch64__
1489 __asm __volatile(
1490 "dc cvau, %0\n"
1491 "dsb ish\n"
1492 "ic ialluis\n"
1493 "dsb ish\n"
1494 : : "r" (dest) : "memory");
1495 #endif
1496 }
1497
1498 static void
1499 remove_all_sw_breakpoints(void)
1500 {
1501 struct breakpoint *bp, *nbp;
1502 uint8_t *cp;
1503
1504 if (TAILQ_EMPTY(&breakpoints))
1505 return;
1506
1507 TAILQ_FOREACH_SAFE(bp, &breakpoints, link, nbp) {
1508 debug("remove breakpoint at %#lx\n", bp->gpa);
1509 cp = paddr_guest2host(ctx, bp->gpa, sizeof(bp->shadow_inst));
1510 write_instr(cp, bp->shadow_inst, sizeof(bp->shadow_inst));
1511 TAILQ_REMOVE(&breakpoints, bp, link);
1512 free(bp);
1513 }
1514 TAILQ_INIT(&breakpoints);
1515 set_breakpoint_caps(false);
1516 }
1517
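/*
 * Insert or remove a software breakpoint by patching the breakpoint
 * instruction into guest RAM and saving the original bytes.
 */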
1518 static void
1519 update_sw_breakpoint(uint64_t gva, int kind, bool insert)
1520 {
1521 struct breakpoint *bp;
1522 uint64_t gpa;
1523 uint8_t *cp;
1524 int error;
1525
1526 if (kind != GDB_BP_SIZE) {
1527 send_error(EINVAL);
1528 return;
1529 }
1530
1531 error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
1532 if (error == -1) {
1533 send_error(errno);
1534 return;
1535 }
1536 if (error == 0) {
1537 send_error(EFAULT);
1538 return;
1539 }
1540
1541 cp = paddr_guest2host(ctx, gpa, sizeof(bp->shadow_inst));
1542
1543 /* Only permit breakpoints in guest RAM. */
1544 if (cp == NULL) {
1545 send_error(EFAULT);
1546 return;
1547 }
1548
1549 /* Find any existing breakpoint. */
1550 bp = find_breakpoint(gpa);
1551
1552 /*
1553 * Silently ignore duplicate commands since the protocol
1554 * requires these packets to be idempotent.
1555 */
1556 if (insert) {
1557 if (bp == NULL) {
1558 if (TAILQ_EMPTY(&breakpoints) &&
1559 !set_breakpoint_caps(true)) {
1560 send_empty_response();
1561 return;
1562 }
1563 bp = malloc(sizeof(*bp));
1564 bp->gpa = gpa;
1565 memcpy(bp->shadow_inst, cp, sizeof(bp->shadow_inst));
1566 write_instr(cp, GDB_BP_INSTR, sizeof(bp->shadow_inst));
1567 TAILQ_INSERT_TAIL(&breakpoints, bp, link);
1568 debug("new breakpoint at %#lx\n", gpa);
1569 }
1570 } else {
1571 if (bp != NULL) {
1572 debug("remove breakpoint at %#lx\n", gpa);
1573 write_instr(cp, bp->shadow_inst,
1574 sizeof(bp->shadow_inst));
1575 TAILQ_REMOVE(&breakpoints, bp, link);
1576 free(bp);
1577 if (TAILQ_EMPTY(&breakpoints))
1578 set_breakpoint_caps(false);
1579 }
1580 }
1581 send_ok();
1582 }
1583
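/*
 * Handle 'Z' and 'z' packets which insert and remove breakpoints.
 * Only software breakpoints (type 0) are supported.
 */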
1584 static void
1585 parse_breakpoint(const uint8_t *data, size_t len)
1586 {
1587 uint64_t gva;
1588 uint8_t *cp;
1589 bool insert;
1590 int kind, type;
1591
1592 insert = data[0] == 'Z';
1593
1594 /* Skip 'Z/z' */
1595 data += 1;
1596 len -= 1;
1597
1598 /* Parse and consume type. */
1599 cp = memchr(data, ',', len);
1600 if (cp == NULL || cp == data) {
1601 send_error(EINVAL);
1602 return;
1603 }
1604 type = parse_integer(data, cp - data);
1605 len -= (cp - data) + 1;
1606 data += (cp - data) + 1;
1607
1608 /* Parse and consume address. */
1609 cp = memchr(data, ',', len);
1610 if (cp == NULL || cp == data) {
1611 send_error(EINVAL);
1612 return;
1613 }
1614 gva = parse_integer(data, cp - data);
1615 len -= (cp - data) + 1;
1616 data += (cp - data) + 1;
1617
1618 /* Parse and consume kind. */
1619 cp = memchr(data, ';', len);
1620 if (cp == data) {
1621 send_error(EINVAL);
1622 return;
1623 }
1624 if (cp != NULL) {
1625 /*
1626 * We do not advertise support for either the
1627 * ConditionalBreakpoints or BreakpointCommands
1628 * features, so we should not be getting conditions or
1629 * commands from the remote end.
1630 */
1631 send_empty_response();
1632 return;
1633 }
1634 kind = parse_integer(data, len);
1635 data += len;
1636 len = 0;
1637
1638 switch (type) {
1639 case 0:
1640 update_sw_breakpoint(gva, kind, insert);
1641 break;
1642 default:
1643 send_empty_response();
1644 break;
1645 }
1646 }
1647
1648 static bool
1649 command_equals(const uint8_t *data, size_t len, const char *cmd)
1650 {
1651
1652 if (strlen(cmd) > len)
1653 return (false);
1654 return (memcmp(data, cmd, strlen(cmd)) == 0);
1655 }
1656
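/*
 * Parse the feature list from a qSupported packet and reply with the
 * features supported by the debug server.
 */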
1657 static void
1658 check_features(const uint8_t *data, size_t len)
1659 {
1660 char *feature, *next_feature, *str, *value;
1661 bool supported;
1662
1663 str = malloc(len + 1);
1664 memcpy(str, data, len);
1665 str[len] = '\0';
1666 next_feature = str;
1667
1668 while ((feature = strsep(&next_feature, ";")) != NULL) {
1669 /*
1670 * Null features shouldn't exist, but skip if they
1671 * do.
1672 */
1673 if (strcmp(feature, "") == 0)
1674 continue;
1675
1676 /*
1677 * Look for the value or supported / not supported
1678 * flag.
1679 */
1680 value = strchr(feature, '=');
1681 if (value != NULL) {
1682 *value = '\0';
1683 value++;
1684 supported = true;
1685 } else {
1686 value = feature + strlen(feature) - 1;
1687 switch (*value) {
1688 case '+':
1689 supported = true;
1690 break;
1691 case '-':
1692 supported = false;
1693 break;
1694 default:
1695 /*
1696 * This is really a protocol error,
1697 * but we just ignore malformed
1698 * features for ease of
1699 * implementation.
1700 */
1701 continue;
1702 }
1703 value = NULL;
1704 }
1705
1706 if (strcmp(feature, "swbreak") == 0)
1707 swbreak_enabled = supported;
1708 }
1709 free(str);
1710
1711 start_packet();
1712
1713 /* This is an arbitrary limit. */
1714 append_string("PacketSize=4096");
1715 append_string(";swbreak+");
1716 append_string(";qXfer:features:read+");
1717 finish_packet();
1718 }
1719
1720 static void
1721 gdb_query(const uint8_t *data, size_t len)
1722 {
1723
1724 /*
1725 * TODO:
1726 * - qSearch
1727 */
1728 if (command_equals(data, len, "qAttached")) {
1729 start_packet();
1730 append_char('1');
1731 finish_packet();
1732 } else if (command_equals(data, len, "qC")) {
1733 start_packet();
1734 append_string("QC");
1735 append_integer(cur_vcpu + 1);
1736 finish_packet();
1737 } else if (command_equals(data, len, "qfThreadInfo")) {
1738 cpuset_t mask;
1739 bool first;
1740 int vcpu;
1741
1742 if (CPU_EMPTY(&vcpus_active)) {
1743 send_error(EINVAL);
1744 return;
1745 }
1746 mask = vcpus_active;
1747 start_packet();
1748 append_char('m');
1749 first = true;
1750 while (!CPU_EMPTY(&mask)) {
1751 vcpu = CPU_FFS(&mask) - 1;
1752 CPU_CLR(vcpu, &mask);
1753 if (first)
1754 first = false;
1755 else
1756 append_char(',');
1757 append_integer(vcpu + 1);
1758 }
1759 finish_packet();
1760 } else if (command_equals(data, len, "qsThreadInfo")) {
1761 start_packet();
1762 append_char('l');
1763 finish_packet();
1764 } else if (command_equals(data, len, "qSupported")) {
1765 data += strlen("qSupported");
1766 len -= strlen("qSupported");
1767 check_features(data, len);
1768 } else if (command_equals(data, len, "qThreadExtraInfo")) {
1769 char buf[16];
1770 int tid;
1771
1772 data += strlen("qThreadExtraInfo");
1773 len -= strlen("qThreadExtraInfo");
1774 if (len == 0 || *data != ',') {
1775 send_error(EINVAL);
1776 return;
1777 }
1778 tid = parse_threadid(data + 1, len - 1);
1779 if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
1780 send_error(EINVAL);
1781 return;
1782 }
1783
1784 snprintf(buf, sizeof(buf), "vCPU %d", tid - 1);
1785 start_packet();
1786 append_asciihex(buf);
1787 finish_packet();
1788 } else if (command_equals(data, len, "qXfer:features:read:")) {
1789 struct stat sb;
1790 const char *xml;
1791 const uint8_t *pathend;
1792 char buf[64], path[PATH_MAX];
1793 size_t xmllen;
1794 unsigned int doff, dlen;
1795 int fd;
1796
1797 data += strlen("qXfer:features:read:");
1798 len -= strlen("qXfer:features:read:");
1799
1800 pathend = memchr(data, ':', len);
1801 if (pathend == NULL ||
1802 (size_t)(pathend - data) >= sizeof(path) - 1) {
1803 send_error(EINVAL);
1804 return;
1805 }
1806 memcpy(path, data, pathend - data);
1807 path[pathend - data] = '\0';
1808 len -= (pathend - data) + 1;
1809 data += (pathend - data) + 1;
1810
1811 if (len > sizeof(buf) - 1) {
1812 send_error(EINVAL);
1813 return;
1814 }
1815 memcpy(buf, data, len);
1816 buf[len] = '\0';
1817 if (sscanf(buf, "%x,%x", &doff, &dlen) != 2) {
1818 send_error(EINVAL);
1819 return;
1820 }
1821
1822 fd = openat(xml_dfd, path, O_RDONLY | O_RESOLVE_BENEATH);
1823 if (fd < 0) {
1824 send_error(errno);
1825 return;
1826 }
1827 if (fstat(fd, &sb) < 0) {
1828 send_error(errno);
1829 close(fd);
1830 return;
1831 }
1832 xml = mmap(NULL, sb.st_size, PROT_READ, MAP_SHARED, fd, 0);
1833 if (xml == MAP_FAILED) {
1834 send_error(errno);
1835 close(fd);
1836 return;
1837 }
1838 close(fd);
1839 xmllen = sb.st_size;
1840
1841 start_packet();
1842 if (doff >= xmllen) {
1843 append_char('l');
1844 } else if (doff + dlen >= xmllen) {
1845 append_char('l');
1846 append_binary_data(xml + doff, xmllen - doff);
1847 } else {
1848 append_char('m');
1849 append_binary_data(xml + doff, dlen);
1850 }
1851 finish_packet();
1852 (void)munmap(__DECONST(void *, xml), xmllen);
1853 } else
1854 send_empty_response();
1855 }
1856
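/*
 * Dispatch a single remote protocol command. 'data' points at the
 * packet payload after the leading '$', and 'len' excludes the
 * trailing '#' and checksum.
 */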
1857 static void
1858 handle_command(const uint8_t *data, size_t len)
1859 {
1860
1861 /* Reject packets with a sequence-id. */
1862 if (len >= 3 && data[0] >= '0' && data[0] <= '9' &&
1863 data[1] >= '0' && data[1] <= '9' && data[2] == ':') {
1864 send_empty_response();
1865 return;
1866 }
1867
1868 switch (*data) {
1869 case 'c':
1870 if (len != 1) {
1871 send_error(EINVAL);
1872 break;
1873 }
1874
1875 discard_stop();
1876 gdb_resume_vcpus();
1877 break;
1878 case 'D':
1879 send_ok();
1880
1881 /* TODO: Resume any stopped CPUs. */
1882 break;
1883 case 'g':
1884 gdb_read_regs();
1885 break;
1886 case 'p':
1887 gdb_read_one_reg(data + 1, len - 1);
1888 break;
1889 case 'H': {
1890 int tid;
1891
1892 if (len < 2 || (data[1] != 'g' && data[1] != 'c')) {
1893 send_error(EINVAL);
1894 break;
1895 }
1896 tid = parse_threadid(data + 2, len - 2);
1897 if (tid == -2) {
1898 send_error(EINVAL);
1899 break;
1900 }
1901
1902 if (CPU_EMPTY(&vcpus_active)) {
1903 send_error(EINVAL);
1904 break;
1905 }
1906 if (tid == -1 || tid == 0)
1907 cur_vcpu = CPU_FFS(&vcpus_active) - 1;
1908 else if (CPU_ISSET(tid - 1, &vcpus_active))
1909 cur_vcpu = tid - 1;
1910 else {
1911 send_error(EINVAL);
1912 break;
1913 }
1914 send_ok();
1915 break;
1916 }
1917 case 'm':
1918 gdb_read_mem(data, len);
1919 break;
1920 case 'M':
1921 gdb_write_mem(data, len);
1922 break;
1923 case 'T': {
1924 int tid;
1925
1926 tid = parse_threadid(data + 1, len - 1);
1927 if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
1928 send_error(EINVAL);
1929 return;
1930 }
1931 send_ok();
1932 break;
1933 }
1934 case 'q':
1935 gdb_query(data, len);
1936 break;
1937 case 's':
1938 if (len != 1) {
1939 send_error(EINVAL);
1940 break;
1941 }
1942
1943 /* Don't send a reply until a stop occurs. */
1944 if (!gdb_step_vcpu(vcpus[cur_vcpu])) {
1945 send_error(EOPNOTSUPP);
1946 break;
1947 }
1948 break;
1949 case 'z':
1950 case 'Z':
1951 parse_breakpoint(data, len);
1952 break;
1953 case '?':
1954 report_stop(false);
1955 break;
1956 case 'G': /* TODO */
1957 case 'v':
1958 /* Handle 'vCont' */
1959 /* 'vCtrlC' */
1960 case 'P': /* TODO */
1961 case 'Q': /* TODO */
1962 case 't': /* TODO */
1963 case 'X': /* TODO */
1964 default:
1965 send_empty_response();
1966 }
1967 }
1968
1969 /* Check for a valid packet in the command buffer. */
1970 static void
1971 check_command(int fd)
1972 {
1973 uint8_t *head, *hash, *p, sum;
1974 size_t avail, plen;
1975
1976 for (;;) {
1977 avail = cur_comm.len;
1978 if (avail == 0)
1979 return;
1980 head = io_buffer_head(&cur_comm);
1981 switch (*head) {
1982 case 0x03:
1983 debug("<- Ctrl-C\n");
1984 io_buffer_consume(&cur_comm, 1);
1985
1986 gdb_suspend_vcpus();
1987 break;
1988 case '+':
1989 /* ACK of previous response. */
1990 debug("<- +\n");
1991 if (response_pending())
1992 io_buffer_reset(&cur_resp);
1993 io_buffer_consume(&cur_comm, 1);
1994 if (stopped_vcpu != -1 && report_next_stop) {
1995 report_stop(true);
1996 send_pending_data(fd);
1997 }
1998 break;
1999 case '-':
2000 /* NACK of previous response. */
2001 debug("<- -\n");
2002 if (response_pending()) {
2003 cur_resp.len += cur_resp.start;
2004 cur_resp.start = 0;
2005 if (cur_resp.data[0] == '+')
2006 io_buffer_advance(&cur_resp, 1);
2007 debug("-> %.*s\n", (int)cur_resp.len,
2008 io_buffer_head(&cur_resp));
2009 }
2010 io_buffer_consume(&cur_comm, 1);
2011 send_pending_data(fd);
2012 break;
2013 case '$':
2014 /* Packet. */
2015
2016 if (response_pending()) {
2017 warnx("New GDB command while response in "
2018 "progress");
2019 io_buffer_reset(&cur_resp);
2020 }
2021
2022 /* Is packet complete? */
2023 hash = memchr(head, '#', avail);
2024 if (hash == NULL)
2025 return;
2026 plen = (hash - head + 1) + 2;
2027 if (avail < plen)
2028 return;
2029 debug("<- %.*s\n", (int)plen, head);
2030
2031 /* Verify checksum. */
2032 for (sum = 0, p = head + 1; p < hash; p++)
2033 sum += *p;
2034 if (sum != parse_byte(hash + 1)) {
2035 io_buffer_consume(&cur_comm, plen);
2036 debug("-> -\n");
2037 send_char('-');
2038 send_pending_data(fd);
2039 break;
2040 }
2041 send_char('+');
2042
2043 handle_command(head + 1, hash - (head + 1));
2044 io_buffer_consume(&cur_comm, plen);
2045 if (!response_pending())
2046 debug("-> +\n");
2047 send_pending_data(fd);
2048 break;
2049 default:
2050 /* XXX: Possibly drop connection instead. */
2051 debug("-> %02x\n", *head);
2052 io_buffer_consume(&cur_comm, 1);
2053 break;
2054 }
2055 }
2056 }
2057
2058 static void
2059 gdb_readable(int fd, enum ev_type event __unused, void *arg __unused)
2060 {
2061 size_t pending;
2062 ssize_t nread;
2063 int n;
2064
2065 if (ioctl(fd, FIONREAD, &n) == -1) {
2066 warn("FIONREAD on GDB socket");
2067 return;
2068 }
2069 assert(n >= 0);
2070 pending = n;
2071
2072 /*
2073 * 'pending' might be zero due to EOF. We need to call read
2074 * with a non-zero length to detect EOF.
2075 */
2076 if (pending == 0)
2077 pending = 1;
2078
2079 /* Ensure there is room in the command buffer. */
2080 io_buffer_grow(&cur_comm, pending);
2081 assert(io_buffer_avail(&cur_comm) >= pending);
2082
2083 nread = read(fd, io_buffer_tail(&cur_comm), io_buffer_avail(&cur_comm));
2084 if (nread == 0) {
2085 close_connection();
2086 } else if (nread == -1) {
2087 if (errno == EAGAIN)
2088 return;
2089
2090 warn("Read from GDB socket");
2091 close_connection();
2092 } else {
2093 cur_comm.len += nread;
2094 pthread_mutex_lock(&gdb_lock);
2095 check_command(fd);
2096 pthread_mutex_unlock(&gdb_lock);
2097 }
2098 }
2099
2100 static void
2101 gdb_writable(int fd, enum ev_type event __unused, void *arg __unused)
2102 {
2103
2104 send_pending_data(fd);
2105 }
2106
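/*
 * Accept a new debugger connection on the listening socket. Only one
 * connection is supported at a time; the guest is suspended when the
 * debugger attaches.
 */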
2107 static void
2108 new_connection(int fd, enum ev_type event __unused, void *arg)
2109 {
2110 int optval, s;
2111
2112 s = accept4(fd, NULL, NULL, SOCK_NONBLOCK);
2113 if (s == -1) {
2114 if (arg != NULL)
2115 err(1, "Failed accepting initial GDB connection");
2116
2117 /* Silently ignore errors post-startup. */
2118 return;
2119 }
2120
2121 optval = 1;
2122 if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)) ==
2123 -1) {
2124 warn("Failed to disable SIGPIPE for GDB connection");
2125 close(s);
2126 return;
2127 }
2128
2129 pthread_mutex_lock(&gdb_lock);
2130 if (cur_fd != -1) {
2131 close(s);
2132 warnx("Ignoring additional GDB connection.");
pthread_mutex_unlock(&gdb_lock);
return;
2133 }
2134
2135 read_event = mevent_add(s, EVF_READ, gdb_readable, NULL);
2136 if (read_event == NULL) {
2137 if (arg != NULL)
2138 err(1, "Failed to setup initial GDB connection");
2139 pthread_mutex_unlock(&gdb_lock);
2140 return;
2141 }
2142 write_event = mevent_add(s, EVF_WRITE, gdb_writable, NULL);
2143 if (write_event == NULL) {
2144 if (arg != NULL)
2145 err(1, "Failed to setup initial GDB connection");
2146 mevent_delete_close(read_event);
2147 read_event = NULL;
2148 }
2149
2150 cur_fd = s;
2151 cur_vcpu = 0;
2152 stopped_vcpu = -1;
2153
2154 /* Break on attach. */
2155 first_stop = true;
2156 report_next_stop = false;
2157 gdb_suspend_vcpus();
2158 pthread_mutex_unlock(&gdb_lock);
2159 }
2160
2161 #ifndef WITHOUT_CAPSICUM
2162 static void
2163 limit_gdb_socket(int s)
2164 {
2165 cap_rights_t rights;
2166 unsigned long ioctls[] = { FIONREAD };
2167
2168 cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE,
2169 CAP_SETSOCKOPT, CAP_IOCTL);
2170 if (caph_rights_limit(s, &rights) == -1)
2171 errx(EX_OSERR, "Unable to apply rights for sandbox");
2172 if (caph_ioctls_limit(s, ioctls, nitems(ioctls)) == -1)
2173 errx(EX_OSERR, "Unable to apply rights for sandbox");
2174 }
2175 #endif
2176
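/*
 * Set up the debug server: parse the gdb.* config values, create the
 * listening socket, and register it with the mevent loop. If
 * "gdb.wait" is set, vCPU 0 is held suspended until a debugger
 * attaches.
 */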
2177 void
2178 init_gdb(struct vmctx *_ctx)
2179 {
2180 #ifndef WITHOUT_CAPSICUM
2181 cap_rights_t rights;
2182 #endif
2183 int error, flags, optval, s;
2184 struct addrinfo hints;
2185 struct addrinfo *gdbaddr;
2186 const char *saddr, *value;
2187 char *sport;
2188 bool wait;
2189
2190 value = get_config_value("gdb.port");
2191 if (value == NULL)
2192 return;
2193 sport = strdup(value);
2194 if (sport == NULL)
2195 errx(4, "Failed to allocate memory");
2196
2197 wait = get_config_bool_default("gdb.wait", false);
2198
2199 saddr = get_config_value("gdb.address");
2200 if (saddr == NULL) {
2201 saddr = "localhost";
2202 }
2203
2204 debug("==> starting on %s:%s, %swaiting\n",
2205 saddr, sport, wait ? "" : "not ");
2206
2207 error = pthread_mutex_init(&gdb_lock, NULL);
2208 if (error != 0)
2209 errc(1, error, "gdb mutex init");
2210 error = pthread_cond_init(&idle_vcpus, NULL);
2211 if (error != 0)
2212 errc(1, error, "gdb cv init");
2213
2214 memset(&hints, 0, sizeof(hints));
2215 hints.ai_family = AF_UNSPEC;
2216 hints.ai_socktype = SOCK_STREAM;
2217 hints.ai_flags = AI_NUMERICSERV | AI_PASSIVE;
2218
2219 error = getaddrinfo(saddr, sport, &hints, &gdbaddr);
2220 if (error != 0)
2221 errx(1, "gdb address resolution: %s", gai_strerror(error));
2222
2223 ctx = _ctx;
2224 s = socket(gdbaddr->ai_family, gdbaddr->ai_socktype, 0);
2225 if (s < 0)
2226 err(1, "gdb socket create");
2227
2228 optval = 1;
2229 (void)setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
2230
2231 if (bind(s, gdbaddr->ai_addr, gdbaddr->ai_addrlen) < 0)
2232 err(1, "gdb socket bind");
2233
2234 if (listen(s, 1) < 0)
2235 err(1, "gdb socket listen");
2236
2237 stopped_vcpu = -1;
2238 TAILQ_INIT(&breakpoints);
2239 vcpus = calloc(guest_ncpus, sizeof(*vcpus));
2240 vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
2241 if (wait) {
2242 /*
2243 * Set vcpu 0 in vcpus_suspended. This will trigger the
2244 * logic in gdb_cpu_add() to suspend the first vcpu before
2245 * it starts execution. The vcpu will remain suspended
2246 * until a debugger connects.
2247 */
2248 CPU_SET(0, &vcpus_suspended);
2249 stopped_vcpu = 0;
2250 }
2251
2252 flags = fcntl(s, F_GETFL);
2253 if (fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1)
2254 err(1, "Failed to mark gdb socket non-blocking");
2255
2256 #ifndef WITHOUT_CAPSICUM
2257 limit_gdb_socket(s);
2258 #endif
2259 mevent_add(s, EVF_READ, new_connection, NULL);
2260 gdb_active = true;
2261 freeaddrinfo(gdbaddr);
2262 free(sport);
2263
2264 xml_dfd = open(_PATH_GDB_XML, O_DIRECTORY);
2265 if (xml_dfd == -1)
2266 err(1, "Failed to open gdb xml directory");
2267 #ifndef WITHOUT_CAPSICUM
2268 cap_rights_init(&rights, CAP_FSTAT, CAP_LOOKUP, CAP_MMAP_R, CAP_PREAD);
2269 if (caph_rights_limit(xml_dfd, &rights) == -1)
2270 err(1, "cap_rights_init");
2271 #endif
2272 }
2273