xref: /illumos-gate/usr/src/cmd/bhyve/common/gdb.c (revision 5c4a5fe16715fb423db76577a6883b5bbecdbe45)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 
29 #include <sys/param.h>
30 #ifndef WITHOUT_CAPSICUM
31 #include <sys/capsicum.h>
32 #endif
33 #ifdef __FreeBSD__
34 #include <sys/endian.h>
35 #else
36 #include <endian.h>
37 #endif
38 #include <sys/ioctl.h>
39 #include <sys/mman.h>
40 #include <sys/queue.h>
41 #include <sys/socket.h>
42 #include <machine/atomic.h>
43 #include <machine/specialreg.h>
44 #include <machine/vmm.h>
45 #include <netinet/in.h>
46 #include <assert.h>
47 #ifndef WITHOUT_CAPSICUM
48 #include <capsicum_helpers.h>
49 #endif
50 #include <err.h>
51 #include <errno.h>
52 #include <fcntl.h>
53 #include <netdb.h>
54 #include <pthread.h>
55 #include <pthread_np.h>
56 #include <stdbool.h>
57 #include <stdio.h>
58 #include <stdlib.h>
59 #include <string.h>
60 #include <sysexits.h>
61 #include <unistd.h>
62 #include <vmmapi.h>
63 
64 #include "bhyverun.h"
65 #include "config.h"
66 #include "debug.h"
67 #include "gdb.h"
68 #include "mem.h"
69 #include "mevent.h"
70 
71 /*
72  * GDB_SIGNAL_* numbers are part of the GDB remote protocol.  Most stops
73  * use SIGTRAP.
74  */
75 #define	GDB_SIGNAL_TRAP		5
76 
77 #define	GDB_BP_SIZE		1
78 #define	GDB_BP_INSTR		(uint8_t []){0xcc}
79 #define	GDB_PC_REGNAME		VM_REG_GUEST_RIP
80 
81 _Static_assert(sizeof(GDB_BP_INSTR) == GDB_BP_SIZE,
82     "GDB_BP_INSTR has wrong size");
83 
84 static void gdb_resume_vcpus(void);
85 static void check_command(int fd);
86 
87 static struct mevent *read_event, *write_event;
88 
89 static cpuset_t vcpus_active, vcpus_suspended, vcpus_waiting;
90 static pthread_mutex_t gdb_lock;
91 static pthread_cond_t idle_vcpus;
92 static bool first_stop, report_next_stop, swbreak_enabled;
93 
94 /*
95  * An I/O buffer contains 'capacity' bytes of room at 'data'.  For a
96  * read buffer, 'start' is unused and 'len' contains the number of
97  * valid bytes in the buffer.  For a write buffer, 'start' is set to
98  * the index of the next byte in 'data' to send, and 'len' contains
99  * the remaining number of valid bytes to send.
100  */
101 struct io_buffer {
102 	uint8_t *data;
103 	size_t capacity;
104 	size_t start;
105 	size_t len;
106 };
107 
108 struct breakpoint {
109 	uint64_t gpa;
110 	uint8_t shadow_inst[GDB_BP_SIZE];
111 	TAILQ_ENTRY(breakpoint) link;
112 };
113 
114 /*
115  * When a vCPU stops to due to an event that should be reported to the
116  * debugger, information about the event is stored in this structure.
117  * The vCPU thread then sets 'stopped_vcpu' if it is not already set
118  * and stops other vCPUs so the event can be reported.  The
119  * report_stop() function reports the event for the 'stopped_vcpu'
120  * vCPU.  When the debugger resumes execution via continue or step,
121  * the event for 'stopped_vcpu' is cleared.  vCPUs will loop in their
122  * event handlers until the associated event is reported or disabled.
123  *
124  * An idle vCPU will have all of the boolean fields set to false.
125  *
126  * When a vCPU is stepped, 'stepping' is set to true when the vCPU is
127  * released to execute the stepped instruction.  When the vCPU reports
128  * the stepping trap, 'stepped' is set.
129  *
130  * When a vCPU hits a breakpoint set by the debug server,
131  * 'hit_swbreak' is set to true.
132  */
133 struct vcpu_state {
134 	bool stepping;
135 	bool stepped;
136 	bool hit_swbreak;
137 };
138 
139 static struct io_buffer cur_comm, cur_resp;
140 static uint8_t cur_csum;
141 static struct vmctx *ctx;
142 static int cur_fd = -1;
143 static TAILQ_HEAD(, breakpoint) breakpoints;
144 static struct vcpu_state *vcpu_state;
145 static struct vcpu **vcpus;
146 static int cur_vcpu, stopped_vcpu;
147 static bool gdb_active = false;
148 
149 struct gdb_reg {
150 	enum vm_reg_name id;
151 	int size;
152 };
153 
154 static const struct gdb_reg gdb_regset[] = {
155 	{ .id = VM_REG_GUEST_RAX, .size = 8 },
156 	{ .id = VM_REG_GUEST_RBX, .size = 8 },
157 	{ .id = VM_REG_GUEST_RCX, .size = 8 },
158 	{ .id = VM_REG_GUEST_RDX, .size = 8 },
159 	{ .id = VM_REG_GUEST_RSI, .size = 8 },
160 	{ .id = VM_REG_GUEST_RDI, .size = 8 },
161 	{ .id = VM_REG_GUEST_RBP, .size = 8 },
162 	{ .id = VM_REG_GUEST_RSP, .size = 8 },
163 	{ .id = VM_REG_GUEST_R8, .size = 8 },
164 	{ .id = VM_REG_GUEST_R9, .size = 8 },
165 	{ .id = VM_REG_GUEST_R10, .size = 8 },
166 	{ .id = VM_REG_GUEST_R11, .size = 8 },
167 	{ .id = VM_REG_GUEST_R12, .size = 8 },
168 	{ .id = VM_REG_GUEST_R13, .size = 8 },
169 	{ .id = VM_REG_GUEST_R14, .size = 8 },
170 	{ .id = VM_REG_GUEST_R15, .size = 8 },
171 	{ .id = VM_REG_GUEST_RIP, .size = 8 },
172 	{ .id = VM_REG_GUEST_RFLAGS, .size = 4 },
173 	{ .id = VM_REG_GUEST_CS, .size = 4 },
174 	{ .id = VM_REG_GUEST_SS, .size = 4 },
175 	{ .id = VM_REG_GUEST_DS, .size = 4 },
176 	{ .id = VM_REG_GUEST_ES, .size = 4 },
177 	{ .id = VM_REG_GUEST_FS, .size = 4 },
178 	{ .id = VM_REG_GUEST_GS, .size = 4 },
179 };
180 
181 #ifdef GDB_LOG
182 #include <stdarg.h>
183 #include <stdio.h>
184 
/*
 * Log debug-server activity to /tmp/bhyve_gdb.log.  The log file is
 * opened lazily on first call; if it cannot be opened (or limited
 * under Capsicum) logging is silently disabled.
 */
static void __printflike(1, 2)
debug(const char *fmt, ...)
{
	static FILE *logfile;
	va_list ap;

	if (logfile == NULL) {
		logfile = fopen("/tmp/bhyve_gdb.log", "w");
		if (logfile == NULL)
			return;
#ifndef WITHOUT_CAPSICUM
		/* Restrict the log descriptor to writes under Capsicum. */
		if (caph_limit_stream(fileno(logfile), CAPH_WRITE) == -1) {
			fclose(logfile);
			logfile = NULL;
			return;
		}
#endif
		/* Line-buffer so entries appear promptly in the log. */
		setlinebuf(logfile);
	}
	va_start(ap, fmt);
	vfprintf(logfile, fmt, ap);
	va_end(ap);
}
208 #else
209 #ifndef __FreeBSD__
210 /*
211  * A totally empty debug() makes the compiler grumpy due to how its used with
212  * some control flow here.
213  */
214 #define debug(...) do { } while (0)
215 #else
216 #define debug(...)
217 #endif
218 #endif
219 
220 static void	remove_all_sw_breakpoints(void);
221 
222 static int
guest_paging_info(struct vcpu * vcpu,struct vm_guest_paging * paging)223 guest_paging_info(struct vcpu *vcpu, struct vm_guest_paging *paging)
224 {
225 	uint64_t regs[4];
226 	const int regset[4] = {
227 		VM_REG_GUEST_CR0,
228 		VM_REG_GUEST_CR3,
229 		VM_REG_GUEST_CR4,
230 		VM_REG_GUEST_EFER
231 	};
232 
233 	if (vm_get_register_set(vcpu, nitems(regset), regset, regs) == -1)
234 		return (-1);
235 
236 	/*
237 	 * For the debugger, always pretend to be the kernel (CPL 0),
238 	 * and if long-mode is enabled, always parse addresses as if
239 	 * in 64-bit mode.
240 	 */
241 	paging->cr3 = regs[1];
242 	paging->cpl = 0;
243 	if (regs[3] & EFER_LMA)
244 		paging->cpu_mode = CPU_MODE_64BIT;
245 	else if (regs[0] & CR0_PE)
246 		paging->cpu_mode = CPU_MODE_PROTECTED;
247 	else
248 		paging->cpu_mode = CPU_MODE_REAL;
249 	if (!(regs[0] & CR0_PG))
250 		paging->paging_mode = PAGING_MODE_FLAT;
251 	else if (!(regs[2] & CR4_PAE))
252 		paging->paging_mode = PAGING_MODE_32;
253 	else if (regs[3] & EFER_LME)
254 		paging->paging_mode = PAGING_MODE_64;
255 	else
256 		paging->paging_mode = PAGING_MODE_PAE;
257 	return (0);
258 }
259 
260 /*
261  * Map a guest virtual address to a physical address (for a given vcpu).
262  * If a guest virtual address is valid, return 1.  If the address is
263  * not valid, return 0.  If an error occurs obtaining the mapping,
264  * return -1.
265  */
266 static int
guest_vaddr2paddr(struct vcpu * vcpu,uint64_t vaddr,uint64_t * paddr)267 guest_vaddr2paddr(struct vcpu *vcpu, uint64_t vaddr, uint64_t *paddr)
268 {
269 	struct vm_guest_paging paging;
270 	int fault;
271 
272 	if (guest_paging_info(vcpu, &paging) == -1)
273 		return (-1);
274 
275 	/*
276 	 * Always use PROT_READ.  We really care if the VA is
277 	 * accessible, not if the current vCPU can write.
278 	 */
279 	if (vm_gla2gpa_nofault(vcpu, &paging, vaddr, PROT_READ, paddr,
280 	    &fault) == -1)
281 		return (-1);
282 	if (fault)
283 		return (0);
284 	return (1);
285 }
286 
/* Program counter (guest RIP) recorded at the time of the VM exit. */
static uint64_t
guest_pc(struct vm_exit *vme)
{
	return (vme->rip);
}
292 
293 static void
io_buffer_reset(struct io_buffer * io)294 io_buffer_reset(struct io_buffer *io)
295 {
296 
297 	io->start = 0;
298 	io->len = 0;
299 }
300 
301 /* Available room for adding data. */
302 static size_t
io_buffer_avail(struct io_buffer * io)303 io_buffer_avail(struct io_buffer *io)
304 {
305 
306 	return (io->capacity - (io->start + io->len));
307 }
308 
309 static uint8_t *
io_buffer_head(struct io_buffer * io)310 io_buffer_head(struct io_buffer *io)
311 {
312 
313 	return (io->data + io->start);
314 }
315 
316 static uint8_t *
io_buffer_tail(struct io_buffer * io)317 io_buffer_tail(struct io_buffer *io)
318 {
319 
320 	return (io->data + io->start + io->len);
321 }
322 
323 static void
io_buffer_advance(struct io_buffer * io,size_t amount)324 io_buffer_advance(struct io_buffer *io, size_t amount)
325 {
326 
327 	assert(amount <= io->len);
328 	io->start += amount;
329 	io->len -= amount;
330 }
331 
332 static void
io_buffer_consume(struct io_buffer * io,size_t amount)333 io_buffer_consume(struct io_buffer *io, size_t amount)
334 {
335 
336 	io_buffer_advance(io, amount);
337 	if (io->len == 0) {
338 		io->start = 0;
339 		return;
340 	}
341 
342 	/*
343 	 * XXX: Consider making this move optional and compacting on a
344 	 * future read() before realloc().
345 	 */
346 	memmove(io->data, io_buffer_head(io), io->len);
347 	io->start = 0;
348 }
349 
350 static void
io_buffer_grow(struct io_buffer * io,size_t newsize)351 io_buffer_grow(struct io_buffer *io, size_t newsize)
352 {
353 	uint8_t *new_data;
354 	size_t avail, new_cap;
355 
356 	avail = io_buffer_avail(io);
357 	if (newsize <= avail)
358 		return;
359 
360 	new_cap = io->capacity + (newsize - avail);
361 	new_data = realloc(io->data, new_cap);
362 	if (new_data == NULL)
363 		err(1, "Failed to grow GDB I/O buffer");
364 	io->data = new_data;
365 	io->capacity = new_cap;
366 }
367 
368 static bool
response_pending(void)369 response_pending(void)
370 {
371 
372 	if (cur_resp.start == 0 && cur_resp.len == 0)
373 		return (false);
374 	if (cur_resp.start + cur_resp.len == 1 && cur_resp.data[0] == '+')
375 		return (false);
376 	return (true);
377 }
378 
/*
 * Tear down the current debugger connection and let the guest run:
 * drop the mevent handlers, reset both I/O buffers, remove all
 * software breakpoints, clear pending per-vCPU events and resume any
 * suspended vCPUs.
 */
static void
close_connection(void)
{

	/*
	 * XXX: This triggers a warning because mevent does the close
	 * before the EV_DELETE.
	 */
	pthread_mutex_lock(&gdb_lock);
	mevent_delete(write_event);
	mevent_delete_close(read_event);
	write_event = NULL;
	read_event = NULL;
	io_buffer_reset(&cur_comm);
	io_buffer_reset(&cur_resp);
	cur_fd = -1;

	remove_all_sw_breakpoints();

	/* Clear any pending events. */
	memset(vcpu_state, 0, guest_ncpus * sizeof(*vcpu_state));

	/* Resume any stopped vCPUs. */
	gdb_resume_vcpus();
	pthread_mutex_unlock(&gdb_lock);
}
405 
/* Convert a nibble (0-15) to its lowercase ASCII hex digit. */
static uint8_t
hex_digit(uint8_t nibble)
{
	return (nibble < 10 ? nibble + '0' : nibble - 10 + 'a');
}
415 
/*
 * Convert an ASCII hex digit to its value; any non-hex character
 * yields 0xF.
 */
static uint8_t
parse_digit(uint8_t v)
{
	uint8_t d = 0xF;

	if ('0' <= v && v <= '9')
		d = v - '0';
	else if ('a' <= v && v <= 'f')
		d = v - 'a' + 10;
	else if ('A' <= v && v <= 'F')
		d = v - 'A' + 10;
	return (d);
}
428 
429 /* Parses big-endian hexadecimal. */
static uintmax_t
parse_integer(const uint8_t *p, size_t len)
{
	uintmax_t val = 0;
	size_t i;

	for (i = 0; i < len; i++)
		val = (val << 4) | parse_digit(p[i]);
	return (val);
}
444 
/* Parse two hex characters into one byte (high nibble first). */
static uint8_t
parse_byte(const uint8_t *p)
{
	uint8_t hi = parse_digit(p[0]);
	uint8_t lo = parse_digit(p[1]);

	return (hi << 4 | lo);
}
451 
/*
 * Flush as much of the response buffer as the socket will take.  The
 * write event stays enabled only while unsent data remains so mevent
 * does not spin on a writable-but-idle socket.  A failed write tears
 * down the connection.
 */
static void
send_pending_data(int fd)
{
	ssize_t nwritten;

	if (cur_resp.len == 0) {
		mevent_disable(write_event);
		return;
	}
	nwritten = write(fd, io_buffer_head(&cur_resp), cur_resp.len);
	if (nwritten == -1) {
		warn("Write to GDB socket failed");
		close_connection();
	} else {
		io_buffer_advance(&cur_resp, nwritten);
		if (cur_resp.len == 0)
			mevent_disable(write_event);
		else
			mevent_enable(write_event);
	}
}
473 
474 /* Append a single character to the output buffer. */
475 static void
send_char(uint8_t data)476 send_char(uint8_t data)
477 {
478 	io_buffer_grow(&cur_resp, 1);
479 	*io_buffer_tail(&cur_resp) = data;
480 	cur_resp.len++;
481 }
482 
483 /* Append an array of bytes to the output buffer. */
484 static void
send_data(const uint8_t * data,size_t len)485 send_data(const uint8_t *data, size_t len)
486 {
487 
488 	io_buffer_grow(&cur_resp, len);
489 	memcpy(io_buffer_tail(&cur_resp), data, len);
490 	cur_resp.len += len;
491 }
492 
/* Render a byte as two lowercase hex characters into buf[0..1]. */
static void
format_byte(uint8_t v, uint8_t *buf)
{
	buf[0] = hex_digit((v >> 4) & 0xf);
	buf[1] = hex_digit(v & 0xf);
}
500 
501 /*
502  * Append a single byte (formatted as two hex characters) to the
503  * output buffer.
504  */
static void
send_byte(uint8_t v)
{
	uint8_t hex[2];

	format_byte(v, hex);
	send_data(hex, sizeof(hex));
}
513 
514 static void
start_packet(void)515 start_packet(void)
516 {
517 
518 	send_char('$');
519 	cur_csum = 0;
520 }
521 
/*
 * Terminate the current packet: append '#' followed by the two-digit
 * checksum accumulated by the append_*() helpers since start_packet().
 */
static void
finish_packet(void)
{

	send_char('#');
	send_byte(cur_csum);
	debug("-> %.*s\n", (int)cur_resp.len, io_buffer_head(&cur_resp));
}
530 
531 /*
532  * Append a single character (for the packet payload) and update the
533  * checksum.
534  */
535 static void
append_char(uint8_t v)536 append_char(uint8_t v)
537 {
538 
539 	send_char(v);
540 	cur_csum += v;
541 }
542 
543 /*
544  * Append an array of bytes (for the packet payload) and update the
545  * checksum.
546  */
547 static void
append_packet_data(const uint8_t * data,size_t len)548 append_packet_data(const uint8_t *data, size_t len)
549 {
550 
551 	send_data(data, len);
552 	while (len > 0) {
553 		cur_csum += *data;
554 		data++;
555 		len--;
556 	}
557 }
558 
/*
 * Append a NUL-terminated string to the packet payload.  The explicit
 * cast to 'const uint8_t *' is valid on every platform, so the old
 * FreeBSD/illumos #ifdef split was redundant.
 */
static void
append_string(const char *str)
{

	append_packet_data((const uint8_t *)str, strlen(str));
}
569 
static void
append_byte(uint8_t v)
{
	uint8_t hex[2];

	format_byte(v, hex);
	append_packet_data(hex, sizeof(hex));
}
578 
/* Append 'len' bytes of 'value', least-significant byte first. */
static void
append_unsigned_native(uintmax_t value, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++, value >>= 8)
		append_byte((uint8_t)value);
}
589 
/*
 * Append 'len' bytes of 'value' in big-endian order (most-significant
 * byte first).  Using a uint8_t buffer matches format_byte() and
 * append_packet_data() directly, removing the casts that previously
 * had to be split across FreeBSD/illumos #ifdef branches.
 */
static void
append_unsigned_be(uintmax_t value, size_t len)
{
	uint8_t buf[len * 2];
	size_t i;

	/* Fill from the tail so the most-significant byte lands first. */
	for (i = 0; i < len; i++) {
		format_byte(value, buf + (len - i - 1) * 2);
		value >>= 8;
	}
	append_packet_data(buf, sizeof(buf));
}
610 
/* Append an unsigned integer using the minimum number of hex bytes. */
static void
append_integer(unsigned int value)
{
	if (value == 0) {
		append_char('0');
		return;
	}
	append_unsigned_be(value, (fls(value) + 7) / 8);
}
620 
/* Append a string encoded as pairs of hex digits per character. */
static void
append_asciihex(const char *str)
{
	const char *p;

	for (p = str; *p != '\0'; p++)
		append_byte(*p);
}
630 
/* An empty packet is the protocol's "command not supported" reply. */
static void
send_empty_response(void)
{
	start_packet();
	finish_packet();
}
638 
/* Send an "E NN" error reply carrying the given errno value. */
static void
send_error(int error)
{
	start_packet();
	append_char('E');
	append_byte(error);
	finish_packet();
}
648 
/* Send the standard "OK" success reply. */
static void
send_ok(void)
{
	start_packet();
	append_string("OK");
	finish_packet();
}
657 
/*
 * Parse a GDB thread-id: "0" means any thread, "-1" means all
 * threads, an empty id is malformed (-2), anything else is a hex
 * thread number.
 */
static int
parse_threadid(const uint8_t *data, size_t len)
{
	if (len == 0)
		return (-2);
	if (len == 1 && data[0] == '0')
		return (0);
	if (len == 2 && memcmp(data, "-1", 2) == 0)
		return (-1);
	return (parse_integer(data, len));
}
670 
671 /*
672  * Report the current stop event to the debugger.  If the stop is due
673  * to an event triggered on a specific vCPU such as a breakpoint or
674  * stepping trap, stopped_vcpu will be set to the vCPU triggering the
675  * stop.  If 'set_cur_vcpu' is true, then cur_vcpu will be updated to
676  * the reporting vCPU for vCPU events.
677  */
static void
report_stop(bool set_cur_vcpu)
{
	struct vcpu_state *vs;

	start_packet();
	if (stopped_vcpu == -1) {
		/* No specific vCPU: plain "S <signal>" stop reply. */
		append_char('S');
		append_byte(GDB_SIGNAL_TRAP);
	} else {
		/* "T <signal>" reply carrying the thread and stop reason. */
		vs = &vcpu_state[stopped_vcpu];
		if (set_cur_vcpu)
			cur_vcpu = stopped_vcpu;
		append_char('T');
		append_byte(GDB_SIGNAL_TRAP);
		append_string("thread:");
		/* GDB thread ids are 1-based; vCPU ids are 0-based. */
		append_integer(stopped_vcpu + 1);
		append_char(';');
		if (vs->hit_swbreak) {
			debug("$vCPU %d reporting swbreak\n", stopped_vcpu);
			/* Only advertised if the debugger enabled swbreak. */
			if (swbreak_enabled)
				append_string("swbreak:;");
		} else if (vs->stepped)
			debug("$vCPU %d reporting step\n", stopped_vcpu);
		else
			debug("$vCPU %d reporting ???\n", stopped_vcpu);
	}
	finish_packet();
	report_next_stop = false;
}
708 
709 /*
710  * If this stop is due to a vCPU event, clear that event to mark it as
711  * acknowledged.
712  */
713 static void
discard_stop(void)714 discard_stop(void)
715 {
716 	struct vcpu_state *vs;
717 
718 	if (stopped_vcpu != -1) {
719 		vs = &vcpu_state[stopped_vcpu];
720 		vs->hit_swbreak = false;
721 		vs->stepped = false;
722 		stopped_vcpu = -1;
723 	}
724 	report_next_stop = true;
725 }
726 
/*
 * Called once every suspended vCPU has parked (vcpus_waiting ==
 * vcpus_suspended).  The very first stop after attach only records
 * that no specific vCPU stopped; later stops are reported to the
 * debugger unless a previous report is still outstanding.
 */
static void
gdb_finish_suspend_vcpus(void)
{

	if (first_stop) {
		first_stop = false;
		stopped_vcpu = -1;
	} else if (report_next_stop) {
		assert(!response_pending());
		report_stop(true);
		send_pending_data(cur_fd);
	}
}
740 
741 /*
742  * vCPU threads invoke this function whenever the vCPU enters the
743  * debug server to pause or report an event.  vCPU threads wait here
744  * as long as the debug server keeps them suspended.
745  */
static void
_gdb_cpu_suspend(struct vcpu *vcpu, bool report_stop)
{
	int vcpuid = vcpu_id(vcpu);

	debug("$vCPU %d suspending\n", vcpuid);
	CPU_SET(vcpuid, &vcpus_waiting);
	/* If we are the last vCPU to park, report the stop event. */
	if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
		gdb_finish_suspend_vcpus();
	/* Block (dropping gdb_lock in the wait) until we are resumed. */
	while (CPU_ISSET(vcpuid, &vcpus_suspended))
		pthread_cond_wait(&idle_vcpus, &gdb_lock);
	CPU_CLR(vcpuid, &vcpus_waiting);
	debug("$vCPU %d resuming\n", vcpuid);
}
760 
761 /*
762  * Invoked at the start of a vCPU thread's execution to inform the
763  * debug server about the new thread.
764  */
void
gdb_cpu_add(struct vcpu *vcpu)
{
	int vcpuid;

	if (!gdb_active)
		return;
	vcpuid = vcpu_id(vcpu);
	debug("$vCPU %d starting\n", vcpuid);
	pthread_mutex_lock(&gdb_lock);
	assert(vcpuid < guest_ncpus);
	assert(vcpus[vcpuid] == NULL);
	vcpus[vcpuid] = vcpu;
	CPU_SET(vcpuid, &vcpus_active);
	/* Existing breakpoints require BPT exits on every vCPU. */
	if (!TAILQ_EMPTY(&breakpoints)) {
		vm_set_capability(vcpu, VM_CAP_BPT_EXIT, 1);
		debug("$vCPU %d enabled breakpoint exits\n", vcpuid);
	}

	/*
	 * If a vcpu is added while vcpus are stopped, suspend the new
	 * vcpu so that it will pop back out with a debug exit before
	 * executing the first instruction.
	 */
	if (!CPU_EMPTY(&vcpus_suspended)) {
		CPU_SET(vcpuid, &vcpus_suspended);
		_gdb_cpu_suspend(vcpu, false);
	}
	pthread_mutex_unlock(&gdb_lock);
}
795 
796 /*
797  * Invoked by vCPU before resuming execution.  This enables stepping
798  * if the vCPU is marked as stepping.
799  */
static void
gdb_cpu_resume(struct vcpu *vcpu)
{
	struct vcpu_state *vs;
	int error;

	vs = &vcpu_state[vcpu_id(vcpu)];

	/*
	 * Any pending event should already be reported before
	 * resuming.
	 */
	assert(vs->hit_swbreak == false);
	assert(vs->stepped == false);
	/* Arm the MTRAP exit so the next instruction traps back here. */
	if (vs->stepping) {
		error = vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 1);
		assert(error == 0);
	}
}
819 
820 /*
821  * Handler for VM_EXITCODE_DEBUG used to suspend a vCPU when the guest
822  * has been suspended due to an event on different vCPU or in response
823  * to a guest-wide suspend such as Ctrl-C or the stop on attach.
824  */
void
gdb_cpu_suspend(struct vcpu *vcpu)
{

	if (!gdb_active)
		return;
	pthread_mutex_lock(&gdb_lock);
	/* Park until the debugger resumes the guest, then re-arm stepping. */
	_gdb_cpu_suspend(vcpu, true);
	gdb_cpu_resume(vcpu);
	pthread_mutex_unlock(&gdb_lock);
}
836 
/*
 * Ask every active vCPU to stop.  If all of them are already waiting
 * in the debug server, finish the suspend immediately; otherwise the
 * last vCPU to park does so from _gdb_cpu_suspend().
 */
static void
gdb_suspend_vcpus(void)
{

	assert(pthread_mutex_isowned_np(&gdb_lock));
	debug("suspending all CPUs\n");
	vcpus_suspended = vcpus_active;
	vm_suspend_all_cpus(ctx);
	if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
		gdb_finish_suspend_vcpus();
}
848 
849 /*
850  * Handler for VM_EXITCODE_MTRAP reported when a vCPU single-steps via
851  * the VT-x-specific MTRAP exit.
852  */
void
gdb_cpu_mtrap(struct vcpu *vcpu)
{
	struct vcpu_state *vs;
	int vcpuid;

	if (!gdb_active)
		return;
	vcpuid = vcpu_id(vcpu);
	debug("$vCPU %d MTRAP\n", vcpuid);
	pthread_mutex_lock(&gdb_lock);
	vs = &vcpu_state[vcpuid];
	if (vs->stepping) {
		/* The single step completed; disarm the MTRAP exit. */
		vs->stepping = false;
		vs->stepped = true;
		vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 0);
		/* Stay suspended until the step event has been reported. */
		while (vs->stepped) {
			if (stopped_vcpu == -1) {
				debug("$vCPU %d reporting step\n", vcpuid);
				stopped_vcpu = vcpuid;
				gdb_suspend_vcpus();
			}
			_gdb_cpu_suspend(vcpu, true);
		}
		gdb_cpu_resume(vcpu);
	}
	pthread_mutex_unlock(&gdb_lock);
}
881 
882 static struct breakpoint *
find_breakpoint(uint64_t gpa)883 find_breakpoint(uint64_t gpa)
884 {
885 	struct breakpoint *bp;
886 
887 	TAILQ_FOREACH(bp, &breakpoints, link) {
888 		if (bp->gpa == gpa)
889 			return (bp);
890 	}
891 	return (NULL);
892 }
893 
/*
 * Handler for a breakpoint VM exit (the guest executed INT3).  If the
 * faulting address matches a breakpoint set by the debug server, the
 * event is reported and the vCPU parks until the debugger acknowledges
 * or removes the breakpoint.  Otherwise the trap belongs to the guest
 * and #BP is re-injected.
 */
void
gdb_cpu_breakpoint(struct vcpu *vcpu, struct vm_exit *vmexit)
{
	struct breakpoint *bp;
	struct vcpu_state *vs;
	uint64_t gpa;
	int error, vcpuid;

	if (!gdb_active) {
		EPRINTLN("vm_loop: unexpected VMEXIT_DEBUG");
		exit(4);
	}
	vcpuid = vcpu_id(vcpu);
	pthread_mutex_lock(&gdb_lock);
	error = guest_vaddr2paddr(vcpu, guest_pc(vmexit), &gpa);
	assert(error == 1);
	bp = find_breakpoint(gpa);
	if (bp != NULL) {
		vs = &vcpu_state[vcpuid];
		assert(vs->stepping == false);
		assert(vs->stepped == false);
		assert(vs->hit_swbreak == false);
		vs->hit_swbreak = true;
		/* Leave the PC at the breakpoint address for the report. */
		vm_set_register(vcpu, GDB_PC_REGNAME, guest_pc(vmexit));
		for (;;) {
			if (stopped_vcpu == -1) {
				debug("$vCPU %d reporting breakpoint at rip %#lx\n",
				    vcpuid, guest_pc(vmexit));
				stopped_vcpu = vcpuid;
				gdb_suspend_vcpus();
			}
			_gdb_cpu_suspend(vcpu, true);
			if (!vs->hit_swbreak) {
				/* Breakpoint reported. */
				break;
			}
			bp = find_breakpoint(gpa);
			if (bp == NULL) {
				/* Breakpoint was removed. */
				vs->hit_swbreak = false;
				break;
			}
		}
		gdb_cpu_resume(vcpu);
	} else {
		debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpuid,
		    guest_pc(vmexit));
		error = vm_set_register(vcpu, VM_REG_GUEST_ENTRY_INST_LENGTH,
		    vmexit->u.bpt.inst_length);
		assert(error == 0);
		error = vm_inject_exception(vcpu, IDT_BP, 0, 0, 0);
		assert(error == 0);
	}
	pthread_mutex_unlock(&gdb_lock);
}
949 
/*
 * Release one vCPU to execute a single instruction.  Returns false if
 * the MTRAP single-step capability is unavailable on this host.
 */
static bool
gdb_step_vcpu(struct vcpu *vcpu)
{
	int error, val, vcpuid;

	vcpuid = vcpu_id(vcpu);
	debug("$vCPU %d step\n", vcpuid);
	/* Probe for MTRAP support before committing to a step. */
	error = vm_get_capability(vcpu, VM_CAP_MTRAP_EXIT, &val);
	if (error < 0)
		return (false);

	discard_stop();
	vcpu_state[vcpuid].stepping = true;
	vm_resume_cpu(vcpu);
	/* Wake only this vCPU; the rest stay suspended. */
	CPU_CLR(vcpuid, &vcpus_suspended);
	pthread_cond_broadcast(&idle_vcpus);
	return (true);
}
968 
/* Resume all suspended vCPUs and wake any threads parked in the server. */
static void
gdb_resume_vcpus(void)
{

	assert(pthread_mutex_isowned_np(&gdb_lock));
	vm_resume_all_cpus(ctx);
	debug("resuming all CPUs\n");
	CPU_ZERO(&vcpus_suspended);
	pthread_cond_broadcast(&idle_vcpus);
}
979 
/*
 * Handle the 'g' packet: reply with the full register file of the
 * current vCPU, each register in gdb_regset order encoded as
 * native-endian hex.
 */
static void
gdb_read_regs(void)
{
	uint64_t regvals[nitems(gdb_regset)];
	int regnums[nitems(gdb_regset)];

	for (size_t i = 0; i < nitems(gdb_regset); i++)
		regnums[i] = gdb_regset[i].id;
	if (vm_get_register_set(vcpus[cur_vcpu], nitems(gdb_regset),
	    regnums, regvals) == -1) {
		send_error(errno);
		return;
	}
	start_packet();
	for (size_t i = 0; i < nitems(gdb_regset); i++)
		append_unsigned_native(regvals[i], gdb_regset[i].size);
	finish_packet();
}
998 
999 static void
gdb_read_one_reg(const uint8_t * data,size_t len)1000 gdb_read_one_reg(const uint8_t *data, size_t len)
1001 {
1002 	uint64_t regval;
1003 	uintmax_t reg;
1004 
1005 	reg = parse_integer(data, len);
1006 	if (reg >= nitems(gdb_regset)) {
1007 		send_error(EINVAL);
1008 		return;
1009 	}
1010 
1011 	if (vm_get_register(vcpus[cur_vcpu], gdb_regset[reg].id, &regval) ==
1012 	    -1) {
1013 		send_error(errno);
1014 		return;
1015 	}
1016 
1017 	start_packet();
1018 	append_unsigned_native(regval, gdb_regset[reg].size);
1019 	finish_packet();
1020 }
1021 
/*
 * Handle the 'm' packet: read guest memory at a virtual address and
 * reply with hex-encoded bytes.  Memory is translated and read one
 * page at a time; RAM pages are copied directly while non-RAM pages
 * fall back to aligned MMIO reads.  On a translation or read failure
 * partway through, any bytes already gathered are sent as a short
 * reply; a failure before any byte is read produces an error reply.
 */
static void
gdb_read_mem(const uint8_t *data, size_t len)
{
	uint64_t gpa, gva, val;
	uint8_t *cp;
	size_t resid, todo, bytes;
	bool started;
	int error;

	assert(len >= 1);

	/* Skip 'm' */
	data += 1;
	len -= 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse length. */
	resid = parse_integer(data, len);

	started = false;
	while (resid > 0) {
		error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
		if (error == -1) {
			if (started)
				finish_packet();
			else
				send_error(errno);
			return;
		}
		if (error == 0) {
			if (started)
				finish_packet();
			else
				send_error(EFAULT);
			return;
		}

		/* Read bytes from current page. */
		todo = getpagesize() - gpa % getpagesize();
		if (todo > resid)
			todo = resid;

		cp = paddr_guest2host(ctx, gpa, todo);
		if (cp != NULL) {
			/*
			 * If this page is guest RAM, read it a byte
			 * at a time.
			 */
			if (!started) {
				start_packet();
				started = true;
			}
			while (todo > 0) {
				append_byte(*cp);
				cp++;
				gpa++;
				gva++;
				resid--;
				todo--;
			}
		} else {
			/*
			 * If this page isn't guest RAM, try to handle
			 * it via MMIO.  For MMIO requests, use
			 * aligned reads of words when possible.
			 */
			while (todo > 0) {
				/* Pick the widest aligned access size. */
				if (gpa & 1 || todo == 1)
					bytes = 1;
				else if (gpa & 2 || todo == 2)
					bytes = 2;
				else
					bytes = 4;
				error = read_mem(vcpus[cur_vcpu], gpa, &val,
				    bytes);
				if (error == 0) {
					if (!started) {
						start_packet();
						started = true;
					}
					gpa += bytes;
					gva += bytes;
					resid -= bytes;
					todo -= bytes;
					/* Emit the word LSB-first. */
					while (bytes > 0) {
						append_byte(val);
						val >>= 8;
						bytes--;
					}
				} else {
					if (started)
						finish_packet();
					else
						send_error(EFAULT);
					return;
				}
			}
		}
		assert(resid == 0 || gpa % getpagesize() == 0);
	}
	if (!started)
		start_packet();
	finish_packet();
}
1135 
/*
 * Handle the 'M' packet: write hex-encoded bytes into guest memory at
 * a virtual address.  Mirrors gdb_read_mem(): addresses are
 * translated and written one page at a time, with RAM pages written
 * directly and non-RAM pages handled through aligned MMIO writes.
 * Any failure produces an error reply; success replies "OK".
 */
static void
gdb_write_mem(const uint8_t *data, size_t len)
{
	uint64_t gpa, gva, val;
	uint8_t *cp;
	size_t resid, todo, bytes;
	int error;

	assert(len >= 1);

	/* Skip 'M' */
	data += 1;
	len -= 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume length. */
	cp = memchr(data, ':', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	resid = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Verify the available bytes match the length. */
	if (len != resid * 2) {
		send_error(EINVAL);
		return;
	}

	while (resid > 0) {
		error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
		if (error == -1) {
			send_error(errno);
			return;
		}
		if (error == 0) {
			send_error(EFAULT);
			return;
		}

		/* Write bytes to current page. */
		todo = getpagesize() - gpa % getpagesize();
		if (todo > resid)
			todo = resid;

		cp = paddr_guest2host(ctx, gpa, todo);
		if (cp != NULL) {
			/*
			 * If this page is guest RAM, write it a byte
			 * at a time.
			 */
			while (todo > 0) {
				assert(len >= 2);
				*cp = parse_byte(data);
				data += 2;
				len -= 2;
				cp++;
				gpa++;
				gva++;
				resid--;
				todo--;
			}
		} else {
			/*
			 * If this page isn't guest RAM, try to handle
			 * it via MMIO.  For MMIO requests, use
			 * aligned writes of words when possible.
			 */
			while (todo > 0) {
				/* Pick the widest aligned access size. */
				if (gpa & 1 || todo == 1) {
					bytes = 1;
					val = parse_byte(data);
				} else if (gpa & 2 || todo == 2) {
					bytes = 2;
					val = be16toh(parse_integer(data, 4));
				} else {
					bytes = 4;
					val = be32toh(parse_integer(data, 8));
				}
				error = write_mem(vcpus[cur_vcpu], gpa, val,
				    bytes);
				if (error == 0) {
					gpa += bytes;
					gva += bytes;
					resid -= bytes;
					todo -= bytes;
					data += 2 * bytes;
					len -= 2 * bytes;
				} else {
					send_error(EFAULT);
					return;
				}
			}
		}
		assert(resid == 0 || gpa % getpagesize() == 0);
	}
	assert(len == 0);
	send_ok();
}
1246 
1247 static bool
set_breakpoint_caps(bool enable)1248 set_breakpoint_caps(bool enable)
1249 {
1250 	cpuset_t mask;
1251 	int vcpu;
1252 
1253 	mask = vcpus_active;
1254 	while (!CPU_EMPTY(&mask)) {
1255 		vcpu = CPU_FFS(&mask) - 1;
1256 		CPU_CLR(vcpu, &mask);
1257 		if (vm_set_capability(vcpus[vcpu], VM_CAP_BPT_EXIT,
1258 		    enable ? 1 : 0) < 0)
1259 			return (false);
1260 		debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
1261 		    enable ? "en" : "dis");
1262 	}
1263 	return (true);
1264 }
1265 
1266 static void
remove_all_sw_breakpoints(void)1267 remove_all_sw_breakpoints(void)
1268 {
1269 	struct breakpoint *bp, *nbp;
1270 	uint8_t *cp;
1271 
1272 	if (TAILQ_EMPTY(&breakpoints))
1273 		return;
1274 
1275 	TAILQ_FOREACH_SAFE(bp, &breakpoints, link, nbp) {
1276 		debug("remove breakpoint at %#lx\n", bp->gpa);
1277 		cp = paddr_guest2host(ctx, bp->gpa, sizeof(bp->shadow_inst));
1278 		memcpy(cp, bp->shadow_inst, sizeof(bp->shadow_inst));
1279 		TAILQ_REMOVE(&breakpoints, bp, link);
1280 		free(bp);
1281 	}
1282 	TAILQ_INIT(&breakpoints);
1283 	set_breakpoint_caps(false);
1284 }
1285 
1286 static void
update_sw_breakpoint(uint64_t gva,int kind,bool insert)1287 update_sw_breakpoint(uint64_t gva, int kind, bool insert)
1288 {
1289 	struct breakpoint *bp;
1290 	uint64_t gpa;
1291 	uint8_t *cp;
1292 	int error;
1293 
1294 	if (kind != GDB_BP_SIZE) {
1295 		send_error(EINVAL);
1296 		return;
1297 	}
1298 
1299 	error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
1300 	if (error == -1) {
1301 		send_error(errno);
1302 		return;
1303 	}
1304 	if (error == 0) {
1305 		send_error(EFAULT);
1306 		return;
1307 	}
1308 
1309 	cp = paddr_guest2host(ctx, gpa, sizeof(bp->shadow_inst));
1310 
1311 	/* Only permit breakpoints in guest RAM. */
1312 	if (cp == NULL) {
1313 		send_error(EFAULT);
1314 		return;
1315 	}
1316 
1317 	/* Find any existing breakpoint. */
1318 	bp = find_breakpoint(gpa);
1319 
1320 	/*
1321 	 * Silently ignore duplicate commands since the protocol
1322 	 * requires these packets to be idempotent.
1323 	 */
1324 	if (insert) {
1325 		if (bp == NULL) {
1326 			if (TAILQ_EMPTY(&breakpoints) &&
1327 			    !set_breakpoint_caps(true)) {
1328 				send_empty_response();
1329 				return;
1330 			}
1331 			bp = malloc(sizeof(*bp));
1332 			bp->gpa = gpa;
1333 			memcpy(bp->shadow_inst, cp, sizeof(bp->shadow_inst));
1334 			memcpy(cp, GDB_BP_INSTR, sizeof(bp->shadow_inst));
1335 			TAILQ_INSERT_TAIL(&breakpoints, bp, link);
1336 			debug("new breakpoint at %#lx\n", gpa);
1337 		}
1338 	} else {
1339 		if (bp != NULL) {
1340 			debug("remove breakpoint at %#lx\n", gpa);
1341 			memcpy(cp, bp->shadow_inst, sizeof(bp->shadow_inst));
1342 			TAILQ_REMOVE(&breakpoints, bp, link);
1343 			free(bp);
1344 			if (TAILQ_EMPTY(&breakpoints))
1345 				set_breakpoint_caps(false);
1346 		}
1347 	}
1348 	send_ok();
1349 }
1350 
1351 static void
parse_breakpoint(const uint8_t * data,size_t len)1352 parse_breakpoint(const uint8_t *data, size_t len)
1353 {
1354 	uint64_t gva;
1355 	uint8_t *cp;
1356 	bool insert;
1357 	int kind, type;
1358 
1359 	insert = data[0] == 'Z';
1360 
1361 	/* Skip 'Z/z' */
1362 	data += 1;
1363 	len -= 1;
1364 
1365 	/* Parse and consume type. */
1366 	cp = memchr(data, ',', len);
1367 	if (cp == NULL || cp == data) {
1368 		send_error(EINVAL);
1369 		return;
1370 	}
1371 	type = parse_integer(data, cp - data);
1372 	len -= (cp - data) + 1;
1373 	data += (cp - data) + 1;
1374 
1375 	/* Parse and consume address. */
1376 	cp = memchr(data, ',', len);
1377 	if (cp == NULL || cp == data) {
1378 		send_error(EINVAL);
1379 		return;
1380 	}
1381 	gva = parse_integer(data, cp - data);
1382 	len -= (cp - data) + 1;
1383 	data += (cp - data) + 1;
1384 
1385 	/* Parse and consume kind. */
1386 	cp = memchr(data, ';', len);
1387 	if (cp == data) {
1388 		send_error(EINVAL);
1389 		return;
1390 	}
1391 	if (cp != NULL) {
1392 		/*
1393 		 * We do not advertise support for either the
1394 		 * ConditionalBreakpoints or BreakpointCommands
1395 		 * features, so we should not be getting conditions or
1396 		 * commands from the remote end.
1397 		 */
1398 		send_empty_response();
1399 		return;
1400 	}
1401 	kind = parse_integer(data, len);
1402 	data += len;
1403 	len = 0;
1404 
1405 	switch (type) {
1406 	case 0:
1407 		update_sw_breakpoint(gva, kind, insert);
1408 		break;
1409 	default:
1410 		send_empty_response();
1411 		break;
1412 	}
1413 }
1414 
/*
 * Returns true when the packet payload in 'data' (length 'len') begins
 * with the NUL-terminated command name 'cmd'.
 */
static bool
command_equals(const uint8_t *data, size_t len, const char *cmd)
{
	size_t cmdlen;

	cmdlen = strlen(cmd);
	if (cmdlen > len)
		return (false);
	return (memcmp(data, cmd, cmdlen) == 0);
}
1423 
1424 static void
check_features(const uint8_t * data,size_t len)1425 check_features(const uint8_t *data, size_t len)
1426 {
1427 	char *feature, *next_feature, *str, *value;
1428 	bool supported;
1429 
1430 	str = malloc(len + 1);
1431 	memcpy(str, data, len);
1432 	str[len] = '\0';
1433 	next_feature = str;
1434 
1435 	while ((feature = strsep(&next_feature, ";")) != NULL) {
1436 		/*
1437 		 * Null features shouldn't exist, but skip if they
1438 		 * do.
1439 		 */
1440 		if (strcmp(feature, "") == 0)
1441 			continue;
1442 
1443 		/*
1444 		 * Look for the value or supported / not supported
1445 		 * flag.
1446 		 */
1447 		value = strchr(feature, '=');
1448 		if (value != NULL) {
1449 			*value = '\0';
1450 			value++;
1451 			supported = true;
1452 		} else {
1453 			value = feature + strlen(feature) - 1;
1454 			switch (*value) {
1455 			case '+':
1456 				supported = true;
1457 				break;
1458 			case '-':
1459 				supported = false;
1460 				break;
1461 			default:
1462 				/*
1463 				 * This is really a protocol error,
1464 				 * but we just ignore malformed
1465 				 * features for ease of
1466 				 * implementation.
1467 				 */
1468 				continue;
1469 			}
1470 			value = NULL;
1471 		}
1472 
1473 		if (strcmp(feature, "swbreak") == 0)
1474 			swbreak_enabled = supported;
1475 
1476 #ifndef __FreeBSD__
1477 		/*
1478 		 * The compiler dislikes 'supported' being set but never used.
1479 		 * Make it happy here.
1480 		 */
1481 		if (supported) {
1482 			debug("feature '%s' supported\n", feature);
1483 		}
1484 #endif /* __FreeBSD__ */
1485 	}
1486 	free(str);
1487 
1488 	start_packet();
1489 
1490 	/* This is an arbitrary limit. */
1491 	append_string("PacketSize=4096");
1492 	append_string(";swbreak+");
1493 	finish_packet();
1494 }
1495 
/*
 * Dispatch a GDB general query ('q') packet.
 *
 * Supported queries:
 *   qAttached        - report that we are attached to an existing process
 *   qC               - report the current thread (vCPU) id
 *   qfThreadInfo     - list all active vCPUs as thread ids (1-based)
 *   qsThreadInfo     - terminate the thread list
 *   qSupported       - negotiate protocol features
 *   qThreadExtraInfo - human-readable name for a given thread id
 * Anything else gets an empty response, meaning "unsupported".
 */
static void
gdb_query(const uint8_t *data, size_t len)
{

	/*
	 * TODO:
	 * - qSearch
	 */
	if (command_equals(data, len, "qAttached")) {
		/* '1': attached to an existing process (the running VM). */
		start_packet();
		append_char('1');
		finish_packet();
	} else if (command_equals(data, len, "qC")) {
		/* Thread ids are vCPU number + 1, since id 0 means "any". */
		start_packet();
		append_string("QC");
		append_integer(cur_vcpu + 1);
		finish_packet();
	} else if (command_equals(data, len, "qfThreadInfo")) {
		cpuset_t mask;
		bool first;
		int vcpu;

		if (CPU_EMPTY(&vcpus_active)) {
			send_error(EINVAL);
			return;
		}
		/* Emit "m<id>,<id>,..." for every active vCPU. */
		mask = vcpus_active;
		start_packet();
		append_char('m');
		first = true;
		while (!CPU_EMPTY(&mask)) {
			vcpu = CPU_FFS(&mask) - 1;
			CPU_CLR(vcpu, &mask);
			if (first)
				first = false;
			else
				append_char(',');
			append_integer(vcpu + 1);
		}
		finish_packet();
	} else if (command_equals(data, len, "qsThreadInfo")) {
		/* All threads fit in the qfThreadInfo reply: end of list. */
		start_packet();
		append_char('l');
		finish_packet();
	} else if (command_equals(data, len, "qSupported")) {
		data += strlen("qSupported");
		len -= strlen("qSupported");
		check_features(data, len);
	} else if (command_equals(data, len, "qThreadExtraInfo")) {
		char buf[16];
		int tid;

		data += strlen("qThreadExtraInfo");
		len -= strlen("qThreadExtraInfo");
		if (len == 0 || *data != ',') {
			send_error(EINVAL);
			return;
		}
		tid = parse_threadid(data + 1, len - 1);
		if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
			send_error(EINVAL);
			return;
		}

		/* Reply is the hex-encoded ASCII description. */
		snprintf(buf, sizeof(buf), "vCPU %d", tid - 1);
		start_packet();
		append_asciihex(buf);
		finish_packet();
	} else
		send_empty_response();
}
1567 
1568 static void
handle_command(const uint8_t * data,size_t len)1569 handle_command(const uint8_t *data, size_t len)
1570 {
1571 
1572 	/* Reject packets with a sequence-id. */
1573 	if (len >= 3 && data[0] >= '0' && data[0] <= '9' &&
1574 	    data[0] >= '0' && data[0] <= '9' && data[2] == ':') {
1575 		send_empty_response();
1576 		return;
1577 	}
1578 
1579 	switch (*data) {
1580 	case 'c':
1581 		if (len != 1) {
1582 			send_error(EINVAL);
1583 			break;
1584 		}
1585 
1586 		discard_stop();
1587 		gdb_resume_vcpus();
1588 		break;
1589 	case 'D':
1590 		send_ok();
1591 
1592 		/* TODO: Resume any stopped CPUs. */
1593 		break;
1594 	case 'g':
1595 		gdb_read_regs();
1596 		break;
1597 	case 'p':
1598 		gdb_read_one_reg(data + 1, len - 1);
1599 		break;
1600 	case 'H': {
1601 		int tid;
1602 
1603 		if (len < 2 || (data[1] != 'g' && data[1] != 'c')) {
1604 			send_error(EINVAL);
1605 			break;
1606 		}
1607 		tid = parse_threadid(data + 2, len - 2);
1608 		if (tid == -2) {
1609 			send_error(EINVAL);
1610 			break;
1611 		}
1612 
1613 		if (CPU_EMPTY(&vcpus_active)) {
1614 			send_error(EINVAL);
1615 			break;
1616 		}
1617 		if (tid == -1 || tid == 0)
1618 			cur_vcpu = CPU_FFS(&vcpus_active) - 1;
1619 		else if (CPU_ISSET(tid - 1, &vcpus_active))
1620 			cur_vcpu = tid - 1;
1621 		else {
1622 			send_error(EINVAL);
1623 			break;
1624 		}
1625 		send_ok();
1626 		break;
1627 	}
1628 	case 'm':
1629 		gdb_read_mem(data, len);
1630 		break;
1631 	case 'M':
1632 		gdb_write_mem(data, len);
1633 		break;
1634 	case 'T': {
1635 		int tid;
1636 
1637 		tid = parse_threadid(data + 1, len - 1);
1638 		if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
1639 			send_error(EINVAL);
1640 			return;
1641 		}
1642 		send_ok();
1643 		break;
1644 	}
1645 	case 'q':
1646 		gdb_query(data, len);
1647 		break;
1648 	case 's':
1649 		if (len != 1) {
1650 			send_error(EINVAL);
1651 			break;
1652 		}
1653 
1654 		/* Don't send a reply until a stop occurs. */
1655 		if (!gdb_step_vcpu(vcpus[cur_vcpu])) {
1656 			send_error(EOPNOTSUPP);
1657 			break;
1658 		}
1659 		break;
1660 	case 'z':
1661 	case 'Z':
1662 		parse_breakpoint(data, len);
1663 		break;
1664 	case '?':
1665 		report_stop(false);
1666 		break;
1667 	case 'G': /* TODO */
1668 	case 'v':
1669 		/* Handle 'vCont' */
1670 		/* 'vCtrlC' */
1671 	case 'P': /* TODO */
1672 	case 'Q': /* TODO */
1673 	case 't': /* TODO */
1674 	case 'X': /* TODO */
1675 	default:
1676 		send_empty_response();
1677 	}
1678 }
1679 
/* Check for a valid packet in the command buffer. */
static void
check_command(int fd)
{
	uint8_t *head, *hash, *p, sum;
	size_t avail, plen;

	/*
	 * Consume as many complete items (interrupts, ACK/NACK bytes,
	 * and '$'-framed packets) as are buffered; return once the
	 * buffer is empty or holds only a partial packet.
	 */
	for (;;) {
		avail = cur_comm.len;
		if (avail == 0)
			return;
		head = io_buffer_head(&cur_comm);
		switch (*head) {
		case 0x03:
			/* Raw ^C from GDB requests an interrupt. */
			debug("<- Ctrl-C\n");
			io_buffer_consume(&cur_comm, 1);

			gdb_suspend_vcpus();
			break;
		case '+':
			/* ACK of previous response. */
			debug("<- +\n");
			if (response_pending())
				io_buffer_reset(&cur_resp);
			io_buffer_consume(&cur_comm, 1);
			/* Now in sync: report any deferred stop event. */
			if (stopped_vcpu != -1 && report_next_stop) {
				report_stop(true);
				send_pending_data(fd);
			}
			break;
		case '-':
			/* NACK of previous response. */
			debug("<- -\n");
			if (response_pending()) {
				/* Rewind the response buffer to resend it. */
				cur_resp.len += cur_resp.start;
				cur_resp.start = 0;
				/* But don't resend a leading ACK byte. */
				if (cur_resp.data[0] == '+')
					io_buffer_advance(&cur_resp, 1);
				debug("-> %.*s\n", (int)cur_resp.len,
				    io_buffer_head(&cur_resp));
			}
			io_buffer_consume(&cur_comm, 1);
			send_pending_data(fd);
			break;
		case '$':
			/* Packet. */

			if (response_pending()) {
				warnx("New GDB command while response in "
				    "progress");
				io_buffer_reset(&cur_resp);
			}

			/* Is packet complete? ('#' plus 2 checksum chars) */
			hash = memchr(head, '#', avail);
			if (hash == NULL)
				return;
			plen = (hash - head + 1) + 2;
			if (avail < plen)
				return;
			debug("<- %.*s\n", (int)plen, head);

			/* Verify checksum: sum of payload bytes mod 256. */
			for (sum = 0, p = head + 1; p < hash; p++)
				sum += *p;
			if (sum != parse_byte(hash + 1)) {
				/* Bad checksum: NACK and discard. */
				io_buffer_consume(&cur_comm, plen);
				debug("-> -\n");
				send_char('-');
				send_pending_data(fd);
				break;
			}
			send_char('+');

			/* Payload is everything between '$' and '#'. */
			handle_command(head + 1, hash - (head + 1));
			io_buffer_consume(&cur_comm, plen);
			if (!response_pending()) {
				debug("-> +\n");
			}
			send_pending_data(fd);
			break;
		default:
			/* XXX: Possibly drop connection instead. */
			debug("-> %02x\n", *head);
			io_buffer_consume(&cur_comm, 1);
			break;
		}
	}
}
1769 
/*
 * mevent read callback for the GDB connection: pull all readily
 * available bytes off the socket into the command buffer and process
 * any complete packets.  EOF or a hard read error tears down the
 * connection.
 */
static void
gdb_readable(int fd, enum ev_type event __unused, void *arg __unused)
{
	size_t pending;
	ssize_t nread;
	int n;

	if (ioctl(fd, FIONREAD, &n) == -1) {
		warn("FIONREAD on GDB socket");
		return;
	}
	assert(n >= 0);
	pending = n;

	/*
	 * 'pending' might be zero due to EOF.  We need to call read
	 * with a non-zero length to detect EOF.
	 */
	if (pending == 0)
		pending = 1;

	/* Ensure there is room in the command buffer. */
	io_buffer_grow(&cur_comm, pending);
	assert(io_buffer_avail(&cur_comm) >= pending);

	nread = read(fd, io_buffer_tail(&cur_comm), io_buffer_avail(&cur_comm));
	if (nread == 0) {
		/* EOF: the debugger went away. */
		close_connection();
	} else if (nread == -1) {
		if (errno == EAGAIN)
			return;

		warn("Read from GDB socket");
		close_connection();
	} else {
		cur_comm.len += nread;
		/* Parse under the lock; handlers touch shared vCPU state. */
		pthread_mutex_lock(&gdb_lock);
		check_command(fd);
		pthread_mutex_unlock(&gdb_lock);
	}
}
1811 
/*
 * mevent write callback: the socket became writable again, so flush
 * any response bytes that could not be sent earlier.
 */
static void
gdb_writable(int fd, enum ev_type event __unused, void *arg __unused)
{

	send_pending_data(fd);
}
1818 
1819 static void
new_connection(int fd,enum ev_type event __unused,void * arg)1820 new_connection(int fd, enum ev_type event __unused, void *arg)
1821 {
1822 	int optval, s;
1823 
1824 	s = accept4(fd, NULL, NULL, SOCK_NONBLOCK);
1825 	if (s == -1) {
1826 		if (arg != NULL)
1827 			err(1, "Failed accepting initial GDB connection");
1828 
1829 		/* Silently ignore errors post-startup. */
1830 		return;
1831 	}
1832 
1833 	optval = 1;
1834 	if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)) ==
1835 	    -1) {
1836 		warn("Failed to disable SIGPIPE for GDB connection");
1837 		close(s);
1838 		return;
1839 	}
1840 
1841 	pthread_mutex_lock(&gdb_lock);
1842 	if (cur_fd != -1) {
1843 		close(s);
1844 		warnx("Ignoring additional GDB connection.");
1845 	}
1846 
1847 	read_event = mevent_add(s, EVF_READ, gdb_readable, NULL);
1848 	if (read_event == NULL) {
1849 		if (arg != NULL)
1850 			err(1, "Failed to setup initial GDB connection");
1851 		pthread_mutex_unlock(&gdb_lock);
1852 		return;
1853 	}
1854 	write_event = mevent_add(s, EVF_WRITE, gdb_writable, NULL);
1855 	if (write_event == NULL) {
1856 		if (arg != NULL)
1857 			err(1, "Failed to setup initial GDB connection");
1858 		mevent_delete_close(read_event);
1859 		read_event = NULL;
1860 	}
1861 
1862 	cur_fd = s;
1863 	cur_vcpu = 0;
1864 	stopped_vcpu = -1;
1865 
1866 	/* Break on attach. */
1867 	first_stop = true;
1868 	report_next_stop = false;
1869 	gdb_suspend_vcpus();
1870 	pthread_mutex_unlock(&gdb_lock);
1871 }
1872 
#ifndef WITHOUT_CAPSICUM
/*
 * Restrict the GDB socket to the minimal set of Capsicum rights
 * (accept/event/read/write/setsockopt/ioctl) and to the single
 * FIONREAD ioctl used by gdb_readable().
 */
static void
limit_gdb_socket(int s)
{
	cap_rights_t rights;
	unsigned long ioctls[] = { FIONREAD };

	cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE,
	    CAP_SETSOCKOPT, CAP_IOCTL);
	if (caph_rights_limit(s, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	if (caph_ioctls_limit(s, ioctls, nitems(ioctls)) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
}
#endif
1888 
1889 
1890 #ifndef __FreeBSD__
1891 /*
1892  * Equivalent to init_gdb() below, but without configuring the listening socket.
1893  * This will allow the bhyve process to tolerate mdb attaching/detaching from
1894  * the instance while it is running.
1895  */
1896 void
init_mdb(struct vmctx * _ctx)1897 init_mdb(struct vmctx *_ctx)
1898 {
1899 	int error;
1900 	bool wait;
1901 
1902 	wait = get_config_bool_default("gdb.wait", false);
1903 
1904 	error = pthread_mutex_init(&gdb_lock, NULL);
1905 	if (error != 0)
1906 		errc(1, error, "gdb mutex init");
1907 	error = pthread_cond_init(&idle_vcpus, NULL);
1908 	if (error != 0)
1909 		errc(1, error, "gdb cv init");
1910 
1911 	ctx = _ctx;
1912 	stopped_vcpu = -1;
1913 	TAILQ_INIT(&breakpoints);
1914 	vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
1915 	if (wait) {
1916 		/*
1917 		 * Set vcpu 0 in vcpus_suspended.  This will trigger the
1918 		 * logic in gdb_cpu_add() to suspend the first vcpu before
1919 		 * it starts execution.  The vcpu will remain suspended
1920 		 * until a debugger connects.
1921 		 */
1922 		CPU_SET(0, &vcpus_suspended);
1923 		stopped_vcpu = 0;
1924 	}
1925 }
1926 #endif
1927 
/*
 * Initialize the GDB stub: synchronization primitives, per-vCPU state,
 * and a listening TCP socket on gdb.address:gdb.port.  With
 * gdb.wait=true, vCPU 0 is held suspended until a debugger attaches.
 * If gdb.port is not configured the stub stays disabled.
 */
void
init_gdb(struct vmctx *_ctx)
{
	int error, flags, optval, s;
	struct addrinfo hints;
	struct addrinfo *gdbaddr;
	const char *saddr, *value;
	char *sport;
	bool wait;

	value = get_config_value("gdb.port");
	if (value == NULL)
		return;
	sport = strdup(value);
	if (sport == NULL)
		errx(4, "Failed to allocate memory");

	wait = get_config_bool_default("gdb.wait", false);

	saddr = get_config_value("gdb.address");
	if (saddr == NULL) {
		saddr = "localhost";
	}

	debug("==> starting on %s:%s, %swaiting\n",
	    saddr, sport, wait ? "" : "not ");

	error = pthread_mutex_init(&gdb_lock, NULL);
	if (error != 0)
		errc(1, error, "gdb mutex init");
	error = pthread_cond_init(&idle_vcpus, NULL);
	if (error != 0)
		errc(1, error, "gdb cv init");

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_flags = AI_NUMERICSERV | AI_PASSIVE;

	error = getaddrinfo(saddr, sport, &hints, &gdbaddr);
	if (error != 0)
		errx(1, "gdb address resolution: %s", gai_strerror(error));

	ctx = _ctx;
	s = socket(gdbaddr->ai_family, gdbaddr->ai_socktype, 0);
	if (s < 0)
		err(1, "gdb socket create");

	optval = 1;
	(void)setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));

	if (bind(s, gdbaddr->ai_addr, gdbaddr->ai_addrlen) < 0)
		err(1, "gdb socket bind");

	if (listen(s, 1) < 0)
		err(1, "gdb socket listen");

	stopped_vcpu = -1;
	TAILQ_INIT(&breakpoints);
	/* Fail loudly now rather than faulting on first vCPU add. */
	vcpus = calloc(guest_ncpus, sizeof(*vcpus));
	if (vcpus == NULL)
		errx(4, "Failed to allocate memory");
	vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
	if (vcpu_state == NULL)
		errx(4, "Failed to allocate memory");
	if (wait) {
		/*
		 * Set vcpu 0 in vcpus_suspended.  This will trigger the
		 * logic in gdb_cpu_add() to suspend the first vcpu before
		 * it starts execution.  The vcpu will remain suspended
		 * until a debugger connects.
		 */
		CPU_SET(0, &vcpus_suspended);
		stopped_vcpu = 0;
	}

	/* Bug fix: a failed F_GETFL (-1) was previously OR'd into F_SETFL. */
	flags = fcntl(s, F_GETFL);
	if (flags == -1 || fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1)
		err(1, "Failed to mark gdb socket non-blocking");

#ifndef WITHOUT_CAPSICUM
	limit_gdb_socket(s);
#endif
	mevent_add(s, EVF_READ, new_connection, NULL);
	gdb_active = true;
	freeaddrinfo(gdbaddr);
	free(sport);
}
2012