xref: /titanic_41/usr/src/cmd/mdb/intel/kmdb/kaif.c (revision 05b96de272314c66a0e27fe7df41c9a9e1038c5a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * The debugger/"PROM" interface layer
31  *
32  * (it makes more sense on SPARC)
33  */
34 
35 #include <kmdb/kmdb_dpi_impl.h>
36 #include <kmdb/kmdb_kdi.h>
37 #include <kmdb/kmdb_umemglue.h>
38 #include <kmdb/kaif.h>
39 #include <kmdb/kaif_asmutil.h>
40 #include <mdb/mdb_err.h>
41 #include <mdb/mdb_debug.h>
42 #include <mdb/mdb_isautil.h>
43 #include <mdb/mdb_io_impl.h>
44 #include <mdb/mdb_kreg.h>
45 #include <mdb/mdb.h>
46 
47 #include <sys/types.h>
48 #include <sys/segments.h>
49 #include <sys/bitmap.h>
50 
/* Per-CPU debugger save areas; allocated and initialized in kaif_init() */
kaif_cpusave_t	*kaif_cpusave;
int		kaif_ncpusave;

/* Shadow copy of the hardware debug registers (watchpoint state) */
kaif_drreg_t	kaif_drreg;

/* Bitmap of hardware watchpoint (debug register) slots currently in use */
uint32_t	kaif_waptmap;

#ifndef __amd64
/* Used to track the current set of valid kernel selectors. */
uint32_t	kaif_cs;
uint32_t	kaif_ds;
uint32_t	kaif_fs;
uint32_t	kaif_gs;
#endif

/*
 * MSR and value registered via kaif_msr_add_wrexit(); presumably written
 * back when the debugger exits (consumer is outside this file — confirm).
 */
uint_t		kaif_msr_wrexit_msr;
uint64_t	*kaif_msr_wrexit_valp;

uintptr_t	kaif_kernel_handler;
uintptr_t	kaif_sys_sysenter;	/* kernel's sys_sysenter, set in kaif_init() */

/* Zero iff KMDB_AUXV_FL_NOTRPSWTCH was passed in at init time */
int		kaif_trap_switch;

/* Invoked (when non-NULL) as the kernel loads or unloads modules */
void (*kaif_modchg_cb)(struct modctl *, int);

#define	KAIF_MEMRANGES_MAX	2

/* Memory ranges the debugger allocates from; see kaif_memrange_add() */
kaif_memrange_t	kaif_memranges[KAIF_MEMRANGES_MAX];
int		kaif_nmemranges;

/*
 * Opcode bytes that receive special treatment during single-step; see
 * kaif_step() for how each is handled.
 */
enum {
	M_SYSRET	= 0x07, /* after M_ESC */
	M_ESC		= 0x0f,
	M_SYSEXIT	= 0x35, /* after M_ESC */
	M_REX_LO	= 0x40, /* first REX prefix */
	M_REX_HI	= 0x4f, /* last REX prefix */
	M_PUSHF		= 0x9c,	/* pushfl and pushfq */
	M_POPF		= 0x9d,	/* popfl and popfq */
	M_INT3		= 0xcc,
	M_INTX		= 0xcd,
	M_INTO		= 0xce,
	M_IRET		= 0xcf,
	M_CLI		= 0xfa,
	M_STI		= 0xfb
};

#define	KAIF_BREAKPOINT_INSTR	M_INT3

/* Recover the debug-register slot number stashed in a watchpoint's wp_priv */
#define	KAIF_WPPRIV2ID(wp)	(int)(uintptr_t)((wp)->wp_priv)

#ifdef __amd64
#define	FLAGS_REG_NAME		"rflags"
#else
#define	FLAGS_REG_NAME		"eflags"
#endif
106 
107 /*
108  * Called during normal debugger operation and during debugger faults.
109  */
110 static void
111 kaif_enter_mon(void)
112 {
113 	char c;
114 
115 	for (;;) {
116 		mdb_iob_printf(mdb.m_out,
117 		    "%s: Do you really want to reboot? (y/n) ",
118 		    mdb.m_pname);
119 		mdb_iob_flush(mdb.m_out);
120 
121 		while (IOP_READ(mdb.m_term, &c, 1) != 1)
122 			continue;
123 		mdb_iob_printf(mdb.m_out, "%c%s", c, (c == '\n' ? "" : "\n"));
124 
125 		if (c == 'n' || c == 'N')
126 			return;
127 		else if (c == 'y' || c == 'Y') {
128 			mdb_iob_printf(mdb.m_out, "Rebooting...\n");
129 
130 			kmdb_dpi_reboot();
131 		}
132 	}
133 }
134 
135 static kaif_cpusave_t *
136 kaif_cpuid2save(int cpuid)
137 {
138 	kaif_cpusave_t *save;
139 
140 	if (cpuid == DPI_MASTER_CPUID)
141 		return (&kaif_cpusave[kaif_master_cpuid]);
142 
143 	if (cpuid < 0 || cpuid >= kaif_ncpusave) {
144 		(void) set_errno(EINVAL);
145 		return (NULL);
146 	}
147 
148 	save = &kaif_cpusave[cpuid];
149 
150 	if (save->krs_cpu_state != KAIF_CPU_STATE_MASTER &&
151 	    save->krs_cpu_state != KAIF_CPU_STATE_SLAVE) {
152 		(void) set_errno(EINVAL);
153 		return (NULL);
154 	}
155 
156 	return (save);
157 }
158 
159 static int
160 kaif_get_cpu_state(int cpuid)
161 {
162 	kaif_cpusave_t *save;
163 
164 	if ((save = kaif_cpuid2save(cpuid)) == NULL)
165 		return (-1); /* errno is set for us */
166 
167 	switch (save->krs_cpu_state) {
168 	case KAIF_CPU_STATE_MASTER:
169 		return (DPI_CPU_STATE_MASTER);
170 	case KAIF_CPU_STATE_SLAVE:
171 		return (DPI_CPU_STATE_SLAVE);
172 	default:
173 		return (set_errno(EINVAL));
174 	}
175 }
176 
/* Return the id of the CPU currently acting as debugger master. */
static int
kaif_get_master_cpuid(void)
{
	return (kaif_master_cpuid);
}
182 
183 static const mdb_tgt_gregset_t *
184 kaif_get_gregs(int cpuid)
185 {
186 	kaif_cpusave_t *save;
187 
188 	if ((save = kaif_cpuid2save(cpuid)) == NULL)
189 		return (NULL); /* errno is set for us */
190 
191 	return (save->krs_gregs);
192 }
193 
/* Maps a generic register alias (e.g. "pc") to its ISA-specific name. */
typedef struct kaif_reg_synonyms {
	const char *rs_syn;	/* generic alias */
	const char *rs_name;	/* canonical register name */
} kaif_reg_synonyms_t;
198 
199 static kreg_t *
200 kaif_find_regp(const char *regname)
201 {
202 	static const kaif_reg_synonyms_t synonyms[] = {
203 #ifdef __amd64
204 	    { "pc", "rip" },
205 	    { "sp", "rsp" },
206 	    { "fp", "rbp" },
207 #else
208 	    { "pc", "eip" },
209 	    { "sp", "esp" },
210 	    { "fp", "ebp" },
211 #endif
212 	    { "tt", "trapno" }
213 	};
214 
215 	kaif_cpusave_t *save;
216 	int i;
217 
218 	save = kaif_cpuid2save(DPI_MASTER_CPUID);
219 
220 	for (i = 0; i < sizeof (synonyms) / sizeof (synonyms[0]); i++) {
221 		if (strcmp(synonyms[i].rs_syn, regname) == 0)
222 			regname = synonyms[i].rs_name;
223 	}
224 
225 	for (i = 0; mdb_isa_kregs[i].rd_name != NULL; i++) {
226 		const mdb_tgt_regdesc_t *rd = &mdb_isa_kregs[i];
227 
228 		if (strcmp(rd->rd_name, regname) == 0)
229 			return (&save->krs_gregs->kregs[rd->rd_num]);
230 	}
231 
232 	(void) set_errno(ENOENT);
233 	return (NULL);
234 }
235 
236 /*ARGSUSED*/
237 static int
238 kaif_get_register(const char *regname, kreg_t *valp)
239 {
240 	kreg_t *regp;
241 
242 	if ((regp = kaif_find_regp(regname)) == NULL)
243 		return (-1);
244 
245 	*valp = *regp;
246 
247 	return (0);
248 }
249 
250 static int
251 kaif_set_register(const char *regname, kreg_t val)
252 {
253 	kreg_t *regp;
254 
255 	if ((regp = kaif_find_regp(regname)) == NULL)
256 		return (-1);
257 
258 	*regp = val;
259 
260 	return (0);
261 }
262 
263 static int
264 kaif_brkpt_arm(uintptr_t addr, mdb_instr_t *instrp)
265 {
266 	mdb_instr_t bkpt = KAIF_BREAKPOINT_INSTR;
267 
268 	if (mdb_tgt_vread(mdb.m_target, instrp, sizeof (mdb_instr_t), addr) !=
269 	    sizeof (mdb_instr_t))
270 		return (-1); /* errno is set for us */
271 
272 	if (mdb_tgt_vwrite(mdb.m_target, &bkpt, sizeof (mdb_instr_t), addr) !=
273 	    sizeof (mdb_instr_t))
274 		return (-1); /* errno is set for us */
275 
276 	return (0);
277 }
278 
279 static int
280 kaif_brkpt_disarm(uintptr_t addr, mdb_instr_t instrp)
281 {
282 	if (mdb_tgt_vwrite(mdb.m_target, &instrp, sizeof (mdb_instr_t), addr) !=
283 	    sizeof (mdb_instr_t))
284 		return (-1); /* errno is set for us */
285 
286 	return (0);
287 }
288 
289 /*
290  * Intel watchpoints are even more fun than SPARC ones.  The Intel architecture
291  * manuals refer to watchpoints as breakpoints.  For consistency  with the
292  * terminology used in other portions of kmdb, we will, however, refer to them
293  * as watchpoints.
294  *
295  * Execute, data write, I/O read/write, and data read/write watchpoints are
296  * supported by the hardware.  Execute watchpoints must be one byte in length,
297  * and must be placed on the first byte of the instruction to be watched.
298  * Lengths of other watchpoints are more varied.
299  *
300  * Given that we already have a breakpoint facility, and given the restrictions
301  * placed on execute watchpoints, we're going to disallow the creation of
302  * execute watchpoints.  The others will be fully supported.  See the Debugging
303  * chapter in both the IA32 and AMD64 System Programming books for more details.
304  */
305 
306 #ifdef __amd64
307 #define	WAPT_DATA_MAX_SIZE	8
308 #define	WAPT_DATA_SIZES_MSG	"1, 2, 4, or 8"
309 #else
310 #define	WAPT_DATA_MAX_SIZE	4
311 #define	WAPT_DATA_SIZES_MSG	"1, 2, or 4"
312 #endif
313 
314 static int
315 kaif_wapt_validate(kmdb_wapt_t *wp)
316 {
317 	if (wp->wp_type == DPI_WAPT_TYPE_IO) {
318 		if (wp->wp_wflags != (MDB_TGT_WA_R | MDB_TGT_WA_W)) {
319 			warn("I/O port watchpoints must be read/write\n");
320 			return (set_errno(EINVAL));
321 		}
322 
323 		if (wp->wp_size != 1 && wp->wp_size != 2 && wp->wp_size != 4) {
324 			warn("I/O watchpoint size must be 1, 2, or 4 bytes\n");
325 			return (set_errno(EINVAL));
326 		}
327 
328 	} else if (wp->wp_type == DPI_WAPT_TYPE_PHYS) {
329 		warn("physical address watchpoints are not supported on this "
330 		    "platform\n");
331 		return (set_errno(EMDB_TGTHWNOTSUP));
332 
333 	} else {
334 		if (wp->wp_wflags != (MDB_TGT_WA_R | MDB_TGT_WA_W) &&
335 		    wp->wp_wflags != MDB_TGT_WA_W) {
336 			warn("watchpoints must be read/write or write-only\n");
337 			return (set_errno(EINVAL));
338 		}
339 
340 		if ((wp->wp_size & -(wp->wp_size)) != wp->wp_size ||
341 		    wp->wp_size > WAPT_DATA_MAX_SIZE) {
342 			warn("data watchpoint size must be " WAPT_DATA_SIZES_MSG
343 			    " bytes\n");
344 			return (set_errno(EINVAL));
345 		}
346 
347 	}
348 
349 	if (wp->wp_addr & (wp->wp_size - 1)) {
350 		warn("%lu-byte watchpoints must be %lu-byte aligned\n",
351 		    (ulong_t)wp->wp_size, (ulong_t)wp->wp_size);
352 		return (set_errno(EINVAL));
353 	}
354 
355 	return (0);
356 }
357 
358 static int
359 kaif_wapt_reserve(kmdb_wapt_t *wp)
360 {
361 	int id;
362 
363 	for (id = 0; id <= KREG_MAXWPIDX; id++) {
364 		if (!BT_TEST(&kaif_waptmap, id)) {
365 			/* found one */
366 			BT_SET(&kaif_waptmap, id);
367 			wp->wp_priv = (void *)(uintptr_t)id;
368 			return (0);
369 		}
370 	}
371 
372 	return (set_errno(EMDB_WPTOOMANY));
373 }
374 
375 static void
376 kaif_wapt_release(kmdb_wapt_t *wp)
377 {
378 	int id = KAIF_WPPRIV2ID(wp);
379 
380 	ASSERT(BT_TEST(&kaif_waptmap, id));
381 	BT_CLEAR(&kaif_waptmap, id);
382 }
383 
/*
 * Program wp into its reserved debug-register slot in the shadow debug
 * register state (kaif_drreg): set the slot's address register, then encode
 * the access type and length in the control register and enable the slot.
 */
/*ARGSUSED*/
static void
kaif_wapt_arm(kmdb_wapt_t *wp)
{
	uint_t rw;
	int hwid = KAIF_WPPRIV2ID(wp);

	ASSERT(BT_TEST(&kaif_waptmap, hwid));

	/* Translate the watchpoint type and flags into an access code */
	if (wp->wp_type == DPI_WAPT_TYPE_IO)
		rw = KREG_DRCTL_WP_IORW;
	else if (wp->wp_wflags & MDB_TGT_WA_R)
		rw = KREG_DRCTL_WP_RW;
	else if (wp->wp_wflags & MDB_TGT_WA_X)
		rw = KREG_DRCTL_WP_EXEC;
	else
		rw = KREG_DRCTL_WP_WONLY;

	kaif_drreg.dr_addr[hwid] = wp->wp_addr;

	/* Clear the slot's old length/access bits, then set the new ones */
	kaif_drreg.dr_ctl &= ~KREG_DRCTL_WP_MASK(hwid);
	kaif_drreg.dr_ctl |= KREG_DRCTL_WP_LENRW(hwid, wp->wp_size - 1, rw);
	kaif_drreg.dr_ctl |= KREG_DRCTL_WPEN(hwid);
}
408 
/*
 * Remove wp from the shadow debug register state: clear its address
 * register and strip its length/access and enable bits from the control
 * register.  The slot itself remains reserved until kaif_wapt_release.
 */
/*ARGSUSED*/
static void
kaif_wapt_disarm(kmdb_wapt_t *wp)
{
	int hwid = KAIF_WPPRIV2ID(wp);

	ASSERT(BT_TEST(&kaif_waptmap, hwid));

	kaif_drreg.dr_addr[hwid] = 0;
	kaif_drreg.dr_ctl &= ~(KREG_DRCTL_WP_MASK(hwid) |
	    KREG_DRCTL_WPEN_MASK(hwid));
}
421 
422 /*ARGSUSED*/
423 static int
424 kaif_wapt_match(kmdb_wapt_t *wp)
425 {
426 	int hwid = KAIF_WPPRIV2ID(wp);
427 	uint32_t mask = KREG_DRSTAT_WP_MASK(hwid);
428 	int n = 0;
429 	int i;
430 
431 	ASSERT(BT_TEST(&kaif_waptmap, hwid));
432 
433 	for (i = 0; i < kaif_ncpusave; i++)
434 		n += (kaif_cpusave[i].krs_dr.dr_stat & mask) != 0;
435 
436 	return (n);
437 }
438 
/*
 * Single-step the master CPU over one instruction.  Most instructions are
 * stepped in hardware with TF set and IF cleared; instructions that would
 * transfer control out of the debugger's reach (iret, int, sysret, sysexit)
 * are rejected, and instructions that manipulate EFLAGS directly (cli, sti,
 * pushfl, popfl) are emulated or patched up so the debugger's interrupt
 * masking is never disturbed.  Returns 0 on success, -1 with errno set on
 * failure.
 */
static int
kaif_step(void)
{
	kreg_t pc, fl, oldfl, newfl, sp;
	mdb_tgt_addr_t npc;
	mdb_instr_t instr;
	int emulated = 0, rchk = 0;
	size_t pcoff = 0;

	(void) kmdb_dpi_get_register("pc", &pc);

	/* Find the address of the next instruction via the disassembler */
	if ((npc = mdb_dis_nextins(mdb.m_disasm, mdb.m_target,
	    MDB_TGT_AS_VIRT, pc)) == pc) {
		warn("failed to decode instruction at %a for step\n", pc);
		return (set_errno(EINVAL));
	}

	/*
	 * Stepping behavior depends on the type of instruction.  It does not
	 * depend on the presence of a REX prefix, as the action we take for a
	 * given instruction doesn't currently vary for 32-bit instructions
	 * versus their 64-bit counterparts.
	 *
	 * Read the opcode byte at pc; if it is a REX prefix, read the byte
	 * that follows it instead (at most one prefix byte is skipped, via
	 * the rchk guard).
	 */
	do {
		if (mdb_tgt_vread(mdb.m_target, &instr, sizeof (mdb_instr_t),
		    pc + pcoff) != sizeof (mdb_instr_t)) {
			warn("failed to read at %p for step",
			    (void *)(pc + pcoff));
			return (-1);
		}
	} while (pcoff++, (instr >= M_REX_LO && instr <= M_REX_HI && !rchk++));

	switch (instr) {
	case M_IRET:
		warn("iret cannot be stepped\n");
		return (set_errno(EMDB_TGTNOTSUP));

	case M_INT3:
	case M_INTX:
	case M_INTO:
		warn("int cannot be stepped\n");
		return (set_errno(EMDB_TGTNOTSUP));

	case M_ESC:
		/* Two-byte opcode: check the second byte for sysret/sysexit */
		if (mdb_tgt_vread(mdb.m_target, &instr, sizeof (mdb_instr_t),
		    pc + pcoff) != sizeof (mdb_instr_t)) {
			warn("failed to read at %p for step",
			    (void *)(pc + pcoff));
			return (-1);
		}

		switch (instr) {
		case M_SYSRET:
			warn("sysret cannot be stepped\n");
			return (set_errno(EMDB_TGTNOTSUP));
		case M_SYSEXIT:
			warn("sysexit cannot be stepped\n");
			return (set_errno(EMDB_TGTNOTSUP));
		}
		break;

	/*
	 * Some instructions need to be emulated.  We need to prevent direct
	 * manipulations of EFLAGS, so we'll emulate cli, sti.  pushfl and
	 * popfl also receive special handling, as they manipulate both EFLAGS
	 * and %esp.
	 */
	case M_CLI:
		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
		fl &= ~KREG_EFLAGS_IF_MASK;
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);

		emulated = 1;
		break;

	case M_STI:
		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
		fl |= (1 << KREG_EFLAGS_IF_SHIFT);
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);

		emulated = 1;
		break;

	case M_POPF:
		/*
		 * popfl will restore a pushed EFLAGS from the stack, and could
		 * in so doing cause IF to be turned on, if only for a brief
		 * period.  To avoid this, we'll secretly replace the stack's
		 * EFLAGS with our decaffeinated brand.  We'll then manually
		 * load our EFLAGS copy with the real version after the step.
		 */
		(void) kmdb_dpi_get_register("sp", &sp);
		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);

		if (mdb_tgt_vread(mdb.m_target, &newfl, sizeof (kreg_t),
		    sp) != sizeof (kreg_t)) {
			warn("failed to read " FLAGS_REG_NAME
			    " at %p for popfl step\n", (void *)sp);
			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
		}

		/* IF off, TF on, so the step can't enable interrupts */
		fl = (fl & ~KREG_EFLAGS_IF_MASK) | KREG_EFLAGS_TF_MASK;

		if (mdb_tgt_vwrite(mdb.m_target, &fl, sizeof (kreg_t),
		    sp) != sizeof (kreg_t)) {
			warn("failed to update " FLAGS_REG_NAME
			    " at %p for popfl step\n", (void *)sp);
			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
		}
		break;
	}

	/* Fully-emulated instructions just need the PC advanced */
	if (emulated) {
		(void) kmdb_dpi_set_register("pc", npc);
		return (0);
	}

	/* Do the step with IF off, and TF (step) on */
	(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &oldfl);
	(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
	    ((oldfl | (1 << KREG_EFLAGS_TF_SHIFT)) & ~KREG_EFLAGS_IF_MASK));

	kmdb_dpi_resume_master(); /* ... there and back again ... */

	/* EFLAGS has now changed, and may require tuning */

	switch (instr) {
	case M_POPF:
		/*
		 * Use the EFLAGS we grabbed before the pop - see the pre-step
		 * M_POPF comment.
		 */
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, newfl);
		return (0);

	case M_PUSHF:
		/*
		 * We pushed our modified EFLAGS (with IF and TF turned off)
		 * onto the stack.  Replace the pushed version with our
		 * unmodified one.
		 */
		(void) kmdb_dpi_get_register("sp", &sp);

		if (mdb_tgt_vwrite(mdb.m_target, &oldfl, sizeof (kreg_t),
		    sp) != sizeof (kreg_t)) {
			warn("failed to update pushed " FLAGS_REG_NAME
			    " at %p after pushfl step\n", (void *)sp);
			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
		}

		/* Go back to using the EFLAGS we were using before the step */
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, oldfl);
		return (0);

	default:
		/*
		 * The stepped instruction may have altered EFLAGS.  We only
		 * really care about the value of IF, and we know the stepped
		 * instruction didn't alter it, so we can simply copy the
		 * pre-step value.  We'll also need to turn TF back off.
		 */
		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
		    ((fl & ~(KREG_EFLAGS_TF_MASK|KREG_EFLAGS_IF_MASK)) |
		    (oldfl & KREG_EFLAGS_IF_MASK)));
		return (0);
	}
}
607 
608 /*
609  * The target has already configured the chip for branch step, leaving us to
610  * actually make the machine go.  Due to a number of issues involving
611  * the potential alteration of system state via instructions like sti, cli,
612  * pushfl, and popfl, we're going to treat this like a normal system resume.
613  * All CPUs will be released, on the kernel's IDT.  Our primary concern is
614  * the alteration/storage of our TF'd EFLAGS via pushfl and popfl.  There's no
615  * real workaround - we don't have opcode breakpoints - so the best we can do is
616  * to ensure that the world won't end if someone does bad things to EFLAGS.
617  *
618  * Two things can happen:
619  *  1. EFLAGS.TF may be cleared, either maliciously or via a popfl from saved
620  *     state.  The CPU will continue execution beyond the branch, and will not
621  *     reenter the debugger unless brought/sent in by other means.
 *  2. Someone may pushfl the TF'd EFLAGS, and may stash a copy of it somewhere.
623  *     When the saved version is popfl'd back into place, the debugger will be
624  *     re-entered on a single-step trap.
625  */
626 static void
627 kaif_step_branch(void)
628 {
629 	kreg_t fl;
630 
631 	(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
632 	(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
633 	    (fl | (1 << KREG_EFLAGS_TF_SHIFT)));
634 
635 	kmdb_dpi_resume_master();
636 
637 	(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);
638 }
639 
/*
 * Invoke the kernel function at funcva with the given arguments, returning
 * its result.  The heavy lifting is done by the assembly-level kaif_invoke.
 */
/*ARGSUSED*/
static uintptr_t
kaif_call(uintptr_t funcva, uint_t argc, const uintptr_t argv[])
{
	return (kaif_invoke(funcva, argc, argv));
}
646 
647 static void
648 dump_crumb(kaif_crumb_t *krmp)
649 {
650 	kaif_crumb_t krm;
651 
652 	if (mdb_vread(&krm, sizeof (kaif_crumb_t), (uintptr_t)krmp) !=
653 	    sizeof (kaif_crumb_t)) {
654 		warn("failed to read crumb at %p", krmp);
655 		return;
656 	}
657 
658 	mdb_printf("state: ");
659 	switch (krm.krm_cpu_state) {
660 	case KAIF_CPU_STATE_MASTER:
661 		mdb_printf("M");
662 		break;
663 	case KAIF_CPU_STATE_SLAVE:
664 		mdb_printf("S");
665 		break;
666 	default:
667 		mdb_printf("%d", krm.krm_cpu_state);
668 	}
669 
670 	mdb_printf(" trapno %3d sp %08x flag %d pc %p %A\n",
671 	    krm.krm_trapno, krm.krm_sp, krm.krm_flag, krm.krm_pc, krm.krm_pc);
672 }
673 
674 static void
675 dump_crumbs(kaif_cpusave_t *save)
676 {
677 	int i;
678 
679 	for (i = KAIF_NCRUMBS; i > 0; i--) {
680 		uint_t idx = (save->krs_curcrumbidx + i) % KAIF_NCRUMBS;
681 		dump_crumb(&save->krs_crumbs[idx]);
682 	}
683 }
684 
685 static void
686 kaif_dump_crumbs(uintptr_t addr, int cpuid)
687 {
688 	int i;
689 
690 	if (addr != NULL) {
691 		/* dump_crumb will protect us against bogus addresses */
692 		dump_crumb((kaif_crumb_t *)addr);
693 
694 	} else if (cpuid != -1) {
695 		if (cpuid < 0 || cpuid >= kaif_ncpusave)
696 			return;
697 
698 		dump_crumbs(&kaif_cpusave[cpuid]);
699 
700 	} else {
701 		for (i = 0; i < kaif_ncpusave; i++) {
702 			kaif_cpusave_t *save = &kaif_cpusave[i];
703 
704 			if (save->krs_cpu_state == KAIF_CPU_STATE_NONE)
705 				continue;
706 
707 			mdb_printf("%sCPU %d crumbs: (curidx %d)\n",
708 			    (i == 0 ? "" : "\n"), i, save->krs_curcrumbidx);
709 
710 			dump_crumbs(save);
711 		}
712 	}
713 }
714 
/* Register the callback invoked on kernel module load/unload. */
static void
kaif_modchg_register(void (*func)(struct modctl *, int))
{
	kaif_modchg_cb = func;
}
720 
/* Cancel the module-change callback; one must currently be registered. */
static void
kaif_modchg_cancel(void)
{
	ASSERT(kaif_modchg_cb != NULL);

	kaif_modchg_cb = NULL;
}
728 
/* Notify the registered callback (if any) that a module was loaded. */
void
kaif_mod_loaded(struct modctl *modp)
{
	if (kaif_modchg_cb != NULL)
		kaif_modchg_cb(modp, 1);
}
735 
/* Notify the registered callback (if any) that a module is unloading. */
void
kaif_mod_unloading(struct modctl *modp)
{
	if (kaif_modchg_cb != NULL)
		kaif_modchg_cb(modp, 0);
}
742 
/*
 * On some processors, we'll need to clear a certain MSR before proceeding into
 * the debugger.  Complicating matters, this MSR must be cleared before we take
 * any branches.  We have patch points in every trap handler, which will cover
 * all entry paths for master CPUs.  We also have a patch point in the slave
 * entry code.
 *
 * The patch is a hand-assembled code fragment that saves the scratch
 * registers, loads the MSR number into %ecx, zeroes %edx:%eax, executes
 * wrmsr, and restores the registers.  The MSR number is spliced into the
 * movl immediate at the offset indicated by `patch' before the fragment is
 * installed.
 */
static void
kaif_msr_add_clrentry(uint_t msr)
{
#ifdef __amd64
	uchar_t code[] = {
		0x51, 0x50, 0x52,		/* pushq %rcx, %rax, %rdx */
		0xb9, 0x00, 0x00, 0x00, 0x00,	/* movl $MSRNUM, %ecx */
		0x31, 0xc0,			/* clr %eax */
		0x31, 0xd2,			/* clr %edx */
		0x0f, 0x30,			/* wrmsr */
		0x5a, 0x58, 0x59		/* popq %rdx, %rax, %rcx */
	};
	uchar_t *patch = &code[4];	/* immediate field of the movl */
#else
	uchar_t code[] = {
		0x60,				/* pushal */
		0xb9, 0x00, 0x00, 0x00, 0x00,	/* movl $MSRNUM, %ecx */
		0x31, 0xc0,			/* clr %eax */
		0x31, 0xd2,			/* clr %edx */
		0x0f, 0x30,			/* wrmsr */
		0x61				/* popal */
	};
	uchar_t *patch = &code[2];	/* immediate field of the movl */
#endif

	/* Splice the MSR number into the movl immediate */
	bcopy(&msr, patch, sizeof (uint32_t));

	/* Install the fragment at the IDT (trap handler) patch points... */
	kaif_idt_patch((caddr_t)code, sizeof (code));

	/* ...and at the slave-entry patch point */
	bcopy(code, &kaif_slave_entry_patch, sizeof (code));
}
781 
/*
 * Record an MSR (and the location of the value for it) to be written on the
 * way out of the debugger.  Only one such MSR is supported at a time.
 */
static void
kaif_msr_add_wrexit(uint_t msr, uint64_t *valp)
{
	kaif_msr_wrexit_msr = msr;
	kaif_msr_wrexit_valp = valp;
}
788 
789 static void
790 kaif_msr_add(const kmdb_msr_t *msrs)
791 {
792 	kmdb_msr_t *save;
793 	int nmsrs, i;
794 
795 	ASSERT(kaif_cpusave[0].krs_msr == NULL);
796 
797 	for (i = 0; msrs[i].msr_num != 0; i++) {
798 		switch (msrs[i].msr_type) {
799 		case KMDB_MSR_CLEARENTRY:
800 			kaif_msr_add_clrentry(msrs[i].msr_num);
801 			break;
802 
803 		case KMDB_MSR_WRITEDELAY:
804 			kaif_msr_add_wrexit(msrs[i].msr_num, msrs[i].msr_valp);
805 			break;
806 		}
807 	}
808 	nmsrs = i + 1; /* we want to copy the terminating kmdb_msr_t too */
809 
810 	save = mdb_zalloc(sizeof (kmdb_msr_t) * nmsrs * kaif_ncpusave,
811 	    UM_SLEEP);
812 
813 	for (i = 0; i < kaif_ncpusave; i++) {
814 		bcopy(msrs, &save[nmsrs * i], sizeof (kmdb_msr_t) * nmsrs);
815 		kaif_cpusave[i].krs_msr = &save[nmsrs * i];
816 	}
817 }
818 
819 static uint64_t
820 kaif_msr_get(int cpuid, uint_t num)
821 {
822 	kaif_cpusave_t *save;
823 	kmdb_msr_t *msr;
824 	int i;
825 
826 	if ((save = kaif_cpuid2save(cpuid)) == NULL)
827 		return (-1); /* errno is set for us */
828 
829 	msr = save->krs_msr;
830 
831 	for (i = 0; msr[i].msr_num != 0; i++) {
832 		if (msr[i].msr_num == num &&
833 		    (msr[i].msr_type & KMDB_MSR_READ))
834 			return (msr[i].msr_val);
835 	}
836 
837 	return (0);
838 }
839 
840 int
841 kaif_memrange_add(caddr_t base, size_t len)
842 {
843 	kaif_memrange_t *mr = &kaif_memranges[kaif_nmemranges];
844 
845 	if (kaif_nmemranges == KAIF_MEMRANGES_MAX)
846 		return (set_errno(ENOSPC));
847 
848 	/*
849 	 * In the unlikely event that someone is stepping through this routine,
850 	 * we need to make sure that kaif_memranges knows about the new range
851 	 * before umem gets it.  That way the entry code can recognize stacks
852 	 * allocated from the new region.
853 	 */
854 	mr->mr_base = base;
855 	mr->mr_lim = base + len - 1;
856 	kaif_nmemranges++;
857 
858 	if (mdb_umem_add(base, len) < 0) {
859 		kaif_nmemranges--;
860 		return (-1); /* errno is set for us */
861 	}
862 
863 	return (0);
864 }
865 
/* Point the CPU at the debugger's own IDT. */
void
kaif_trap_set_debugger(void)
{
	set_idt(&kaif_idtr);
}
871 
/* Restore the IDT that was saved in the given CPU's save area. */
void
kaif_trap_set_saved(kaif_cpusave_t *cpusave)
{
	set_idt(&cpusave->krs_idtr);
}
877 
/*
 * DPI initialization: allocate the per-CPU save areas, set up the debugger
 * IDT, and establish the initial values of the module-level state above.
 * Returns 0 on success, -1 with errno set if the kernel's sys_sysenter
 * symbol cannot be found.
 */
static int
kaif_init(kmdb_auxv_t *kav)
{
	int i;

	/* Allocate the per-CPU save areas */
	kaif_cpusave = mdb_zalloc(sizeof (kaif_cpusave_t) * kav->kav_ncpu,
	    UM_SLEEP);
	kaif_ncpusave = kav->kav_ncpu;

	for (i = 0; i < kaif_ncpusave; i++) {
		kaif_cpusave_t *save = &kaif_cpusave[i];

		save->krs_cpu_id = i;

		/* Start the crumb cursor at the buffer's last slot */
		save->krs_curcrumbidx = KAIF_NCRUMBS - 1;
		save->krs_curcrumb = &save->krs_crumbs[save->krs_curcrumbidx];
	}

	kaif_idt_init();

	/* The initial selector set.  Updated by the debugger-entry code */
#ifndef __amd64
	kaif_cs = BOOTCODE_SEL;
	kaif_ds = kaif_fs = kaif_gs = BOOTFLAT_SEL;
#endif

	/* The debugger's own data segment is the first known memory range */
	kaif_memranges[0].mr_base = kav->kav_dseg;
	kaif_memranges[0].mr_lim = kav->kav_dseg + kav->kav_dseg_size - 1;
	kaif_nmemranges = 1;

	kaif_modchg_cb = NULL;

	/* No hardware watchpoint slots are in use yet */
	kaif_waptmap = 0;

	kaif_drreg.dr_ctl = KREG_DRCTL_RESERVED;
	kaif_drreg.dr_stat = KREG_DRSTAT_RESERVED;

	kaif_msr_wrexit_msr = 0;
	kaif_msr_wrexit_valp = NULL;

	kaif_trap_switch = (kav->kav_flags & KMDB_AUXV_FL_NOTRPSWTCH) == 0;

	if ((kaif_sys_sysenter = kmdb_kdi_lookup_by_name("unix",
	    "sys_sysenter")) == NULL)
		return (set_errno(ENOENT));

	return (0);
}
926 
/*
 * The ops vector this module exports to the DPI layer.  Entries are
 * positional and must stay in the order dpi_ops_t declares them.
 */
dpi_ops_t kmdb_dpi_ops = {
	kaif_init,
	kaif_activate,
	kaif_deactivate,
	kaif_enter_mon,
	kaif_modchg_register,
	kaif_modchg_cancel,
	kaif_get_cpu_state,
	kaif_get_master_cpuid,
	kaif_get_gregs,
	kaif_get_register,
	kaif_set_register,
	kaif_brkpt_arm,
	kaif_brkpt_disarm,
	kaif_wapt_validate,
	kaif_wapt_reserve,
	kaif_wapt_release,
	kaif_wapt_arm,
	kaif_wapt_disarm,
	kaif_wapt_match,
	kaif_step,
	kaif_step_branch,
	kaif_call,
	kaif_dump_crumbs,
	kaif_memrange_add,
	kaif_msr_add,
	kaif_msr_get,
};
955