xref: /titanic_41/usr/src/cmd/mdb/intel/kmdb/kaif.c (revision 70025d765b044c6d8594bb965a2247a61e991a99)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * The debugger/"PROM" interface layer
31  *
32  * (it makes more sense on SPARC)
33  */
34 
35 #include <kmdb/kmdb_dpi_impl.h>
36 #include <kmdb/kmdb_kdi.h>
37 #include <kmdb/kmdb_umemglue.h>
38 #include <kmdb/kaif.h>
39 #include <kmdb/kaif_asmutil.h>
40 #include <mdb/mdb_err.h>
41 #include <mdb/mdb_debug.h>
42 #include <mdb/mdb_isautil.h>
43 #include <mdb/mdb_io_impl.h>
44 #include <mdb/mdb_kreg.h>
45 #include <mdb/mdb.h>
46 
47 #include <sys/types.h>
48 #include <sys/segments.h>
49 #include <sys/bitmap.h>
50 
/* Per-CPU register/state save areas, and the number of CPUs covered. */
kaif_cpusave_t	*kaif_cpusave;
int		kaif_ncpusave;

/* Debugger's shadow copy of the hardware debug registers. */
kaif_drreg_t	kaif_drreg;

/* Bitmap of hardware watchpoint IDs currently reserved. */
uint32_t	kaif_waptmap;

#ifndef __amd64
/* Used to track the current set of valid kernel selectors. */
uint32_t	kaif_cs;
uint32_t	kaif_ds;
uint32_t	kaif_fs;
uint32_t	kaif_gs;
#endif

/* MSR number/value pointer for the write-on-exit path (see kaif_msr_add). */
uint_t		kaif_msr_wrexit_msr;
uint64_t	*kaif_msr_wrexit_valp;

uintptr_t	kaif_kernel_handler;
uintptr_t	kaif_sys_sysenter;

int		kaif_trap_switch;

/* Callback invoked on kernel module load/unload, if registered. */
void (*kaif_modchg_cb)(struct modctl *, int);

#define	KAIF_MEMRANGES_MAX	2

/* Memory ranges the debugger recognizes as its own (see kaif_memrange_add). */
kaif_memrange_t	kaif_memranges[KAIF_MEMRANGES_MAX];
int		kaif_nmemranges;

/* Opcode bytes that get special treatment in the single-stepper. */
enum {
	M_SYSRET	= 0x07, /* after M_ESC */
	M_ESC		= 0x0f,
	M_SYSEXIT	= 0x35, /* after M_ESC */
	M_REX_LO	= 0x40, /* first REX prefix */
	M_REX_HI	= 0x4f, /* last REX prefix */
	M_PUSHF		= 0x9c,	/* pushfl and pushfq */
	M_POPF		= 0x9d,	/* popfl and popfq */
	M_INT3		= 0xcc,
	M_INTX		= 0xcd,
	M_INTO		= 0xce,
	M_IRET		= 0xcf,
	M_CLI		= 0xfa,
	M_STI		= 0xfb
};

#define	KAIF_BREAKPOINT_INSTR	M_INT3

/* Recover the hardware watchpoint ID stashed in a watchpoint's wp_priv. */
#define	KAIF_WPPRIV2ID(wp)	(int)(uintptr_t)((wp)->wp_priv)

#ifdef __amd64
#define	FLAGS_REG_NAME		"rflags"
#else
#define	FLAGS_REG_NAME		"eflags"
#endif
106 
107 /*
108  * Called during normal debugger operation and during debugger faults.
109  */
110 static void
111 kaif_enter_mon(void)
112 {
113 	char c;
114 
115 	for (;;) {
116 		mdb_iob_printf(mdb.m_out,
117 		    "%s: Do you really want to reboot? (y/n) ",
118 		    mdb.m_pname);
119 		mdb_iob_flush(mdb.m_out);
120 
121 		while (IOP_READ(mdb.m_term, &c, 1) != 1)
122 			continue;
123 		mdb_iob_printf(mdb.m_out, "%c%s", c, (c == '\n' ? "" : "\n"));
124 
125 		if (c == 'n' || c == 'N')
126 			return;
127 		else if (c == 'y' || c == 'Y') {
128 			mdb_iob_printf(mdb.m_out, "Rebooting...\n");
129 
130 			kmdb_dpi_reboot();
131 		}
132 	}
133 }
134 
135 static int
136 kaif_get_cpu_state(int cpuid)
137 {
138 	if (cpuid == DPI_MASTER_CPUID)
139 		return (DPI_CPU_STATE_MASTER);
140 
141 	if (cpuid < 0 || cpuid >= kaif_ncpusave)
142 		return (set_errno(EINVAL));
143 
144 	switch (kaif_cpusave[cpuid].krs_cpu_state) {
145 	case KAIF_CPU_STATE_MASTER:
146 		return (DPI_CPU_STATE_MASTER);
147 	case KAIF_CPU_STATE_SLAVE:
148 		return (DPI_CPU_STATE_SLAVE);
149 	default:
150 		return (set_errno(EINVAL));
151 	}
152 }
153 
/* Return the ID of the CPU currently acting as debugger master. */
static int
kaif_get_master_cpuid(void)
{
	return (kaif_master_cpuid);
}
159 
160 static const mdb_tgt_gregset_t *
161 kaif_get_gregs(int cpuid)
162 {
163 	if (cpuid == DPI_MASTER_CPUID)
164 		cpuid = kaif_master_cpuid;
165 
166 	if (cpuid < 0 || cpuid >= kaif_ncpusave) {
167 		(void) set_errno(EINVAL);
168 		return (NULL);
169 	}
170 
171 	return (kaif_cpusave[cpuid].krs_gregs);
172 }
173 
/* Mapping from a user-friendly register synonym to its real name. */
typedef struct kaif_reg_synonyms {
	const char *rs_syn;	/* synonym, e.g. "pc" */
	const char *rs_name;	/* ISA register name, e.g. "rip"/"eip" */
} kaif_reg_synonyms_t;
178 
179 static kreg_t *
180 kaif_find_regp(int cpuid, const char *regname)
181 {
182 	static const kaif_reg_synonyms_t synonyms[] = {
183 #ifdef __amd64
184 	    { "pc", "rip" },
185 	    { "sp", "rsp" },
186 	    { "fp", "rbp" },
187 #else
188 	    { "pc", "eip" },
189 	    { "sp", "esp" },
190 	    { "fp", "ebp" },
191 #endif
192 	    { "tt", "trapno" }
193 	};
194 	int i;
195 
196 	if (cpuid == DPI_MASTER_CPUID)
197 		cpuid = kaif_master_cpuid;
198 
199 	if (cpuid < 0 || cpuid >= kaif_ncpusave) {
200 		(void) set_errno(EINVAL);
201 		return (NULL);
202 	}
203 
204 	for (i = 0; i < sizeof (synonyms) / sizeof (synonyms[0]); i++) {
205 		if (strcmp(synonyms[i].rs_syn, regname) == 0)
206 			regname = synonyms[i].rs_name;
207 	}
208 
209 	for (i = 0; mdb_isa_kregs[i].rd_name != NULL; i++) {
210 		const mdb_tgt_regdesc_t *rd = &mdb_isa_kregs[i];
211 
212 		if (strcmp(rd->rd_name, regname) == 0)
213 			return (&kaif_cpusave[cpuid].krs_gregs->
214 			    kregs[rd->rd_num]);
215 	}
216 
217 	(void) set_errno(ENOENT);
218 	return (NULL);
219 }
220 
221 /*ARGSUSED*/
222 static int
223 kaif_get_cpu_register(int cpuid, const char *regname, kreg_t *valp)
224 {
225 	kreg_t *regp;
226 
227 	if ((regp = kaif_find_regp(cpuid, regname)) == NULL)
228 		return (-1);
229 
230 	*valp = *regp;
231 
232 	return (0);
233 }
234 
235 static int
236 kaif_set_cpu_register(int cpuid, const char *regname, kreg_t val)
237 {
238 	kreg_t *regp;
239 
240 	if ((regp = kaif_find_regp(cpuid, regname)) == NULL)
241 		return (-1);
242 
243 	*regp = val;
244 
245 	return (0);
246 }
247 
248 static int
249 kaif_brkpt_arm(uintptr_t addr, mdb_instr_t *instrp)
250 {
251 	mdb_instr_t bkpt = KAIF_BREAKPOINT_INSTR;
252 
253 	if (mdb_tgt_vread(mdb.m_target, instrp, sizeof (mdb_instr_t), addr) !=
254 	    sizeof (mdb_instr_t))
255 		return (-1); /* errno is set for us */
256 
257 	if (mdb_tgt_vwrite(mdb.m_target, &bkpt, sizeof (mdb_instr_t), addr) !=
258 	    sizeof (mdb_instr_t))
259 		return (-1); /* errno is set for us */
260 
261 	return (0);
262 }
263 
264 static int
265 kaif_brkpt_disarm(uintptr_t addr, mdb_instr_t instrp)
266 {
267 	if (mdb_tgt_vwrite(mdb.m_target, &instrp, sizeof (mdb_instr_t), addr) !=
268 	    sizeof (mdb_instr_t))
269 		return (-1); /* errno is set for us */
270 
271 	return (0);
272 }
273 
274 /*
275  * Intel watchpoints are even more fun than SPARC ones.  The Intel architecture
276  * manuals refer to watchpoints as breakpoints.  For consistency with the
277  * terminology used in other portions of kmdb, we will, however, refer to them
278  * as watchpoints.
279  *
280  * Execute, data write, I/O read/write, and data read/write watchpoints are
281  * supported by the hardware.  Execute watchpoints must be one byte in length,
282  * and must be placed on the first byte of the instruction to be watched.
283  * Lengths of other watchpoints are more varied.
284  *
285  * Given that we already have a breakpoint facility, and given the restrictions
286  * placed on execute watchpoints, we're going to disallow the creation of
287  * execute watchpoints.  The others will be fully supported.  See the Debugging
288  * chapter in both the IA32 and AMD64 System Programming books for more details.
289  */
290 
291 #ifdef __amd64
292 #define	WAPT_DATA_MAX_SIZE	8
293 #define	WAPT_DATA_SIZES_MSG	"1, 2, 4, or 8"
294 #else
295 #define	WAPT_DATA_MAX_SIZE	4
296 #define	WAPT_DATA_SIZES_MSG	"1, 2, or 4"
297 #endif
298 
299 static int
300 kaif_wapt_validate(kmdb_wapt_t *wp)
301 {
302 	if (wp->wp_type == DPI_WAPT_TYPE_IO) {
303 		if (wp->wp_wflags != (MDB_TGT_WA_R | MDB_TGT_WA_W)) {
304 			warn("I/O port watchpoints must be read/write\n");
305 			return (set_errno(EINVAL));
306 		}
307 
308 		if (wp->wp_size != 1 && wp->wp_size != 2 && wp->wp_size != 4) {
309 			warn("I/O watchpoint size must be 1, 2, or 4 bytes\n");
310 			return (set_errno(EINVAL));
311 		}
312 
313 	} else if (wp->wp_type == DPI_WAPT_TYPE_PHYS) {
314 		warn("physical address watchpoints are not supported on this "
315 		    "platform\n");
316 		return (set_errno(EMDB_TGTHWNOTSUP));
317 
318 	} else {
319 		if (wp->wp_wflags != (MDB_TGT_WA_R | MDB_TGT_WA_W) &&
320 		    wp->wp_wflags != MDB_TGT_WA_W) {
321 			warn("watchpoints must be read/write or write-only\n");
322 			return (set_errno(EINVAL));
323 		}
324 
325 		if ((wp->wp_size & -(wp->wp_size)) != wp->wp_size ||
326 		    wp->wp_size > WAPT_DATA_MAX_SIZE) {
327 			warn("data watchpoint size must be " WAPT_DATA_SIZES_MSG
328 			    " bytes\n");
329 			return (set_errno(EINVAL));
330 		}
331 
332 	}
333 
334 	if (wp->wp_addr & (wp->wp_size - 1)) {
335 		warn("%lu-byte watchpoints must be %lu-byte aligned\n",
336 		    (ulong_t)wp->wp_size, (ulong_t)wp->wp_size);
337 		return (set_errno(EINVAL));
338 	}
339 
340 	return (0);
341 }
342 
343 static int
344 kaif_wapt_reserve(kmdb_wapt_t *wp)
345 {
346 	int id;
347 
348 	for (id = 0; id <= KREG_MAXWPIDX; id++) {
349 		if (!BT_TEST(&kaif_waptmap, id)) {
350 			/* found one */
351 			BT_SET(&kaif_waptmap, id);
352 			wp->wp_priv = (void *)(uintptr_t)id;
353 			return (0);
354 		}
355 	}
356 
357 	return (set_errno(EMDB_WPTOOMANY));
358 }
359 
/* Return a watchpoint's hardware debug register to the free pool. */
static void
kaif_wapt_release(kmdb_wapt_t *wp)
{
	int id = KAIF_WPPRIV2ID(wp);

	/* The register must have been reserved by kaif_wapt_reserve. */
	ASSERT(BT_TEST(&kaif_waptmap, id));
	BT_CLEAR(&kaif_waptmap, id);
}
368 
369 /*ARGSUSED*/
370 static void
371 kaif_wapt_arm(kmdb_wapt_t *wp)
372 {
373 	uint_t rw;
374 	int hwid = KAIF_WPPRIV2ID(wp);
375 
376 	ASSERT(BT_TEST(&kaif_waptmap, hwid));
377 
378 	if (wp->wp_type == DPI_WAPT_TYPE_IO)
379 		rw = KREG_DRCTL_WP_IORW;
380 	else if (wp->wp_wflags & MDB_TGT_WA_R)
381 		rw = KREG_DRCTL_WP_RW;
382 	else if (wp->wp_wflags & MDB_TGT_WA_X)
383 		rw = KREG_DRCTL_WP_EXEC;
384 	else
385 		rw = KREG_DRCTL_WP_WONLY;
386 
387 	kaif_drreg.dr_addr[hwid] = wp->wp_addr;
388 
389 	kaif_drreg.dr_ctl &= ~KREG_DRCTL_WP_MASK(hwid);
390 	kaif_drreg.dr_ctl |= KREG_DRCTL_WP_LENRW(hwid, wp->wp_size - 1, rw);
391 	kaif_drreg.dr_ctl |= KREG_DRCTL_WPEN(hwid);
392 }
393 
394 /*ARGSUSED*/
395 static void
396 kaif_wapt_disarm(kmdb_wapt_t *wp)
397 {
398 	int hwid = KAIF_WPPRIV2ID(wp);
399 
400 	ASSERT(BT_TEST(&kaif_waptmap, hwid));
401 
402 	kaif_drreg.dr_addr[hwid] = 0;
403 	kaif_drreg.dr_ctl &= ~(KREG_DRCTL_WP_MASK(hwid) |
404 	    KREG_DRCTL_WPEN_MASK(hwid));
405 }
406 
407 /*ARGSUSED*/
408 static int
409 kaif_wapt_match(kmdb_wapt_t *wp)
410 {
411 	int hwid = KAIF_WPPRIV2ID(wp);
412 	uint32_t mask = KREG_DRSTAT_WP_MASK(hwid);
413 	int n = 0;
414 	int i;
415 
416 	ASSERT(BT_TEST(&kaif_waptmap, hwid));
417 
418 	for (i = 0; i < kaif_ncpusave; i++)
419 		n += (kaif_cpusave[i].krs_dr.dr_stat & mask) != 0;
420 
421 	return (n);
422 }
423 
/*
 * Single-step the master CPU past the instruction at %pc.  Most
 * instructions are stepped directly with the trap flag (TF) set and
 * interrupts (IF) masked; cli and sti are emulated instead, pushfl/popfl
 * get their stack images patched up so debugger flag state never leaks,
 * and iret/int/sysret/sysexit are refused outright.  Returns 0 on
 * success, -1 with errno set on failure.
 */
static int
kaif_step(void)
{
	kreg_t pc, fl, oldfl, newfl, sp;
	mdb_tgt_addr_t npc;
	mdb_instr_t instr;
	int emulated = 0, rchk = 0;
	size_t pcoff = 0;

	(void) kmdb_dpi_get_register("pc", &pc);

	/* Decode the next instruction boundary; failure means we can't step */
	if ((npc = mdb_dis_nextins(mdb.m_disasm, mdb.m_target,
	    MDB_TGT_AS_VIRT, pc)) == pc) {
		warn("failed to decode instruction at %a for step\n", pc);
		return (set_errno(EINVAL));
	}

	/*
	 * Stepping behavior depends on the type of instruction.  It does not
	 * depend on the presence of a REX prefix, as the action we take for a
	 * given instruction doesn't currently vary for 32-bit instructions
	 * versus their 64-bit counterparts.
	 *
	 * Read the opcode byte; if it is a REX prefix (0x40-0x4f), read one
	 * more byte past it (rchk limits us to skipping a single prefix).
	 */
	do {
		if (mdb_tgt_vread(mdb.m_target, &instr, sizeof (mdb_instr_t),
		    pc + pcoff) != sizeof (mdb_instr_t)) {
			warn("failed to read at %p for step",
			    (void *)(pc + pcoff));
			return (-1);
		}
	} while (pcoff++, (instr >= M_REX_LO && instr <= M_REX_HI && !rchk++));

	switch (instr) {
	case M_IRET:
		warn("iret cannot be stepped\n");
		return (set_errno(EMDB_TGTNOTSUP));

	case M_INT3:
	case M_INTX:
	case M_INTO:
		warn("int cannot be stepped\n");
		return (set_errno(EMDB_TGTNOTSUP));

	case M_ESC:
		/* Two-byte opcode: examine the byte following the 0x0f escape */
		if (mdb_tgt_vread(mdb.m_target, &instr, sizeof (mdb_instr_t),
		    pc + pcoff) != sizeof (mdb_instr_t)) {
			warn("failed to read at %p for step",
			    (void *)(pc + pcoff));
			return (-1);
		}

		switch (instr) {
		case M_SYSRET:
			warn("sysret cannot be stepped\n");
			return (set_errno(EMDB_TGTNOTSUP));
		case M_SYSEXIT:
			warn("sysexit cannot be stepped\n");
			return (set_errno(EMDB_TGTNOTSUP));
		}
		break;

	/*
	 * Some instructions need to be emulated.  We need to prevent direct
	 * manipulations of EFLAGS, so we'll emulate cli, sti.  pushfl and
	 * popfl also receive special handling, as they manipulate both EFLAGS
	 * and %esp.
	 */
	case M_CLI:
		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
		fl &= ~KREG_EFLAGS_IF_MASK;
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);

		emulated = 1;
		break;

	case M_STI:
		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
		fl |= (1 << KREG_EFLAGS_IF_SHIFT);
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);

		emulated = 1;
		break;

	case M_POPF:
		/*
		 * popfl will restore a pushed EFLAGS from the stack, and could
		 * in so doing cause IF to be turned on, if only for a brief
		 * period.  To avoid this, we'll secretly replace the stack's
		 * EFLAGS with our decaffeinated brand.  We'll then manually
		 * load our EFLAGS copy with the real version after the step.
		 */
		(void) kmdb_dpi_get_register("sp", &sp);
		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);

		/* newfl holds the real flags image the guest wanted popped */
		if (mdb_tgt_vread(mdb.m_target, &newfl, sizeof (kreg_t),
		    sp) != sizeof (kreg_t)) {
			warn("failed to read " FLAGS_REG_NAME
			    " at %p for popfl step\n", (void *)sp);
			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
		}

		/* replacement image: IF off, TF (single-step) on */
		fl = (fl & ~KREG_EFLAGS_IF_MASK) | KREG_EFLAGS_TF_MASK;

		if (mdb_tgt_vwrite(mdb.m_target, &fl, sizeof (kreg_t),
		    sp) != sizeof (kreg_t)) {
			warn("failed to update " FLAGS_REG_NAME
			    " at %p for popfl step\n", (void *)sp);
			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
		}
		break;
	}

	if (emulated) {
		/* cli/sti were handled above; just advance %pc past them */
		(void) kmdb_dpi_set_register("pc", npc);
		return (0);
	}

	/* Do the step with IF off, and TF (step) on */
	(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &oldfl);
	(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
	    ((oldfl | (1 << KREG_EFLAGS_TF_SHIFT)) & ~KREG_EFLAGS_IF_MASK));

	kmdb_dpi_resume_master(); /* ... there and back again ... */

	/* EFLAGS has now changed, and may require tuning */

	switch (instr) {
	case M_POPF:
		/*
		 * Use the EFLAGS we grabbed before the pop - see the pre-step
		 * M_POPF comment.
		 */
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, newfl);
		return (0);

	case M_PUSHF:
		/*
		 * We pushed our modified EFLAGS (with TF turned on and IF
		 * turned off) onto the stack.  Replace the pushed version
		 * with our unmodified one.
		 */
		(void) kmdb_dpi_get_register("sp", &sp);

		if (mdb_tgt_vwrite(mdb.m_target, &oldfl, sizeof (kreg_t),
		    sp) != sizeof (kreg_t)) {
			warn("failed to update pushed " FLAGS_REG_NAME
			    " at %p after pushfl step\n", (void *)sp);
			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
		}

		/* Go back to using the EFLAGS we were using before the step */
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, oldfl);
		return (0);

	default:
		/*
		 * The stepped instruction may have altered EFLAGS.  We only
		 * really care about the value of IF, and we know the stepped
		 * instruction didn't alter it, so we can simply copy the
		 * pre-step value.  We'll also need to turn TF back off.
		 */
		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
		    ((fl & ~(KREG_EFLAGS_TF_MASK|KREG_EFLAGS_IF_MASK)) |
		    (oldfl & KREG_EFLAGS_IF_MASK)));
		return (0);
	}
}
592 
/*
 * The target has already configured the chip for branch step, leaving us to
 * actually make the machine go.  Due to a number of issues involving
 * the potential alteration of system state via instructions like sti, cli,
 * pushfl, and popfl, we're going to treat this like a normal system resume.
 * All CPUs will be released, on the kernel's IDT.  Our primary concern is
 * the alteration/storage of our TF'd EFLAGS via pushfl and popfl.  There's no
 * real workaround - we don't have opcode breakpoints - so the best we can do is
 * to ensure that the world won't end if someone does bad things to EFLAGS.
 *
 * Two things can happen:
 *  1. EFLAGS.TF may be cleared, either maliciously or via a popfl from saved
 *     state.  The CPU will continue execution beyond the branch, and will not
 *     reenter the debugger unless brought/sent in by other means.
 *  2. Someone may pushfl the TF'd EFLAGS, and may stash a copy of it somewhere.
 *     When the saved version is popfl'd back into place, the debugger will be
 *     re-entered on a single-step trap.
 */
static void
kaif_step_branch(void)
{
	kreg_t fl;

	/* Turn on single-step (TF), resume the master, restore the flags. */
	(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
	(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
	    (fl | (1 << KREG_EFLAGS_TF_SHIFT)));

	kmdb_dpi_resume_master();

	(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);
}
624 
/*
 * Invoke the function at funcva with the given argument vector via the
 * low-level kaif_invoke trampoline, returning its return value.
 */
/*ARGSUSED*/
static uintptr_t
kaif_call(uintptr_t funcva, uint_t argc, const uintptr_t argv[])
{
	return (kaif_invoke(funcva, argc, argv));
}
631 
632 static void
633 dump_crumb(kaif_crumb_t *crumb)
634 {
635 	mdb_printf("state: ");
636 	switch (crumb->krm_cpu_state) {
637 	case KAIF_CPU_STATE_MASTER:
638 		mdb_printf("M");
639 		break;
640 	case KAIF_CPU_STATE_SLAVE:
641 		mdb_printf("S");
642 		break;
643 	default:
644 		mdb_printf("%d", crumb->krm_cpu_state);
645 	}
646 
647 	mdb_printf(" trapno %3d sp %08x flag %d pc %p %A\n",
648 	    crumb->krm_trapno, crumb->krm_sp, crumb->krm_flag,
649 	    crumb->krm_pc, crumb->krm_pc);
650 }
651 
652 static void
653 dump_crumbs(kaif_cpusave_t *save)
654 {
655 	int i;
656 
657 	for (i = KAIF_NCRUMBS; i > 0; i--) {
658 		uint_t idx = (save->krs_curcrumbidx + i) % KAIF_NCRUMBS;
659 
660 		dump_crumb(&save->krs_crumbs[idx]);
661 	}
662 }
663 
664 static void
665 kaif_dump_crumbs(uintptr_t addr, int cpuid)
666 {
667 	int i;
668 
669 	if (addr != NULL) {
670 		dump_crumb((kaif_crumb_t *)addr);
671 
672 	} else if (cpuid != -1) {
673 		if (cpuid >= kaif_ncpusave)
674 			return;
675 
676 		dump_crumbs(&kaif_cpusave[cpuid]);
677 
678 	} else {
679 		for (i = 0; i < kaif_ncpusave; i++) {
680 			kaif_cpusave_t *save = &kaif_cpusave[i];
681 
682 			if (save->krs_cpu_state == KAIF_CPU_STATE_NONE)
683 				continue;
684 
685 			mdb_printf("%sCPU %d crumbs: (curidx %d)\n",
686 			    (i == 0 ? "" : "\n"), i, save->krs_curcrumbidx);
687 
688 			dump_crumbs(save);
689 		}
690 	}
691 }
692 
/* Register the callback invoked on kernel module load/unload events. */
static void
kaif_modchg_register(void (*func)(struct modctl *, int))
{
	kaif_modchg_cb = func;
}
698 
/* Deregister the module-change callback; one must currently be set. */
static void
kaif_modchg_cancel(void)
{
	ASSERT(kaif_modchg_cb != NULL);

	kaif_modchg_cb = NULL;
}
706 
/* Notify the registered callback, if any, that modp has been loaded. */
void
kaif_mod_loaded(struct modctl *modp)
{
	if (kaif_modchg_cb != NULL)
		kaif_modchg_cb(modp, 1);
}
713 
/* Notify the registered callback, if any, that modp is being unloaded. */
void
kaif_mod_unloading(struct modctl *modp)
{
	if (kaif_modchg_cb != NULL)
		kaif_modchg_cb(modp, 0);
}
720 
/*
 * On some processors, we'll need to clear a certain MSR before proceeding into
 * the debugger.  Complicating matters, this MSR must be cleared before we take
 * any branches.  We have patch points in every trap handler, which will cover
 * all entry paths for master CPUs.  We also have a patch point in the slave
 * entry code.
 *
 * The code[] array below is a template wrmsr sequence (clear %eax:%edx,
 * load the MSR number into %ecx, wrmsr) whose 4-byte immediate gets
 * patched with the caller's MSR number before installation.
 */
static void
kaif_msr_add_clrentry(uint_t msr)
{
#ifdef __amd64
	uchar_t code[] = {
		0x51, 0x50, 0x52,		/* pushq %rcx, %rax, %rdx */
		0xb9, 0x00, 0x00, 0x00, 0x00,	/* movl $MSRNUM, %ecx */
		0x31, 0xc0,			/* clr %eax */
		0x31, 0xd2,			/* clr %edx */
		0x0f, 0x30,			/* wrmsr */
		0x5a, 0x58, 0x59		/* popq %rdx, %rax, %rcx */
	};
	uchar_t *patch = &code[4];	/* the movl immediate (MSR number) */
#else
	uchar_t code[] = {
		0x60,				/* pushal */
		0xb9, 0x00, 0x00, 0x00, 0x00,	/* movl $MSRNUM, %ecx */
		0x31, 0xc0,			/* clr %eax */
		0x31, 0xd2,			/* clr %edx */
		0x0f, 0x30,			/* wrmsr */
		0x61				/* popal */
	};
	uchar_t *patch = &code[2];	/* the movl immediate (MSR number) */
#endif

	/* Stuff the caller's MSR number into the movl immediate. */
	bcopy(&msr, patch, sizeof (uint32_t));

	/* Install the sequence at the trap-handler patch points ... */
	kaif_idt_patch((caddr_t)code, sizeof (code));

	/* ... and at the slave-entry patch point. */
	bcopy(code, &kaif_slave_entry_patch, sizeof (code));
}
759 
/*
 * Record the MSR number and value pointer for the write-on-exit path
 * (KMDB_MSR_WRITEDELAY in kaif_msr_add); the actual MSR write is
 * performed elsewhere, when the debugger resumes the world.
 */
static void
kaif_msr_add_wrexit(uint_t msr, uint64_t *valp)
{
	kaif_msr_wrexit_msr = msr;
	kaif_msr_wrexit_valp = valp;
}
766 
767 static void
768 kaif_msr_add(const kmdb_msr_t *msrs)
769 {
770 	kmdb_msr_t *save;
771 	int nmsrs, i;
772 
773 	ASSERT(kaif_cpusave[0].krs_msr == NULL);
774 
775 	for (i = 0; msrs[i].msr_num != 0; i++) {
776 		switch (msrs[i].msr_type) {
777 		case KMDB_MSR_CLEARENTRY:
778 			kaif_msr_add_clrentry(msrs[i].msr_num);
779 			break;
780 
781 		case KMDB_MSR_WRITEDELAY:
782 			kaif_msr_add_wrexit(msrs[i].msr_num, msrs[i].msr_valp);
783 			break;
784 		}
785 	}
786 	nmsrs = i + 1; /* we want to copy the terminating kmdb_msr_t too */
787 
788 	save = mdb_zalloc(sizeof (kmdb_msr_t) * nmsrs * kaif_ncpusave,
789 	    UM_SLEEP);
790 
791 	for (i = 0; i < kaif_ncpusave; i++) {
792 		bcopy(msrs, &save[nmsrs * i], sizeof (kmdb_msr_t) * nmsrs);
793 		kaif_cpusave[i].krs_msr = &save[nmsrs * i];
794 	}
795 }
796 
797 static uint64_t
798 kaif_msr_get(int cpuid, uint_t num)
799 {
800 	kmdb_msr_t *msr;
801 	int i;
802 
803 	if (cpuid == DPI_MASTER_CPUID)
804 		cpuid = kaif_master_cpuid;
805 	msr = kaif_cpusave[cpuid].krs_msr;
806 
807 	for (i = 0; msr[i].msr_num != 0; i++) {
808 		if (msr[i].msr_num == num &&
809 		    (msr[i].msr_type & KMDB_MSR_READ))
810 			return (msr[i].msr_val);
811 	}
812 
813 	return (0);
814 }
815 
816 int
817 kaif_memrange_add(caddr_t base, size_t len)
818 {
819 	kaif_memrange_t *mr = &kaif_memranges[kaif_nmemranges];
820 
821 	if (kaif_nmemranges == KAIF_MEMRANGES_MAX)
822 		return (set_errno(ENOSPC));
823 
824 	/*
825 	 * In the unlikely event that someone is stepping through this routine,
826 	 * we need to make sure that kaif_memranges knows about the new range
827 	 * before umem gets it.  That way the entry code can recognize stacks
828 	 * allocated from the new region.
829 	 */
830 	mr->mr_base = base;
831 	mr->mr_lim = base + len - 1;
832 	kaif_nmemranges++;
833 
834 	if (mdb_umem_add(base, len) < 0) {
835 		kaif_nmemranges--;
836 		return (-1); /* errno is set for us */
837 	}
838 
839 	return (0);
840 }
841 
/* Switch this CPU onto the debugger's IDT. */
void
kaif_trap_set_debugger(void)
{
	set_idt(&kaif_idtr);
}
847 
/* Switch this CPU back to the IDT recorded in its save area. */
void
kaif_trap_set_saved(kaif_cpusave_t *cpusave)
{
	set_idt(&cpusave->krs_idtr);
}
853 
854 static int
855 kaif_init(kmdb_auxv_t *kav)
856 {
857 	int i;
858 
859 	/* Allocate the per-CPU save areas */
860 	kaif_cpusave = mdb_zalloc(sizeof (kaif_cpusave_t) * kav->kav_ncpu,
861 	    UM_SLEEP);
862 	kaif_ncpusave = kav->kav_ncpu;
863 
864 	for (i = 0; i < kaif_ncpusave; i++) {
865 		kaif_cpusave[i].krs_cpu_id = i;
866 
867 		kaif_cpusave[i].krs_curcrumb =
868 		    &kaif_cpusave[i].krs_crumbs[KAIF_NCRUMBS - 1];
869 		kaif_cpusave[i].krs_curcrumbidx = KAIF_NCRUMBS - 1;
870 	}
871 
872 	kaif_idt_init();
873 
874 	/* The initial selector set.  Updated by the debugger-entry code */
875 #ifndef __amd64
876 	kaif_cs = BOOTCODE_SEL;
877 	kaif_ds = kaif_fs = kaif_gs = BOOTFLAT_SEL;
878 #endif
879 
880 	kaif_memranges[0].mr_base = kav->kav_dseg;
881 	kaif_memranges[0].mr_lim = kav->kav_dseg + kav->kav_dseg_size - 1;
882 	kaif_nmemranges = 1;
883 
884 	kaif_modchg_cb = NULL;
885 
886 	kaif_waptmap = 0;
887 
888 	kaif_drreg.dr_ctl = KREG_DRCTL_RESERVED;
889 	kaif_drreg.dr_stat = KREG_DRSTAT_RESERVED;
890 
891 	kaif_msr_wrexit_msr = 0;
892 	kaif_msr_wrexit_valp = NULL;
893 
894 	kaif_trap_switch = (kav->kav_flags & KMDB_AUXV_FL_NOTRPSWTCH) == 0;
895 
896 	if ((kaif_sys_sysenter = kmdb_kdi_lookup_by_name("unix",
897 	    "sys_sysenter")) == NULL)
898 		return (set_errno(ENOENT));
899 
900 	return (0);
901 }
902 
/*
 * The DPI operations vector handed to the debugger core.  This is a
 * positional initializer, so entries must remain in the order declared
 * by dpi_ops_t.
 */
dpi_ops_t kmdb_dpi_ops = {
	kaif_init,
	kaif_activate,
	kaif_deactivate,
	kaif_enter_mon,
	kaif_modchg_register,
	kaif_modchg_cancel,
	kaif_get_cpu_state,
	kaif_get_master_cpuid,
	kaif_get_gregs,
	kaif_get_cpu_register,
	kaif_set_cpu_register,
	kaif_brkpt_arm,
	kaif_brkpt_disarm,
	kaif_wapt_validate,
	kaif_wapt_reserve,
	kaif_wapt_release,
	kaif_wapt_arm,
	kaif_wapt_disarm,
	kaif_wapt_match,
	kaif_step,
	kaif_step_branch,
	kaif_call,
	kaif_dump_crumbs,
	kaif_memrange_add,
	kaif_msr_add,
	kaif_msr_get,
};
931